@@ -980,6 +980,8 @@ struct i915_gem_mm {
spinlock_t object_stat_lock;
u64 object_memory;
u32 object_count;
+
+ size_t phys_mem_total;
};
#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
@@ -2313,6 +2313,17 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
*/
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->mm.madv = __I915_MADV_PURGED;
+
+ /*
+ * Mark the object as no longer having backing pages, as the
+ * physical space has been returned to the kernel
+ */
+ if (obj->has_backing_pages == 1) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ dev_priv->mm.phys_mem_total -= obj->base.size;
+ obj->has_backing_pages = 0;
+ }
obj->mm.pages = ERR_PTR(-EFAULT);
}
@@ -2600,6 +2611,13 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+ if (obj->has_backing_pages == 0) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
+ dev_priv->mm.phys_mem_total += obj->base.size;
+ obj->has_backing_pages = 1;
+ }
+
return 0;
err_sg:
@@ -4667,6 +4685,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
obj->mm.madv = I915_MADV_WILLNEED;
+ /*
+ * Mark the object as not having backing pages, as no allocation
+ * has been done for it yet
+ */
+ obj->has_backing_pages = 0;
INIT_LIST_HEAD(&obj->pid_info);
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
@@ -4858,6 +4881,13 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
+ if (!obj->stolen && (obj->has_backing_pages == 1)) {
+ struct drm_i915_private *dev_priv =
+ obj->base.dev->dev_private;
+
+ dev_priv->mm.phys_mem_total -= obj->base.size;
+ obj->has_backing_pages = 0;
+ }
reservation_object_fini(&obj->__builtin_resv);
drm_gem_object_release(&obj->base);
@@ -6318,6 +6348,8 @@ static int i915_obj_shared_count(struct drm_i915_gem_object *obj,
u64 nr_bytes =
i915_obj_get_shmem_pages_alloced(obj)*PAGE_SIZE;
+ if (obj->has_backing_pages)
+ stats->num_obj_allocated++;
if (obj->mm.madv == I915_MADV_DONTNEED) {
stats->num_obj_purgeable++;
if (nr_bytes != 0)
@@ -6325,7 +6357,6 @@ static int i915_obj_shared_count(struct drm_i915_gem_object *obj,
}
if (nr_bytes != 0) {
- stats->num_obj_allocated++;
if (obj_shared_count > 1) {
stats->phys_space_allocated_shared += nr_bytes;
stats->phys_space_shared_proportion +=
@@ -6416,7 +6447,7 @@ static int i915_gem_object_pid_order(int id, void *ptr, void *data)
struct drm_device *dev)
{
struct drm_file *file;
-
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct name_entry *entry, *next;
struct pid_stat_entry *pid_entry, *temp_entry;
struct pid_stat_entry *new_pid_entry, *new_temp_entry;
@@ -6519,7 +6550,8 @@ static int i915_gem_object_pid_order(int id, void *ptr, void *data)
err_printf(m,
"\t\t\t\t\t\t\t\t%13zdK\t%12zdK\tTotal\n",
total_shared_prop_space, total_priv_space);
-
+ err_printf(m, "\nTotal used GFX Shmem Physical space %8zdK\n",
+ dev_priv->mm.phys_mem_total/1024);
if (ret)
return ret;
if (m->bytes == 0 && m->err)
@@ -147,6 +147,7 @@ struct drm_i915_gem_object {
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
unsigned int cache_dirty:1;
+ unsigned int has_backing_pages:1;
/**
* @read_domains: Read memory domains.