@@ -13,11 +13,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
{
obj->mm.region = intel_memory_region_get(mem);

- if (obj->base.size <= mem->min_page_size)
- obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
-
mutex_lock(&mem->objects.lock);
-
list_add(&obj->mm.region_link, &mem->objects.list);
mutex_unlock(&mem->objects.lock);
}
@@ -284,6 +284,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
bool needs_clflush)
{
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
+ GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));

if (obj->mm.madv == I915_MADV_DONTNEED)
obj->mm.dirty = false;
@@ -302,6 +303,7 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_
struct pagevec pvec;
struct page *page;

+ GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
__i915_gem_object_release_shmem(obj, pages, true);

i915_gem_gtt_finish_pages(obj, pages);
@@ -560,6 +562,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
resource_size_t offset;
int err;

+ GEM_WARN_ON(IS_DGFX(dev_priv));
obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;
@@ -286,6 +286,26 @@ static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
unsigned int cache_level;
+ unsigned int i;
+
+ /*
+ * If object was moved to an allowable region, update the object
+ * region to consider it migrated. Note that if it's currently not
+ * in an allowable region, it's evicted and we don't update the
+ * object region.
+ */
+ if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+ for (i = 0; i < obj->mm.n_placements; ++i) {
+ struct intel_memory_region *mr = obj->mm.placements[i];
+
+ if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+ mr != obj->mm.region) {
+ i915_gem_object_release_memory_region(obj);
+ i915_gem_object_init_memory_region(obj, mr);
+ break;
+ }
+ }
+ }

obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
@@ -615,13 +635,6 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
/* Move to the requested placement. */
i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

- /*
- * For now we support LMEM only with TTM.
- * TODO: Remove with system support
- */
- GEM_BUG_ON(requested.mem_type < I915_PL_LMEM0 ||
- busy[0].mem_type < I915_PL_LMEM0);
-
/* First try only the requested placement. No eviction. */
real_num_busy = fetch_and_zero(&placement.num_busy_placement);
ret = ttm_bo_validate(bo, &placement, &ctx);
@@ -635,9 +648,6 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
ret == -EAGAIN)
return ret;

- /* TODO: Remove this when we support system as TTM. */
- real_num_busy = 1;
-
/*
* If the initial attempt fails, allow all accepted placements,
* evicting if necessary.
@@ -873,3 +883,25 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
return 0;
}
+
+static const struct intel_memory_region_ops ttm_system_region_ops = {
+ .init_object = __i915_gem_ttm_object_init,
+};
+
+struct intel_memory_region *
+i915_gem_ttm_system_setup(struct drm_i915_private *i915,
+ u16 type, u16 instance)
+{
+ struct intel_memory_region *mr;
+
+ mr = intel_memory_region_create(i915, 0,
+ totalram_pages() << PAGE_SHIFT,
+ PAGE_SIZE, 0,
+ type, instance,
+ &ttm_system_region_ops);
+ if (IS_ERR(mr))
+ return mr;
+
+ intel_memory_region_set_name(mr, "system-ttm");
+ return mr;
+}
@@ -1751,9 +1751,6 @@ void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
void i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);

- struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
- u16 type, u16 instance);
-
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
/*
@@ -173,7 +173,12 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
instance = intel_region_map[i].instance;
switch (type) {
case INTEL_MEMORY_SYSTEM:
- mem = i915_gem_shmem_setup(i915, type, instance);
+ if (IS_DGFX(i915))
+ mem = i915_gem_ttm_system_setup(i915, type,
+ instance);
+ else
+ mem = i915_gem_shmem_setup(i915, type,
+ instance);
break;
case INTEL_MEMORY_STOLEN_LOCAL:
mem = i915_gem_stolen_lmem_setup(i915, type, instance);
@@ -125,4 +125,12 @@ intel_memory_region_set_name(struct intel_memory_region *mem,
int intel_memory_region_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size);
+
+struct intel_memory_region *
+i915_gem_ttm_system_setup(struct drm_i915_private *i915,
+ u16 type, u16 instance);
+struct intel_memory_region *
+i915_gem_shmem_setup(struct drm_i915_private *i915,
+ u16 type, u16 instance);
+
#endif