@@ -5,8 +5,10 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_buddy.h>
#include "i915_drv.h"
+#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
@@ -20,6 +22,7 @@
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
#define I915_TTM_PRIO_HAS_PAGES 2
+#define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3
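+/*
+ * Note: on eviction TTM scans the LRU lists starting from the lowest
+ * priority, so objects that might need CPU access through the mappable
+ * portion of LMEM are considered for eviction last.
+ */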
/*
* Size of struct ttm_place vector in on-stack struct ttm_placement allocs
@@ -337,6 +340,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct ttm_resource *res = bo->resource;
if (!obj)
return false;
@@ -350,7 +354,48 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
return false;
/* Will do for now. Our pinned objects are still on TTM's LRU lists */
- return i915_gem_object_evictable(obj);
+ if (!i915_gem_object_evictable(obj))
+ return false;
+
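+ /*
+ * Only LMEM can have a CPU-visible portion (the BAR) that is smaller
+ * than the region itself, so only for LMEM placements do we need to
+ * check whether evicting this victim actually frees space in the
+ * requested range.
+ */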
+ switch (res->mem_type) {
+ case I915_PL_LMEM0: {
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, res->mem_type);
+ struct i915_ttm_buddy_resource *bman_res =
+ to_ttm_buddy_resource(res);
+ struct drm_buddy *mm = bman_res->mm;
+ struct drm_buddy_block *block;
+
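+ /* No range restriction, so any LMEM victim frees usable space */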
+ if (!place->fpfn && !place->lpfn)
+ return true;
+
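+ /* A range-restricted placement must always supply an upper bound */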
+ GEM_BUG_ON(!place->lpfn);
+
+ /*
+ * If we just want something mappable then we can quickly check
+ * if the current victim resource is using any of the CPU
+ * visible portion.
+ */
+ if (!place->fpfn &&
+ place->lpfn == i915_ttm_buddy_man_visible_size(man))
+ return bman_res->used_visible_size > 0;
+
+ /*
+ * Real range allocation: walk the buddy blocks backing this
+ * resource and check for overlap with the requested page range.
+ */
+ list_for_each_entry(block, &bman_res->blocks, link) {
+ unsigned long fpfn =
+ drm_buddy_block_offset(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
+
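+ /* Half-open ranges [fpfn, lpfn) overlap iff each starts before the other ends */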
+ if (place->fpfn < lpfn && place->lpfn > fpfn)
+ return true;
+ }
+ return false;
+ }
+ default:
+ break;
+ }
+
+ return true;
}
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
@@ -850,7 +895,23 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
} else if (!i915_gem_object_has_pages(obj)) {
bo->priority = I915_TTM_PRIO_NO_PAGES;
} else {
- bo->priority = I915_TTM_PRIO_HAS_PAGES;
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
+
+ /*
+ * If we need to place an LMEM resource which doesn't need CPU
+ * access then we should try not to victimize mappable objects
+ * first, since we likely end up stealing more of the mappable
+ * portion. And likewise when we try to find space for a mappable
+ * object, we know not to ever victimize objects that don't
+ * occupy any mappable pages.
+ */
+ if (i915_ttm_cpu_maps_iomem(bo->resource) &&
+ i915_ttm_buddy_man_visible_size(man) < man->size &&
+ !(obj->flags & I915_BO_ALLOC_GPU_ONLY))
+ bo->priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS;
+ else
+ bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);