@@ -42,7 +42,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
return -ENOMEM;
}
- flags = I915_ALLOC_MIN_PAGE_SIZE;
+ flags = I915_ALLOC_MIN_PAGE_SIZE | I915_ALLOC_MAX_SEGMENT_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;
@@ -72,6 +72,7 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
struct list_head *blocks)
{
unsigned int min_order = 0;
+ unsigned int max_order;
unsigned long n_pages;
GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
@@ -92,13 +93,28 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
n_pages = size >> ilog2(mem->mm.chunk_size);
+ /*
+ * If we are going to feed this into an sg list we should limit the
+ * block sizes such that we don't exceed i915_sg_segment_size().
+ */
+ if (flags & I915_ALLOC_MAX_SEGMENT_SIZE) {
+ unsigned int max_segment = i915_sg_segment_size();
+
+ if (GEM_WARN_ON(max_segment < mem->mm.chunk_size))
+ max_order = 0;
+ else
+ max_order = ilog2(max_segment) - ilog2(mem->mm.chunk_size);
+ } else {
+ max_order = mem->mm.max_order;
+ }
+
mutex_lock(&mem->mm_lock);
do {
struct i915_buddy_block *block;
unsigned int order;
- order = fls(n_pages) - 1;
+ order = min_t(u32, fls(n_pages) - 1, max_order);
GEM_BUG_ON(order > mem->mm.max_order);
GEM_BUG_ON(order < min_order);
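To make the clamp concrete, here is a minimal standalone sketch (not driver code) of the max_order arithmetic, assuming a 4K mm.chunk_size and a max_segment of UINT_MAX rounded down to the page size, roughly what i915_sg_segment_size() yields when swiotlb is not in play; the constants are assumptions for illustration, not values read from the driver:

/* Illustration only: the order clamp computed with assumed constants. */
#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u64(uint64_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	const uint64_t chunk_size = 4096;	/* assumed mm.chunk_size */
	const uint64_t max_segment = 0xffffffffULL & ~0xfffULL;	/* ~4G, page-aligned */
	unsigned int max_order = ilog2_u64(max_segment) - ilog2_u64(chunk_size);

	/* ilog2(~4G) = 31 and ilog2(4K) = 12, so max_order = 19: the
	 * largest block handed out is 4K << 19 = 2G, safely below the
	 * unsigned int limit on an sg entry length. */
	printf("max_order = %u, largest block = %llu bytes\n",
	       max_order, (unsigned long long)(chunk_size << max_order));
	return 0;
}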
@@ -44,8 +44,9 @@ enum intel_region_id {
#define MEMORY_TYPE_FROM_REGION(r) (ilog2((r) >> INTEL_MEMORY_TYPE_SHIFT))
#define MEMORY_INSTANCE_FROM_REGION(r) (ilog2((r) & 0xffff))
-#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
-#define I915_ALLOC_CONTIGUOUS BIT(1)
+#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
+#define I915_ALLOC_CONTIGUOUS BIT(1)
+#define I915_ALLOC_MAX_SEGMENT_SIZE BIT(2)
#define for_each_memory_region(mr, i915, id) \
for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
@@ -337,6 +337,55 @@ static int igt_mock_splintered_region(void *arg)
return err;
}
+#define SZ_8G BIT_ULL(33)
+
+static int igt_mock_max_segment(void *arg)
+{
+ struct intel_memory_region *mem = arg;
+ struct drm_i915_private *i915 = mem->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_buddy_block *block;
+ LIST_HEAD(objects);
+ u64 size;
+ int err = 0;
+
+ /*
+ * The size of a block is only limited by the largest power-of-two that
+ * will fit in the region size, but to construct an object we also
+ * require feeding it into an sg list, where each sg entry can map at
+ * most UINT_MAX bytes, therefore when allocating with
+ * I915_ALLOC_MAX_SEGMENT_SIZE we shouldn't see blocks larger than
+ * i915_sg_segment_size().
+ */
+
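+ /*
+  * Note: an 8G region allows a single 8G buddy block, which without
+  * the max_order clamp would overflow the ~4G sg entry limit.
+  */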
+ size = SZ_8G;
+ mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ obj = igt_object_create(mem, &objects, size, 0);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_put;
+ }
+
+ list_for_each_entry(block, &obj->mm.blocks, link) {
+ if (i915_buddy_block_size(&mem->mm, block) > i915_sg_segment_size()) {
+ pr_err("%s found block size(%llu) larger than max sg_segment_size(%u)\n",
+ __func__,
+ i915_buddy_block_size(&mem->mm, block),
+ i915_sg_segment_size());
+ err = -EINVAL;
+ goto out_close;
+ }
+ }
+
+out_close:
+ close_objects(mem, &objects);
+out_put:
+ intel_memory_region_put(mem);
+ return err;
+}
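For context on where that bound bites: each buddy block is ultimately written out as scatterlist entries, whose length field is an unsigned int. Below is a hedged sketch of such a consumer, loosely modelled on the buddy get_pages path; the variable names and surrounding setup are assumptions for illustration, not verbatim driver code:

/* Sketch only: one buddy block becoming one sg entry. */
struct scatterlist *sg = st->sgl;
struct i915_buddy_block *block;

list_for_each_entry(block, blocks, link) {
	u64 block_size = i915_buddy_block_size(&mem->mm, block);
	u64 offset = i915_buddy_block_offset(block);

	/* sg_set_page() takes an unsigned int len, so a block larger than
	 * i915_sg_segment_size() would be silently truncated here; the
	 * I915_ALLOC_MAX_SEGMENT_SIZE clamp is what rules that out. */
	sg_set_page(sg, pfn_to_page(PFN_DOWN(mem->region.start + offset)),
		    block_size, 0);
	sg = __sg_next(sg);
}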
+
static int igt_gpu_write_dw(struct intel_context *ce,
struct i915_vma *vma,
u32 dword,
@@ -848,6 +897,7 @@ int intel_memory_region_mock_selftests(void)
SUBTEST(igt_mock_fill),
SUBTEST(igt_mock_contiguous),
SUBTEST(igt_mock_splintered_region),
+ SUBTEST(igt_mock_max_segment),
};
struct intel_memory_region *mem;
struct drm_i915_private *i915;