@@ -43,7 +43,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
return -ENOMEM;
}
- flags = I915_ALLOC_MIN_PAGE_SIZE | I915_ALLOC_MAX_SEGMENT_SIZE;
+ flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;
@@ -72,7 +72,6 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
struct list_head *blocks)
{
unsigned int min_order = 0;
- unsigned int max_order;
unsigned long n_pages;
GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
@@ -93,28 +92,13 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
n_pages = size >> ilog2(mem->mm.chunk_size);
- /*
- * If we going to feed this into an sg list we should limit the block
- * sizes such that we don't exceed the i915_sg_segment_size().
- */
- if (flags & I915_ALLOC_MAX_SEGMENT_SIZE) {
- unsigned int max_segment = i915_sg_segment_size();
-
- if (GEM_WARN_ON(max_segment < mem->mm.chunk_size))
- max_order = 0;
- else
- max_order = ilog2(max_segment) - ilog2(mem->mm.chunk_size);
- } else {
- max_order = mem->mm.max_order;
- }
-
mutex_lock(&mem->mm_lock);
do {
struct i915_buddy_block *block;
unsigned int order;
- order = min_t(u32, fls(n_pages) - 1, max_order);
+ order = fls(n_pages) - 1;
GEM_BUG_ON(order > mem->mm.max_order);
GEM_BUG_ON(order < min_order);
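With the max_order clamp gone, the allocator may now hand back blocks
larger than i915_sg_segment_size(), so honouring the scatterlist limit
becomes the job of the code that packs blocks into an sg table. A
minimal sketch of that per-block splitting, assuming a hypothetical
helper name (logic of this shape lives in
i915_gem_object_get_pages_buddy(), outside the hunks shown here):

#include <linux/scatterlist.h>

/*
 * Hypothetical helper: chop one buddy block into sg entries of at most
 * max_segment bytes each, so no single element exceeds what dma-mapping
 * can express. Illustrative only, not the driver's exact code.
 */
static struct scatterlist *
add_block_to_sg(struct sg_table *st, struct scatterlist *sg,
		u64 dma_addr, u64 block_size, unsigned int max_segment)
{
	while (block_size) {
		u64 len = min_t(u64, block_size, max_segment);

		sg_dma_address(sg) = dma_addr;	/* start of this chunk */
		sg_dma_len(sg) = len;
		sg->length = len;
		st->nents++;

		dma_addr += len;
		block_size -= len;
		if (block_size)			/* block not exhausted yet */
			sg = sg_next(sg);
	}

	return sg;
}

With max_segment effectively UINT_MAX rounded down to a page, an 8G
contiguous block ends up as a short run of just-under-4G entries, which
is exactly what the reworked selftest below walks and checks.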
@@ -44,9 +44,8 @@ enum intel_region_id {
#define MEMORY_TYPE_FROM_REGION(r) (ilog2((r) >> INTEL_MEMORY_TYPE_SHIFT))
#define MEMORY_INSTANCE_FROM_REGION(r) (ilog2((r) & 0xffff))
-#define I915_ALLOC_MIN_PAGE_SIZE    BIT(0)
-#define I915_ALLOC_CONTIGUOUS       BIT(1)
-#define I915_ALLOC_MAX_SEGMENT_SIZE BIT(2)
+#define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
+#define I915_ALLOC_CONTIGUOUS    BIT(1)
#define for_each_memory_region(mr, i915, id) \
for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
@@ -356,21 +356,21 @@ static int igt_mock_splintered_region(void *arg)
static int igt_mock_max_segment(void *arg)
{
+ const unsigned int max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = arg;
struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
struct i915_buddy_block *block;
+ struct scatterlist *sg;
LIST_HEAD(objects);
u64 size;
int err = 0;
/*
- * The size of block are only limited by the largest power-of-two that
- * will fit in the region size, but to construct an object we also
- * require feeding it into an sg list, where the upper limit of the sg
- * entry is at most UINT_MAX, therefore when allocating with
- * I915_ALLOC_MAX_SEGMENT_SIZE we shouldn't see blocks larger than
- * i915_sg_segment_size().
+ * While we may create very large contiguous blocks, we may need
+ * to break those down for consumption elsewhere. In particular,
+ * dma-mapping of scatterlist elements has an implicit limit of
+ * UINT_MAX on each element.
*/
size = SZ_8G;
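For reference, the cap the test compares against: at the time of this
patch, i915_sg_segment_size() (in i915_scatterlist.h) was approximately
the following; a sketch, assuming the contemporaneous swiotlb-aware
definition, i.e. UINT_MAX rounded down to a page unless swiotlb imposes
a smaller bounce-buffer segment:

static inline unsigned int i915_sg_segment_size(void)
{
	unsigned int size = swiotlb_max_segment();

	if (size == 0)
		size = UINT_MAX;

	size = rounddown(size, PAGE_SIZE);
	/* swiotlb_max_segment() may return 1 when it means one page */
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return size;
}

The test then expects every sg entry of the 8G object to respect that
cap, even though the buddy blocks backing it may be far larger.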
@@ -384,12 +384,23 @@ static int igt_mock_max_segment(void *arg)
goto out_put;
}
+ err = -EINVAL;
list_for_each_entry(block, &obj->mm.blocks, link) {
- if (i915_buddy_block_size(&mem->mm, block) > i915_sg_segment_size()) {
- pr_err("%s found block size(%llu) larger than max sg_segment_size(%u)",
- __func__,
- i915_buddy_block_size(&mem->mm, block),
- i915_sg_segment_size());
+ if (i915_buddy_block_size(&mem->mm, block) > max_segment) {
+ err = 0;
+ break;
+ }
+ }
+ if (err) {
+ pr_err("%s: Failed to create a huge contiguous block\n",
+ __func__);
+ goto out_close;
+ }
+
+ for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+ if (sg->length > max_segment) {
+ pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+ __func__, sg->length, max_segment);
err = -EINVAL;
goto out_close;
}