@@ -266,8 +266,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
gte = (gen8_pte_t __iomem *)ggtt->gsm;
gte += vma->node.start / I915_GTT_PAGE_SIZE;
- end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
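+ /* Fill the leading guard pages with pointers to the scratch page */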
+ end = gte + vma->guard / I915_GTT_PAGE_SIZE;
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0]->encode);
+
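+ /* end now spans the object pages plus the trailing guard pages */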
+ end += (vma->node.size - vma->guard) / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma->pages)
gen8_set_pte(gte++, pte_encode | addr);
GEM_BUG_ON(gte > end);
@@ -317,8 +321,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
gte = (gen6_pte_t __iomem *)ggtt->gsm;
gte += vma->node.start / I915_GTT_PAGE_SIZE;
- end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
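+ /* Scratch-fill the leading guard pages before writing the object's PTEs */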
+ end = gte + vma->guard / I915_GTT_PAGE_SIZE;
+ while (gte < end)
+ iowrite32(vm->scratch[0]->encode, gte++);
+
+ end += (vma->node.size - vma->guard) / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma->pages)
iowrite32(vm->pte_encode(addr, level, flags), gte++);
GEM_BUG_ON(gte > end);
@@ -41,6 +41,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_HIGH BIT_ULL(5)
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
+#define PIN_OFFSET_GUARD BIT_ULL(8)
#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
@@ -633,7 +633,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- unsigned long color;
+ unsigned long color, guard;
u64 start, end;
int ret;
@@ -641,7 +641,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
- alignment = max(alignment, vma->display_alignment);
+ alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
if (flags & PIN_MAPPABLE) {
size = max_t(typeof(size), size, vma->fence_size);
alignment = max_t(typeof(alignment),
@@ -652,6 +652,9 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(!is_power_of_2(alignment));
+ guard = vma->guard; /* retain guard across rebinds */
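+ /*
+ * A caller-requested minimum padding (PIN_OFFSET_GUARD) would be folded
+ * in at this point; a sketch, assuming the requested size travels in
+ * PIN_OFFSET_MASK like the other PIN_OFFSET_* flags:
+ *
+ *	if (flags & PIN_OFFSET_GUARD)
+ *		guard = max_t(typeof(guard), guard,
+ *			      flags & PIN_OFFSET_MASK);
+ *
+ * The node itself is aligned on insertion, but the object is placed at
+ * node.start + guard, so keep the guard a multiple of the alignment to
+ * preserve the object's own alignment.
+ */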
+ guard = ALIGN(guard, alignment);
+
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
@@ -661,12 +664,13 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(2 * guard > end);
/* If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
* attempt to find space.
*/
- if (size > end) {
+ if (size > end - 2 * guard) {
DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
size, flags & PIN_MAPPABLE ? "mappable" : "total",
end);
@@ -679,16 +683,29 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
+
if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end))
return -EINVAL;
+ /*
+ * The caller knows nothing of the guard pages added by others and
+ * asks for the start of its buffer to be placed at a fixed offset,
+ * which may therefore differ from the position of vma->node once
+ * the guard pages are accounted for.
+ */
+ if (offset < guard || offset + size > end - guard)
+ return -ENOSPC;
+
ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
- size, offset, color,
- flags);
+ size + 2 * guard,
+ offset - guard,
+ color, flags);
if (ret)
return ret;
} else {
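+ /* Expand the search size to fit a guard page on either side of the object */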
+ size += 2 * guard;
+
/*
* We only support huge gtt pages through the 48b PPGTT,
* however we also don't want to force any alignment for
@@ -735,6 +752,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
list_add_tail(&vma->vm_link, &vma->vm->bound_list);
+ vma->guard = guard;
return 0;
}
@@ -121,12 +121,13 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
static inline u64 i915_vma_size(const struct i915_vma *vma)
{
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- return vma->node.size;
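+ /* Exclude the guard pages allocated on either side of the object */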
+ return vma->node.size - 2 * vma->guard;
}
static inline u64 __i915_vma_offset(const struct i915_vma *vma)
{
- return vma->node.start;
+ /* The actual start of the vma->pages is after the guard pages. */
+ return vma->node.start + vma->guard;
}
static inline u64 i915_vma_offset(const struct i915_vma *vma)
@@ -196,14 +196,15 @@ struct i915_vma {
struct i915_fence_reg *fence;
u64 size;
- u64 display_alignment;
struct i915_page_sizes page_sizes;
/* mmap-offset associated with fencing for this vma */
struct i915_mmap_offset *mmo;
+ u32 guard; /* padding allocated around vma->pages within the node */
u32 fence_size;
u32 fence_alignment;
+ u32 display_alignment;
/**
* Count of the number of times this vma has been opened by different