Currently we indirectly try to evict the test buffers by mmapping
enough buffer objects to fill the aperture. However, this assumes the
kernel tries to fill the aperture and uses neither random replacement
(which it does) nor small partial mappings that avoid faulting in the
whole object (which it also does). Rather than rely on those
assumptions, use the debugfs interface to force eviction of the bound
objects.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/gem_caching.c                    | 9 +--------
 tests/gem_partial_pwrite_pread.c       | 9 +--------
 tests/gem_tiled_partial_pwrite_pread.c | 9 +--------
 3 files changed, 3 insertions(+), 24 deletions(-)

diff --git a/tests/gem_caching.c b/tests/gem_caching.c
--- a/tests/gem_caching.c
+++ b/tests/gem_caching.c
@@ -55,7 +55,6 @@ drm_intel_bo *scratch_bo;
 drm_intel_bo *staging_bo;
 #define BO_SIZE (4*4096)
 uint32_t devid;
-uint64_t mappable_gtt_limit;
 int fd;
 
 static void
@@ -90,9 +89,7 @@ blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, uint8_t val)
 
 	drm_intel_gem_bo_unmap_gtt(tmp_bo);
 
-	if (bo->offset < mappable_gtt_limit &&
-	    (IS_G33(devid) || intel_gen(devid) >= 4))
-		igt_trash_aperture();
+	igt_drop_caches_set(fd, DROP_BOUND);
 
 	copy_bo(tmp_bo, bo);
 }
@@ -136,9 +133,6 @@ igt_main
 		gem_set_caching(fd, scratch_bo->handle, 1);
 
 		staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
-
-		igt_init_aperture_trashers(bufmgr);
-		mappable_gtt_limit = gem_mappable_aperture_size();
 	}
 
 	igt_subtest("reads") {
@@ -278,7 +272,6 @@ igt_main
 	}
 
 	igt_fixture {
-		igt_cleanup_aperture_trashers();
 		drm_intel_bufmgr_destroy(bufmgr);
 
 		close(fd);
diff --git a/tests/gem_partial_pwrite_pread.c b/tests/gem_partial_pwrite_pread.c
--- a/tests/gem_partial_pwrite_pread.c
+++ b/tests/gem_partial_pwrite_pread.c
@@ -56,7 +56,6 @@ drm_intel_bo *scratch_bo;
 drm_intel_bo *staging_bo;
 #define BO_SIZE (4*4096)
 uint32_t devid;
-uint64_t mappable_gtt_limit;
 int fd;
 
 static void
@@ -91,9 +90,7 @@ blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, uint8_t val)
 
 	drm_intel_gem_bo_unmap_gtt(tmp_bo);
 
-	if (bo->offset < mappable_gtt_limit &&
-	    (IS_G33(devid) || intel_gen(devid) >= 4))
-		igt_trash_aperture();
+	igt_drop_caches_set(fd, DROP_BOUND);
 
 	copy_bo(tmp_bo, bo);
 }
@@ -264,9 +261,6 @@ igt_main
 		/* overallocate the buffers we're actually using because */
 		scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
 		staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
-
-		igt_init_aperture_trashers(bufmgr);
-		mappable_gtt_limit = gem_mappable_aperture_size();
 	}
 
 	do_tests(-1, "");
@@ -277,7 +271,6 @@ igt_main
 	do_tests(2, "-display");
 
 	igt_fixture {
-		igt_cleanup_aperture_trashers();
 		drm_intel_bufmgr_destroy(bufmgr);
 
 		close(fd);
diff --git a/tests/gem_tiled_partial_pwrite_pread.c b/tests/gem_tiled_partial_pwrite_pread.c
--- a/tests/gem_tiled_partial_pwrite_pread.c
+++ b/tests/gem_tiled_partial_pwrite_pread.c
@@ -61,7 +61,6 @@ drm_intel_bo *tiled_staging_bo;
 unsigned long scratch_pitch;
 #define BO_SIZE (32*4096)
 uint32_t devid;
-uint64_t mappable_gtt_limit;
 int fd;
 
 static void
@@ -112,9 +111,7 @@ blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, int val)
 
 	drm_intel_gem_bo_unmap_gtt(tmp_bo);
 
-	if (bo->offset < mappable_gtt_limit &&
-	    (IS_G33(devid) || intel_gen(devid) >= 4))
-		igt_trash_aperture();
+	igt_drop_caches_set(fd, DROP_BOUND);
 
 	copy_bo(tmp_bo, 0, bo, 1);
 }
@@ -295,9 +292,6 @@ igt_main
 						      BO_SIZE/4096, 4,
 						      &tiling_mode,
 						      &scratch_pitch, 0);
-
-		igt_init_aperture_trashers(bufmgr);
-		mappable_gtt_limit = gem_mappable_aperture_size();
 	}
 
 	igt_subtest("reads")
@@ -310,7 +304,6 @@ igt_main
 		test_partial_read_writes();
 
 	igt_fixture {
-		igt_cleanup_aperture_trashers();
 		drm_intel_bufmgr_destroy(bufmgr);
 
 		close(fd);
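For readers unfamiliar with the debugfs knob used above: igt_drop_caches_set()
boils down to writing the requested flag mask into the i915_gem_drop_caches
file that the kernel exposes in debugfs. Below is a minimal sketch of that
mechanism, not the igt implementation itself — the debugfs path (card index 0),
the DROP_BOUND value, and the helper name drop_bound_objects() are assumptions
made for illustration; see the igt debugfs helpers for the authoritative code.

/*
 * Sketch: force eviction of bound-but-idle objects via debugfs,
 * approximating igt_drop_caches_set(fd, DROP_BOUND). The path below
 * assumes card index 0; igt derives it from the drm fd instead.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define DROP_BOUND 0x2 /* assumed value: evict objects bound into the GTT */

static int drop_bound_objects(void) /* hypothetical helper */
{
	char buf[16];
	int fd, len, ret;

	fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches", O_WRONLY);
	if (fd < 0)
		return -1;

	/* the kernel parses the mask from the written string */
	len = snprintf(buf, sizeof(buf), "%d", DROP_BOUND);
	ret = write(fd, buf, len) == len ? 0 : -1;
	close(fd);

	return ret;
}

The appeal of this approach is determinism: writing DROP_BOUND asks the kernel
to unbind the idle objects directly, instead of hoping that mmapping throwaway
buffers generates enough aperture pressure to evict the test bos as a side
effect.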