Message ID | 20231012081547.852052-4-tvrtko.ursulin@linux.intel.com (mailing list archive)
---|---
State | New, archived
Series | Client memory fdinfo test and intel_gpu_top support
Hi Tvrtko,

On 2023-10-12 at 09:15:41 +0100, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>
> A few basic smoke tests to check per client memory info looks legit.
>
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Reviewed-by: Kamil Konieczny <kamil.konieczny@linux.intel.com>

> ---
>  tests/intel/drm_fdinfo.c | 217 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 217 insertions(+)
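For context, the counters the new test_memory() asserts on come from the per-client memory keys in /proc/<pid>/fdinfo/<fd>, following the DRM client usage stats format. A sketch of what the parser sees for the system0 region (key names per the drm-usage-stats documentation; region names match the region_map in the patch; values are purely illustrative):

    drm-driver:             i915
    drm-client-id:          42
    drm-total-system0:      8192 KiB
    drm-shared-system0:     0
    drm-resident-system0:   8192 KiB
    drm-purgeable-system0:  0
    drm-active-system0:     4096 KiB

The >= checks in fdinfo_assert_gte() exist because driver-internal objects accounted to the same client can appear in or drop out of these totals between samples, as the comments in the patch note.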
diff --git a/tests/intel/drm_fdinfo.c b/tests/intel/drm_fdinfo.c
index c4218b0d16e6..3ced658f2faa 100644
--- a/tests/intel/drm_fdinfo.c
+++ b/tests/intel/drm_fdinfo.c
@@ -23,6 +23,7 @@
  */
 
 #include <fcntl.h>
+#include <sys/ioctl.h>
 
 #include "igt.h"
 #include "igt_core.h"
@@ -76,6 +77,16 @@
  *
  * SUBTEST: virtual-idle
  *
+ * SUBTEST: memory-info-idle
+ *
+ * SUBTEST: memory-info-active
+ *
+ * SUBTEST: memory-info-resident
+ *
+ * SUBTEST: memory-info-purgeable
+ *
+ * SUBTEST: memory-info-shared
+ *
  * SUBTEST: context-close-stress
  */
 
@@ -143,6 +154,11 @@ static unsigned int measured_usleep(unsigned int usec)
 #define FLAG_HANG (8)
 #define TEST_ISOLATION (16)
 
+#define TEST_ACTIVE TEST_BUSY
+#define TEST_RESIDENT (32)
+#define TEST_PURGEABLE (64)
+#define TEST_SHARED (128)
+
 static void end_spin(int fd, igt_spin_t *spin, unsigned int flags)
 {
         if (!spin)
@@ -772,6 +788,172 @@ static void stress_context_close(int i915)
         igt_stop_helper(&reader);
 }
 
+static size_t read_fdinfo(char *buf, const size_t sz, int at, const char *name)
+{
+        size_t count;
+        int fd;
+
+        fd = openat(at, name, O_RDONLY);
+        if (fd < 0)
+                return 0;
+
+        count = read(fd, buf, sz - 1);
+        if (count > 0)
+                buf[count - 1] = 0;
+        close(fd);
+
+        return count > 0 ? count : 0;
+}
+
+/*
+ * At least this much, but maybe less if we started with a driver internal
+ * baseline which can go away behind our back.
+ */
+#define fdinfo_assert_gte(cur, prev, sz, base) \
+({ \
+        int64_t __sz = (sz) - (base); \
+        int64_t __d = (cur) - (prev); \
+        igt_assert_f(__d >= __sz, \
+                     "prev=%"PRIu64" cur=%"PRIu64" delta=%"PRId64" sz=%"PRIu64" baseline=%"PRIu64"\n%s\n", \
+                     (prev), (cur), __d, (sz), (base), fdinfo_buf); \
+})
+
+#define fdinfo_assert_eq(cur, prev, sz, base) \
+({ \
+        int64_t __d = (cur) - (prev); \
+        igt_assert_f(__d == 0, \
+                     "prev=%"PRIu64" cur=%"PRIu64" delta=%"PRId64" sz=%"PRIu64" baseline=%"PRIu64"\n%s\n", \
+                     (prev), (cur), __d, (sz), (base), fdinfo_buf); \
+})
+
+static void
+test_memory(int i915, struct gem_memory_region *mr, unsigned int flags)
+{
+        const unsigned int r = mr->ci.memory_class == I915_MEMORY_CLASS_SYSTEM ?
+                               0 : 1; /* See region map */
+        const uint64_t max_mem = 512ull * 1024 * 1024;
+        const uint64_t max_bo = 16ull * 1024 * 1024;
+        struct drm_client_fdinfo base_info, prev_info = { };
+        struct drm_client_fdinfo info = { };
+        char buf[64], fdinfo_buf[4096];
+        igt_spin_t *spin = NULL;
+        uint64_t total = 0, sz;
+        uint64_t ahnd;
+        int ret, dir;
+
+        i915 = drm_reopen_driver(i915);
+
+        ahnd = get_reloc_ahnd(i915, 0);
+
+        ret = snprintf(buf, sizeof(buf), "%u", i915);
+        igt_assert(ret > 0 && ret < sizeof(buf));
+
+        dir = open("/proc/self/fdinfo", O_DIRECTORY | O_RDONLY);
+        igt_assert_fd(dir);
+
+        gem_quiescent_gpu(i915);
+        ret = __igt_parse_drm_fdinfo(dir, buf, &info, NULL, 0, NULL, 0);
+        igt_assert(ret > 0);
+        igt_require(info.num_regions);
+        memcpy(&prev_info, &info, sizeof(info));
+        memcpy(&base_info, &info, sizeof(info));
+
+        while (total < max_mem) {
+                static const char *region_map[] = {
+                        "system0",
+                        "local0",
+                };
+                uint32_t bo;
+
+                sz = random() % max_bo;
+                ret = __gem_create_in_memory_region_list(i915, &bo, &sz, 0,
+                                                         &mr->ci, 1);
+                igt_assert_eq(ret, 0);
+                total += sz;
+
+                if (flags & (TEST_RESIDENT | TEST_PURGEABLE | TEST_ACTIVE))
+                        spin = igt_spin_new(i915,
+                                            .dependency = bo,
+                                            .ahnd = ahnd);
+                else
+                        spin = NULL;
+
+                if (flags & TEST_PURGEABLE) {
+                        gem_madvise(i915, bo, I915_MADV_DONTNEED);
+                        igt_spin_free(i915, spin);
+                        gem_quiescent_gpu(i915);
+                        spin = NULL;
+                }
+
+                if (flags & TEST_SHARED) {
+                        struct drm_gem_open open_struct;
+                        struct drm_gem_flink flink;
+
+                        flink.handle = bo;
+                        ret = ioctl(i915, DRM_IOCTL_GEM_FLINK, &flink);
+                        igt_assert_eq(ret, 0);
+
+                        open_struct.name = flink.name;
+                        ret = ioctl(i915, DRM_IOCTL_GEM_OPEN, &open_struct);
+                        igt_assert_eq(ret, 0);
+                        igt_assert(open_struct.handle != 0);
+                }
+
+                memset(&info, 0, sizeof(info));
+                ret = __igt_parse_drm_fdinfo(dir, buf, &info,
+                                             NULL, 0,
+                                             region_map, ARRAY_SIZE(region_map));
+                igt_assert(ret > 0);
+                igt_assert(info.num_regions);
+
+                read_fdinfo(fdinfo_buf, sizeof(fdinfo_buf), dir, buf);
+
+                /* >= to account for objects out of our control */
+                fdinfo_assert_gte(info.region_mem[r].total,
+                                  prev_info.region_mem[r].total,
+                                  sz,
+                                  base_info.region_mem[r].total);
+
+                if (flags & TEST_SHARED)
+                        fdinfo_assert_gte(info.region_mem[r].shared,
+                                          prev_info.region_mem[r].shared,
+                                          sz,
+                                          base_info.region_mem[r].shared);
+                else
+                        fdinfo_assert_eq(info.region_mem[r].shared,
+                                         prev_info.region_mem[r].shared,
+                                         sz,
+                                         base_info.region_mem[r].shared);
+
+                if (flags & (TEST_RESIDENT | TEST_PURGEABLE | TEST_ACTIVE))
+                        fdinfo_assert_gte(info.region_mem[r].resident,
+                                          (uint64_t)0, /* We can only be sure the current buffer is resident. */
+                                          sz,
+                                          (uint64_t)0);
+
+                if (flags & TEST_PURGEABLE)
+                        fdinfo_assert_gte(info.region_mem[r].purgeable,
+                                          (uint64_t)0, /* We can only be sure the current buffer is purgeable (subset of resident). */
+                                          sz,
+                                          (uint64_t)0);
+
+                if (flags & TEST_ACTIVE)
+                        fdinfo_assert_gte(info.region_mem[r].active,
+                                          (uint64_t)0, /* We can only be sure the current buffer is active. */
+                                          sz,
+                                          (uint64_t)0);
+
+                memcpy(&prev_info, &info, sizeof(info));
+
+                if (spin) {
+                        igt_spin_free(i915, spin);
+                        gem_quiescent_gpu(i915);
+                }
+        }
+
+        put_ahnd(ahnd);
+        close(i915);
+}
+
 #define test_each_engine(T, i915, ctx, e) \
         igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
                 igt_dynamic_f("%s", e->name)
@@ -903,6 +1085,41 @@ igt_main
         test_each_engine("isolation", i915, ctx, e)
                 single(i915, ctx, e, TEST_BUSY | TEST_ISOLATION);
 
+        igt_subtest_with_dynamic("memory-info-idle") {
+                for_each_memory_region(r, i915) {
+                        igt_dynamic_f("%s", r->name)
+                                test_memory(i915, r, 0);
+                }
+        }
+
+        igt_subtest_with_dynamic("memory-info-resident") {
+                for_each_memory_region(r, i915) {
+                        igt_dynamic_f("%s", r->name)
+                                test_memory(i915, r, TEST_RESIDENT);
+                }
+        }
+
+        igt_subtest_with_dynamic("memory-info-purgeable") {
+                for_each_memory_region(r, i915) {
+                        igt_dynamic_f("%s", r->name)
+                                test_memory(i915, r, TEST_PURGEABLE);
+                }
+        }
+
+        igt_subtest_with_dynamic("memory-info-active") {
+                for_each_memory_region(r, i915) {
+                        igt_dynamic_f("%s", r->name)
+                                test_memory(i915, r, TEST_ACTIVE);
+                }
+        }
+
+        igt_subtest_with_dynamic("memory-info-shared") {
+                for_each_memory_region(r, i915) {
+                        igt_dynamic_f("%s", r->name)
+                                test_memory(i915, r, TEST_SHARED);
+                }
+        }
+
         igt_subtest_group {
                 int newfd;
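As a usage sketch, the new subtests can be listed and run straight from the test binary once IGT is built; the build path below assumes a default meson build directory and is only illustrative:

    # list the memory-info subtests added by this patch
    ./build/tests/drm_fdinfo --list-subtests | grep memory-info
    # run one of them; each memory region present becomes a dynamic subtest
    ./build/tests/drm_fdinfo --run-subtest memory-info-idle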