@@ -104,6 +104,19 @@ bool gem_scheduler_has_preemption(int fd)
LOCAL_I915_SCHEDULER_CAP_PREEMPTION;
}
+/**
+ * gem_scheduler_has_frequency_control:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether the driver supports controlling the
+ * GPU frequency for individual contexts and requests.
+ */
+bool gem_scheduler_has_frequency_control(int fd)
+{
+ return gem_scheduler_capability(fd) &
+ LOCAL_I915_SCHEDULER_CAP_FREQUENCY;
+}
+
/**
* gem_scheduler_print_capability:
* @fd: open i915 drm file descriptor
@@ -122,4 +135,6 @@ void gem_scheduler_print_capability(int fd)
igt_info(" - With priority sorting\n");
if (caps & LOCAL_I915_SCHEDULER_CAP_PREEMPTION)
igt_info(" - With preemption enabled\n");
+ if (caps & LOCAL_I915_SCHEDULER_CAP_FREQUENCY)
+ igt_info(" - With frequency control\n");
}
@@ -27,11 +27,13 @@
#define LOCAL_I915_SCHEDULER_CAP_ENABLED (1 << 0)
#define LOCAL_I915_SCHEDULER_CAP_PRIORITY (1 << 1)
#define LOCAL_I915_SCHEDULER_CAP_PREEMPTION (1 << 2)
+#define LOCAL_I915_SCHEDULER_CAP_FREQUENCY (1 << 3)
unsigned gem_scheduler_capability(int fd);
bool gem_scheduler_enabled(int fd);
bool gem_scheduler_has_ctx_priority(int fd);
bool gem_scheduler_has_preemption(int fd);
+bool gem_scheduler_has_frequency_control(int fd);
void gem_scheduler_print_capability(int fd);
#endif /* GEM_SCHEDULER_H */
@@ -104,6 +104,7 @@ drm_import_export_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
drm_import_export_LDADD = $(LDADD) -lpthread
gem_close_race_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
gem_close_race_LDADD = $(LDADD) -lpthread
+gem_ctx_freq_LDADD = $(LDADD) $(top_builddir)/lib/libigt_perf.la
gem_ctx_thrash_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
gem_ctx_thrash_LDADD = $(LDADD) -lpthread
gem_exec_parallel_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
@@ -58,6 +58,7 @@ TESTS_progs = \
gem_ctx_bad_exec \
gem_ctx_create \
gem_ctx_exec \
+ gem_ctx_freq \
gem_ctx_isolation \
gem_ctx_param \
gem_ctx_switch \
new file mode 100644
@@ -0,0 +1,953 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include "igt.h"
+#include "igt_perf.h"
+#include "igt_sysfs.h"
+#include "sw_sync.h"
+
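+/*
+ * A sketch of the assumed uAPI encoding: the context param value packs the
+ * requested range into a single u64, min in the low 32 bits and max in the
+ * high 32 bits (see __set_freq()/get_freq() below).
+ */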
+#define LOCAL_CONTEXT_PARAM_FREQUENCY 8
+
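+/*
+ * Sample the PMU for 100ms per measurement and allow ±100MHz of slack when
+ * comparing the measured frequency against the requested value.
+ */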
+#define SAMPLE_PERIOD (USEC_PER_SEC / 10)
+#define PMU_TOLERANCE 100
+
+static int sysfs = -1;
+
+static int __set_freq(int fd, uint32_t ctx, uint32_t min, uint32_t max)
+{
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = ctx,
+ .param = LOCAL_CONTEXT_PARAM_FREQUENCY,
+ .value = (uint64_t)max << 32 | min,
+ };
+
+ return __gem_context_set_param(fd, &param);
+}
+
+static void set_freq(int fd, uint32_t ctx, uint32_t min, uint32_t max)
+{
+ igt_assert_eq(__set_freq(fd, ctx, min, max), 0);
+}
+
+static void get_freq(int fd, uint32_t ctx, uint32_t *min, uint32_t *max)
+{
+ struct drm_i915_gem_context_param param = {
+ .ctx_id = ctx,
+ .param = LOCAL_CONTEXT_PARAM_FREQUENCY,
+ };
+
+ gem_context_get_param(fd, &param);
+
+ *min = param.value & 0xffffffff;
+ *max = param.value >> 32;
+}
+
+static uint32_t set_single_freq(int fd, uint32_t ctx, uint32_t freq)
+{
+ uint32_t discard;
+
+ set_freq(fd, ctx, freq, freq);
+ get_freq(fd, ctx, &freq, &discard);
+ igt_assert_eq(freq, discard);
+
+ return freq;
+}
+
+static void kick_rps_worker(void)
+{
+ sched_yield();
+ usleep(SAMPLE_PERIOD);
+}
+
+static double measure_frequency(int pmu, int period_us)
+{
+ uint64_t data[2];
+ uint64_t d_t, d_v;
+
+ kick_rps_worker(); /* let the kthreads (intel_rps_work) run */
+
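+ /*
+ * Each read() returns a pair of u64s: the accumulated counter value and
+ * an elapsed-time field; the scaled ratio of their deltas over the
+ * sample period gives the average requested frequency in MHz.
+ */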
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+ d_v = -data[0];
+ d_t = -data[1];
+
+ usleep(period_us);
+
+ igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
+ d_v += data[0];
+ d_t += data[1];
+
+ return d_v * 1e9 / d_t;
+}
+
+static bool __pmu_within_tolerance(double actual, double target)
+{
+ return (actual > target - PMU_TOLERANCE &&
+ actual < target + PMU_TOLERANCE);
+}
+
+static void pmu_assert(double actual, double target)
+{
+ igt_assert_f(__pmu_within_tolerance(actual, target),
+ "Measured frequency %.2fMHz, is beyond target %.0f±%dMhz\n",
+ actual, target, PMU_TOLERANCE);
+}
+
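+/* End the spin batch and poll until the GPU has drained it. */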
+static void busy_wait_until_idle(int fd, igt_spin_t *spin)
+{
+ igt_spin_batch_end(spin);
+ do {
+ usleep(10000);
+ } while (gem_bo_busy(fd, spin->handle));
+}
+
+static void __igt_spin_batch_free_idle(int fd, igt_spin_t *spin)
+{
+ busy_wait_until_idle(fd, spin);
+
+ igt_spin_batch_free(fd, spin);
+}
+
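+/* Fill 2*nstep+1 steps, ramping linearly from min up to max and back to min. */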
+#define TRIANGLE_SIZE(x) (2 * (x) + 1)
+static void triangle_fill(uint32_t *t, unsigned int nstep,
+ uint32_t min, uint32_t max)
+{
+ for (unsigned int step = 0; step <= 2*nstep; step++) {
+ int frac = step > nstep ? 2*nstep - step : step;
+ t[step] = min + (max - min) * frac / nstep;
+ }
+}
+
+static void single(int fd, const struct intel_execution_engine *e)
+{
+#define N_STEPS 10
+ const unsigned int engine = e->exec_id | e->flags;
+ uint32_t ctx = gem_context_create(fd);
+ uint32_t frequencies[TRIANGLE_SIZE(N_STEPS)];
+ uint32_t min, max;
+ double measured;
+ igt_spin_t *spin;
+ int pmu;
+
+ /* Check that requests run at our context frequency. */
+
+ get_freq(fd, ctx, &min, &max);
+ igt_info("Min freq: %dMHz; Max freq: %dMHz\n", min, max);
+ triangle_fill(frequencies, N_STEPS, min, max);
+
+ pmu = perf_i915_open(I915_PMU_REQUESTED_FREQUENCY);
+ igt_require(pmu >= 0);
+
+ for (int pass = 0; pass < 3; pass++) {
+ for (int i = 0; i < ARRAY_SIZE(frequencies); i++) {
+ uint32_t freq = frequencies[i];
+
+ freq = set_single_freq(fd, ctx, freq);
+
+ gem_quiescent_gpu(fd);
+ spin = __igt_spin_batch_new(fd, ctx, engine, 0);
+
+ measured = measure_frequency(pmu, SAMPLE_PERIOD);
+ igt_debugfs_dump(fd, "i915_rps_boost_info");
+
+ __igt_spin_batch_free_idle(fd, spin);
+
+ igt_info("%s(%s): Measured %.1fMHz, expected %dMhz\n",
+ e->name, __func__, measured, freq);
+ pmu_assert(measured, freq);
+ }
+
+ igt_permute_array(frequencies,
+ ARRAY_SIZE(frequencies),
+ igt_exchange_int);
+ }
+ gem_quiescent_gpu(fd);
+
+ close(pmu);
+ gem_context_destroy(fd, ctx);
+
+#undef N_STEPS
+}
+
+static void continuous(int fd, const struct intel_execution_engine *e)
+{
+#define N_STEPS 10
+ const unsigned int engine = e->exec_id | e->flags;
+ uint32_t ctx = gem_context_create(fd);
+ uint32_t frequencies[TRIANGLE_SIZE(N_STEPS)];
+ uint32_t min, max;
+ double measured;
+ igt_spin_t *spin;
+ int pmu;
+
+ /*
+ * Check that we can adjust the frequency range of new requests
+ * on the fly, even if the context is already executing.
+ */
+
+ get_freq(fd, ctx, &min, &max);
+ igt_info("Min freq: %dMHz; Max freq: %dMHz\n", min, max);
+ triangle_fill(frequencies, N_STEPS, min, max);
+
+ pmu = perf_i915_open(I915_PMU_REQUESTED_FREQUENCY);
+ igt_require(pmu >= 0);
+
+ gem_quiescent_gpu(fd);
+ spin = __igt_spin_batch_new(fd, ctx, engine, 0);
+ for (int pass = 0; pass < 3; pass++) {
+ for (int i = 0; i < ARRAY_SIZE(frequencies); i++) {
+ uint32_t freq;
+ igt_spin_t *kick;
+
+ freq = set_single_freq(fd, ctx, frequencies[i]);
+
+ /*
+ * A new frequency requested for the currently executing
+ * context does not take effect until the next context
+ * switch, so trigger a lite restore to force one.
+ */
+ kick = __igt_spin_batch_new(fd, ctx, engine, 0);
+ igt_spin_batch_free(fd, spin);
+ spin = kick;
+
+ measured = measure_frequency(pmu, SAMPLE_PERIOD);
+ igt_debugfs_dump(fd, "i915_rps_boost_info");
+
+ igt_info("%s(continuous): Measured %.1fMHz, expected %dMhz\n",
+ e->name, measured, freq);
+ pmu_assert(measured, freq);
+ }
+
+ igt_permute_array(frequencies,
+ ARRAY_SIZE(frequencies),
+ igt_exchange_int);
+ }
+ igt_spin_batch_free(fd, spin);
+ gem_quiescent_gpu(fd);
+
+ close(pmu);
+ gem_context_destroy(fd, ctx);
+#undef N_STEPS
+}
+
+static void set_sysfs_freq(uint32_t min, uint32_t max)
+{
+ igt_sysfs_printf(sysfs, "gt_min_freq_mhz", "%u", min);
+ igt_sysfs_printf(sysfs, "gt_max_freq_mhz", "%u", max);
+}
+
+static void get_sysfs_freq(uint32_t *min, uint32_t *max)
+{
+ igt_sysfs_scanf(sysfs, "gt_min_freq_mhz", "%u", min);
+ igt_sysfs_scanf(sysfs, "gt_max_freq_mhz", "%u", max);
+}
+
+static void sysfs_clamp(int fd, const struct intel_execution_engine *e)
+{
+#define N_STEPS 10
+ const unsigned int engine = e->exec_id | e->flags;
+ uint32_t ctx = gem_context_create(fd);
+ uint32_t frequencies[TRIANGLE_SIZE(N_STEPS)];
+ uint32_t sys_min, sys_max;
+ uint32_t min, max;
+ double measured;
+ igt_spin_t *spin;
+ int pmu;
+
+ /*
+ * The sysfs interface sets the global limits and overrides the
+ * user's request. So we check that if the user requests a range
+ * outside of the sysfs limits, the requests only run within the
+ * constrained sysfs range.
+ */
+
+ get_sysfs_freq(&sys_min, &sys_max);
+ igt_info("System min freq: %dMHz; max freq: %dMHz\n", sys_min, sys_max);
+
+ get_freq(fd, ctx, &min, &max);
+ igt_info("Context min freq: %dMHz; max freq: %dMHz\n", min, max);
+ triangle_fill(frequencies, N_STEPS, min, max);
+
+ pmu = perf_i915_open(I915_PMU_REQUESTED_FREQUENCY);
+ igt_require(pmu >= 0);
+
+ for (int outer = 0; outer <= 2*N_STEPS; outer++) {
+ uint32_t sys_freq = frequencies[outer];
+
+ for (int inner = 0; inner <= 2*N_STEPS; inner++) {
+ uint32_t ctx_freq, cur, discard;
+
+ ctx_freq = set_single_freq(fd, ctx, frequencies[inner]);
+
+ gem_quiescent_gpu(fd);
+ spin = __igt_spin_batch_new(fd, ctx, engine, 0);
+ usleep(10000);
+
+ set_sysfs_freq(sys_freq, sys_freq);
+ get_sysfs_freq(&cur, &discard);
+
+ measured = measure_frequency(pmu, SAMPLE_PERIOD);
+ igt_debugfs_dump(fd, "i915_rps_boost_info");
+
+ set_sysfs_freq(sys_min, sys_max);
+ __igt_spin_batch_free_idle(fd, spin);
+
+ igt_info("%s(sysfs): Measured %.1fMHz, context %dMhz, expected %dMhz\n",
+ e->name, measured, ctx_freq, cur);
+ pmu_assert(measured, cur);
+ }
+ }
+ gem_quiescent_gpu(fd);
+
+ close(pmu);
+ gem_context_destroy(fd, ctx);
+
+#undef N_STEPS
+}
+
+static void sandwich_engine(int fd, unsigned int engine, int timeout)
+{
+ uint32_t ctx = gem_context_create(fd);
+ uint32_t min, max;
+ int pmu;
+
+ pmu = perf_i915_open(I915_PMU_REQUESTED_FREQUENCY);
+ igt_require(pmu >= 0);
+
+ get_freq(fd, ctx, &min, &max);
+
+ igt_until_timeout(timeout) {
+ uint32_t range[2];
+ igt_spin_t *spin;
+ double measured;
+
+ /* make sure we keep an overlap between all engines */
+ range[0] = min + (rand() % (max - min) / 2);
+ range[1] = max - (rand() % (max - min) / 2);
+
+ set_freq(fd, ctx, range[0], range[1]);
+ get_freq(fd, ctx, &range[0], &range[1]);
+
+ spin = __igt_spin_batch_new(fd, ctx, engine, 0);
+
+ measured = measure_frequency(pmu, SAMPLE_PERIOD);
+ igt_spin_batch_free(fd, spin);
+
+ igt_assert(measured >= range[0] - PMU_TOLERANCE &&
+ measured <= range[1] + PMU_TOLERANCE);
+ }
+
+ gem_context_destroy(fd, ctx);
+ close(pmu);
+}
+
+static void sandwich(int fd, int timeout)
+{
+ unsigned int engine;
+
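+ /*
+ * Fork a child per engine, each repeatedly picking a random frequency
+ * range for its own context (all ranges overlap around the midpoint)
+ * and checking that the measured frequency stays within that range.
+ */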
+ for_each_physical_engine(fd, engine) {
+ igt_fork(child, 1)
+ sandwich_engine(fd, engine, timeout);
+ }
+
+ igt_waitchildren();
+ gem_quiescent_gpu(fd);
+}
+
+static void inheritance(int fd)
+{
+ const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct drm_i915_gem_exec_object2 obj[2] = {
+ { .flags = EXEC_OBJECT_WRITE, },
+ { .handle = gem_create(fd, 4096) }
+ };
+ uint32_t ctx_lo, ctx_hi;
+ uint32_t min, max;
+ unsigned int engine;
+ IGT_CORK_FENCE(cork);
+ int pmu;
+
+ /*
+ * We support "priority inheritance" across requests. So if a later
+ * higher priority request is submitted depending on some earlier
+ * requests, those earlier requests are bumped to the new priority
+ * and rescheduled. They also inherit the extended frequency range
+ * of the higher priority request.
+ */
+
+ igt_require_sw_sync();
+ get_freq(fd, 0, &min, &max);
+
+ ctx_lo = gem_context_create(fd);
+ gem_context_set_priority(fd, ctx_lo, I915_CONTEXT_MIN_USER_PRIORITY);
+ set_freq(fd, ctx_lo, min, min);
+
+ ctx_hi = gem_context_create(fd);
+ gem_context_set_priority(fd, ctx_hi, I915_CONTEXT_MAX_USER_PRIORITY);
+ set_freq(fd, ctx_hi, max, max);
+
+ gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
+
+ pmu = perf_i915_open(I915_PMU_REQUESTED_FREQUENCY);
+ igt_require(pmu >= 0);
+
+ gem_quiescent_gpu(fd);
+ for_each_physical_engine(fd, engine) {
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffer_count = 2,
+ .buffers_ptr = to_user_pointer(obj),
+ .flags = engine | I915_EXEC_FENCE_IN,
+ .rsvd1 = ctx_hi,
+ .rsvd2 = igt_cork_plug(&cork, fd),
+ };
+ igt_spin_t *spin = igt_spin_batch_new(fd, ctx_lo, engine, 0);
+ double measured;
+
+ measured = measure_frequency(pmu, SAMPLE_PERIOD);
+
+ igt_info("%s/lo: Measured %.1fMHz, expected %dMhz\n",
+ e__->name, measured, min);
+ pmu_assert(measured, min);
+
+ /* Add a dependency from hi to lo. */
+ obj[0].handle = spin->handle;
+
+ /* Submit high priority work, but don't let it run */
+ gem_execbuf(fd, &eb);
+
+ measured = measure_frequency(pmu, SAMPLE_PERIOD);
+
+ igt_info("%s/hi: Measured %.1fMHz, expected %dMhz\n",
+ e__->name, measured, max);
+ igt_debugfs_dump(fd, "i915_engine_info");
+ igt_debugfs_dump(fd, "i915_rps_boost_info");
+
+ igt_spin_batch_free(fd, spin);
+ igt_cork_unplug(&cork);
+
+ pmu_assert(measured, max);
+ gem_quiescent_gpu(fd);
+ }
+
+ /* Also works between engines */
+ for_each_physical_engine(fd, engine) {
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffer_count = 2,
+ .buffers_ptr = to_user_pointer(obj),
+ .flags = I915_EXEC_DEFAULT | I915_EXEC_FENCE_IN,
+ .rsvd1 = ctx_hi,
+ .rsvd2 = igt_cork_plug(&cork, fd),
+ };
+ igt_spin_t *spin = igt_spin_batch_new(fd, ctx_lo, engine, 0);
+ double measured;
+
+ obj[0].handle = spin->handle;
+ gem_execbuf(fd, &eb);
+
+ measured = measure_frequency(pmu, SAMPLE_PERIOD);
+
+ igt_info("%s-default: Measured %.1fMHz, expected %dMhz\n",
+ e__->name, measured, max);
+ igt_debugfs_dump(fd, "i915_engine_info");
+ igt_debugfs_dump(fd, "i915_rps_boost_info");
+
+ igt_spin_batch_free(fd, spin);
+ igt_cork_unplug(&cork);
+
+ pmu_assert(measured, max);
+ gem_quiescent_gpu(fd);
+ }
+
+ gem_close(fd, obj[1].handle);
+ close(pmu);
+}
+
+static void pwm(int fd, unsigned int *engines, unsigned int nengine, int link)
+{
+ uint32_t ctx[nengine];
+
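+ /*
+ * Child: apply a pulsed (~20% duty cycle) load to the engines while
+ * honouring the per-context frequency ranges sent by the parent over
+ * the pipe.
+ */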
+ fcntl(link, F_SETFL, fcntl(link, F_GETFL) | O_NONBLOCK);
+
+ for (unsigned int n = 0; n < nengine; n++)
+ ctx[n] = gem_context_create(fd);
+
+ do {
+ igt_spin_t *spin;
+ struct {
+ uint32_t engine;
+ uint32_t min;
+ uint32_t max;
+ } req;
+
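+ /* Apply any pending updates; an all-zeroes request means stop. */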
+ while (read(link, &req, sizeof(req)) > 0) {
+ if ((req.engine | req.min | req.max) == 0)
+ goto out;
+
+ igt_assert(req.engine < nengine);
+ set_freq(fd, ctx[req.engine], req.min, req.max);
+ }
+
+ /* Create a ~20% load on each engine using busy spinners */
+ spin = __igt_spin_batch_new(fd, ctx[0], engines[0], 0);
+ for (unsigned int n = 1; n < nengine; n++) {
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = spin->handle,
+ };
+ struct drm_i915_gem_execbuffer2 eb = {
+ .buffer_count = 1,
+ .buffers_ptr = to_user_pointer(&obj),
+ .flags = engines[n],
+ .rsvd1 = ctx[n],
+ };
+ gem_execbuf(fd, &eb);
+ }
+ usleep(100);
+
+ __igt_spin_batch_free_idle(fd, spin);
+
+ usleep(400);
+ } while (1);
+
+out:
+ for (unsigned int n = 0; n < nengine; n++)
+ gem_context_destroy(fd, ctx[n]);
+}
+
+static void smoketest(int fd, int timeout)
+{
+ unsigned int engines[16];
+ unsigned int nengine;
+ unsigned int engine;
+ uint32_t min[16], max[16];
+ int pmu, link[2];
+
+ /*
+ * Basic stress testing (smoketesting).
+ *
+ * Apply a random workload that lets the frequency oscillate, giving
+ * plenty of opportunity for RPS to perform min/max evaluations
+ * while we search for a case where it breaks.
+ */
+
+ get_freq(fd, 0, &min[0], &max[0]);
+
+ nengine = 0;
+ for_each_physical_engine(fd, engine) {
+ if (nengine == ARRAY_SIZE(engines) - 1)
+ break;
+
+ min[nengine] = min[0];
+ max[nengine] = max[0];
+ engines[nengine] = engine;
+ nengine++;
+ }
+ igt_require(nengine);
+
+ igt_assert(pipe(link) == 0);
+ igt_fork(child, 1)
+ pwm(fd, engines, nengine, link[0]);
+ close(link[0]);
+
+ pmu = perf_i915_open(I915_PMU_REQUESTED_FREQUENCY);
+ igt_require(pmu >= 0);
+
+ igt_until_timeout(timeout) {
+ struct {
+ uint32_t engine;
+ uint32_t min;
+ uint32_t max;
+ } req;
+ double measured;
+ uint32_t ctx;
+
+ req.engine = rand() % nengine;
+
+ /*
+ * Since we apply a different range to each engine, we
+ * want to ensure that all ranges are conjoint (overlap).
+ * This allows us to apply the simple rule that in order
+ * to satisfy the independent contexts running on each
+ * engine, the frequency must be within the intersection
+ * of all ranges. If that intersection is empty, we leave
+ * it up to the kernel to decide (and do not want to imply
+ * any contract to the user, so ignore it).
+ */
+ ctx = gem_context_create(fd);
+ get_freq(fd, ctx, &req.min, &req.max);
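+ /* Pick a random sub-range within the context's current limits. */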
+ req.min = rand() % (req.max - req.min) + req.min;
+ req.max = rand() % (req.max - req.min) + req.min;
+ set_freq(fd, ctx, req.min, req.max);
+ get_freq(fd, ctx, &req.min, &req.max);
+
+ igt_debug("Replacing (%d, %d) on engine %x with (%d, %d)\n",
+ min[req.engine], max[req.engine], req.engine,
+ req.min, req.max);
+ igt_assert(write(link[1], &req, sizeof(req)) == sizeof(req));
+ gem_context_destroy(fd, ctx);
+
+ min[req.engine] = req.min;
+ max[req.engine] = req.max;
+
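+ /* Widen req to the loosest bounds across all engines for the check. */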
+ for (unsigned int n = 0; n < nengine; n++) {
+ igt_debug("[%d]: [%d, %d]\n", n, min[n], max[n]);
+ if (min[n] < req.min)
+ req.min = min[n];
+ if (max[n] > req.max)
+ req.max = max[n];
+ }
+ igt_assert(req.max >= req.min);
+
+ usleep(50000);
+ measured = measure_frequency(pmu, SAMPLE_PERIOD);
+
+ if (measured <= req.min - PMU_TOLERANCE ||
+ measured >= req.max + PMU_TOLERANCE)
+ igt_debugfs_dump(fd, "i915_rps_boost_info");
+ igt_info("Measured %.1fMHz, expected [%d, %d]Mhz\n",
+ measured, req.min, req.max);
+ igt_assert(measured > req.min - PMU_TOLERANCE &&
+ measured < req.max + PMU_TOLERANCE);
+ }
+
+ do {
+ struct {
+ uint32_t engine;
+ uint32_t min;
+ uint32_t max;
+ } req = {};
+
+ write(link[1], &req, sizeof(req));
+ close(link[1]);
+ } while (0);
+ igt_waitchildren();
+ gem_quiescent_gpu(fd);
+
+ close(pmu);
+}
+
+static void invalid_context(int fd, uint32_t ctx, uint32_t min, uint32_t max)
+{
+ const struct test {
+ uint32_t min, max;
+ } tests[] = {
+ { min - 50, max - 50 },
+ { min - 50, max },
+ { min - 50, max + 50 },
+ { min, max + 50 },
+ { min + 50, max + 50 },
+
+ { min - 50, min - 50 },
+
+ { min - 50, min },
+ { min + 50, min },
+ { min, min - 50 },
+
+ { max + 50, max },
+ { max, max + 50 },
+ { max, max - 50 },
+
+ { max + 50, max + 50 },
+
+ {}
+ };
+
+ for (const struct test *t = tests; t->min | t->max; t++) {
+ uint32_t cur_min, cur_max;
+
+ igt_assert_f(__set_freq(fd, ctx, t->min, t->max) == -EINVAL,
+ "Failed to reject invalid [%d, %d] (valid range [%d, %d]) on context %d\n",
+ t->min, t->max, min, max, ctx);
+
+ get_freq(fd, 0, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, max);
+ }
+}
+
+static void uabi_invalid(int fd)
+{
+ uint32_t min, max, ctx;
+
+ /* Minimal negative testing for rejection of invalid parameters */
+
+ get_freq(fd, 0, &min, &max);
+
+ invalid_context(fd, 0, min, max);
+
+ ctx = gem_context_create(fd);
+ invalid_context(fd, ctx, min, max);
+ gem_context_destroy(fd, ctx);
+}
+
+static void idempotent_context(int fd, uint32_t ctx)
+{
+ uint32_t min, max;
+ uint32_t cur_min, cur_max;
+ uint32_t *all, count;
+
+ get_freq(fd, ctx, &min, &max);
+
+ set_freq(fd, ctx, max, max);
+ get_freq(fd, ctx, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, max);
+ igt_assert_eq(cur_max, max);
+
+ set_freq(fd, ctx, min, min);
+ get_freq(fd, ctx, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, min);
+
+ /* Bin each frequency in range and check that each is reported back */
+ count = max - min + 1;
+ all = malloc(sizeof(uint32_t) * count);
+ igt_assert(all);
+ for (uint32_t n = 0; n < count; n++) {
+ set_freq(fd, ctx, n + min, n + min);
+ get_freq(fd, ctx, &cur_min, &cur_max);
+ all[n] = cur_min;
+ }
+ igt_permute_array(all, count, igt_exchange_int);
+ for (uint32_t n = 0; n < count; n++) {
+ set_freq(fd, ctx, min, all[n]);
+ get_freq(fd, ctx, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, all[n]);
+
+ set_freq(fd, ctx, all[n], max);
+ get_freq(fd, ctx, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, all[n]);
+ igt_assert_eq(cur_max, max);
+
+ set_freq(fd, ctx, all[n], all[n]);
+ get_freq(fd, ctx, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, all[n]);
+ igt_assert_eq(cur_max, all[n]);
+ }
+
+ set_freq(fd, ctx, min, max);
+ get_freq(fd, ctx, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, max);
+}
+
+static void uabi_idempotent(int fd)
+{
+ uint32_t ctx;
+
+ /*
+ * A general principle for an API of least surprise is that the
+ * user receives back the same value they put in. We apply this
+ * principle to I915_CONTEXT_PARAM_FREQUENCY, modulo rounding to
+ * the nearest bin.
+ */
+
+ idempotent_context(fd, 0);
+
+ ctx = gem_context_create(fd);
+ idempotent_context(fd, ctx);
+ gem_context_destroy(fd, ctx);
+}
+
+static void range_context(int fd, uint32_t ctx)
+{
+ uint32_t min, max;
+ uint32_t cur_min, cur_max;
+
+ get_freq(fd, ctx, &min, &max);
+
+ for (uint32_t freq = min; freq <= max; freq++) {
+ set_freq(fd, ctx, freq, freq);
+ get_freq(fd, ctx, &cur_min, &cur_max);
+
+ igt_assert(cur_min >= min);
+ igt_assert(cur_max <= max);
+ }
+
+ set_freq(fd, ctx, min, max);
+ get_freq(fd, ctx, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, max);
+}
+
+static void uabi_range(int fd)
+{
+ uint32_t ctx;
+
+ /* Check that all reported frequencies are within the min/max range */
+
+ range_context(fd, 0);
+
+ ctx = gem_context_create(fd);
+ range_context(fd, ctx);
+ gem_context_destroy(fd, ctx);
+}
+
+static void uabi_independent(int fd)
+{
+ uint32_t min, max;
+ uint32_t cur_min, cur_max;
+ uint32_t ctx[2];
+
+ /*
+ * Newly created contexts should not derive their limits from older
+ * contexts, but should always be created with the same full range.
+ */
+
+ get_freq(fd, 0, &min, &max);
+
+ set_freq(fd, 0, max, max);
+ ctx[0] = gem_context_create(fd);
+ get_freq(fd, ctx[0], &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, max);
+
+ set_freq(fd, 0, min, min);
+ get_freq(fd, ctx[0], &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, max);
+
+ ctx[1] = gem_context_create(fd);
+ get_freq(fd, ctx[1], &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, max);
+
+ set_freq(fd, ctx[1], max, max);
+ get_freq(fd, ctx[0], &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, max);
+
+ get_freq(fd, 0, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, min);
+
+ get_freq(fd, ctx[1], &cur_min, &cur_max);
+ igt_assert_eq(cur_min, max);
+ igt_assert_eq(cur_max, max);
+ gem_context_destroy(fd, ctx[1]);
+
+ get_freq(fd, ctx[0], &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, max);
+ gem_context_destroy(fd, ctx[0]);
+
+ set_freq(fd, 0, min, max);
+ get_freq(fd, 0, &cur_min, &cur_max);
+ igt_assert_eq(cur_min, min);
+ igt_assert_eq(cur_max, max);
+}
+
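+/* Exit handler: reset the sysfs frequency controls to the full RPn..RP0 range. */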
+static void restore_sysfs_freq(int sig)
+{
+ char buf[256];
+
+ if (igt_sysfs_read(sysfs, "gt_RPn_freq_mhz", buf, sizeof(buf)) > 0) {
+ igt_sysfs_set(sysfs, "gt_idle_freq_mhz", buf);
+ igt_sysfs_set(sysfs, "gt_min_freq_mhz", buf);
+ }
+
+ if (igt_sysfs_read(sysfs, "gt_RP0_freq_mhz", buf, sizeof(buf)) > 0) {
+ igt_sysfs_set(sysfs, "gt_max_freq_mhz", buf);
+ igt_sysfs_set(sysfs, "gt_boost_freq_mhz", buf);
+ }
+}
+
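+/*
+ * Open up the full RPn..RP0 range via sysfs and clamp the boost frequency to
+ * RPn so that waitboosting cannot perturb the measurements.
+ */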
+static void disable_boost(int fd)
+{
+ char *value;
+
+ value = igt_sysfs_get(fd, "gt_RPn_freq_mhz");
+ igt_sysfs_set(fd, "gt_min_freq_mhz", value);
+ igt_sysfs_set(fd, "gt_boost_freq_mhz", value);
+ free(value);
+
+ value = igt_sysfs_get(fd, "gt_RP0_freq_mhz");
+ igt_sysfs_set(fd, "gt_max_freq_mhz", value);
+ free(value);
+}
+
+igt_main
+{
+ const struct intel_execution_engine *e;
+ int fd = -1;
+
+ igt_fixture {
+ fd = drm_open_driver(DRIVER_INTEL);
+ igt_require_gem(fd);
+ igt_require(gem_scheduler_has_frequency_control(fd));
+
+ sysfs = igt_sysfs_open(fd, NULL);
+ igt_assert(sysfs != -1);
+ igt_install_exit_handler(restore_sysfs_freq);
+
+ disable_boost(sysfs);
+ }
+
+ igt_subtest("uabi-invalid")
+ uabi_invalid(fd);
+
+ igt_subtest("uabi-range")
+ uabi_range(fd);
+
+ igt_subtest("uabi-idempotent")
+ uabi_idempotent(fd);
+
+ igt_subtest("uabi-independent")
+ uabi_independent(fd);
+
+ igt_skip_on_simulation();
+
+ for (e = intel_execution_engines; e->name; e++) {
+ igt_subtest_group {
+ igt_fixture {
+ gem_require_ring(fd, e->exec_id | e->flags);
+ }
+
+ igt_subtest_f("%s-single", e->name)
+ single(fd, e);
+ igt_subtest_f("%s-continuous", e->name)
+ continuous(fd, e);
+ igt_subtest_f("%s-sysfs", e->name)
+ sysfs_clamp(fd, e);
+ }
+ }
+
+ igt_subtest("sandwich")
+ sandwich(fd, 20);
+
+ igt_subtest("inheritance")
+ inheritance(fd);
+
+ igt_subtest("smoketest") {
+ igt_fork_hang_detector(fd);
+ smoketest(fd, 20);
+ igt_stop_hang_detector();
+ }
+}
@@ -1,3 +1,9 @@
+# these test programs are linked with lib_igt_perf
+perf_test_progs = [
+ 'gem_ctx_freq',
+ 'perf_pmu',
+]
+
test_progs = [
'core_auth',
'core_get_client_auth',
@@ -35,6 +41,7 @@ test_progs = [
'gem_ctx_bad_exec',
'gem_ctx_create',
'gem_ctx_exec',
+ 'gem_ctx_freq',
'gem_ctx_isolation',
'gem_ctx_param',
'gem_ctx_switch',
@@ -295,12 +302,14 @@ test_executables += executable('gem_eio', 'gem_eio.c',
install : true)
test_progs += 'gem_eio'
-test_executables += executable('perf_pmu', 'perf_pmu.c',
- dependencies : test_deps + [ lib_igt_perf ],
- install_dir : libexecdir,
- install_rpath : rpathdir,
- install : true)
-test_progs += 'perf_pmu'
+foreach prog : perf_test_progs
+ test_executables += executable(prog, prog + '.c',
+ dependencies : test_deps + [ lib_igt_perf ],
+ install_dir : libexecdir,
+ install_rpath : rpathdir,
+ install : true)
+ test_progs += prog
+endforeach
executable('testdisplay', ['testdisplay.c', 'testdisplay_hotplug.c'],
dependencies : test_deps,