From patchwork Fri Nov 8 22:30:23 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Chris Wilson X-Patchwork-Id: 11235655 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id CE7BA15AB for ; Fri, 8 Nov 2019 22:30:33 +0000 (UTC) Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id B755920865 for ; Fri, 8 Nov 2019 22:30:33 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org B755920865 Authentication-Results: mail.kernel.org; dmarc=none (p=none dis=none) header.from=chris-wilson.co.uk Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=intel-gfx-bounces@lists.freedesktop.org Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id 9AEB96E0D8; Fri, 8 Nov 2019 22:30:32 +0000 (UTC) X-Original-To: intel-gfx@lists.freedesktop.org Delivered-To: intel-gfx@lists.freedesktop.org Received: from fireflyinternet.com (mail.fireflyinternet.com [109.228.58.192]) by gabe.freedesktop.org (Postfix) with ESMTPS id 248476E0D8 for ; Fri, 8 Nov 2019 22:30:30 +0000 (UTC) X-Default-Received-SPF: pass (skip=forwardok (res=PASS)) x-ip-name=78.156.65.138; Received: from haswell.alporthouse.com (unverified [78.156.65.138]) by fireflyinternet.com (Firefly Internet (M1)) with ESMTP id 19135005-1500050 for multiple; Fri, 08 Nov 2019 22:30:25 +0000 From: Chris Wilson To: intel-gfx@lists.freedesktop.org Date: Fri, 8 Nov 2019 22:30:23 +0000 Message-Id: <20191108223023.8674-1-chris@chris-wilson.co.uk> X-Mailer: git-send-email 2.24.0 MIME-Version: 1.0 Subject: [Intel-gfx] [PATCH i-g-t] i915/gem_exec_schedule: Beware priority inversion from iova faults X-BeenThere: 
intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" Check that if two contexts (one high priority, one low) share the same buffer that has taken a page fault that we do not create an implicit dependency between the two contexts for servicing that page fault and binding the vma. Signed-off-by: Chris Wilson --- tests/i915/gem_exec_schedule.c | 154 ++++++++++++++++++++++++++++++++- 1 file changed, 151 insertions(+), 3 deletions(-) diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c index 0af7b4c6d..a4f894b18 100644 --- a/tests/i915/gem_exec_schedule.c +++ b/tests/i915/gem_exec_schedule.c @@ -1637,11 +1637,16 @@ static int userfaultfd(int flags) } struct ufd_thread { - pthread_t thread; uint32_t batch; + uint32_t scratch; uint32_t *page; unsigned int engine; + unsigned int flags; int i915; + + pthread_mutex_t mutex; + pthread_cond_t cond; + int count; }; static uint32_t create_userptr(int i915, void *page) @@ -1685,6 +1690,7 @@ static void test_pi_userfault(int i915, unsigned int engine) struct uffdio_copy copy; struct uffd_msg msg; struct ufd_thread t; + pthread_t thread; char poison[4096]; int ufd; @@ -1726,7 +1732,7 @@ static void test_pi_userfault(int i915, unsigned int engine) igt_assert(reg.ioctls == UFFD_API_RANGE_IOCTLS); /* Kick off the low priority submission */ - igt_assert(pthread_create(&t.thread, NULL, ufd_thread, &t) == 0); + igt_assert(pthread_create(&thread, NULL, ufd_thread, &t) == 0); /* Wait until the low priority thread is blocked on a fault */ igt_assert_eq(read(ufd, &msg, sizeof(msg)), sizeof(msg)); @@ -1770,9 +1776,145 @@ static void test_pi_userfault(int i915, unsigned int engine) copy.len = 4096; do_ioctl(ufd, UFFDIO_COPY, ©); - pthread_join(t.thread, NULL); + pthread_join(thread, NULL); + + 
gem_close(i915, t.batch); + munmap(t.page, 4096); + close(ufd); +} + +static void *iova_thread(struct ufd_thread *t, int prio) +{ + uint32_t ctx = + gem_context_clone(t->i915, 0, + t->flags & SHARED ? I915_CONTEXT_CLONE_VM : 0, + 0); + + gem_context_set_priority(t->i915, ctx, prio); + + store_dword_plug(t->i915, ctx, t->engine, + t->scratch, 0, prio, + t->batch, 0 /* no write hazard! */); + + pthread_mutex_lock(&t->mutex); + if (!--t->count) + pthread_cond_signal(&t->cond); + pthread_mutex_unlock(&t->mutex); + + gem_context_destroy(t->i915, ctx); + return NULL; +} + +static void *iova_low(void *arg) +{ + return iova_thread(arg, MIN_PRIO); +} + +static void *iova_high(void *arg) +{ + return iova_thread(arg, MAX_PRIO); +} + +static void test_pi_iova(int i915, unsigned int engine, unsigned int flags) +{ + struct uffdio_api api = { .api = UFFD_API }; + struct uffdio_register reg; + struct uffdio_copy copy; + struct uffd_msg msg; + struct ufd_thread t; + igt_spin_t *spin; + pthread_t hi, lo; + char poison[4096]; + uint32_t result; + int ufd; + + /* + * In this scenario, we have a pair of contending contexts that + * share the same resource. That resource is stuck behind a slow + * page fault such that neither context has immediate access to it. + * What is expected is that as soon as that resource becomes available, + * the two contexts are queued with the high priority context taking + * precedence. We need to check that we do not cross-contaminate + * the two contexts with the page fault on the shared resource + * initiated by the low priority context. (Consider that the low + * priority context may install an exclusive fence for the page + * fault, which is then used for strict ordering by the high priority + * context, causing an unwanted implicit dependency between the two + * and promoting the low priority context to high.) + * + * SHARED: the two contexts share a vm, but still have separate + * timelines that should not mingle. 
+ */ + + ufd = userfaultfd(0); + igt_require_f(ufd != -1, "kernel support for userfaultfd\n"); + igt_require_f(ioctl(ufd, UFFDIO_API, &api) == 0 && api.api == UFFD_API, + "userfaultfd API v%lld:%lld\n", UFFD_API, api.api); + + t.i915 = i915; + t.engine = engine; + t.flags = flags; + + t.count = 2; + pthread_cond_init(&t.cond, NULL); + pthread_mutex_init(&t.mutex, NULL); + + t.page = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, 0, 0); + igt_assert(t.page != MAP_FAILED); + t.batch = create_userptr(i915, t.page); + t.scratch = gem_create(i915, 4096); + + /* Register our fault handler for t.page */ + memset(®, 0, sizeof(reg)); + reg.mode = UFFDIO_REGISTER_MODE_MISSING; + reg.range.start = to_user_pointer(t.page); + reg.range.len = 4096; + do_ioctl(ufd, UFFDIO_REGISTER, ®); + igt_assert(reg.ioctls == UFFD_API_RANGE_IOCTLS); + + /* Fill the engine with spinners; the store_dword() is too quick */ + spin = igt_spin_new(i915, .engine = engine); + for (int i = 0; i < MAX_ELSP_QLEN; i++) { + spin->execbuf.rsvd1 = create_highest_priority(i915); + gem_execbuf(i915, &spin->execbuf); + gem_context_destroy(i915, spin->execbuf.rsvd1); + } + + /* Kick off the submission threads */ + igt_assert(pthread_create(&lo, NULL, iova_low, &t) == 0); + + /* Wait until the low priority thread is blocked on the fault */ + igt_assert_eq(read(ufd, &msg, sizeof(msg)), sizeof(msg)); + igt_assert_eq(msg.event, UFFD_EVENT_PAGEFAULT); + igt_assert(from_user_pointer(msg.arg.pagefault.address) == t.page); + + /* Then release a very similar thread, but at high priority! 
*/ + igt_assert(pthread_create(&hi, NULL, iova_high, &t) == 0); + + /* Service the fault; releasing the low priority context */ + memset(©, 0, sizeof(copy)); + copy.dst = msg.arg.pagefault.address; + copy.src = to_user_pointer(memset(poison, 0xc5, sizeof(poison))); + copy.len = 4096; + do_ioctl(ufd, UFFDIO_COPY, ©); + + /* Wait until both threads have had a chance to submit */ + pthread_mutex_lock(&t.mutex); + while (t.count) + pthread_cond_wait(&t.cond, &t.mutex); + pthread_mutex_unlock(&t.mutex); + igt_debugfs_dump(i915, "i915_engine_info"); + igt_spin_free(i915, spin); + pthread_join(hi, NULL); + pthread_join(lo, NULL); gem_close(i915, t.batch); + + gem_sync(i915, t.scratch); /* write hazard lies */ + gem_read(i915, t.scratch, 0, &result, sizeof(result)); + igt_assert_eq(result, MIN_PRIO); + gem_close(i915, t.scratch); + munmap(t.page, 4096); close(ufd); } @@ -2019,6 +2161,12 @@ igt_main igt_subtest_f("pi-userfault-%s", e->name) test_pi_userfault(fd, eb_ring(e)); + + igt_subtest_f("pi-distinct-iova-%s", e->name) + test_pi_iova(fd, eb_ring(e), 0); + + igt_subtest_f("pi-shared-iova-%s", e->name) + test_pi_iova(fd, eb_ring(e), SHARED); } } }