From patchwork Fri Jul 31 22:22:23 2015
X-Patchwork-Submitter: Alex Deucher
X-Patchwork-Id: 6920761
From: Alex Deucher
To: dri-devel@lists.freedesktop.org
Subject: [PATCH 07/31] drm/amdgpu: dispatch jobs in cs
Date: Fri, 31 Jul 2015 18:22:23 -0400
Message-Id: <1438381367-24980-8-git-send-email-alexander.deucher@amd.com>
In-Reply-To: <1438381367-24980-1-git-send-email-alexander.deucher@amd.com>
References: <1438381367-24980-1-git-send-email-alexander.deucher@amd.com>

From: Chunming Zhou

BO validation is moved to the scheduler, except for userptr BOs, which
must be validated in the user process.

Signed-off-by: Chunming Zhou
Acked-by: Christian König
Reviewed-by: Jammy Zhou
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h    |   1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 256 +++++++++++++++++++++++++--------
 2 files changed, 200 insertions(+), 57 deletions(-)
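[Annotation, not part of the commit; git-am ignores text between the
diffstat and the diff. The patch turns each submission into a
heap-allocated amdgpu_cs_parser that carries prepare_job/run_job/free_job
callbacks and is pushed to the ring's scheduler entity, with free_job
deferred to a work item. Below is a minimal standalone C sketch of that
callback life cycle; struct job, scheduler_consume() and the other names
are simplified stand-ins, not the real amdgpu types.]

/*
 * Sketch of the job model: prepare_job validates/builds the submission
 * and may run either in the ioctl (userptr case) or later in the
 * scheduler thread; run_job submits to the ring; free_job tears the job
 * down and in the real code runs from a work item (amdgpu_job_work_func).
 */
#include <stdio.h>
#include <stdlib.h>

struct job {
	int (*prepare_job)(struct job *job); /* validate BOs, fill IBs */
	int (*run_job)(struct job *job);     /* hand the IBs to the ring */
	int (*free_job)(struct job *job);    /* deferred teardown */
};

static int prepare(struct job *job) { puts("prepare: validate + fill IBs"); return 0; }
static int run(struct job *job)     { puts("run: submit to the ring");      return 0; }
static int done(struct job *job)    { puts("free: release the parser");     return 0; }

/* Stand-in for the scheduler thread consuming a pushed job. */
static void scheduler_consume(struct job *job)
{
	if (job->prepare_job && job->prepare_job(job))
		goto out;	/* preparation failed; skip submission */
	job->run_job(job);
out:
	job->free_job(job);	/* the patch defers this via schedule_work() */
	free(job);
}

int main(void)
{
	struct job *job = calloc(1, sizeof(*job));

	if (!job)
		return 1;
	/* when userptr BOs are present, the patch calls prepare in the ioctl instead */
	job->prepare_job = prepare;
	job->run_job = run;
	job->free_job = done;
	scheduler_consume(job);	/* models amd_sched_push_job() plus the worker */
	return 0;
}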
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index da924ed..20639d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1239,6 +1239,7 @@ struct amdgpu_cs_parser {
 	struct work_struct job_work;
 	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
 	int (*run_job)(struct amdgpu_cs_parser *sched_job);
+	int (*free_job)(struct amdgpu_cs_parser *sched_job);
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index bc0a704..f9d4fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -41,6 +41,11 @@ struct amdgpu_cs_buckets {
 	struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
 };
 
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser,
+				  int error, bool backoff);
+static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff);
+static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser);
+
 static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
 {
 	unsigned i;
@@ -126,12 +131,52 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
+static void amdgpu_job_work_func(struct work_struct *work)
+{
+	struct amdgpu_cs_parser *sched_job =
+		container_of(work, struct amdgpu_cs_parser,
+			     job_work);
+	mutex_lock(&sched_job->job_lock);
+	sched_job->free_job(sched_job);
+	mutex_unlock(&sched_job->job_lock);
+	/* after processing job, free memory */
+	kfree(sched_job);
+}
+struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
+						 struct drm_file *filp,
+						 struct amdgpu_ctx *ctx,
+						 struct amdgpu_ib *ibs,
+						 uint32_t num_ibs)
+{
+	struct amdgpu_cs_parser *parser;
+	int i;
+
+	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
+	if (!parser)
+		return NULL;
+
+	parser->adev = adev;
+	parser->filp = filp;
+	parser->ctx = ctx;
+	parser->ibs = ibs;
+	parser->num_ibs = num_ibs;
+	if (amdgpu_enable_scheduler) {
+		mutex_init(&parser->job_lock);
+		INIT_WORK(&parser->job_work, amdgpu_job_work_func);
+	}
+	for (i = 0; i < num_ibs; i++)
+		ibs[i].ctx = ctx;
+
+	return parser;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array = NULL;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_bo_list *bo_list = NULL;
 	unsigned size, i;
 	int r = 0;
 
@@ -143,7 +188,17 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		r = -EINVAL;
 		goto out;
 	}
-	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+	bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
+	if (bo_list && !bo_list->has_userptr) {
+		p->bo_list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+		if (!p->bo_list)
+			return -ENOMEM;
+		amdgpu_bo_list_copy(p->adev, p->bo_list, bo_list);
+		amdgpu_bo_list_put(bo_list);
+	} else if (bo_list && bo_list->has_userptr)
+		p->bo_list = bo_list;
+	else
+		p->bo_list = NULL;
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
@@ -424,8 +479,26 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
  **/
 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
-	unsigned i;
+	amdgpu_cs_parser_fini_early(parser, error, backoff);
+	amdgpu_cs_parser_fini_late(parser);
+}
+static int amdgpu_cs_parser_run_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	amdgpu_cs_parser_fini_early(sched_job, 0, true);
+	return 0;
+}
+
+static int amdgpu_cs_parser_free_job(
+	struct amdgpu_cs_parser *sched_job)
+{
+	amdgpu_cs_parser_fini_late(sched_job);
+	return 0;
+}
+
+static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
+{
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -446,11 +519,19 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
+}
 
+static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
+{
+	unsigned i;
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
-	if (parser->bo_list)
-		amdgpu_bo_list_put(parser->bo_list);
+	if (parser->bo_list) {
+		if (!parser->bo_list->has_userptr)
+			amdgpu_bo_list_free(parser->bo_list);
+		else
+			amdgpu_bo_list_put(parser->bo_list);
+	}
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
@@ -461,6 +542,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	kfree(parser->ibs);
 	if (parser->uf.bo)
 		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
+
+	if (!amdgpu_enable_scheduler)
+		kfree(parser);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -533,9 +617,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 		goto out;
 	}
 	amdgpu_cs_sync_rings(parser);
-
-	r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-			       parser->filp);
+	if (!amdgpu_enable_scheduler)
+		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
+				       parser->filp);
 
 out:
 	mutex_unlock(&vm->mutex);
@@ -731,35 +815,16 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 	return 0;
 }
 
-int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
 {
-	struct amdgpu_device *adev = dev->dev_private;
-	union drm_amdgpu_cs *cs = data;
-	struct amdgpu_cs_parser parser;
-	int r, i;
-	bool reserved_buffers = false;
-
-	down_read(&adev->exclusive_lock);
-	if (!adev->accel_working) {
-		up_read(&adev->exclusive_lock);
-		return -EBUSY;
-	}
-	/* initialize parser */
-	memset(&parser, 0, sizeof(struct amdgpu_cs_parser));
-	parser.filp = filp;
-	parser.adev = adev;
-	r = amdgpu_cs_parser_init(&parser, data);
-	if (r) {
-		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(&parser, r, false);
-		up_read(&adev->exclusive_lock);
-		r = amdgpu_cs_handle_lockup(adev, r);
-		return r;
-	}
-
-	r = amdgpu_cs_parser_relocs(&parser);
-	if (r) {
-		if (r != -ERESTARTSYS) {
+	int r, i;
+	struct amdgpu_cs_parser *parser = sched_job;
+	struct amdgpu_device *adev = sched_job->adev;
+	bool reserved_buffers = false;
+
+	r = amdgpu_cs_parser_relocs(parser);
+	if (r) {
+		if (r != -ERESTARTSYS) {
 			if (r == -ENOMEM)
 				DRM_ERROR("Not enough memory for command submission!\n");
 			else
@@ -769,33 +834,104 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	if (!r) {
 		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, &parser);
+		r = amdgpu_cs_ib_fill(adev, parser);
 	}
-
 	if (!r) {
-		r = amdgpu_cs_dependencies(adev, &parser);
+		r = amdgpu_cs_dependencies(adev, parser);
 		if (r)
 			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 	}
+	if (r) {
+		amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+		return r;
+	}
+
+	for (i = 0; i < parser->num_ibs; i++)
+		trace_amdgpu_cs(parser, i);
+
+	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	return r;
+}
+
+static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
+	struct amdgpu_device *adev,
+	struct amdgpu_cs_parser *parser)
+{
+	int i, r;
+
+	struct amdgpu_cs_chunk *chunk;
+	struct drm_amdgpu_cs_chunk_ib *chunk_ib;
+	struct amdgpu_ring *ring;
+	for (i = 0; i < parser->nchunks; i++) {
+		chunk = &parser->chunks[i];
+		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
+
+		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+			continue;
+
+		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
+				       chunk_ib->ip_instance, chunk_ib->ring,
+				       &ring);
+		if (r)
+			return NULL;
+		break;
+	}
+	return ring;
+}
+
+int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	union drm_amdgpu_cs *cs = data;
+	struct amdgpu_cs_parser *parser;
+	int r;
+
+	down_read(&adev->exclusive_lock);
+	if (!adev->accel_working) {
+		up_read(&adev->exclusive_lock);
+		return -EBUSY;
+	}
+	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
+	if (!parser)
+		return -ENOMEM;
+	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
-		amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+		DRM_ERROR("Failed to initialize parser !\n");
+		amdgpu_cs_parser_fini(parser, r, false);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 
-	for (i = 0; i < parser.num_ibs; i++)
-		trace_amdgpu_cs(&parser, i);
-
-	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
-	if (r) {
-		goto out;
+	if (amdgpu_enable_scheduler && parser->num_ibs) {
+		struct amdgpu_ring * ring =
+			amdgpu_cs_parser_get_ring(adev, parser);
+		parser->uf.sequence = atomic64_inc_return(
+			&parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
+		if ((parser->bo_list && parser->bo_list->has_userptr)) {
+			r = amdgpu_cs_parser_prepare_job(parser);
+			if (r)
+				goto out;
+		} else
+			parser->prepare_job = amdgpu_cs_parser_prepare_job;
+
+		parser->run_job = amdgpu_cs_parser_run_job;
+		parser->free_job = amdgpu_cs_parser_free_job;
+		amd_sched_push_job(ring->scheduler,
+				   &parser->ctx->rings[ring->idx].c_entity,
+				   parser);
+		cs->out.handle = parser->uf.sequence;
+		up_read(&adev->exclusive_lock);
+		return 0;
 	}
+	r = amdgpu_cs_parser_prepare_job(parser);
+	if (r)
+		goto out;
 
-	cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
+	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(&parser, r, true);
+	amdgpu_cs_parser_fini(parser, r, true);
 	up_read(&adev->exclusive_lock);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
@@ -829,18 +965,24 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
 	if (ctx == NULL)
 		return -EINVAL;
-
-	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
-	if (IS_ERR(fence))
-		r = PTR_ERR(fence);
-
-	else if (fence) {
-		r = fence_wait_timeout(fence, true, timeout);
-		fence_put(fence);
-
-	} else
+	if (amdgpu_enable_scheduler) {
+		r = amd_sched_wait_ts(&ctx->rings[ring->idx].c_entity,
+				      wait->in.handle, true, timeout);
+		if (r)
+			return r;
 		r = 1;
+	} else {
+		fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+		if (IS_ERR(fence))
+			r = PTR_ERR(fence);
+		else if (fence) {
+			r = fence_wait_timeout(fence, true, timeout);
+			fence_put(fence);
+
+		} else
+			r = 1;
+	}
 
 	amdgpu_ctx_put(ctx);
 	if (r < 0)
 		return r;
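[Annotation, not part of the commit. With the scheduler enabled, the CS
handle returned to userspace is no longer the last IB's fence sequence
but a per-entity virtual sequence number taken from last_queued_v_seq,
and amdgpu_cs_wait_ioctl waits on that number via amd_sched_wait_ts()
instead of looking up a fence. Below is a rough userspace-style C model
of those handle semantics; last_done_v_seq and seq_signaled() are
invented stand-ins for whatever state the real wait checks internally.]

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entity {
	atomic_ulong last_queued_v_seq;	/* bumped when a job is pushed */
	atomic_ulong last_done_v_seq;	/* bumped when a job completes */
};

/* models: parser->uf.sequence = atomic64_inc_return(&...last_queued_v_seq) */
static unsigned long push_job(struct entity *e)
{
	return atomic_fetch_add(&e->last_queued_v_seq, 1) + 1;
}

/* models the condition a sequence-number wait would block on */
static bool seq_signaled(struct entity *e, unsigned long handle)
{
	return atomic_load(&e->last_done_v_seq) >= handle;
}

int main(void)
{
	struct entity e = { 0 };
	unsigned long handle = push_job(&e);	/* what cs->out.handle returns */

	printf("handle %lu signaled? %d\n", handle, seq_signaled(&e, handle));
	atomic_store(&e.last_done_v_seq, handle);	/* scheduler retires the job */
	printf("handle %lu signaled? %d\n", handle, seq_signaled(&e, handle));
	return 0;
}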