@@ -1280,66 +1280,38 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
  */
 int
 drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
-			  struct ww_acquire_ctx *acquire_ctx)
+			  struct dma_resv_ctx *acquire_ctx)
 {
-	int contended = -1;
 	int i, ret;
 
-	ww_acquire_init(acquire_ctx, &reservation_ww_class);
+	dma_resv_ctx_init(acquire_ctx);
 
 retry:
-	if (contended != -1) {
-		struct drm_gem_object *obj = objs[contended];
-
-		ret = dma_resv_lock_slow_interruptible(obj->resv,
-						       acquire_ctx);
-		if (ret) {
-			ww_acquire_done(acquire_ctx);
-			return ret;
-		}
-	}
-
 	for (i = 0; i < count; i++) {
-		if (i == contended)
-			continue;
-
-		ret = dma_resv_lock_interruptible(objs[i]->resv,
-						  acquire_ctx);
-		if (ret) {
-			int j;
-
-			for (j = 0; j < i; j++)
-				dma_resv_unlock(objs[j]->resv);
-
-			if (contended != -1 && contended >= i)
-				dma_resv_unlock(objs[contended]->resv);
-
-			if (ret == -EDEADLK) {
-				contended = i;
-				goto retry;
-			}
-
-			ww_acquire_done(acquire_ctx);
-			return ret;
-		}
+		ret = dma_resv_ctx_lock(acquire_ctx, objs[i]->resv, true);
+		if (ret)
+			goto error;
 	}
-	ww_acquire_done(acquire_ctx);
-
+	dma_resv_ctx_done(acquire_ctx);
 	return 0;
+
+error:
+	if (ret == -EDEADLK)
+		goto retry;
+
+	dma_resv_ctx_unlock_all(acquire_ctx);
+	dma_resv_ctx_done(acquire_ctx);
+	return ret;
 }
 EXPORT_SYMBOL(drm_gem_lock_reservations);
 
 void
 drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
-			    struct ww_acquire_ctx *acquire_ctx)
+			    struct dma_resv_ctx *acquire_ctx)
 {
-	int i;
-
-	for (i = 0; i < count; i++)
-		dma_resv_unlock(objs[i]->resv);
-
-	ww_acquire_fini(acquire_ctx);
+	dma_resv_ctx_unlock_all(acquire_ctx);
+	dma_resv_ctx_fini(acquire_ctx);
 }
 EXPORT_SYMBOL(drm_gem_unlock_reservations);
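With the context object doing all the bookkeeping, callers no longer need any
backoff logic of their own. A minimal caller-side sketch of the reworked
helpers (example_submit() is a hypothetical function for illustration, not
part of this patch):

static int example_submit(struct drm_gem_object **objs, int count)
{
	struct dma_resv_ctx ticket;
	int ret;

	/* Locks every reservation, transparently restarting on contention. */
	ret = drm_gem_lock_reservations(objs, count, &ticket);
	if (ret)
		return ret;

	/* ... attach fences to objs[i]->resv, push the job ... */

	drm_gem_unlock_reservations(objs, count, &ticket);
	return 0;
}
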
@@ -218,7 +218,7 @@ int panfrost_job_push(struct panfrost_job *job)
 	struct panfrost_device *pfdev = job->pfdev;
 	int slot = panfrost_job_get_slot(job);
 	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
-	struct ww_acquire_ctx acquire_ctx;
+	struct dma_resv_ctx acquire_ctx;
 	int ret = 0;
 
 	mutex_lock(&pfdev->sched_lock);
@@ -248,7 +248,7 @@ v3d_invalidate_caches(struct v3d_dev *v3d)
  */
 static int
 v3d_lock_bo_reservations(struct v3d_job *job,
-			 struct ww_acquire_ctx *acquire_ctx)
+			 struct dma_resv_ctx *acquire_ctx)
 {
 	int i, ret;
@@ -486,7 +486,7 @@ v3d_push_job(struct v3d_file_priv *v3d_priv,
 static void
 v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
 					 struct v3d_job *job,
-					 struct ww_acquire_ctx *acquire_ctx,
+					 struct dma_resv_ctx *acquire_ctx,
 					 u32 out_sync,
 					 struct dma_fence *done_fence)
 {
@@ -530,7 +530,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	struct drm_v3d_submit_cl *args = data;
 	struct v3d_bin_job *bin = NULL;
 	struct v3d_render_job *render;
-	struct ww_acquire_ctx acquire_ctx;
+	struct dma_resv_ctx acquire_ctx;
 	int ret = 0;
 
 	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
@@ -642,7 +642,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
 	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
 	struct drm_v3d_submit_tfu *args = data;
 	struct v3d_tfu_job *job;
-	struct ww_acquire_ctx acquire_ctx;
+	struct dma_resv_ctx acquire_ctx;
 	int ret = 0;
 
 	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
@@ -738,7 +738,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
 	struct drm_v3d_submit_csd *args = data;
 	struct v3d_csd_job *job;
 	struct v3d_job *clean_job;
-	struct ww_acquire_ctx acquire_ctx;
+	struct dma_resv_ctx acquire_ctx;
 	int ret;
 
 	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
@@ -77,7 +77,7 @@ struct virtio_gpu_object {
 	container_of((gobj), struct virtio_gpu_object, base.base)
 
 struct virtio_gpu_object_array {
-	struct ww_acquire_ctx ticket;
+	struct dma_resv_ctx ticket;
 	struct list_head next;
 	u32 nents, total;
 	struct drm_gem_object *objs[];
@@ -36,6 +36,7 @@
 #include <linux/kref.h>
 #include <linux/dma-resv.h>
+#include <linux/dma-resv-ctx.h>
 
 #include <drm/drm_vma_manager.h>
@@ -393,9 +394,9 @@ struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
 long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 			   bool wait_all, unsigned long timeout);
 int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
-			      struct ww_acquire_ctx *acquire_ctx);
+			      struct dma_resv_ctx *acquire_ctx);
 void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
-				 struct ww_acquire_ctx *acquire_ctx);
+				 struct dma_resv_ctx *acquire_ctx);
 int drm_gem_fence_array_add(struct xarray *fence_array,
 			    struct dma_fence *fence);
 int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
Use the new dma_resv_ctx object instead of implementing deadlock
handling on our own.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/drm_gem.c               | 62 +++++++------------------
 drivers/gpu/drm/panfrost/panfrost_job.c |  2 +-
 drivers/gpu/drm/v3d/v3d_gem.c           | 10 ++--
 drivers/gpu/drm/virtio/virtgpu_drv.h    |  2 +-
 include/drm/drm_gem.h                   |  5 +-
 5 files changed, 27 insertions(+), 54 deletions(-)
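For reviewers without the prerequisite dma-resv-ctx patch at hand, the
contract this code relies on looks roughly as follows. This is a sketch
inferred from the call sites above; the struct layout is a guess and only
the dma-resv-ctx patch itself is authoritative. The key assumption is that
dma_resv_ctx_lock(), when it hits ww_mutex contention, drops every lock it
already tracks, slow-locks the contended reservation and returns -EDEADLK,
which is why the error path in drm_gem_lock_reservations() can simply jump
back to the retry label without any cleanup of its own:

/* Sketch only; see the dma-resv-ctx patch for the real definitions. */
struct dma_resv_ctx {
	struct ww_acquire_ctx base;	/* the underlying acquire ticket */
	struct dma_resv *contended;	/* slow-locked loser, reused on retry */
	/* ... tracking of all currently acquired reservations ... */
};

void dma_resv_ctx_init(struct dma_resv_ctx *ctx);
void dma_resv_ctx_fini(struct dma_resv_ctx *ctx);
int dma_resv_ctx_lock(struct dma_resv_ctx *ctx, struct dma_resv *obj,
		      bool interruptible);
void dma_resv_ctx_unlock_all(struct dma_resv_ctx *ctx);
void dma_resv_ctx_done(struct dma_resv_ctx *ctx);	/* ends the acquire phase */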