Message ID | 20250410195508.233367-1-ian.forbes@broadcom.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | [1/2] drm/vmwgfx: Update last_read_seqno under the fence lock | expand |
On 4/10/25 12:55, Ian Forbes wrote: > There was a possible race in vmw_update_seqno. Because of this race it > was possible for last_read_seqno to go backwards. Remove this function > and replace it with vmw_update_fences which now sets and returns the > last_read_seqno while holding the fence lock. This serialization via the > fence lock ensures that last_read_seqno is monotonic again. > > Signed-off-by: Ian Forbes <ian.forbes@broadcom.com> > --- > drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 2 +- > drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1 - > drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 3 +-- > drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18 +++++++++--------- > drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 2 +- > drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 12 +----------- > 6 files changed, 13 insertions(+), 25 deletions(-) > > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c > index dd4ca6a9c690..8fe02131a6c4 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c > @@ -544,7 +544,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) > cmd_fence = (struct svga_fifo_cmd_fence *) fm; > cmd_fence->fence = *seqno; > vmw_cmd_commit_flush(dev_priv, bytes); > - vmw_update_seqno(dev_priv); > + vmw_fences_update(dev_priv->fman); > > out_err: > return ret; > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h > index 594af8eb04c6..6d4a68f0ae37 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h > @@ -1006,7 +1006,6 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv, > uint32_t seqno, > bool interruptible, > unsigned long timeout); > -extern void vmw_update_seqno(struct vmw_private *dev_priv); > extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv); > extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv); > extern void vmw_goal_waiter_add(struct vmw_private *dev_priv); > diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c > index e831e324e737..90ce5372343b 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c > @@ -3878,8 +3878,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, > > fence_rep.handle = fence_handle; > fence_rep.seqno = fence->base.seqno; > - vmw_update_seqno(dev_priv); > - fence_rep.passed_seqno = dev_priv->last_read_seqno; > + fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman); > } > > /* > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c > index 588d50ababf6..9d1465558839 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c > @@ -172,7 +172,7 @@ vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) > wake_up_process(wait->task); > } > > -static void __vmw_fences_update(struct vmw_fence_manager *fman); > +static u32 __vmw_fences_update(struct vmw_fence_manager *fman); > > static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) > { > @@ -457,7 +457,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) > return true; > } > > -static void __vmw_fences_update(struct vmw_fence_manager *fman) > +static u32 __vmw_fences_update(struct vmw_fence_manager *fman) > { > struct vmw_fence_obj *fence, *next_fence; > struct list_head action_list; > @@ -495,13 +495,16 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman) > > if (!list_empty(&fman->cleanup_list)) > (void) schedule_work(&fman->work); > + return (fman->dev_priv->last_read_seqno = seqno); Should this be WRITE_ONCE(fman->dev_priv->last_read_seqno, seqno); ? 
> } > > -void vmw_fences_update(struct vmw_fence_manager *fman) > +u32 vmw_fences_update(struct vmw_fence_manager *fman) > { > + u32 seqno; > spin_lock(&fman->lock); > - __vmw_fences_update(fman); > + seqno = __vmw_fences_update(fman); > spin_unlock(&fman->lock); > + return seqno; > } > > bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) > @@ -778,7 +781,6 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, > (struct drm_vmw_fence_signaled_arg *) data; > struct ttm_base_object *base; > struct vmw_fence_obj *fence; > - struct vmw_fence_manager *fman; > struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; > struct vmw_private *dev_priv = vmw_priv(dev); > > @@ -787,14 +789,12 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, > return PTR_ERR(base); > > fence = &(container_of(base, struct vmw_user_fence, base)->fence); > - fman = fman_from_fence(fence); > > arg->signaled = vmw_fence_obj_signaled(fence); > > arg->signaled_flags = arg->flags; > - spin_lock(&fman->lock); > - arg->passed_seqno = dev_priv->last_read_seqno; > - spin_unlock(&fman->lock); > + arg->passed_seqno = READ_ONCE(dev_priv->last_read_seqno); > + > > ttm_base_object_unref(&base); > > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h > index a7eee579c76a..10264dab5f6a 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h > @@ -86,7 +86,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence) > return fence; > } > > -extern void vmw_fences_update(struct vmw_fence_manager *fman); > +u32 vmw_fences_update(struct vmw_fence_manager *fman); > > extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence); > > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c > index 086e69a130d4..548ef2f86508 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c > @@ -123,16 +123,6 @@ static bool 
vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) > return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); > } > > -void vmw_update_seqno(struct vmw_private *dev_priv) > -{ > - uint32_t seqno = vmw_fence_read(dev_priv); > - > - if (dev_priv->last_read_seqno != seqno) { > - dev_priv->last_read_seqno = seqno; > - vmw_fences_update(dev_priv->fman); > - } > -} > - > bool vmw_seqno_passed(struct vmw_private *dev_priv, > uint32_t seqno) > { > @@ -141,7 +131,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv, > if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) > return true; > > - vmw_update_seqno(dev_priv); > + vmw_fences_update(dev_priv->fman); > if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) > return true; >
On 4/10/25 12:55, Ian Forbes wrote: > There was a possible race in vmw_update_seqno. Because of this race it > was possible for last_read_seqno to go backwards. Remove this function > and replace it with vmw_update_fences which now sets and returns the > last_read_seqno while holding the fence lock. This serialization via the > fence lock ensures that last_read_seqno is monotonic again. > > Signed-off-by: Ian Forbes <ian.forbes@broadcom.com> > --- > drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 2 +- > drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1 - > drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 3 +-- > drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18 +++++++++--------- > drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 2 +- > drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 12 +----------- > 6 files changed, 13 insertions(+), 25 deletions(-) > > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c > index dd4ca6a9c690..8fe02131a6c4 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c > @@ -544,7 +544,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) > cmd_fence = (struct svga_fifo_cmd_fence *) fm; > cmd_fence->fence = *seqno; > vmw_cmd_commit_flush(dev_priv, bytes); > - vmw_update_seqno(dev_priv); > + vmw_fences_update(dev_priv->fman); > > out_err: > return ret; > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h > index 594af8eb04c6..6d4a68f0ae37 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h > @@ -1006,7 +1006,6 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv, > uint32_t seqno, > bool interruptible, > unsigned long timeout); > -extern void vmw_update_seqno(struct vmw_private *dev_priv); > extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv); > extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv); > extern void vmw_goal_waiter_add(struct vmw_private *dev_priv); > diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c > index e831e324e737..90ce5372343b 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c > @@ -3878,8 +3878,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, > > fence_rep.handle = fence_handle; > fence_rep.seqno = fence->base.seqno; > - vmw_update_seqno(dev_priv); > - fence_rep.passed_seqno = dev_priv->last_read_seqno; > + fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman); > } > > /* > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c > index 588d50ababf6..9d1465558839 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c > @@ -172,7 +172,7 @@ vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) > wake_up_process(wait->task); > } > > -static void __vmw_fences_update(struct vmw_fence_manager *fman); > +static u32 __vmw_fences_update(struct vmw_fence_manager *fman); > > static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) > { > @@ -457,7 +457,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) > return true; > } > > -static void __vmw_fences_update(struct vmw_fence_manager *fman) > +static u32 __vmw_fences_update(struct vmw_fence_manager *fman) > { > struct vmw_fence_obj *fence, *next_fence; > struct list_head action_list; > @@ -495,13 +495,16 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman) > > if (!list_empty(&fman->cleanup_list)) > (void) schedule_work(&fman->work); > + return (fman->dev_priv->last_read_seqno = seqno); > } > > -void vmw_fences_update(struct vmw_fence_manager *fman) > +u32 vmw_fences_update(struct vmw_fence_manager *fman) > { > + u32 seqno; > spin_lock(&fman->lock); > - __vmw_fences_update(fman); > + seqno = __vmw_fences_update(fman); > spin_unlock(&fman->lock); > + return seqno; > } > > bool vmw_fence_obj_signaled(struct 
vmw_fence_obj *fence) > @@ -778,7 +781,6 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, > (struct drm_vmw_fence_signaled_arg *) data; > struct ttm_base_object *base; > struct vmw_fence_obj *fence; > - struct vmw_fence_manager *fman; > struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; > struct vmw_private *dev_priv = vmw_priv(dev); > > @@ -787,14 +789,12 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, > return PTR_ERR(base); > > fence = &(container_of(base, struct vmw_user_fence, base)->fence); > - fman = fman_from_fence(fence); > > arg->signaled = vmw_fence_obj_signaled(fence); > > arg->signaled_flags = arg->flags; > - spin_lock(&fman->lock); > - arg->passed_seqno = dev_priv->last_read_seqno; > - spin_unlock(&fman->lock); > + arg->passed_seqno = READ_ONCE(dev_priv->last_read_seqno); > + Why is the spinlock being removed here? As far as I understand it, READ_ONCE() will prevent compiler optimizations that might change how the variable is read, but we would still need the lock to synchronize access at runtime, since we are using that lock for write access when invoking __vmw_fences_update(). Also, if we are using the spinlock, doesn't that make READ_ONCE() redundant? 
> > ttm_base_object_unref(&base); > > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h > index a7eee579c76a..10264dab5f6a 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h > @@ -86,7 +86,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence) > return fence; > } > > -extern void vmw_fences_update(struct vmw_fence_manager *fman); > +u32 vmw_fences_update(struct vmw_fence_manager *fman); > > extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence); > > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c > index 086e69a130d4..548ef2f86508 100644 > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c > @@ -123,16 +123,6 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) > return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); > } > > -void vmw_update_seqno(struct vmw_private *dev_priv) > -{ > - uint32_t seqno = vmw_fence_read(dev_priv); > - > - if (dev_priv->last_read_seqno != seqno) { > - dev_priv->last_read_seqno = seqno; > - vmw_fences_update(dev_priv->fman); > - } > -} > - > bool vmw_seqno_passed(struct vmw_private *dev_priv, > uint32_t seqno) > { > @@ -141,7 +131,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv, > if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) > return true; > > - vmw_update_seqno(dev_priv); > + vmw_fences_update(dev_priv->fman); > if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) > return true; >
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c index dd4ca6a9c690..8fe02131a6c4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c @@ -544,7 +544,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) cmd_fence = (struct svga_fifo_cmd_fence *) fm; cmd_fence->fence = *seqno; vmw_cmd_commit_flush(dev_priv, bytes); - vmw_update_seqno(dev_priv); + vmw_fences_update(dev_priv->fman); out_err: return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 594af8eb04c6..6d4a68f0ae37 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1006,7 +1006,6 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv, uint32_t seqno, bool interruptible, unsigned long timeout); -extern void vmw_update_seqno(struct vmw_private *dev_priv); extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv); extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv); extern void vmw_goal_waiter_add(struct vmw_private *dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index e831e324e737..90ce5372343b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3878,8 +3878,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, fence_rep.handle = fence_handle; fence_rep.seqno = fence->base.seqno; - vmw_update_seqno(dev_priv); - fence_rep.passed_seqno = dev_priv->last_read_seqno; + fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman); } /* diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 588d50ababf6..9d1465558839 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -172,7 +172,7 @@ vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) wake_up_process(wait->task); } -static void 
__vmw_fences_update(struct vmw_fence_manager *fman); +static u32 __vmw_fences_update(struct vmw_fence_manager *fman); static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout) { @@ -457,7 +457,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) return true; } -static void __vmw_fences_update(struct vmw_fence_manager *fman) +static u32 __vmw_fences_update(struct vmw_fence_manager *fman) { struct vmw_fence_obj *fence, *next_fence; struct list_head action_list; @@ -495,13 +495,16 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman) if (!list_empty(&fman->cleanup_list)) (void) schedule_work(&fman->work); + return (fman->dev_priv->last_read_seqno = seqno); } -void vmw_fences_update(struct vmw_fence_manager *fman) +u32 vmw_fences_update(struct vmw_fence_manager *fman) { + u32 seqno; spin_lock(&fman->lock); - __vmw_fences_update(fman); + seqno = __vmw_fences_update(fman); spin_unlock(&fman->lock); + return seqno; } bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence) @@ -778,7 +781,6 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, (struct drm_vmw_fence_signaled_arg *) data; struct ttm_base_object *base; struct vmw_fence_obj *fence; - struct vmw_fence_manager *fman; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_private *dev_priv = vmw_priv(dev); @@ -787,14 +789,12 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data, return PTR_ERR(base); fence = &(container_of(base, struct vmw_user_fence, base)->fence); - fman = fman_from_fence(fence); arg->signaled = vmw_fence_obj_signaled(fence); arg->signaled_flags = arg->flags; - spin_lock(&fman->lock); - arg->passed_seqno = dev_priv->last_read_seqno; - spin_unlock(&fman->lock); + arg->passed_seqno = READ_ONCE(dev_priv->last_read_seqno); + ttm_base_object_unref(&base); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h index a7eee579c76a..10264dab5f6a 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h @@ -86,7 +86,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence) return fence; } -extern void vmw_fences_update(struct vmw_fence_manager *fman); +u32 vmw_fences_update(struct vmw_fence_manager *fman); extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 086e69a130d4..548ef2f86508 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -123,16 +123,6 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); } -void vmw_update_seqno(struct vmw_private *dev_priv) -{ - uint32_t seqno = vmw_fence_read(dev_priv); - - if (dev_priv->last_read_seqno != seqno) { - dev_priv->last_read_seqno = seqno; - vmw_fences_update(dev_priv->fman); - } -} - bool vmw_seqno_passed(struct vmw_private *dev_priv, uint32_t seqno) { @@ -141,7 +131,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv, if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) return true; - vmw_update_seqno(dev_priv); + vmw_fences_update(dev_priv->fman); if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) return true;
There was a possible race in vmw_update_seqno. Because of this race it was possible for last_read_seqno to go backwards. Remove this function and replace it with vmw_fences_update which now sets and returns the last_read_seqno while holding the fence lock. This serialization via the fence lock ensures that last_read_seqno is monotonic again. Signed-off-by: Ian Forbes <ian.forbes@broadcom.com> --- drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1 - drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 3 +-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18 +++++++++--------- drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 12 +----------- 6 files changed, 13 insertions(+), 25 deletions(-)