Message ID | 1504249706-6661-1-git-send-email-changbin.du@intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Quoting changbin.du@intel.com (2017-09-01 08:08:26) > diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c > index 5fe2cd8..429ce5f 100644 > --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c > +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c > @@ -360,6 +360,50 @@ i915_vma_get_fence(struct i915_vma *vma) > } > > /** > + * i915_reserve_fence - Reserve a fence for vGPU > + * @dev_priv: i915 device private > + * > + * This function walks the fence regs looking for a free one and remove > + * it from the fence_list. It is used to reserve fence for vGPU to use. > + */ > +struct drm_i915_fence_reg * > +i915_reserve_fence(struct drm_i915_private *dev_priv) > +{ > + struct drm_i915_fence_reg *fence; > + int ret; lockdep_assert_held(&dev_priv->drm.struct_mutex); A reminder for when we move fences to a seperate mutex. > + > + /* Host at least need one fence available for display engine. */ > + if (unlikely(dev_priv->mm.fence_list.prev == > + dev_priv->mm.fence_list.next)) > + return ERR_PTR(-ENOSPC); I would prefer one free fence. int count; /* Keep at least one fence available for the display engine */ count = 0; list_for_each_entry(fence, &dev_priv->mm.fence_list, link) count += !fence->pin_count; if (count <= 1) return ERR_PTR(-ENOSPC); > + > + fence = fence_find(dev_priv); > + if (IS_ERR(fence)) > + return fence; > + > + if (fence->vma) { > + /* Force-remove fence from VMA */ > + ret = fence_update(fence, NULL); > + if (ret) > + return ERR_PTR(ret); > + } > + > + list_del(&fence->link); > + return fence; > +} > + > +/** > + * i915_unreserve_fence - Reclaim a reserved fence > + * @fence: the fence reg > + * > + * This function add a reserved fence register from vGPU to the fence_list. 
> + */ > +void i915_unreserve_fence(struct drm_i915_fence_reg *fence) > +{ lockdep_assert_held(&dev_priv->drm.struct_mutex); If you agree to "one free fence", Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Otherwise, we need to look carefully at what happens when the search fails. And that may suggest we need to keep one free per crtc, or that we can survive without (but that usually means giving up on fbc and whatnot, thinking of fbc is why I want to keep one spare). (Time to write some unittests for find-a-fence.) -Chris
On Fri, Sep 01, 2017 at 11:59:53AM +0100, Chris Wilson wrote: > Quoting changbin.du@intel.com (2017-09-01 08:08:26) > > diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c > > index 5fe2cd8..429ce5f 100644 > > --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c > > +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c > > @@ -360,6 +360,50 @@ i915_vma_get_fence(struct i915_vma *vma) > > } > > > > /** > > + * i915_reserve_fence - Reserve a fence for vGPU > > + * @dev_priv: i915 device private > > + * > > + * This function walks the fence regs looking for a free one and remove > > + * it from the fence_list. It is used to reserve fence for vGPU to use. > > + */ > > +struct drm_i915_fence_reg * > > +i915_reserve_fence(struct drm_i915_private *dev_priv) > > +{ > > + struct drm_i915_fence_reg *fence; > > + int ret; > > lockdep_assert_held(&dev_priv->drm.struct_mutex); > > A reminder for when we move fences to a seperate mutex. > ok. > > + > > + /* Host at least need one fence available for display engine. */ > > + if (unlikely(dev_priv->mm.fence_list.prev == > > + dev_priv->mm.fence_list.next)) > > + return ERR_PTR(-ENOSPC); > > I would prefer one free fence. > > int count; > > /* Keep at least one fence available for the display engine */ > count = 0; > list_for_each_entry(fence, &dev_priv->mm.fence_list, link) > count += !fence->pin_count; > if (count <= 1) > return ERR_PTR(-ENOSPC); > Got your idea, will update. > > + > > + fence = fence_find(dev_priv); > > + if (IS_ERR(fence)) > > + return fence; > > + > > + if (fence->vma) { > > + /* Force-remove fence from VMA */ > > + ret = fence_update(fence, NULL); > > + if (ret) > > + return ERR_PTR(ret); > > + } > > + > > + list_del(&fence->link); > > + return fence; > > +} > > + > > +/** > > + * i915_unreserve_fence - Reclaim a reserved fence > > + * @fence: the fence reg > > + * > > + * This function add a reserved fence register from vGPU to the fence_list. 
> > + */ > > +void i915_unreserve_fence(struct drm_i915_fence_reg *fence) > > +{ > > lockdep_assert_held(&dev_priv->drm.struct_mutex); > > If you agree to "one free fence", > Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> > > Otherwise, we need to look carefully at what happens when the search > fails. And that may suggest we need to keep one free per crtc, or that > we can survive without (but that usually means giving up on fbc and > whatnot, thinking of fbc is why I want to keep one spare). > > (Time to write some unittests for find-a-fence.) > -Chris I agree to "one free fence", which is simpler. Currently GVTg will at most use 4x7=28 fence registers (4 per VM, and up to 7 VMs), so at least 4 will remained for host. I agree to your concern, it is significative.
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index ca3d192..7c9ec4f 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -173,8 +173,8 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) _clear_vgpu_fence(vgpu); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = vgpu->fence.regs[i]; - list_add_tail(&reg->link, - &dev_priv->mm.fence_list); + i915_unreserve_fence(reg); + vgpu->fence.regs[i] = NULL; } mutex_unlock(&dev_priv->drm.struct_mutex); @@ -187,24 +187,19 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) struct drm_i915_private *dev_priv = gvt->dev_priv; struct drm_i915_fence_reg *reg; int i; - struct list_head *pos, *q; intel_runtime_pm_get(dev_priv); /* Request fences from host */ mutex_lock(&dev_priv->drm.struct_mutex); - i = 0; - list_for_each_safe(pos, q, &dev_priv->mm.fence_list) { - reg = list_entry(pos, struct drm_i915_fence_reg, link); - if (reg->pin_count || reg->vma) - continue; - list_del(pos); + + for (i = 0; i < vgpu_fence_sz(vgpu); i++) { + reg = i915_reserve_fence(dev_priv); + if (IS_ERR(reg)) + goto out_free_fence; + vgpu->fence.regs[i] = reg; - if (++i == vgpu_fence_sz(vgpu)) - break; } - if (i != vgpu_fence_sz(vgpu)) - goto out_free_fence; _clear_vgpu_fence(vgpu); @@ -212,13 +207,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) intel_runtime_pm_put(dev_priv); return 0; out_free_fence: + gvt_vgpu_err("Failed to alloc fences\n"); /* Return fences to host, if fail */ for (i = 0; i < vgpu_fence_sz(vgpu); i++) { reg = vgpu->fence.regs[i]; if (!reg) continue; - list_add_tail(&reg->link, - &dev_priv->mm.fence_list); + i915_unreserve_fence(reg); + vgpu->fence.regs[i] = NULL; } mutex_unlock(&dev_priv->drm.struct_mutex); intel_runtime_pm_put(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7587ef5..763a720 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3650,6 
+3650,9 @@ i915_vm_to_ppgtt(struct i915_address_space *vm) /* i915_gem_fence_reg.c */ int __must_check i915_vma_get_fence(struct i915_vma *vma); int __must_check i915_vma_put_fence(struct i915_vma *vma); +struct drm_i915_fence_reg * +i915_reserve_fence(struct drm_i915_private *dev_priv); +void i915_unreserve_fence(struct drm_i915_fence_reg *fence); void i915_gem_revoke_fences(struct drm_i915_private *dev_priv); void i915_gem_restore_fences(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 5fe2cd8..429ce5f 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -360,6 +360,50 @@ i915_vma_get_fence(struct i915_vma *vma) } /** + * i915_reserve_fence - Reserve a fence for vGPU + * @dev_priv: i915 device private + * + * This function walks the fence regs looking for a free one and remove + * it from the fence_list. It is used to reserve fence for vGPU to use. + */ +struct drm_i915_fence_reg * +i915_reserve_fence(struct drm_i915_private *dev_priv) +{ + struct drm_i915_fence_reg *fence; + int ret; + + /* Host at least need one fence available for display engine. */ + if (unlikely(dev_priv->mm.fence_list.prev == + dev_priv->mm.fence_list.next)) + return ERR_PTR(-ENOSPC); + + fence = fence_find(dev_priv); + if (IS_ERR(fence)) + return fence; + + if (fence->vma) { + /* Force-remove fence from VMA */ + ret = fence_update(fence, NULL); + if (ret) + return ERR_PTR(ret); + } + + list_del(&fence->link); + return fence; +} + +/** + * i915_unreserve_fence - Reclaim a reserved fence + * @fence: the fence reg + * + * This function add a reserved fence register from vGPU to the fence_list. + */ +void i915_unreserve_fence(struct drm_i915_fence_reg *fence) +{ + list_add_tail(&fence->link, &fence->i915->mm.fence_list); +} + +/** * i915_gem_revoke_fences - revoke fence state * @dev_priv: i915 device private *