
[07/10] dma-buf/resv: add new fences container implementation

Message ID 20190821123147.110736-8-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [01/10] dma-buf: make to_dma_fence_array NULL safe

Commit Message

Christian König Aug. 21, 2019, 12:31 p.m. UTC
Add a new container for fences which internally uses
dma_fence_arrays to store the fences.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-resv.c | 221 +++++++++++++++++++++++++++++++++++++
 include/linux/dma-resv.h   |  49 ++++++++
 2 files changed, 270 insertions(+)
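
The helpers added here are meant to be used in a reserve/add/commit
pattern while holding the reservation lock. A rough writer-side sketch
(hedged: queue_fence() and the "fences" member of struct dma_resv are
hypothetical here, the member is not added by this patch):

	/* Hypothetical caller; must hold obj->lock (the ww_mutex). */
	static int queue_fence(struct dma_resv *obj, struct dma_fence *fence)
	{
		int ret;

		dma_resv_assert_held(obj);

		/* Make room for one more fence in the staged array. */
		ret = dma_resv_fences_reserve(obj, &obj->fences, 1);
		if (ret)
			return ret;

		/* Stage the fence; a free slot is guaranteed now. */
		dma_resv_fences_add(&obj->fences, fence);

		/* Publish the staged array as the new singleton fence. */
		dma_resv_fences_commit(obj, &obj->fences);
		return 0;
	}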

Comments

Daniel Vetter Aug. 21, 2019, 4:04 p.m. UTC | #1
On Wed, Aug 21, 2019 at 02:31:44PM +0200, Christian König wrote:
> Add a new container for fences which internally uses
> dma_fence_arrays to store the fences.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c | 221 +++++++++++++++++++++++++++++++++++++
>  include/linux/dma-resv.h   |  49 ++++++++
>  2 files changed, 270 insertions(+)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index d3a9a3bb15f0..83033b3e8521 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -33,6 +33,7 @@
>   */
>  
>  #include <linux/dma-resv.h>
> +#include <linux/dma-fence-array.h>
>  #include <linux/export.h>
>  
>  /**
> @@ -55,6 +56,226 @@ EXPORT_SYMBOL(reservation_seqcount_class);
>  const char reservation_seqcount_string[] = "reservation_seqcount";
>  EXPORT_SYMBOL(reservation_seqcount_string);
>  
> +static void dma_resv_fences_init(struct dma_resv_fences *fences)
> +{
> +	RCU_INIT_POINTER(fences->fence, NULL);
> +	fences->staged = NULL;
> +}
> +
> +static void dma_resv_fences_fini(struct dma_resv_fences *fences)
> +{
> +	/*
> +	 * This object should be dead and all references must have
> +	 * been released to it, so no need to be protected with rcu.
> +	 */
> +	dma_fence_put(rcu_dereference_protected(fences->fence, true));
> +	dma_fence_array_free(fences->staged);
> +}
> +
> +/**
> + * dma_resv_fences_reserve - allocate fence slots
> + * @fences: fences object where we need slots
> + * @num_fences: number of fence slots we need
> + *
> + * Make sure the staged dma_fence_array has free slots for at least
> + * @num_fences new fences in addition to all the existing ones.
> + *
> + * Returns -ENOMEM on allocation failure, 0 otherwise.
> + */
> +int dma_resv_fences_reserve(struct dma_resv *obj,
> +			    struct dma_resv_fences *fences,
> +			    unsigned int num_fences)
> +{
> +	struct dma_fence *fence = dma_resv_fences_deref(obj, fences);
> +	struct dma_fence_array *staged, *array;
> +	unsigned int i;
> +
> +	array = fences->staged;
> +	if (!array)
> +		array = to_dma_fence_array(fence);
> +
> +	if (array)
> +		num_fences += array->num_fences;
> +	else if (fence)
> +		num_fences += 1;
> +
> +	staged = fences->staged;
> +	if (staged && dma_fence_array_max_fences(staged) >= num_fences)
> +		return 0;
> +
> +	staged = dma_fence_array_alloc(num_fences, NULL);
> +	if (!staged)
> +		return -ENOMEM;
> +
> +	/* Copy over all fences from the old object */
> +	if (array) {
> +		for (i = 0; i < array->num_fences; ++i) {
> +			struct dma_fence *f = array->fences[i];
> +
> +			staged->fences[i] = dma_fence_get(f);
> +		}
> +		staged->num_fences = array->num_fences;
> +
> +	} else if (fence) {
> +		staged->fences[0] = dma_fence_get(fence);
> +		staged->num_fences = 1;
> +
> +	} else {
> +		staged->num_fences = 0;
> +	}
> +
> +	dma_fence_array_free(fences->staged);
> +	fences->staged = staged;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL(dma_resv_fences_reserve);
> +
> +/**
> + * dma_resv_fences_assign - set the singleton fence
> + * @fences: fences object to set the fence in
> + * @fence: singleton fence for the object
> + *
> + * Internal helper to assign the singleton fence without grabbing a reference.
> + * If the old fence is a dma_fence_array, try to recycle it.
> + */
> +static void dma_resv_fences_assign(struct dma_resv *obj,
> +				   struct dma_resv_fences *fences,
> +				   struct dma_fence *fence)
> +{
> +	struct dma_fence_array *array, *staged;
> +	unsigned int num_fences, i;
> +	struct dma_fence *old;
> +
> +	old = dma_resv_fences_deref(obj, fences);
> +	rcu_assign_pointer(fences->fence, fence);
> +
> +	dma_fence_array_free(fences->staged);
> +	fences->staged = NULL;
> +
> +	/* Try to recycle the old fence array */
> +	staged = to_dma_fence_array(old);
> +	if (!staged)
> +		goto drop_old;
> +
> +	array = to_dma_fence_array(fence);
> +	if (array)
> +		num_fences = array->num_fences;
> +	else
> +		num_fences = fence ? 1 : 0;
> +
> +	if (dma_fence_array_max_fences(staged) < num_fences)
> +		goto drop_old;
> +
> +	/* Try to drop the last reference */
> +	if (!dma_fence_array_recycle(staged))

Without an rcu barrier here you're not syncing to new clients at all.
I don't think this works, and I expect that once you've readded all the
barriers and retry loops we're back to seqlocks.
-Daniel

> +		return;
> +
> +	/* Make sure the staged array has the latest fences */
> +	if (array) {
> +		for (i = 0; i < array->num_fences; ++i) {
> +			struct dma_fence *f = array->fences[i];
> +
> +			if (f == staged->fences[i])
> +				continue;
> +
> +			dma_fence_put(staged->fences[i]);
> +			staged->fences[i] = dma_fence_get(f);
> +		}
> +		for (; i < staged->num_fences; ++i)
> +			dma_fence_put(staged->fences[i]);
> +		staged->num_fences = array->num_fences;
> +
> +	} else if (fence) {
> +		for (i = 0; i < staged->num_fences; ++i)
> +			dma_fence_put(staged->fences[i]);
> +		staged->fences[0] = dma_fence_get(fence);
> +		staged->num_fences = 1;
> +	} else {
> +		for (i = 0; i < staged->num_fences; ++i)
> +			dma_fence_put(staged->fences[i]);
> +		staged->num_fences = 0;
> +	}
> +
> +	fences->staged = staged;
> +	return;
> +
> +drop_old:
> +	dma_fence_put(old);
> +}
> +
> +/**
> + * dma_resv_fences_set - set the singleton fence
> + * @fences: fences object to set the fence in
> + * @fence: singleton fence for the object
> + *
> + * Grabs a reference to the new fence and replaces the current singleton
> + * fence with it. If the old fence is a dma_fence_array, try to recycle it.
> + */
> +void dma_resv_fences_set(struct dma_resv *obj,
> +			 struct dma_resv_fences *fences,
> +			 struct dma_fence *fence)
> +{
> +	dma_fence_get(fence);
> +	dma_resv_fences_assign(obj, fences, fence);
> +}
> +EXPORT_SYMBOL(dma_resv_fences_set);
> +
> +/**
> + * dma_resv_fences_add - add a fence to the staged fence_array
> + * @fences: fences object to add the fence to
> + * @fence: fence to add
> + *
> + * Add a new fence to the staged fence_array.
> + */
> +void dma_resv_fences_add(struct dma_resv_fences *fences,
> +			 struct dma_fence *fence)
> +{
> +	struct dma_fence_array *staged = fences->staged;
> +	struct dma_fence *old;
> +	unsigned int i;
> +
> +#ifndef CONFIG_DEBUG_MUTEXES
> +	for (i = 0; i < staged->num_fences; ++i) {
> +		old = staged->fences[i];
> +
> +		if (old->context == fence->context ||
> +		    dma_fence_is_signaled(old)) {
> +			dma_fence_put(old);
> +			goto replace;
> +		}
> +	}
> +#endif
> +
> +	BUG_ON(staged->num_fences >= dma_fence_array_max_fences(staged));
> +	i = staged->num_fences++;
> +
> +replace:
> +	staged->fences[i] = dma_fence_get(fence);
> +}
> +EXPORT_SYMBOL(dma_resv_fences_add);
> +
> +/**
> + * dma_resv_fences_commit - commit the staged dma_fence_array
> + * @fences: fences object where the commit should happen
> + *
> + * Commit the fences staged in the dma_fence_array and make them visible to
> + * other threads.
> + */
> +void dma_resv_fences_commit(struct dma_resv *obj,
> +			    struct dma_resv_fences *fences)
> +{
> +	struct dma_fence_array *staged = fences->staged;
> +
> +	if (!staged || !staged->num_fences)
> +		return;
> +
> +	fences->staged = NULL;
> +	dma_fence_array_init(staged, dma_fence_context_alloc(1), 1, false);
> +	dma_resv_fences_assign(obj, fences, &staged->base);
> +}
> +EXPORT_SYMBOL(dma_resv_fences_commit);
> +
>  /**
>   * dma_resv_list_alloc - allocate fence list
>   * @shared_max: number of fences we need space for
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 03b0f95682b0..c70f13fa6789 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -45,10 +45,33 @@
>  #include <linux/seqlock.h>
>  #include <linux/rcupdate.h>
>  
> +struct dma_resv;
> +
>  extern struct ww_class reservation_ww_class;
>  extern struct lock_class_key reservation_seqcount_class;
>  extern const char reservation_seqcount_string[];
>  
> +/**
> + * struct dma_resv_fences - fences inside a reservation object
> + * @fence: the current RCU protected singleton fence
> + * @staged: optional staged dma_fence_array to replace @fence
> + */
> +struct dma_resv_fences {
> +	struct dma_fence __rcu *fence;
> +	struct dma_fence_array *staged;
> +};
> +
> +int dma_resv_fences_reserve(struct dma_resv *obj,
> +			    struct dma_resv_fences *fences,
> +			    unsigned int num_fences);
> +void dma_resv_fences_set(struct dma_resv *obj,
> +			 struct dma_resv_fences *fences,
> +			 struct dma_fence *fence);
> +void dma_resv_fences_add(struct dma_resv_fences *fences,
> +			 struct dma_fence *fence);
> +void dma_resv_fences_commit(struct dma_resv *obj,
> +			    struct dma_resv_fences *fences);
> +
>  /**
>   * struct dma_resv_list - a list of shared fences
>   * @rcu: for internal use
> @@ -80,6 +103,32 @@ struct dma_resv {
>  #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
>  #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
>  
> +/**
> + * dma_resv_fences_deref - get singleton fence
> + * @obj: the reservation object
> + * @fences: the fences object
> + *
> + * Returns the singleton fence from a resv_fences object.
> + */
> +static inline struct dma_fence *
> +dma_resv_fences_deref(struct dma_resv *obj, struct dma_resv_fences *fences)
> +{
> +	return rcu_dereference_protected(fences->fence,
> +					 dma_resv_held(obj));
> +}
> +
> +/**
> + * dma_resv_fences_get_rcu - RCU get single fence
> + * @fences: fences structure to get the fence reference from
> + *
> + * Get a reference to the single fence representing the synchronization.
> + */
> +static inline struct dma_fence *
> +dma_resv_fences_get_rcu(struct dma_resv_fences *fences)
> +{
> +	return dma_fence_get_rcu_safe(&fences->fence);
> +}
> +
>  /**
>   * dma_resv_get_list - get the reservation object's
>   * shared fence list, with update-side lock held
> -- 
> 2.17.1
>
Christian König Aug. 22, 2019, 8:23 a.m. UTC | #2
On 21.08.19 18:04, Daniel Vetter wrote:
> On Wed, Aug 21, 2019 at 02:31:44PM +0200, Christian König wrote:
>> [SNIP]
>> +	/* Try to drop the last reference */
>> +	if (!dma_fence_array_recycle(staged))
> Without an rcu barrier here you're not syncing to new clients at all.
> I don't think this works, and I expect that once you've readded all the
> barriers and retry loops we're back to seqlocks.

The key difference is that RCU users now use dma_fence_get_rcu_safe() to 
grab a reference to the current set of fences.

In other words the whole array is reference counted and RCU protected 
instead of each individual entry in the array.

This way you don't need the sequence count any more because you grab a 
reference to all of them at once and then can be sure that they don't 
change.

Regards,
Christian.

> -Daniel
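
For illustration, the reader side this argument relies on collapses to
a single reference grab, with the retry handled inside
dma_fence_get_rcu_safe(). A sketch, again assuming a hypothetical
"fences" member in struct dma_resv:

	struct dma_fence *fence;

	rcu_read_lock();
	/* One reference covers the whole set, no seqcount retry loop. */
	fence = dma_resv_fences_get_rcu(&obj->fences);
	rcu_read_unlock();

	if (fence) {
		/* May be a dma_fence_array wrapping all current fences. */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}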
Daniel Vetter Aug. 22, 2019, 1:02 p.m. UTC | #3
On Thu, Aug 22, 2019 at 10:23:29AM +0200, Christian König wrote:
> On 21.08.19 18:04, Daniel Vetter wrote:
> > On Wed, Aug 21, 2019 at 02:31:44PM +0200, Christian König wrote:
> > > [SNIP]
> > > +	/* Try to drop the last reference */
> > > +	if (!dma_fence_array_recycle(staged))
> > Without an rcu barrier here you're not syncing to new clients at all.
> > I don't think this works, and I expect that once you've readded all the
> > barriers and retry loops we're back to seqlocks.
> 
> The key difference is that RCU users now use dma_fence_get_rcu_safe() to
> grab a reference to the current set of fences.
> 
> In other words the whole array is reference counted and RCU protected
> instead of each individual entry in the array.
> 
> This way you don't need the sequence count any more because you grab a
> reference to all of them at once and then can be sure that they don't
> change.

Hm yeah ... I think there are still some users left that have an open-coded
rcu section though. But yeah if you can convince Chris that this is ok I
think it makes sense as an overall cleanup of the hand-rolled fences array
we have for shared fences. But I'd really like to untangle it from the
entire semantics discussion, since that seems entirely unrelated.
-Daniel
Christian König Aug. 22, 2019, 1:53 p.m. UTC | #4
On 22.08.19 15:02, Daniel Vetter wrote:
> On Thu, Aug 22, 2019 at 10:23:29AM +0200, Christian König wrote:
>> On 21.08.19 18:04, Daniel Vetter wrote:
>>> On Wed, Aug 21, 2019 at 02:31:44PM +0200, Christian König wrote:
>>>> [SNIP]
>>>> +	/* Try to drop the last reference */
>>>> +	if (!dma_fence_array_recycle(staged))
>>> Without an rcu barrier here you're not syncing to new clients at all.
>>> I don't think this works, and I expect that once you've readded all the
>>> barriers and retry loops we're back to seqlocks.
>> The key difference is that RCU users now use dma_fence_get_rcu_safe() to
>> grab a reference to the current set of fences.
>>
>> In other words the whole array is reference counted and RCU protected
>> instead of each individual entry in the array.
>>
>> This way you don't need the sequence count any more because you grab a
>> reference to all of them at once and then can be sure that they don't
>> change.
> Hm yeah ... I think there are still some users left that have an open-coded
> rcu section though. But yeah if you can convince Chris that this is ok I
> think it makes sense as an overall cleanup of the hand-rolled fences array
> we have for shared fences. But I'd really like to untangle it from the
> entire semantics discussion, since that seems entirely unrelated.

Yeah, agreed. Untangling that is a really good idea.

Going to send out the dma_fence_array as a replacement for shared fences 
separately first.

Christian.

> -Daniel

Patch

diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index d3a9a3bb15f0..83033b3e8521 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -33,6 +33,7 @@ 
  */
 
 #include <linux/dma-resv.h>
+#include <linux/dma-fence-array.h>
 #include <linux/export.h>
 
 /**
@@ -55,6 +56,226 @@  EXPORT_SYMBOL(reservation_seqcount_class);
 const char reservation_seqcount_string[] = "reservation_seqcount";
 EXPORT_SYMBOL(reservation_seqcount_string);
 
+static void dma_resv_fences_init(struct dma_resv_fences *fences)
+{
+	RCU_INIT_POINTER(fences->fence, NULL);
+	fences->staged = NULL;
+}
+
+static void dma_resv_fences_fini(struct dma_resv_fences *fences)
+{
+	/*
+	 * This object should be dead and all references must have
+	 * been released to it, so no need to be protected with rcu.
+	 */
+	dma_fence_put(rcu_dereference_protected(fences->fence, true));
+	dma_fence_array_free(fences->staged);
+}
+
+/**
+ * dma_resv_fences_reserve - allocate fence slots
+ * @fences: fences object where we need slots
+ * @num_fences: number of fence slots we need
+ *
+ * Make sure the staged dma_fence_array has free slots for at least
+ * @num_fences new fences in addition to all the existing ones.
+ *
+ * Returns -ENOMEM on allocation failure, 0 otherwise.
+ */
+int dma_resv_fences_reserve(struct dma_resv *obj,
+			    struct dma_resv_fences *fences,
+			    unsigned int num_fences)
+{
+	struct dma_fence *fence = dma_resv_fences_deref(obj, fences);
+	struct dma_fence_array *staged, *array;
+	unsigned int i;
+
+	array = fences->staged;
+	if (!array)
+		array = to_dma_fence_array(fence);
+
+	if (array)
+		num_fences += array->num_fences;
+	else if (fence)
+		num_fences += 1;
+
+	staged = fences->staged;
+	if (staged && dma_fence_array_max_fences(staged) >= num_fences)
+		return 0;
+
+	staged = dma_fence_array_alloc(num_fences, NULL);
+	if (!staged)
+		return -ENOMEM;
+
+	/* Copy over all fences from the old object */
+	if (array) {
+		for (i = 0; i < array->num_fences; ++i) {
+			struct dma_fence *f = array->fences[i];
+
+			staged->fences[i] = dma_fence_get(f);
+		}
+		staged->num_fences = array->num_fences;
+
+	} else if (fence) {
+		staged->fences[0] = dma_fence_get(fence);
+		staged->num_fences = 1;
+
+	} else {
+		staged->num_fences = 0;
+	}
+
+	dma_fence_array_free(fences->staged);
+	fences->staged = staged;
+
+	return 0;
+}
+EXPORT_SYMBOL(dma_resv_fences_reserve);
+
+/**
+ * dma_resv_fences_assign - set the singleton fence
+ * @fences: fences object to set the fence in
+ * @fence: singleton fence for the object
+ *
+ * Internal helper to assign the singleton fence without grabbing a reference.
+ * If the old fence is a dma_fence_array, try to recycle it.
+ */
+static void dma_resv_fences_assign(struct dma_resv *obj,
+				   struct dma_resv_fences *fences,
+				   struct dma_fence *fence)
+{
+	struct dma_fence_array *array, *staged;
+	unsigned int num_fences, i;
+	struct dma_fence *old;
+
+	old = dma_resv_fences_deref(obj, fences);
+	rcu_assign_pointer(fences->fence, fence);
+
+	dma_fence_array_free(fences->staged);
+	fences->staged = NULL;
+
+	/* Try to recycle the old fence array */
+	staged = to_dma_fence_array(old);
+	if (!staged)
+		goto drop_old;
+
+	array = to_dma_fence_array(fence);
+	if (array)
+		num_fences = array->num_fences;
+	else
+		num_fences = fence ? 1 : 0;
+
+	if (dma_fence_array_max_fences(staged) < num_fences)
+		goto drop_old;
+
+	/* Try to drop the last reference */
+	if (!dma_fence_array_recycle(staged))
+		return;
+
+	/* Make sure the staged array has the latest fences */
+	if (array) {
+		for (i = 0; i < array->num_fences; ++i) {
+			struct dma_fence *f = array->fences[i];
+
+			if (f == staged->fences[i])
+				continue;
+
+			dma_fence_put(staged->fences[i]);
+			staged->fences[i] = dma_fence_get(f);
+		}
+		for (; i < staged->num_fences; ++i)
+			dma_fence_put(staged->fences[i]);
+		staged->num_fences = array->num_fences;
+
+	} else if (fence) {
+		for (i = 0; i < staged->num_fences; ++i)
+			dma_fence_put(staged->fences[i]);
+		staged->fences[0] = dma_fence_get(fence);
+		staged->num_fences = 1;
+	} else {
+		for (i = 0; i < staged->num_fences; ++i)
+			dma_fence_put(staged->fences[i]);
+		staged->num_fences = 0;
+	}
+
+	fences->staged = staged;
+	return;
+
+drop_old:
+	dma_fence_put(old);
+}
+
+/**
+ * dma_resv_fences_set - set the singleton fence
+ * @fences: fences object to set the fence in
+ * @fence: singleton fence for the object
+ *
+ * Grabs a reference to the new fence and replaces the current singleton
+ * fence with it. If the old fence is a dma_fence_array, try to recycle it.
+ */
+void dma_resv_fences_set(struct dma_resv *obj,
+			 struct dma_resv_fences *fences,
+			 struct dma_fence *fence)
+{
+	dma_fence_get(fence);
+	dma_resv_fences_assign(obj, fences, fence);
+}
+EXPORT_SYMBOL(dma_resv_fences_set);
+
+/**
+ * dma_resv_fences_add - add a fence to the staged fence_array
+ * @fences: fences object to add the fence to
+ * @fence: fence to add
+ *
+ * Add a new fence to the staged fence_array.
+ */
+void dma_resv_fences_add(struct dma_resv_fences *fences,
+			 struct dma_fence *fence)
+{
+	struct dma_fence_array *staged = fences->staged;
+	struct dma_fence *old;
+	unsigned int i;
+
+#ifndef CONFIG_DEBUG_MUTEXES
+	for (i = 0; i < staged->num_fences; ++i) {
+		old = staged->fences[i];
+
+		if (old->context == fence->context ||
+		    dma_fence_is_signaled(old)) {
+			dma_fence_put(old);
+			goto replace;
+		}
+	}
+#endif
+
+	BUG_ON(staged->num_fences >= dma_fence_array_max_fences(staged));
+	i = staged->num_fences++;
+
+replace:
+	staged->fences[i] = dma_fence_get(fence);
+}
+EXPORT_SYMBOL(dma_resv_fences_add);
+
+/**
+ * dma_resv_fences_commit - commit the staged dma_fence_array
+ * @fences: fences object where the commit should happen
+ *
+ * Commit the fences staged in the dma_fence_array and make them visible to
+ * other threads.
+ */
+void dma_resv_fences_commit(struct dma_resv *obj,
+			    struct dma_resv_fences *fences)
+{
+	struct dma_fence_array *staged = fences->staged;
+
+	if (!staged || !staged->num_fences)
+		return;
+
+	fences->staged = NULL;
+	dma_fence_array_init(staged, dma_fence_context_alloc(1), 1, false);
+	dma_resv_fences_assign(obj, fences, &staged->base);
+}
+EXPORT_SYMBOL(dma_resv_fences_commit);
+
 /**
  * dma_resv_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 03b0f95682b0..c70f13fa6789 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -45,10 +45,33 @@ 
 #include <linux/seqlock.h>
 #include <linux/rcupdate.h>
 
+struct dma_resv;
+
 extern struct ww_class reservation_ww_class;
 extern struct lock_class_key reservation_seqcount_class;
 extern const char reservation_seqcount_string[];
 
+/**
+ * struct dma_resv_fences - fences inside a reservation object
+ * @fence: the current RCU protected singleton fence
+ * @staged: optional staged dma_fence_array to replace @fence
+ */
+struct dma_resv_fences {
+	struct dma_fence __rcu *fence;
+	struct dma_fence_array *staged;
+};
+
+int dma_resv_fences_reserve(struct dma_resv *obj,
+			    struct dma_resv_fences *fences,
+			    unsigned int num_fences);
+void dma_resv_fences_set(struct dma_resv *obj,
+			 struct dma_resv_fences *fences,
+			 struct dma_fence *fence);
+void dma_resv_fences_add(struct dma_resv_fences *fences,
+			 struct dma_fence *fence);
+void dma_resv_fences_commit(struct dma_resv *obj,
+			    struct dma_resv_fences *fences);
+
 /**
  * struct dma_resv_list - a list of shared fences
  * @rcu: for internal use
@@ -80,6 +103,32 @@  struct dma_resv {
 #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
+/**
+ * dma_resv_fences_deref - get singleton fence
+ * @obj: the reservation object
+ * @fences: the fences object
+ *
+ * Returns the singleton fence from a resv_fences object.
+ */
+static inline struct dma_fence *
+dma_resv_fences_deref(struct dma_resv *obj, struct dma_resv_fences *fences)
+{
+	return rcu_dereference_protected(fences->fence,
+					 dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_fences_get_rcu - RCU get single fence
+ * @fences: fences structure to get the fence reference from
+ *
+ * Get a reference to the single fence representing the synchronization.
+ */
+static inline struct dma_fence *
+dma_resv_fences_get_rcu(struct dma_resv_fences *fences)
+{
+	return dma_fence_get_rcu_safe(&fences->fence);
+}
+
 /**
  * dma_resv_get_list - get the reservation object's
  * shared fence list, with update-side lock held