
[RFC] drm/i915: Add sync framework support to execbuff IOCTL

Message ID 55966F4D.7000500@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Tvrtko Ursulin July 3, 2015, 11:17 a.m. UTC
On 07/02/2015 04:55 PM, Chris Wilson wrote:
> It would be nice if we could reuse one seqno both for internal/external
> fences. If you need to expose a fence ordering within a timeline that is
> based on the creation stamp rather than execution stamp, it seems like
> we could just add such a stamp when creating the sync_pt and not worry
> about its relationship to the execution seqno.
> 
> Doing so does expose that requests are reordered to userspace since the
> signalling timeline is not the same as userspace's ordered timeline. Not
> sure if that is a problem or not.
> 
> Afaict the sync uapi is based on waiting for all of a set of fences to
> retire. It doesn't seem to rely on fence ordering (that is knowing that
> fence A will signal before fence B so it need only wait on fence B).
> 
> Here's hoping that we can have both simplicity and efficiency...

Jumping in without a perfect understanding of everything here - but the
timeline business has always confused me. There is nothing in the
uapi which needs it afaics, and iirc there was some discussion at the time
Jesse floated his patches that it could be removed. Based on that, when I
squashed his patches and ported them on top of John's request-to-fence
conversion it ended up looking something like the below (manually edited a
bit to be less noisy, with some prep patches omitted):

This implements the ioctl-based uapi and indeed seqnos are not actually
used in waits. So is this insufficient for some reason? (Other than that
it does not implement the input fence side of things.)
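
For illustration only (not part of the posted patch), this is roughly how a
userspace client might consume the out-fence side of this uapi. It assumes
the I915_EXEC_FENCE_OUT flag and the fd returned in rsvd2 as defined in the
diff below, the IOW to IOWR change so rsvd2 is copied back to userspace, and
that the sync fd reports POLLIN once the fence signals; the helper name is
made up.

/* Hypothetical userspace usage of the out-fence uapi sketched in this RFC. */
#include <poll.h>
#include <unistd.h>
#include <xf86drm.h>
#include <i915_drm.h>

int submit_and_wait(int drm_fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
	struct pollfd pfd;
	int fence_fd, ret;

	execbuf->flags |= I915_EXEC_FENCE_OUT;	/* ask for an out-fence fd */

	ret = drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
	if (ret)
		return ret;

	fence_fd = execbuf->rsvd2;	/* fd installed by the kernel on success */

	pfd.fd = fence_fd;
	pfd.events = POLLIN;		/* sync fds signal POLLIN when the fence fires */
	poll(&pfd, 1, -1);		/* block until the batch has completed */

	close(fence_fd);		/* drop the userspace reference */
	return 0;
}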

Comments

Daniel Vetter July 6, 2015, 9:29 a.m. UTC | #1
On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
> 
> On 07/02/2015 04:55 PM, Chris Wilson wrote:
> > It would be nice if we could reuse one seqno both for internal/external
> > fences. If you need to expose a fence ordering within a timeline that is
> > based on the creation stamp rather than execution stamp, it seems like
> > we could just add such a stamp when creating the sync_pt and not worry
> > about its relationship to the execution seqno.
> > 
> > Doing so does expose that requests are reordered to userspace since the
> > signalling timeline is not the same as userspace's ordered timeline. Not
> > sure if that is a problem or not.
> > 
> > Afaict the sync uapi is based on waiting for all of a set of fences to
> > retire. It doesn't seem to rely on fence ordering (that is knowing that
> > fence A will signal before fence B so it need only wait on fence B).
> > 
> > Here's hoping that we can have both simplicity and efficiency...
> 
> Jumping in with not even perfect understanding of everything here - but
> timeline business has always been confusing me. There is nothing in the 
> uapi which needs it afaics and iirc there was some discussion at the time
> Jesse floated his patches that it can be removed. Based on that when I
> squashed his patches and ported them on top of John's request to fence
> conversion it ended up something like the below (manually edited a bit to
> be less noisy and some prep patches omitted):
> 
> This implements the ioctl based uapi and indeed seqnos are not actually
> used in waits. So is this insufficient for some reason? (Other that it
> does not implement the input fence side of things.)

Yeah, android syncpt on top of struct fence embedded in an i915 request is
what I'd have expected.
> 
> diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
> index 74acca9..07f6ad9 100644
> --- a/drivers/gpu/drm/i915/Kconfig
> +++ b/drivers/gpu/drm/i915/Kconfig
> @@ -71,3 +71,17 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
>  	  option changes the default for that module option.
>  
>  	  If in doubt, say "N".
> +
> +config DRM_I915_SYNC
> +	bool "Enable explicit sync support"
> +	depends on DRM_I915
> +	default y if STAGING
> +	depends on STAGING
> +	select ANDROID
> +	select SYNC
> +	help

No Kconfig for userspace ABI, please. Yes, this means we need to destage
android syncpts first. The problem I see there is that apparently google
is still changing the uabi a lot, and that's a no-go for upstream. And it
needs to be cleaned up to work more seamlessly with struct fence (i.e.
anything that's missing there should be moved to struct fence, drivers
should only use fd_to_fence and fence_to_fd functions similar to dma-buf).

And we don't have anyone except android using syncpts, so a bit of trouble
finding userspace vehicles for this. We probably need agreement from
google to be happy with a frozen abi for syncpts first ...
-Daniel
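
As a rough sketch of the fd_to_fence direction mentioned above (which is also
the input-fence side the RFC below leaves unimplemented): the staging API
already provides the lookup. The helper name below is hypothetical;
sync_fence_fdget(), sync_fence_wait() and sync_fence_put() are the existing
staging helpers.

/*
 * Hypothetical sketch only: how execbuffer might consume an input sync
 * fence fd on top of the staging sync API. i915_wait_on_sync_fd() is a
 * made-up name; the sync_fence_* calls are the existing staging helpers.
 */
#include "../../../staging/android/sync.h"

static int i915_wait_on_sync_fd(int fd)
{
	struct sync_fence *in_fence;
	int ret;

	in_fence = sync_fence_fdget(fd);	/* NULL if fd is not a sync fence */
	if (!in_fence)
		return -EINVAL;

	/*
	 * Simplest possible behaviour: block until the fence signals. A real
	 * implementation would queue the dependency on the ring/scheduler
	 * instead of stalling the ioctl.
	 */
	ret = sync_fence_wait(in_fence, -1);

	sync_fence_put(in_fence);
	return ret;
}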

> +	  Choose this option to enable Android native sync support and the
> +	  corresponding i915 driver code to expose it.  Slightly increases
> +	  driver size and pulls in sync support from staging.
> +
> +	  If in doubt, say "Y".
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index db21c93..93a3bc0 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -91,6 +91,9 @@ i915-y += i915_vgpu.o
>  # legacy horrors
>  i915-y += i915_dma.o
>  
> +# sync points
> +i915-$(CONFIG_DRM_I915_SYNC)	+= i915_sync.o
> +
>  obj-$(CONFIG_DRM_I915)  += i915.o
>  
>  CFLAGS_i915_trace_points.o := -I$(src)
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 3ef3997..2cf4d3f 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2753,6 +2753,26 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
>  void i915_gem_free_object(struct drm_gem_object *obj);
>  void i915_gem_vma_destroy(struct i915_vma *vma);
>  
> +/* i915_sync.c */
> +struct sync_fence;
> +
> +#ifdef CONFIG_DRM_I915_SYNC
> +int i915_fence_ring_fill_driver_data(struct fence *fence, void *data, int size);
> +void i915_fence_ring_value_str(struct fence *fence, char *str, int size);
> +void i915_fence_ring_timeline_value_str(struct fence *fence, char *str,
> +					int size);
> +
> +int i915_create_sync_fence_ring(struct drm_i915_gem_request *req,
> +				struct sync_fence **sync_fence, int *fence_fd);
> +#else
> +static inline
> +int i915_create_sync_fence_ring(struct drm_i915_gem_request *req,
> +				struct sync_fence **sync_fence, int *fence_fd)
> +{
> +	return -ENODEV;
> +}
> +#endif
> +
>  #define PIN_MAPPABLE 0x1
>  #define PIN_NONBLOCK 0x2
>  #define PIN_GLOBAL 0x4
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 560d244..a04853c 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2633,7 +2633,7 @@ static const char *i915_gem_request_get_timeline_name(struct fence *req_fence)
>  	return req->ring->name;
>  }
>  
> -#if 0
> +#if CONFIG_DRM_I915_SYNC
>  static bool i915_gem_request_is_signaled(struct fence *req_fence)
>  {
>  	struct drm_i915_gem_request *req = container_of(req_fence,
> @@ -2770,12 +2770,14 @@ static const struct fence_ops i915_gem_request_fops = {
>  	.get_driver_name	= i915_gem_request_get_driver_name,
>  	.get_timeline_name	= i915_gem_request_get_timeline_name,
>  	.enable_signaling	= i915_gem_request_enable_signaling,
>  	.wait			= fence_default_wait,
>  	.release		= i915_gem_request_free,
> +#if CONFIG_DRM_I915_SYNC
> +	.signaled		= i915_gem_request_is_signaled,
> +	.fill_driver_data	= i915_fence_ring_fill_driver_data,
> +	.fence_value_str	= i915_fence_ring_value_str,
> +	.timeline_value_str	= i915_fence_ring_timeline_value_str
> +#endif
>  };
>  
>  int _i915_gem_request_alloc(struct intel_engine_cs *ring,
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 182c730..e6342ac 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -32,6 +32,7 @@
>  #include "i915_trace.h"
>  #include "intel_drv.h"
>  #include <linux/dma_remapping.h>
> +#include "../../../staging/android/sync.h"
>  
>  #define  __EXEC_OBJECT_HAS_PIN (1<<31)
>  #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
> @@ -1417,6 +1418,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  	u32 dispatch_flags;
>  	int ret;
>  	bool need_relocs;
> +	struct sync_fence *sync_fence = NULL;
> +	int fence_fd = -1;
>  
>  	if (!i915_gem_check_execbuffer(args))
>  		return -EINVAL;
> @@ -1610,6 +1613,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  	if (ret)
>  		goto err_batch_unpin;
>  
> +	if (args->flags & I915_EXEC_FENCE_OUT) {
> +		ret = i915_create_sync_fence_ring(params->request, &sync_fence,
> +						  &fence_fd);
> +		if (ret)
> +			goto err_batch_unpin;
> +	}
> +
>  	ret = i915_gem_request_add_to_client(params->request, file);
>  	if (ret)
>  		goto err_batch_unpin;
> @@ -1628,6 +1638,26 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  	params->ctx                     = ctx;
>  
>  	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
> +	if (ret)
> +		goto err_submit;
> +
> +	if (sync_fence) {
> +		sync_fence_install(sync_fence, fence_fd);
> +		args->rsvd2 = fence_fd;
> +		sync_fence = NULL;
> +	}
> +
> +err_submit:
> +	if (sync_fence) {
> +		/*
> +		 * We are under the struct mutex here and sync fence we
> +		 * created will attempt to grab it in its destructor.
> +		 * Therefore remove the lock before unreferencing.
> +		 */
> +		sync_fence->lock = NULL;
> +		fput(sync_fence->file);
> +		put_unused_fd(fence_fd);
> +	}
>  
>  err_batch_unpin:
>  	/*
> diff --git a/drivers/gpu/drm/i915/i915_sync.c b/drivers/gpu/drm/i915/i915_sync.c
> new file mode 100644
> index 0000000..1a50610
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/i915_sync.c
> @@ -0,0 +1,106 @@
> +/*
> + * Copyright © 2013-2015 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + * Authors:
> + *    Jesse Barnes <jbarnes@virtuousgeek.org>
> + *    Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> + *
> + */
> +
> +#include <linux/fs.h>
> +#include <linux/fence.h>
> +#include "../../../staging/android/sync.h"
> +#include "i915_drv.h"
> +
> +/*
> + * i915 Android native sync fences.
> + *
> + * We implement sync points in terms of i915 seqnos. They're exposed through
> + * the DRM_IOCTL_I915_GEM_EXECBUFFER2 ioctl, and can be mixed and matched
> + * with other Android timelines and aggregated into sync_fences, etc.
> + *
> + * TODO:
> + *   * Display engine fences.
> + *   * Extend driver data with context id / ring id.
> + */
> +
> +int i915_fence_ring_fill_driver_data(struct fence *fence, void *data, int size)
> +{
> +	struct drm_i915_gem_request *req = container_of(fence, typeof(*req),
> +							fence);
> +
> +	if (size < sizeof(req->seqno))
> +		return -ENOMEM;
> +
> +	memcpy(data, &req->seqno, sizeof(req->seqno));
> +
> +	return sizeof(req->seqno);
> +}
> +
> +void i915_fence_ring_value_str(struct fence *fence, char *str, int size)
> +{
> +	struct drm_i915_gem_request *req = container_of(fence, typeof(*req),
> +							fence);
> +
> +	snprintf(str, size, "%u", req->seqno);
> +}
> +
> +void i915_fence_ring_timeline_value_str(struct fence *fence, char *str,
> +					int size)
> +{
> +	struct drm_i915_gem_request *req = container_of(fence, typeof(*req),
> +							fence);
> +	struct intel_engine_cs *ring = req->ring;
> +
> +	snprintf(str, size, "%u", ring->get_seqno(ring, false));
> +}
> +
> +int i915_create_sync_fence_ring(struct drm_i915_gem_request *req,
> +				struct sync_fence **sync_fence, int *fence_fd)
> +{
> +	struct drm_device *dev = req->i915->dev;
> +	struct intel_engine_cs *ring = req->ring;
> +	struct sync_fence *sfence;
> +	char ring_name[6] = "ring0";
> +	int fd;
> +
> +	fd = get_unused_fd_flags(O_CLOEXEC);
> +	if (fd < 0) {
> +		DRM_DEBUG("No available file descriptors!\n");
> +		return fd;
> +	}
> +
> +	ring_name[4] += ring->id;
> +	sfence = sync_fence_create_dma(ring_name, &req->fence,
> +				       &dev->struct_mutex);
> +	if (!sfence) {
> +		put_unused_fd(fd);
> +		return -ENOMEM;
> +	}
> +
> +	*sync_fence = sfence;
> +	*fence_fd = fd;
> +
> +	fence_get(&req->fence);
> +
> +	return 0;
> +}
> diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
> index 4851d66..2522f78 100644
> --- a/include/uapi/drm/i915_drm.h
> +++ b/include/uapi/drm/i915_drm.h
> @@ -246,7 +246,7 @@ typedef struct _drm_i915_sarea {
>  #define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
>  #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
>  #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
> -#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
> +#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
>  #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
>  #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
>  #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
> @@ -722,7 +722,7 @@ struct drm_i915_gem_execbuffer2 {
>  #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
>  	__u64 flags;
>  	__u64 rsvd1; /* now used for context info */
> -	__u64 rsvd2;
> +	__u64 rsvd2; /* now used for fence fd */
>  };
>  
>  /** Resets the SO write offset registers for transform feedback on gen7. */
> @@ -760,7 +760,9 @@ struct drm_i915_gem_execbuffer2 {
>  #define I915_EXEC_BSD_RING1		(1<<13)
>  #define I915_EXEC_BSD_RING2		(2<<13)
>  
> -#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
> +#define I915_EXEC_FENCE_OUT		(1<<15)
> +
> +#define __I915_EXEC_UNKNOWN_FLAGS -(1<<16)
>  
>  #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
>  #define i915_execbuffer2_set_context_id(eb2, context) \
> -- 
> 2.4.0
> 
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
John Harrison July 6, 2015, 12:58 p.m. UTC | #2
On 06/07/2015 10:29, Daniel Vetter wrote:
> On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
>> On 07/02/2015 04:55 PM, Chris Wilson wrote:
>>> It would be nice if we could reuse one seqno both for internal/external
>>> fences. If you need to expose a fence ordering within a timeline that is
>>> based on the creation stamp rather than execution stamp, it seems like
>>> we could just add such a stamp when creating the sync_pt and not worry
>>> about its relationship to the execution seqno.
>>>
>>> Doing so does expose that requests are reordered to userspace since the
>>> signalling timeline is not the same as userspace's ordered timeline. Not
>>> sure if that is a problem or not.
>>>
>>> Afaict the sync uapi is based on waiting for all of a set of fences to
>>> retire. It doesn't seem to rely on fence ordering (that is knowing that
>>> fence A will signal before fence B so it need only wait on fence B).
>>>
>>> Here's hoping that we can have both simplicity and efficiency...
>> Jumping in with not even perfect understanding of everything here - but
>> timeline business has always been confusing me. There is nothing in the
>> uapi which needs it afaics and iirc there was some discussion at the time
>> Jesse floated his patches that it can be removed. Based on that when I
>> squashed his patches and ported them on top of John's request to fence
>> conversion it ended up something like the below (manually edited a bit to
>> be less noisy and some prep patches omitted):
>>
>> This implements the ioctl based uapi and indeed seqnos are not actually
>> used in waits. So is this insufficient for some reason? (Other that it
>> does not implement the input fence side of things.)
> Yeah android syncpt on top of struct fence embedded int i915 request is
> what I'd have expected.
The thing I'm not happy with in that plan is that it leaves the kernel
driver at the mercy of user land applications. If we return a fence
object to user land via a file descriptor (or indeed any other
mechanism) then that fence object must be kept alive until user land
closes the file. If the fence object is the one embedded within our
request structure then that means user land is effectively pinning our
request structure. Given that more and more stuff is being attached to
the request, that could be a fair bit of memory tied up that we can do
nothing about - e.g. if a rogue/buggy application requests a fence be
returned for every batch buffer submitted but never closes them.
Whereas, if we go the route of a separate fence object specifically for
user land then they can leak them like a sieve and we won't really care
so much.


>> diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
>> index 74acca9..07f6ad9 100644
>> --- a/drivers/gpu/drm/i915/Kconfig
>> +++ b/drivers/gpu/drm/i915/Kconfig
>> @@ -71,3 +71,17 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
>>   	  option changes the default for that module option.
>>   
>>   	  If in doubt, say "N".
>> +
>> +config DRM_I915_SYNC
>> +	bool "Enable explicit sync support"
>> +	depends on DRM_I915
>> +	default y if STAGING
>> +	depends on STAGING
>> +	select ANDROID
>> +	select SYNC
>> +	help
> No Kconfig for userspace ABI please. Yes this means we need to destage
> android syncpts first.

There is already a CONFIG_SYNC flag that wraps up all the existing sync
code in the staging branch. There's not a lot we can do about that, is
there? We have to at least wrap the sync-specific code in the i915
driver with '#if CONFIG_SYNC', otherwise it won't compile.


> The problem I see there is that apparently google
> is still changing the uabi a lot, and that's a no-go for upstream. And it
> needs to be cleaned up to work more seamlessly with struct fence (i.e.
> anything that's missing there should be moved to struct fence, drivers
> should only use fd_to_fence and fenct_to_fd functions similar to dma-buf).

Are Google changing it or is it upstream that is changing it? The only
changes to staging/android/sync.c have been a few minor bug fixes and
Maarten Lankhorst's conversion to use struct fence, which was back in
July last year.



> And we don't have anyone except android using syncpts, so a bit a trouble
> with finding userspace vehicles for this. We probably need agreement from
> google to be happy with a frozen abi for syncpts first ...
> -Daniel

I believe Jesse is wanting to use it for his work.


John.
Daniel Vetter July 6, 2015, 1:59 p.m. UTC | #3
On Mon, Jul 06, 2015 at 01:58:25PM +0100, John Harrison wrote:
> On 06/07/2015 10:29, Daniel Vetter wrote:
> >On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
> >>On 07/02/2015 04:55 PM, Chris Wilson wrote:
> >>>It would be nice if we could reuse one seqno both for internal/external
> >>>fences. If you need to expose a fence ordering within a timeline that is
> >>>based on the creation stamp rather than execution stamp, it seems like
> >>>we could just add such a stamp when creating the sync_pt and not worry
> >>>about its relationship to the execution seqno.
> >>>
> >>>Doing so does expose that requests are reordered to userspace since the
> >>>signalling timeline is not the same as userspace's ordered timeline. Not
> >>>sure if that is a problem or not.
> >>>
> >>>Afaict the sync uapi is based on waiting for all of a set of fences to
> >>>retire. It doesn't seem to rely on fence ordering (that is knowing that
> >>>fence A will signal before fence B so it need only wait on fence B).
> >>>
> >>>Here's hoping that we can have both simplicity and efficiency...
> >>Jumping in with not even perfect understanding of everything here - but
> >>timeline business has always been confusing me. There is nothing in the
> >>uapi which needs it afaics and iirc there was some discussion at the time
> >>Jesse floated his patches that it can be removed. Based on that when I
> >>squashed his patches and ported them on top of John's request to fence
> >>conversion it ended up something like the below (manually edited a bit to
> >>be less noisy and some prep patches omitted):
> >>
> >>This implements the ioctl based uapi and indeed seqnos are not actually
> >>used in waits. So is this insufficient for some reason? (Other that it
> >>does not implement the input fence side of things.)
> >Yeah android syncpt on top of struct fence embedded int i915 request is
> >what I'd have expected.
> The thing I'm not happy with in that plan is that it leaves the kernel
> driver at the mercy of user land applications. If we return a fence object
> to user land via a file descriptor (or indeed any other mechanism) then that
> fence object must be locked until user land closes the file. If the fence
> object is the one embedded within our request structure then that means user
> land is effectively locking our request structure. Given that more and more
> stuff is being attached to the request, that could be a fair bit of memory
> tied up that we can do nothing about. E.g. if a rogue/buggy application
> requests a fence be returned for every batch buffer submitted but never
> closes them. Whereas, if we go the route of a separate fence object
> specifically for user land then they can leak them like a sieve and we won't
> really care so much.

Userspace can exhaust kernel allocations, that's nothing new. And if we
keep it a separate object, userspace simply needs to leak a few more fence
fds than if there's a bit more data attached to each one.

The solution to this problem is to have a mem cgroup limit set. No need to
complicate our kernel code.

> >>diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
> >>index 74acca9..07f6ad9 100644
> >>--- a/drivers/gpu/drm/i915/Kconfig
> >>+++ b/drivers/gpu/drm/i915/Kconfig
> >>@@ -71,3 +71,17 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
> >>  	  option changes the default for that module option.
> >>  	  If in doubt, say "N".
> >>+
> >>+config DRM_I915_SYNC
> >>+	bool "Enable explicit sync support"
> >>+	depends on DRM_I915
> >>+	default y if STAGING
> >>+	depends on STAGING
> >>+	select ANDROID
> >>+	select SYNC
> >>+	help
> >No Kconfig for userspace ABI please. Yes this means we need to destage
> >android syncpts first.
> 
> There is already a CONFIG_SYNC flag that wraps up all the existing sync code
> in the staging branch. There's not a lot we can do about that is there? We
> have to at least wrap the sync specific code in the i915 driver with '#if
> CONFIG_SYNC' otherwise it won't compile.

User-settable CONFIG_SYNC is one of these bits we need to fix up when
de-staging - it should be an internal variable which is selected by i915,
like all the other optional kernel services we use.

> >The problem I see there is that apparently google
> >is still changing the uabi a lot, and that's a no-go for upstream. And it
> >needs to be cleaned up to work more seamlessly with struct fence (i.e.
> >anything that's missing there should be moved to struct fence, drivers
> >should only use fd_to_fence and fenct_to_fd functions similar to dma-buf).
> 
> Are Google changing it or is it upstream that are changing it? The only
> changes to android/staging/sync.c have been a few minor bug fixes and
> Maarten Lankhorst's conversion to use struct fence which was back in July
> last year.

Destaging android syncpt will probably require a few changes, but more
importantly it will freeze the abi. If we do that effort and google ignores
it, it's fairly pointless (as long as android is the only serious user of
syncpts).

> >And we don't have anyone except android using syncpts, so a bit a trouble
> >with finding userspace vehicles for this. We probably need agreement from
> >google to be happy with a frozen abi for syncpts first ...
> >-Daniel
> 
> I believe Jesse is wanting to use it for his work.

Yes, I know, but afaik it's also a long way off, which means it's not useful
as an open-source demonstration vehicle, unfortunately.
-Daniel
John Harrison July 6, 2015, 2:26 p.m. UTC | #4
On 06/07/2015 14:59, Daniel Vetter wrote:
> On Mon, Jul 06, 2015 at 01:58:25PM +0100, John Harrison wrote:
>> On 06/07/2015 10:29, Daniel Vetter wrote:
>>> On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
>>>> On 07/02/2015 04:55 PM, Chris Wilson wrote:
>>>>> It would be nice if we could reuse one seqno both for internal/external
>>>>> fences. If you need to expose a fence ordering within a timeline that is
>>>>> based on the creation stamp rather than execution stamp, it seems like
>>>>> we could just add such a stamp when creating the sync_pt and not worry
>>>>> about its relationship to the execution seqno.
>>>>>
>>>>> Doing so does expose that requests are reordered to userspace since the
>>>>> signalling timeline is not the same as userspace's ordered timeline. Not
>>>>> sure if that is a problem or not.
>>>>>
>>>>> Afaict the sync uapi is based on waiting for all of a set of fences to
>>>>> retire. It doesn't seem to rely on fence ordering (that is knowing that
>>>>> fence A will signal before fence B so it need only wait on fence B).
>>>>>
>>>>> Here's hoping that we can have both simplicity and efficiency...
>>>> Jumping in with not even perfect understanding of everything here - but
>>>> timeline business has always been confusing me. There is nothing in the
>>>> uapi which needs it afaics and iirc there was some discussion at the time
>>>> Jesse floated his patches that it can be removed. Based on that when I
>>>> squashed his patches and ported them on top of John's request to fence
>>>> conversion it ended up something like the below (manually edited a bit to
>>>> be less noisy and some prep patches omitted):
>>>>
>>>> This implements the ioctl based uapi and indeed seqnos are not actually
>>>> used in waits. So is this insufficient for some reason? (Other that it
>>>> does not implement the input fence side of things.)
>>> Yeah android syncpt on top of struct fence embedded int i915 request is
>>> what I'd have expected.
>> The thing I'm not happy with in that plan is that it leaves the kernel
>> driver at the mercy of user land applications. If we return a fence object
>> to user land via a file descriptor (or indeed any other mechanism) then that
>> fence object must be locked until user land closes the file. If the fence
>> object is the one embedded within our request structure then that means user
>> land is effectively locking our request structure. Given that more and more
>> stuff is being attached to the request, that could be a fair bit of memory
>> tied up that we can do nothing about. E.g. if a rogue/buggy application
>> requests a fence be returned for every batch buffer submitted but never
>> closes them. Whereas, if we go the route of a separate fence object
>> specifically for user land then they can leak them like a sieve and we won't
>> really care so much.
> Userspace can exhaust kernel allocations, that's nothing new. And if we
> keep it userspace simply needs to leak a few more fence fds than if
> there's a bit more data attached to it.
>
> The solution to this problem is to have a mem cgroup limit set. No need to
> complicate our kernel code.

There is still the extra complication that request unreferencing cannot 
require any kind of mutex lock if we are allowing it to happen from 
outside of the driver. That means the unreference callback must move the 
request to a 'please clean me later' list, schedule a worker thread to 
run, and thus do the clean up asynchronously.
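
Not part of the posted patch - just a minimal sketch of the "clean me later"
scheme described above, assuming hypothetical request_free_list and
request_free_work members on drm_i915_private and a free_node member on the
request; the existing i915_gem_request_free() stands in for the real
teardown.

/*
 * Sketch only: deferred request cleanup. The request_free_list /
 * request_free_work / free_node members are hypothetical additions.
 */
#include <linux/llist.h>
#include <linux/workqueue.h>
#include "i915_drv.h"

static void i915_request_free_worker(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, request_free_work);
	struct llist_node *list = llist_del_all(&dev_priv->request_free_list);
	struct drm_i915_gem_request *req, *next;

	/*
	 * The real teardown still happens under struct_mutex, but now in
	 * process context owned by the driver rather than in whoever
	 * dropped the final fence reference.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);
	llist_for_each_entry_safe(req, next, list, free_node)
		i915_gem_request_free(&req->fence);	/* existing release path */
	mutex_unlock(&dev_priv->dev->struct_mutex);
}

/*
 * fence_ops.release replacement: may run from outside the driver without
 * struct_mutex held, so just queue the request and kick the worker.
 */
static void i915_gem_request_release_deferred(struct fence *fence)
{
	struct drm_i915_gem_request *req =
		container_of(fence, typeof(*req), fence);
	struct drm_i915_private *dev_priv = req->i915;

	llist_add(&req->free_node, &dev_priv->request_free_list);
	schedule_work(&dev_priv->request_free_work);
}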


>>>> diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
>>>> index 74acca9..07f6ad9 100644
>>>> --- a/drivers/gpu/drm/i915/Kconfig
>>>> +++ b/drivers/gpu/drm/i915/Kconfig
>>>> @@ -71,3 +71,17 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
>>>>   	  option changes the default for that module option.
>>>>   	  If in doubt, say "N".
>>>> +
>>>> +config DRM_I915_SYNC
>>>> +	bool "Enable explicit sync support"
>>>> +	depends on DRM_I915
>>>> +	default y if STAGING
>>>> +	depends on STAGING
>>>> +	select ANDROID
>>>> +	select SYNC
>>>> +	help
>>> No Kconfig for userspace ABI please. Yes this means we need to destage
>>> android syncpts first.
>> There is already a CONFIG_SYNC flag that wraps up all the existing sync code
>> in the staging branch. There's not a lot we can do about that is there? We
>> have to at least wrap the sync specific code in the i915 driver with '#if
>> CONFIG_SYNC' otherwise it won't compile.
> User-settable CONFIG_SYNC is one of these bits we need to fix up when
> de-staging - it should be an internal variable which is selected by i915,
> like all the other optional kernel services we use.
>
>>> The problem I see there is that apparently google
>>> is still changing the uabi a lot, and that's a no-go for upstream. And it
>>> needs to be cleaned up to work more seamlessly with struct fence (i.e.
>>> anything that's missing there should be moved to struct fence, drivers
>>> should only use fd_to_fence and fenct_to_fd functions similar to dma-buf).
>> Are Google changing it or is it upstream that are changing it? The only
>> changes to android/staging/sync.c have been a few minor bug fixes and
>> Maarten Lankhorst's conversion to use struct fence which was back in July
>> last year.
> destaging android syncpt will probably require a few changes, but more so
> it will freeze the abi. If we do that effort and google ignores it it's
> fairly pointless (as long as android is the only serious user of syncpts).
>
>>> And we don't have anyone except android using syncpts, so a bit a trouble
>>> with finding userspace vehicles for this. We probably need agreement from
>>> google to be happy with a frozen abi for syncpts first ...
>>> -Daniel
>> I believe Jesse is wanting to use it for his work.
> Yes I know, but afaik it's also a long way off. Which means not useful as
> an open-source demonstration vehicle unfortunately.
> -Daniel
Daniel Vetter July 6, 2015, 2:41 p.m. UTC | #5
On Mon, Jul 06, 2015 at 03:26:12PM +0100, John Harrison wrote:
> On 06/07/2015 14:59, Daniel Vetter wrote:
> >On Mon, Jul 06, 2015 at 01:58:25PM +0100, John Harrison wrote:
> >>On 06/07/2015 10:29, Daniel Vetter wrote:
> >>>On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
> >>>>On 07/02/2015 04:55 PM, Chris Wilson wrote:
> >>>>>It would be nice if we could reuse one seqno both for internal/external
> >>>>>fences. If you need to expose a fence ordering within a timeline that is
> >>>>>based on the creation stamp rather than execution stamp, it seems like
> >>>>>we could just add such a stamp when creating the sync_pt and not worry
> >>>>>about its relationship to the execution seqno.
> >>>>>
> >>>>>Doing so does expose that requests are reordered to userspace since the
> >>>>>signalling timeline is not the same as userspace's ordered timeline. Not
> >>>>>sure if that is a problem or not.
> >>>>>
> >>>>>Afaict the sync uapi is based on waiting for all of a set of fences to
> >>>>>retire. It doesn't seem to rely on fence ordering (that is knowing that
> >>>>>fence A will signal before fence B so it need only wait on fence B).
> >>>>>
> >>>>>Here's hoping that we can have both simplicity and efficiency...
> >>>>Jumping in with not even perfect understanding of everything here - but
> >>>>timeline business has always been confusing me. There is nothing in the
> >>>>uapi which needs it afaics and iirc there was some discussion at the time
> >>>>Jesse floated his patches that it can be removed. Based on that when I
> >>>>squashed his patches and ported them on top of John's request to fence
> >>>>conversion it ended up something like the below (manually edited a bit to
> >>>>be less noisy and some prep patches omitted):
> >>>>
> >>>>This implements the ioctl based uapi and indeed seqnos are not actually
> >>>>used in waits. So is this insufficient for some reason? (Other that it
> >>>>does not implement the input fence side of things.)
> >>>Yeah android syncpt on top of struct fence embedded int i915 request is
> >>>what I'd have expected.
> >>The thing I'm not happy with in that plan is that it leaves the kernel
> >>driver at the mercy of user land applications. If we return a fence object
> >>to user land via a file descriptor (or indeed any other mechanism) then that
> >>fence object must be locked until user land closes the file. If the fence
> >>object is the one embedded within our request structure then that means user
> >>land is effectively locking our request structure. Given that more and more
> >>stuff is being attached to the request, that could be a fair bit of memory
> >>tied up that we can do nothing about. E.g. if a rogue/buggy application
> >>requests a fence be returned for every batch buffer submitted but never
> >>closes them. Whereas, if we go the route of a separate fence object
> >>specifically for user land then they can leak them like a sieve and we won't
> >>really care so much.
> >Userspace can exhaust kernel allocations, that's nothing new. And if we
> >keep it userspace simply needs to leak a few more fence fds than if
> >there's a bit more data attached to it.
> >
> >The solution to this problem is to have a mem cgroup limit set. No need to
> >complicate our kernel code.
> 
> There is still the extra complication that request unreferencing cannot
> require any kind of mutex lock if we are allowing it to happen from outside
> of the driver. That means the unreference callback must move the request to
> a 'please clean me later' list, schedule a worker thread to run, and thus do
> the clean up asynchronously.

Yeah, the struct_mutex locking design is terrible, and we'll pay the price
for that dearly until it's eventually fixed up. We can optimize it at least
with a mutex_trylock.

Or we just fix up request tracking to not require struct_mutex, which might
be better. All the references we hold onto the request should point one
way, with no weak references going the other direction, so this should be
possible.
-Daniel
Tvrtko Ursulin July 6, 2015, 2:46 p.m. UTC | #6
On 07/06/2015 03:26 PM, John Harrison wrote:
> On 06/07/2015 14:59, Daniel Vetter wrote:
>> On Mon, Jul 06, 2015 at 01:58:25PM +0100, John Harrison wrote:
>>> On 06/07/2015 10:29, Daniel Vetter wrote:
>>>> On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
>>>>> On 07/02/2015 04:55 PM, Chris Wilson wrote:
>>>>>> It would be nice if we could reuse one seqno both for
>>>>>> internal/external
>>>>>> fences. If you need to expose a fence ordering within a timeline
>>>>>> that is
>>>>>> based on the creation stamp rather than execution stamp, it seems
>>>>>> like
>>>>>> we could just add such a stamp when creating the sync_pt and not
>>>>>> worry
>>>>>> about its relationship to the execution seqno.
>>>>>>
>>>>>> Doing so does expose that requests are reordered to userspace
>>>>>> since the
>>>>>> signalling timeline is not the same as userspace's ordered
>>>>>> timeline. Not
>>>>>> sure if that is a problem or not.
>>>>>>
>>>>>> Afaict the sync uapi is based on waiting for all of a set of
>>>>>> fences to
>>>>>> retire. It doesn't seem to rely on fence ordering (that is knowing
>>>>>> that
>>>>>> fence A will signal before fence B so it need only wait on fence B).
>>>>>>
>>>>>> Here's hoping that we can have both simplicity and efficiency...
>>>>> Jumping in with not even perfect understanding of everything here -
>>>>> but
>>>>> timeline business has always been confusing me. There is nothing in
>>>>> the
>>>>> uapi which needs it afaics and iirc there was some discussion at
>>>>> the time
>>>>> Jesse floated his patches that it can be removed. Based on that when I
>>>>> squashed his patches and ported them on top of John's request to fence
>>>>> conversion it ended up something like the below (manually edited a
>>>>> bit to
>>>>> be less noisy and some prep patches omitted):
>>>>>
>>>>> This implements the ioctl based uapi and indeed seqnos are not
>>>>> actually
>>>>> used in waits. So is this insufficient for some reason? (Other that it
>>>>> does not implement the input fence side of things.)
>>>> Yeah android syncpt on top of struct fence embedded int i915 request is
>>>> what I'd have expected.
>>> The thing I'm not happy with in that plan is that it leaves the kernel
>>> driver at the mercy of user land applications. If we return a fence
>>> object
>>> to user land via a file descriptor (or indeed any other mechanism)
>>> then that
>>> fence object must be locked until user land closes the file. If the
>>> fence
>>> object is the one embedded within our request structure then that
>>> means user
>>> land is effectively locking our request structure. Given that more
>>> and more
>>> stuff is being attached to the request, that could be a fair bit of
>>> memory
>>> tied up that we can do nothing about. E.g. if a rogue/buggy application
>>> requests a fence be returned for every batch buffer submitted but never
>>> closes them. Whereas, if we go the route of a separate fence object
>>> specifically for user land then they can leak them like a sieve and
>>> we won't
>>> really care so much.
>> Userspace can exhaust kernel allocations, that's nothing new. And if we
>> keep it userspace simply needs to leak a few more fence fds than if
>> there's a bit more data attached to it.
>>
>> The solution to this problem is to have a mem cgroup limit set. No
>> need to
>> complicate our kernel code.
>
> There is still the extra complication that request unreferencing cannot
> require any kind of mutex lock if we are allowing it to happen from
> outside of the driver. That means the unreference callback must move the
> request to a 'please clean me later' list, schedule a worker thread to
> run, and thus do the clean up asynchronously.

For this particular issue my solution was to extend the sync_fence 
constructor to take a mutex and store it inside the object. Then at 
destruction time, which happens at sync_fd->f_ops->release() time, it is 
just a matter of calling kref_put_mutex instead of kref_put.

Seemed to work under some quick testing but that is as much as I did 
back then.

Regards,

Tvrtko
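
Purely as an illustration of the mechanism described above (not the actual
staging code): the wrapper object remembers the mutex at creation time and
the final unref of the driver fence goes through kref_put_mutex().
fence_release() and kref_put_mutex() are the stock kernel helpers; the
function name and the passed-in lock are assumptions.

#include <linux/fence.h>
#include <linux/kref.h>
#include <linux/mutex.h>

static void wrapped_fence_put(struct fence *fence, struct mutex *lock)
{
	/*
	 * On the final reference kref_put_mutex() takes 'lock' (e.g. i915's
	 * struct_mutex stored at sync_fence creation time) before calling
	 * fence_release(), so the driver's ->release runs with the lock
	 * held; fence_release() knows nothing about the mutex, so it is
	 * dropped here afterwards. For earlier references the lock is never
	 * taken.
	 */
	if (kref_put_mutex(&fence->refcount, fence_release, lock))
		mutex_unlock(lock);
}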
Daniel Vetter July 6, 2015, 3:12 p.m. UTC | #7
On Mon, Jul 06, 2015 at 03:46:49PM +0100, Tvrtko Ursulin wrote:
> 
> On 07/06/2015 03:26 PM, John Harrison wrote:
> >On 06/07/2015 14:59, Daniel Vetter wrote:
> >>On Mon, Jul 06, 2015 at 01:58:25PM +0100, John Harrison wrote:
> >>>On 06/07/2015 10:29, Daniel Vetter wrote:
> >>>>On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
> >>>>>On 07/02/2015 04:55 PM, Chris Wilson wrote:
> >>>>>>It would be nice if we could reuse one seqno both for
> >>>>>>internal/external
> >>>>>>fences. If you need to expose a fence ordering within a timeline
> >>>>>>that is
> >>>>>>based on the creation stamp rather than execution stamp, it seems
> >>>>>>like
> >>>>>>we could just add such a stamp when creating the sync_pt and not
> >>>>>>worry
> >>>>>>about its relationship to the execution seqno.
> >>>>>>
> >>>>>>Doing so does expose that requests are reordered to userspace
> >>>>>>since the
> >>>>>>signalling timeline is not the same as userspace's ordered
> >>>>>>timeline. Not
> >>>>>>sure if that is a problem or not.
> >>>>>>
> >>>>>>Afaict the sync uapi is based on waiting for all of a set of
> >>>>>>fences to
> >>>>>>retire. It doesn't seem to rely on fence ordering (that is knowing
> >>>>>>that
> >>>>>>fence A will signal before fence B so it need only wait on fence B).
> >>>>>>
> >>>>>>Here's hoping that we can have both simplicity and efficiency...
> >>>>>Jumping in with not even perfect understanding of everything here -
> >>>>>but
> >>>>>timeline business has always been confusing me. There is nothing in
> >>>>>the
> >>>>>uapi which needs it afaics and iirc there was some discussion at
> >>>>>the time
> >>>>>Jesse floated his patches that it can be removed. Based on that when I
> >>>>>squashed his patches and ported them on top of John's request to fence
> >>>>>conversion it ended up something like the below (manually edited a
> >>>>>bit to
> >>>>>be less noisy and some prep patches omitted):
> >>>>>
> >>>>>This implements the ioctl based uapi and indeed seqnos are not
> >>>>>actually
> >>>>>used in waits. So is this insufficient for some reason? (Other that it
> >>>>>does not implement the input fence side of things.)
> >>>>Yeah android syncpt on top of struct fence embedded int i915 request is
> >>>>what I'd have expected.
> >>>The thing I'm not happy with in that plan is that it leaves the kernel
> >>>driver at the mercy of user land applications. If we return a fence
> >>>object
> >>>to user land via a file descriptor (or indeed any other mechanism)
> >>>then that
> >>>fence object must be locked until user land closes the file. If the
> >>>fence
> >>>object is the one embedded within our request structure then that
> >>>means user
> >>>land is effectively locking our request structure. Given that more
> >>>and more
> >>>stuff is being attached to the request, that could be a fair bit of
> >>>memory
> >>>tied up that we can do nothing about. E.g. if a rogue/buggy application
> >>>requests a fence be returned for every batch buffer submitted but never
> >>>closes them. Whereas, if we go the route of a separate fence object
> >>>specifically for user land then they can leak them like a sieve and
> >>>we won't
> >>>really care so much.
> >>Userspace can exhaust kernel allocations, that's nothing new. And if we
> >>keep it userspace simply needs to leak a few more fence fds than if
> >>there's a bit more data attached to it.
> >>
> >>The solution to this problem is to have a mem cgroup limit set. No
> >>need to
> >>complicate our kernel code.
> >
> >There is still the extra complication that request unreferencing cannot
> >require any kind of mutex lock if we are allowing it to happen from
> >outside of the driver. That means the unreference callback must move the
> >request to a 'please clean me later' list, schedule a worker thread to
> >run, and thus do the clean up asynchronously.
> 
> For this particular issue my solution was to extend the sync_fence
> constructor to take a mutex and store it inside the object. Then at
> destruction time, which happens at sync_fd->f_ops->release() time, it is
> just a matter of calling kref_put_mutex instead of kref_put.
> 
> Seemed to work under some quick testing but that is as much as I did back
> then.

The problem is that it doesn't scale since everyone wants some other kind
of mutex to serialize the final kref_put. If something is supposed to be
cross-subsystem/driver (which is the case for fences) then we really can't
do that kind of leaky locking design. Imo we should have a "kref_put_mutex
considered harmful" sign somewhere ...

If you have weak references somewhere and need to prevent the object from
disappearing prematurely while chasing that weak reference then imo the
better design pattern is to use kref_get_unless_zero. If you need the
serialization the mutex provides for some other reason (someone is only
holding the mutex instead of grabbing a proper reference when they really
should grab one) then your refcounting scheme probably needs another kind
of fixup patch.
-Daniel
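
A minimal sketch of the acquire side described above, with hypothetical
names: the 'owner' structure and its weak_req pointer are made up for
illustration; only kref_get_unless_zero() and the request's embedded fence
come from the code in this thread.

/*
 * 'owner->weak_req' holds no reference and is protected by 'owner->lock';
 * the release path is assumed to clear it under the same lock.
 * kref_get_unless_zero() refuses to take a reference once the count has
 * already dropped to zero, closing the 0 -> 1 race.
 */
#include <linux/kref.h>
#include <linux/spinlock.h>
#include "i915_drv.h"

struct owner {
	spinlock_t lock;
	struct drm_i915_gem_request *weak_req;	/* weak: no reference held */
};

static struct drm_i915_gem_request *owner_get_request(struct owner *owner)
{
	struct drm_i915_gem_request *req = NULL;

	spin_lock(&owner->lock);
	if (owner->weak_req &&
	    kref_get_unless_zero(&owner->weak_req->fence.refcount))
		req = owner->weak_req;	/* now a full reference, fence_put() later */
	spin_unlock(&owner->lock);

	return req;
}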
Tvrtko Ursulin July 6, 2015, 3:21 p.m. UTC | #8
On 07/06/2015 04:12 PM, Daniel Vetter wrote:
> On Mon, Jul 06, 2015 at 03:46:49PM +0100, Tvrtko Ursulin wrote:
>> On 07/06/2015 03:26 PM, John Harrison wrote:
>>> On 06/07/2015 14:59, Daniel Vetter wrote:
>>>> On Mon, Jul 06, 2015 at 01:58:25PM +0100, John Harrison wrote:
>>>>> On 06/07/2015 10:29, Daniel Vetter wrote:
>>>>>> On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
>>>>>>> On 07/02/2015 04:55 PM, Chris Wilson wrote:
>>>>>>>> It would be nice if we could reuse one seqno both for
>>>>>>>> internal/external
>>>>>>>> fences. If you need to expose a fence ordering within a timeline
>>>>>>>> that is
>>>>>>>> based on the creation stamp rather than execution stamp, it seems
>>>>>>>> like
>>>>>>>> we could just add such a stamp when creating the sync_pt and not
>>>>>>>> worry
>>>>>>>> about its relationship to the execution seqno.
>>>>>>>>
>>>>>>>> Doing so does expose that requests are reordered to userspace
>>>>>>>> since the
>>>>>>>> signalling timeline is not the same as userspace's ordered
>>>>>>>> timeline. Not
>>>>>>>> sure if that is a problem or not.
>>>>>>>>
>>>>>>>> Afaict the sync uapi is based on waiting for all of a set of
>>>>>>>> fences to
>>>>>>>> retire. It doesn't seem to rely on fence ordering (that is knowing
>>>>>>>> that
>>>>>>>> fence A will signal before fence B so it need only wait on fence B).
>>>>>>>>
>>>>>>>> Here's hoping that we can have both simplicity and efficiency...
>>>>>>> Jumping in with not even perfect understanding of everything here -
>>>>>>> but
>>>>>>> timeline business has always been confusing me. There is nothing in
>>>>>>> the
>>>>>>> uapi which needs it afaics and iirc there was some discussion at
>>>>>>> the time
>>>>>>> Jesse floated his patches that it can be removed. Based on that when I
>>>>>>> squashed his patches and ported them on top of John's request to fence
>>>>>>> conversion it ended up something like the below (manually edited a
>>>>>>> bit to
>>>>>>> be less noisy and some prep patches omitted):
>>>>>>>
>>>>>>> This implements the ioctl based uapi and indeed seqnos are not
>>>>>>> actually
>>>>>>> used in waits. So is this insufficient for some reason? (Other that it
>>>>>>> does not implement the input fence side of things.)
>>>>>> Yeah android syncpt on top of struct fence embedded int i915 request is
>>>>>> what I'd have expected.
>>>>> The thing I'm not happy with in that plan is that it leaves the kernel
>>>>> driver at the mercy of user land applications. If we return a fence
>>>>> object
>>>>> to user land via a file descriptor (or indeed any other mechanism)
>>>>> then that
>>>>> fence object must be locked until user land closes the file. If the
>>>>> fence
>>>>> object is the one embedded within our request structure then that
>>>>> means user
>>>>> land is effectively locking our request structure. Given that more
>>>>> and more
>>>>> stuff is being attached to the request, that could be a fair bit of
>>>>> memory
>>>>> tied up that we can do nothing about. E.g. if a rogue/buggy application
>>>>> requests a fence be returned for every batch buffer submitted but never
>>>>> closes them. Whereas, if we go the route of a separate fence object
>>>>> specifically for user land then they can leak them like a sieve and
>>>>> we won't
>>>>> really care so much.
>>>> Userspace can exhaust kernel allocations, that's nothing new. And if we
>>>> keep it userspace simply needs to leak a few more fence fds than if
>>>> there's a bit more data attached to it.
>>>>
>>>> The solution to this problem is to have a mem cgroup limit set. No
>>>> need to
>>>> complicate our kernel code.
>>>
>>> There is still the extra complication that request unreferencing cannot
>>> require any kind of mutex lock if we are allowing it to happen from
>>> outside of the driver. That means the unreference callback must move the
>>> request to a 'please clean me later' list, schedule a worker thread to
>>> run, and thus do the clean up asynchronously.
>>
>> For this particular issue my solution was to extend the sync_fence
>> constructor to take a mutex and store it inside the object. Then at
>> destruction time, which happens at sync_fd->f_ops->release() time, it is
>> just a matter of calling kref_put_mutex instead of kref_put.
>>
>> Seemed to work under some quick testing but that is as much as I did back
>> then.
>
> The problem is that it doesn't scale since everyone wants some other kind
> of mutex to serialize the final kref_put. If something is supposed to be
> cross-subsystem/driver (which is the case for fences) then we really can't
> do that kind of leaky locking design. Imo we should have a kref_put_mutex
> considered harmful sign somewhere ...

I get the argument about everything wanting to add their own lock not
scaling, but I don't see how that ties in with the leaky comment? Also, a
mutex is a pretty standard thing, especially since kref_put_mutex exists.
:D If you look at it from that angle it kind of just exposes to the
superclass what the base class can do.

> If you have weak references somewhere and need to prevent the object from
> disappearing untimely while chasing that weak reference then imo the
> better design pattern is to use kref_get_unless_zero. If you need the
> serialization the mutex provides for some other reason (someone is only
> hodling the mutex instead of grabbing a proper refernce when they really
> should grab one) then your refcounting scheme probably needs another kind
> of fixup patch.

I don't see how weak references can work, since if the request goes away
the information is lost unless it is stored somewhere else.

Regards,

Tvrtko
Daniel Vetter July 6, 2015, 3:37 p.m. UTC | #9
On Mon, Jul 06, 2015 at 04:21:28PM +0100, Tvrtko Ursulin wrote:
> 
> On 07/06/2015 04:12 PM, Daniel Vetter wrote:
> >On Mon, Jul 06, 2015 at 03:46:49PM +0100, Tvrtko Ursulin wrote:
> >>On 07/06/2015 03:26 PM, John Harrison wrote:
> >>>On 06/07/2015 14:59, Daniel Vetter wrote:
> >>>>On Mon, Jul 06, 2015 at 01:58:25PM +0100, John Harrison wrote:
> >>>>>On 06/07/2015 10:29, Daniel Vetter wrote:
> >>>>>>On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
> >>>>>>>On 07/02/2015 04:55 PM, Chris Wilson wrote:
> >>>>>>>>It would be nice if we could reuse one seqno both for
> >>>>>>>>internal/external
> >>>>>>>>fences. If you need to expose a fence ordering within a timeline
> >>>>>>>>that is
> >>>>>>>>based on the creation stamp rather than execution stamp, it seems
> >>>>>>>>like
> >>>>>>>>we could just add such a stamp when creating the sync_pt and not
> >>>>>>>>worry
> >>>>>>>>about its relationship to the execution seqno.
> >>>>>>>>
> >>>>>>>>Doing so does expose that requests are reordered to userspace
> >>>>>>>>since the
> >>>>>>>>signalling timeline is not the same as userspace's ordered
> >>>>>>>>timeline. Not
> >>>>>>>>sure if that is a problem or not.
> >>>>>>>>
> >>>>>>>>Afaict the sync uapi is based on waiting for all of a set of
> >>>>>>>>fences to
> >>>>>>>>retire. It doesn't seem to rely on fence ordering (that is knowing
> >>>>>>>>that
> >>>>>>>>fence A will signal before fence B so it need only wait on fence B).
> >>>>>>>>
> >>>>>>>>Here's hoping that we can have both simplicity and efficiency...
> >>>>>>>Jumping in with not even perfect understanding of everything here -
> >>>>>>>but
> >>>>>>>timeline business has always been confusing me. There is nothing in
> >>>>>>>the
> >>>>>>>uapi which needs it afaics and iirc there was some discussion at
> >>>>>>>the time
> >>>>>>>Jesse floated his patches that it can be removed. Based on that when I
> >>>>>>>squashed his patches and ported them on top of John's request to fence
> >>>>>>>conversion it ended up something like the below (manually edited a
> >>>>>>>bit to
> >>>>>>>be less noisy and some prep patches omitted):
> >>>>>>>
> >>>>>>>This implements the ioctl based uapi and indeed seqnos are not
> >>>>>>>actually
> >>>>>>>used in waits. So is this insufficient for some reason? (Other that it
> >>>>>>>does not implement the input fence side of things.)
> >>>>>>Yeah android syncpt on top of struct fence embedded int i915 request is
> >>>>>>what I'd have expected.
> >>>>>The thing I'm not happy with in that plan is that it leaves the kernel
> >>>>>driver at the mercy of user land applications. If we return a fence
> >>>>>object
> >>>>>to user land via a file descriptor (or indeed any other mechanism)
> >>>>>then that
> >>>>>fence object must be locked until user land closes the file. If the
> >>>>>fence
> >>>>>object is the one embedded within our request structure then that
> >>>>>means user
> >>>>>land is effectively locking our request structure. Given that more
> >>>>>and more
> >>>>>stuff is being attached to the request, that could be a fair bit of
> >>>>>memory
> >>>>>tied up that we can do nothing about. E.g. if a rogue/buggy application
> >>>>>requests a fence be returned for every batch buffer submitted but never
> >>>>>closes them. Whereas, if we go the route of a separate fence object
> >>>>>specifically for user land then they can leak them like a sieve and
> >>>>>we won't
> >>>>>really care so much.
> >>>>Userspace can exhaust kernel allocations, that's nothing new. And if we
> >>>>keep it userspace simply needs to leak a few more fence fds than if
> >>>>there's a bit more data attached to it.
> >>>>
> >>>>The solution to this problem is to have a mem cgroup limit set. No
> >>>>need to
> >>>>complicate our kernel code.
> >>>
> >>>There is still the extra complication that request unreferencing cannot
> >>>require any kind of mutex lock if we are allowing it to happen from
> >>>outside of the driver. That means the unreference callback must move the
> >>>request to a 'please clean me later' list, schedule a worker thread to
> >>>run, and thus do the clean up asynchronously.
> >>
> >>For this particular issue my solution was to extend the sync_fence
> >>constructor to take a mutex and store it inside the object. Then at
> >>destruction time, which happens at sync_fd->f_ops->release() time, it is
> >>just a matter of calling kref_put_mutex instead of kref_put.
> >>
> >>Seemed to work under some quick testing but that is as much as I did back
> >>then.
> >
> >The problem is that it doesn't scale since everyone wants some other kind
> >of mutex to serialize the final kref_put. If something is supposed to be
> >cross-subsystem/driver (which is the case for fences) then we really can't
> >do that kind of leaky locking design. Imo we should have a kref_put_mutex
> >considered harmful sign somewhere ...
> 
> I get the argument about everything wanting to add their own not scaling,
> but don't tie with the leaky comment? Also mutex is a pretty standard thing,
> especially since kref_put_mutex. :D If you look at it from that angle it
> kind of just exposes to the super class what the base class can do.

Leaky not as in leaking memory, but as in a leaky abstraction - we impose
i915's locking internals onto users of the fence interface (somewhat, at least).
> 
> >If you have weak references somewhere and need to prevent the object from
> >disappearing untimely while chasing that weak reference then imo the
> >better design pattern is to use kref_get_unless_zero. If you need the
> >serialization the mutex provides for some other reason (someone is only
> >hodling the mutex instead of grabbing a proper refernce when they really
> >should grab one) then your refcounting scheme probably needs another kind
> >of fixup patch.
> 
> I don't see how weak references can work since if the request goes
> information is lost, unless stored somewhere else.

So a weak reference is a pointer which doesn't hold a reference to the
kref-controlled object, but is protected by some lock. At the latest, when
the object is destroyed we need to clear that pointer in the free callback
of kref_put (and so also grab the lock that protects that pointer). The
problem is that anyone else chasing that weak reference might race with
the final kref_put and increase the refcount from 0 to 1, which isn't
good.

There's two ways to do that:
- kref_put_mutex on the release side.
- in the acquire side do a kref_get_unless_zero _while_ holding that lock.

Two upsides of the later approach:
- You can have an unrestricted amount of weak references, each protected
  with their own lock. kref_put_mutex only works up to one.
- It doesn't serialize the final kref_put with the lock - doing that
  allows folks to rely on the mutex instead of a proper refcount to make
  the obj stick around, which is imo a design antipattern that I have
  suffered through a lot while cleaning it up again and again.
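
As a rough illustration of the two patterns (names invented for the example,
this is not i915 code):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj;

struct cache {
        spinlock_t lock;
        struct obj *weak;       /* weak reference, protected by lock */
};

struct obj {
        struct kref ref;
        struct cache *cache;
};

/* The final kref_put ends up here; clear the weak pointer under its lock. */
static void obj_release(struct kref *ref)
{
        struct obj *obj = container_of(ref, struct obj, ref);

        spin_lock(&obj->cache->lock);
        obj->cache->weak = NULL;
        spin_unlock(&obj->cache->lock);
        kfree(obj);
}

/* Acquire side of the second option: chase the weak pointer while holding
 * the lock that protects it, and only accept the object if its refcount
 * hasn't already dropped to zero. */
static struct obj *obj_lookup(struct cache *cache)
{
        struct obj *obj;

        spin_lock(&cache->lock);
        obj = cache->weak;
        if (obj && !kref_get_unless_zero(&obj->ref))
                obj = NULL;     /* raced with the final kref_put */
        spin_unlock(&cache->lock);

        return obj;
}

The first option would instead use kref_put_mutex(&obj->ref, obj_release,
&some_mutex) on the release side, with the weak pointer guarded by that one
mutex - which is what limits it to a single lock.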

But that's really a tangent to the discussion here, I have no idea whether
this applies here since I didn't read your patch in detail.
-Daniel
Tvrtko Ursulin July 6, 2015, 4:34 p.m. UTC | #10
On 07/06/2015 04:37 PM, Daniel Vetter wrote:
> On Mon, Jul 06, 2015 at 04:21:28PM +0100, Tvrtko Ursulin wrote:
>>
>> On 07/06/2015 04:12 PM, Daniel Vetter wrote:
>>> On Mon, Jul 06, 2015 at 03:46:49PM +0100, Tvrtko Ursulin wrote:
>>
>>> If you have weak references somewhere and need to prevent the object from
>>> disappearing untimely while chasing that weak reference then imo the
>>> better design pattern is to use kref_get_unless_zero. If you need the
>>> serialization the mutex provides for some other reason (someone is only
>>> holding the mutex instead of grabbing a proper reference when they really
>>> should grab one) then your refcounting scheme probably needs another kind
>>> of fixup patch.
>>
>> I don't see how weak references can work since if the request goes
>> information is lost, unless stored somewhere else.
>
> So a weak reference is a pointer which doesn't hold a reference to the object
> controlled with kref, but protected by some lock. Latest when the object
> is destroyed we need to clear that pointer in the free callback of
> kref_put (and so also grab the lock that protects that pointer). The
> problem is that anyone else chasing that weak reference might race with
> the final kref_put and increase the refcount from 0 to 1, which isn't
> good.
>
> There's two ways to do that:
> - kref_put_mutex on the release side.
> - in the acquire side do a kref_get_unless_zero _while_ holding that lock.
>
> Two upsides of the latter approach:
> - You can have an unrestricted amount of weak references, each protected
>    with their own lock. kref_put_mutex only works up to one.
> - It doesn't serialize the final kref_put with the lock - doing that
>    allows folks to rely on the mutex instead of a proper refcount to make
>    the obj stick around, which is imo a design antipattern that I have
>    suffered through a lot while cleaning it up again and again.
>
> But that's really a tangent to the discussion here, I have no idea whether
> this applies here since I didn't read your patch in detail.

I think it is not about my patch but whether the Android native sync 
implementation handles losing the weak reference on a fence.

I don't think it does and I am not sure how easy or hard it would be to 
change it right now. (The whole area traditionally confuses me since 
there are too many things called sync something and fence something.)

I would need to handle it to be able to at least report the status of a 
fence to userspace and gracefully fail other operations. And that is 
assuming that wouldn't break existing userspace.

Regards,

Tvrtko
Daniel Vetter July 6, 2015, 5:58 p.m. UTC | #11
On Mon, Jul 06, 2015 at 05:34:22PM +0100, Tvrtko Ursulin wrote:
> 
> On 07/06/2015 04:37 PM, Daniel Vetter wrote:
> >On Mon, Jul 06, 2015 at 04:21:28PM +0100, Tvrtko Ursulin wrote:
> >>
> >>On 07/06/2015 04:12 PM, Daniel Vetter wrote:
> >>>On Mon, Jul 06, 2015 at 03:46:49PM +0100, Tvrtko Ursulin wrote:
> >>
> >>>If you have weak references somewhere and need to prevent the object from
> >>>disappearing untimely while chasing that weak reference then imo the
> >>>better design pattern is to use kref_get_unless_zero. If you need the
> >>>serialization the mutex provides for some other reason (someone is only
> >>>holding the mutex instead of grabbing a proper reference when they really
> >>>should grab one) then your refcounting scheme probably needs another kind
> >>>of fixup patch.
> >>
> >>I don't see how weak references can work since if the request goes
> >>information is lost, unless stored somewhere else.
> >
> >So a weak reference is a pointer which doesn't hold a reference to the object
> >controlled with kref, but protected by some lock. Latest when the object
> >is destroyed we need to clear that pointer in the free callback of
> >kref_put (and so also grab the lock that protects that pointer). The
> >problem is that anyone else chasing that weak reference might race with
> >the final kref_put and increase the refcount from 0 to 1, which isn't
> >good.
> >
> >There's two ways to do that:
> >- kref_put_mutex on the release side.
> >- in the acquire side do a kref_get_unless_zero _while_ holding that lock.
> >
> >Two upsides of the latter approach:
> >- You can have an unrestricted amount of weak references, each protected
> >   with their own lock. kref_put_mutex only works up to one.
> >- It doesn't serialize the final kref_put with the lock - doing that
> >   allows folks to rely on the mutex instead of a proper refcount to make
> >   the obj stick around, which is imo a design antipattern that I have
> >   suffered through a lot while cleaning it up again and again.
> >
> >But that's really a tangent to the discussion here, I have no idea whether
> >this applies here since I didn't read your patch in detail.
> 
> I think it is not about my patch but whether the Android native sync
> implementation handles losing the weak reference on a fence.
> 
> I don't think it does and I am not sure how easy or hard it would be to
> change it right now. (The whole area traditionally confuses me since there
> are too many things called sync something and fence something.)
> 
> I would need to handle it to be able to at least report the status of a fence
> to userspace and gracefully fail other operations. And that is assuming that
> wouldn't break existing userspace.

For reporting status it should grab a strong reference. I thought this was
about i915-internal lists and stuff we use to keep track of our own
requests, for which we'd need struct_mutex. struct_mutex won't protect any
of the syncpt internal state. I guess I'm now rather confused as to what
this is about ...
-Daniel
Tvrtko Ursulin July 7, 2015, 9:15 a.m. UTC | #12
On 07/06/2015 01:58 PM, John Harrison wrote:
> On 06/07/2015 10:29, Daniel Vetter wrote:
>> On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
>>> On 07/02/2015 04:55 PM, Chris Wilson wrote:
>>>> It would be nice if we could reuse one seqno both for internal/external
>>>> fences. If you need to expose a fence ordering within a timeline
>>>> that is
>>>> based on the creation stamp rather than execution stamp, it seems like
>>>> we could just add such a stamp when creating the sync_pt and not worry
>>>> about its relationship to the execution seqno.
>>>>
>>>> Doing so does expose that requests are reordered to userspace since the
>>>> signalling timeline is not the same as userspace's ordered timeline.
>>>> Not
>>>> sure if that is a problem or not.
>>>>
>>>> Afaict the sync uapi is based on waiting for all of a set of fences to
>>>> retire. It doesn't seem to rely on fence ordering (that is knowing that
>>>> fence A will signal before fence B so it need only wait on fence B).
>>>>
>>>> Here's hoping that we can have both simplicity and efficiency...
>>> Jumping in with not even perfect understanding of everything here - but
>>> timeline business has always been confusing me. There is nothing in the
>>> uapi which needs it afaics and iirc there was some discussion at the
>>> time
>>> Jesse floated his patches that it can be removed. Based on that when I
>>> squashed his patches and ported them on top of John's request to fence
>>> conversion it ended up something like the below (manually edited a
>>> bit to
>>> be less noisy and some prep patches omitted):
>>>
>>> This implements the ioctl based uapi and indeed seqnos are not actually
>>> used in waits. So is this insufficient for some reason? (Other that it
>>> does not implement the input fence side of things.)
>> Yeah android syncpt on top of struct fence embedded int i915 request is
>> what I'd have expected.
> The thing I'm not happy with in that plan is that it leaves the kernel
> driver at the mercy of user land applications. If we return a fence
> object to user land via a file descriptor (or indeed any other
> mechanism) then that fence object must be locked until user land closes
> the file. If the fence object is the one embedded within our request
> structure then that means user land is effectively locking our request
> structure. Given that more and more stuff is being attached to the
> request, that could be a fair bit of memory tied up that we can do
> nothing about. E.g. if a rogue/buggy application requests a fence be
> returned for every batch buffer submitted but never closes them.
> Whereas, if we go the route of a separate fence object specifically for
> user land then they can leak them like a sieve and we won't really care
> so much.

I am starting to agree gradually with this view. Given all the 
complications, referencing requests for exporting via fds feels quite 
heavy-weight, with potentially unbound dependencies and more trickiness 
in the future, even if we agreed on referencing and locking details.

Seqnos per context sounds like a significantly more light-weight and 
decoupled implementation.

Regards,

Tvrtko
Jesse Barnes July 29, 2015, 9:19 p.m. UTC | #13
On 07/07/2015 02:15 AM, Tvrtko Ursulin wrote:
> 
> On 07/06/2015 01:58 PM, John Harrison wrote:
>> On 06/07/2015 10:29, Daniel Vetter wrote:
>>> On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
>>>> On 07/02/2015 04:55 PM, Chris Wilson wrote:
>>>>> It would be nice if we could reuse one seqno both for internal/external
>>>>> fences. If you need to expose a fence ordering within a timeline
>>>>> that is
>>>>> based on the creation stamp rather than execution stamp, it seems like
>>>>> we could just add such a stamp when creating the sync_pt and not worry
>>>>> about its relationship to the execution seqno.
>>>>>
>>>>> Doing so does expose that requests are reordered to userspace since the
>>>>> signalling timeline is not the same as userspace's ordered timeline.
>>>>> Not
>>>>> sure if that is a problem or not.
>>>>>
>>>>> Afaict the sync uapi is based on waiting for all of a set of fences to
>>>>> retire. It doesn't seem to rely on fence ordering (that is knowing that
>>>>> fence A will signal before fence B so it need only wait on fence B).
>>>>>
>>>>> Here's hoping that we can have both simplicity and efficiency...
>>>> Jumping in with not even perfect understanding of everything here - but
>>>> timeline business has always been confusing me. There is nothing in the
>>>> uapi which needs it afaics and iirc there was some discussion at the
>>>> time
>>>> Jesse floated his patches that it can be removed. Based on that when I
>>>> squashed his patches and ported them on top of John's request to fence
>>>> conversion it ended up something like the below (manually edited a
>>>> bit to
>>>> be less noisy and some prep patches omitted):
>>>>
>>>> This implements the ioctl based uapi and indeed seqnos are not actually
>>>> used in waits. So is this insufficient for some reason? (Other that it
>>>> does not implement the input fence side of things.)
>>> Yeah android syncpt on top of struct fence embedded int i915 request is
>>> what I'd have expected.
>> The thing I'm not happy with in that plan is that it leaves the kernel
>> driver at the mercy of user land applications. If we return a fence
>> object to user land via a file descriptor (or indeed any other
>> mechanism) then that fence object must be locked until user land closes
>> the file. If the fence object is the one embedded within our request
>> structure then that means user land is effectively locking our request
>> structure. Given that more and more stuff is being attached to the
>> request, that could be a fair bit of memory tied up that we can do
>> nothing about. E.g. if a rogue/buggy application requests a fence be
>> returned for every batch buffer submitted but never closes them.
>> Whereas, if we go the route of a separate fence object specifically for
>> user land then they can leak them like a sieve and we won't really care
>> so much.
> 
> I am starting to agree gradually with this view. Given all the
> complications, referencing requests for exporting via fds feels quite
> heavy-weight, with potentially unbound dependencies and more
> trickiness in the future, even if we agreed on referencing and locking
> details.
> 
> Seqnos per context sounds like a significantly more light-weight and
> decoupled implementation.

I think this is the right long term direction as well; conceptually the
per-context seqnos make the most sense in light of scheduling, and they
let us keep things simple for sync pts as well.  Only question is, who
is signed up to make it all work?

Jesse
John Harrison July 30, 2015, 11:36 a.m. UTC | #14
On 29/07/2015 22:19, Jesse Barnes wrote:
> On 07/07/2015 02:15 AM, Tvrtko Ursulin wrote:
>> On 07/06/2015 01:58 PM, John Harrison wrote:
>>> On 06/07/2015 10:29, Daniel Vetter wrote:
>>>> On Fri, Jul 03, 2015 at 12:17:33PM +0100, Tvrtko Ursulin wrote:
>>>>> On 07/02/2015 04:55 PM, Chris Wilson wrote:
>>>>>> It would be nice if we could reuse one seqno both for internal/external
>>>>>> fences. If you need to expose a fence ordering within a timeline
>>>>>> that is
>>>>>> based on the creation stamp rather than execution stamp, it seems like
>>>>>> we could just add such a stamp when creating the sync_pt and not worry
>>>>>> about its relationship to the execution seqno.
>>>>>>
>>>>>> Doing so does expose that requests are reordered to userspace since the
>>>>>> signalling timeline is not the same as userspace's ordered timeline.
>>>>>> Not
>>>>>> sure if that is a problem or not.
>>>>>>
>>>>>> Afaict the sync uapi is based on waiting for all of a set of fences to
>>>>>> retire. It doesn't seem to rely on fence ordering (that is knowing that
>>>>>> fence A will signal before fence B so it need only wait on fence B).
>>>>>>
>>>>>> Here's hoping that we can have both simplicity and efficiency...
>>>>> Jumping in with not even perfect understanding of everything here - but
>>>>> timeline business has always been confusing me. There is nothing in the
>>>>> uapi which needs it afaics and iirc there was some discussion at the
>>>>> time
>>>>> Jesse floated his patches that it can be removed. Based on that when I
>>>>> squashed his patches and ported them on top of John's request to fence
>>>>> conversion it ended up something like the below (manually edited a
>>>>> bit to
>>>>> be less noisy and some prep patches omitted):
>>>>>
>>>>> This implements the ioctl based uapi and indeed seqnos are not actually
>>>>> used in waits. So is this insufficient for some reason? (Other that it
>>>>> does not implement the input fence side of things.)
>>>> Yeah android syncpt on top of struct fence embedded int i915 request is
>>>> what I'd have expected.
>>> The thing I'm not happy with in that plan is that it leaves the kernel
>>> driver at the mercy of user land applications. If we return a fence
>>> object to user land via a file descriptor (or indeed any other
>>> mechanism) then that fence object must be locked until user land closes
>>> the file. If the fence object is the one embedded within our request
>>> structure then that means user land is effectively locking our request
>>> structure. Given that more and more stuff is being attached to the
>>> request, that could be a fair bit of memory tied up that we can do
>>> nothing about. E.g. if a rogue/buggy application requests a fence be
>>> returned for every batch buffer submitted but never closes them.
>>> Whereas, if we go the route of a separate fence object specifically for
>>> user land then they can leak them like a sieve and we won't really care
>>> so much.
>> I am starting to agree gradually with this view. Given all the
>> complications, referencing requests for exporting via fds feels quite
>> heavy-weight, with potentially unbound dependencies and more
>> trickiness in the future, even if we agreed on referencing and locking
>> details.
>>
>> Seqnos per context sounds like a significantly more light-weight and
>> decoupled implementation.
> I think this is the right long term direction as well; conceptually the
> per-context seqnos make the most sense in light of scheduling, and they
> let us keep things simple for sync pts as well.  Only question is, who
> is signed up to make it all work?
>
> Jesse
>

That's the version I had originally: a separate fence object using a per
context per ring timeline that is safe to export to user land. However,
Daniel Vetter was very strongly convinced that using a single shared 
fence object both internally and externally was the better solution.

My current implementation has a per context per ring timeline which is 
used to give the fence a definitely in-order and sensible seqno. It is 
just not the same seqno that goes through the hardware. At least, not 
yet! Although that change could be quite significant.
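
Roughly, that per-context, per-ring timeline amounts to something like the
sketch below (field and function names are illustrative, not the actual
patches):

struct i915_fence_timeline {
        spinlock_t lock;
        unsigned context;       /* from fence_context_alloc() */
        u32 next_seqno;         /* creation-ordered, not the HW seqno */
};

static u32 i915_fence_timeline_advance(struct i915_fence_timeline *tl)
{
        u32 seqno;

        spin_lock(&tl->lock);
        seqno = ++tl->next_seqno;
        spin_unlock(&tl->lock);

        return seqno;
}

/* At request creation the embedded fence is then initialised against that
 * timeline rather than against the global execution seqno:
 *
 *      fence_init(&req->fence, &i915_fence_ops, &tl->lock,
 *                 tl->context, i915_fence_timeline_advance(tl));
 */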

John.
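
For reference, userspace consumption of the I915_EXEC_FENCE_OUT flag added by
the patch below would look roughly like this (just a sketch of the intended
uapi; the execbuffer setup itself and error handling are omitted):

#include <poll.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_and_wait(int drm_fd, struct drm_i915_gem_execbuffer2 *execbuf)
{
        struct pollfd pfd;
        int fence_fd;

        execbuf->flags |= I915_EXEC_FENCE_OUT;

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf))
                return -1;

        /* With the patch, rsvd2 carries back the sync fence fd for this batch. */
        fence_fd = (int)execbuf->rsvd2;

        /* Android sync fds report POLLIN once the fence has signalled. */
        memset(&pfd, 0, sizeof(pfd));
        pfd.fd = fence_fd;
        pfd.events = POLLIN;
        poll(&pfd, 1, -1);

        close(fence_fd);
        return 0;
}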
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 74acca9..07f6ad9 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -71,3 +71,17 @@  config DRM_I915_PRELIMINARY_HW_SUPPORT
 	  option changes the default for that module option.
 
 	  If in doubt, say "N".
+
+config DRM_I915_SYNC
+	bool "Enable explicit sync support"
+	depends on DRM_I915
+	default y if STAGING
+	depends on STAGING
+	select ANDROID
+	select SYNC
+	help
+	  Choose this option to enable Android native sync support and the
+	  corresponding i915 driver code to expose it.  Slightly increases
+	  driver size and pulls in sync support from staging.
+
+	  If in doubt, say "Y".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index db21c93..93a3bc0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -91,6 +91,9 @@  i915-y += i915_vgpu.o
 # legacy horrors
 i915-y += i915_dma.o
 
+# sync points
+i915-$(CONFIG_DRM_I915_SYNC)	+= i915_sync.o
+
 obj-$(CONFIG_DRM_I915)  += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3ef3997..2cf4d3f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2753,6 +2753,26 @@  void i915_init_vm(struct drm_i915_private *dev_priv,
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
+/* i915_sync.c */
+struct sync_fence;
+
+#ifdef CONFIG_DRM_I915_SYNC
+int i915_fence_ring_fill_driver_data(struct fence *fence, void *data, int size);
+void i915_fence_ring_value_str(struct fence *fence, char *str, int size);
+void i915_fence_ring_timeline_value_str(struct fence *fence, char *str,
+					int size);
+
+int i915_create_sync_fence_ring(struct drm_i915_gem_request *req,
+				struct sync_fence **sync_fence, int *fence_fd);
+#else
+static inline
+int i915_create_sync_fence_ring(struct drm_i915_gem_request *req,
+				struct sync_fence **sync_fence, int *fence_fd)
+{
+	return -ENODEV;
+}
+#endif
+
 #define PIN_MAPPABLE 0x1
 #define PIN_NONBLOCK 0x2
 #define PIN_GLOBAL 0x4
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 560d244..a04853c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2633,7 +2633,7 @@  static const char *i915_gem_request_get_timeline_name(struct fence *req_fence)
 	return req->ring->name;
 }
 
-#if 0
+#if CONFIG_DRM_I915_SYNC
 static bool i915_gem_request_is_signaled(struct fence *req_fence)
 {
 	struct drm_i915_gem_request *req = container_of(req_fence,
@@ -2770,12 +2770,14 @@  static const struct fence_ops i915_gem_request_fops = {
 	.get_driver_name	= i915_gem_request_get_driver_name,
 	.get_timeline_name	= i915_gem_request_get_timeline_name,
 	.enable_signaling	= i915_gem_request_enable_signaling,
 	.wait			= fence_default_wait,
 	.release		= i915_gem_request_free,
+#if CONFIG_DRM_I915_SYNC
+	.signaled		= i915_gem_request_is_signaled,
+	.fill_driver_data	= i915_fence_ring_fill_driver_data,
+	.fence_value_str	= i915_fence_ring_value_str,
+	.timeline_value_str	= i915_fence_ring_timeline_value_str
+#endif
 };
 
 int _i915_gem_request_alloc(struct intel_engine_cs *ring,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 182c730..e6342ac 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@ 
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
+#include "../../../staging/android/sync.h"
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -1417,6 +1418,8 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	u32 dispatch_flags;
 	int ret;
 	bool need_relocs;
+	struct sync_fence *sync_fence = NULL;
+	int fence_fd = -1;
 
 	if (!i915_gem_check_execbuffer(args))
 		return -EINVAL;
@@ -1610,6 +1613,13 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto err_batch_unpin;
 
+	if (args->flags & I915_EXEC_FENCE_OUT) {
+		ret = i915_create_sync_fence_ring(params->request, &sync_fence,
+						  &fence_fd);
+		if (ret)
+			goto err_batch_unpin;
+	}
+
 	ret = i915_gem_request_add_to_client(params->request, file);
 	if (ret)
 		goto err_batch_unpin;
@@ -1628,6 +1638,26 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->ctx                     = ctx;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+	if (ret)
+		goto err_submit;
+
+	if (sync_fence) {
+		sync_fence_install(sync_fence, fence_fd);
+		args->rsvd2 = fence_fd;
+		sync_fence = NULL;
+	}
+
+err_submit:
+	if (sync_fence) {
+		/*
+		 * We are under the struct mutex here and sync fence we
+		 * created will attempt to grab it in its destructor.
+		 * Therefore remove the lock before unreferencing.
+		 */
+		sync_fence->lock = NULL;
+		fput(sync_fence->file);
+		put_unused_fd(fence_fd);
+	}
 
 err_batch_unpin:
 	/*
diff --git a/drivers/gpu/drm/i915/i915_sync.c b/drivers/gpu/drm/i915/i915_sync.c
new file mode 100644
index 0000000..1a50610
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sync.c
@@ -0,0 +1,106 @@ 
+/*
+ * Copyright © 2013-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jesse Barnes <jbarnes@virtuousgeek.org>
+ *    Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/fence.h>
+#include "../../../staging/android/sync.h"
+#include "i915_drv.h"
+
+/*
+ * i915 Android native sync fences.
+ *
+ * We implement sync points in terms of i915 seqnos. They're exposed through
+ * the DRM_IOCTL_I915_GEM_EXECBUFFER2 ioctl, and can be mixed and matched
+ * with other Android timelines and aggregated into sync_fences, etc.
+ *
+ * TODO:
+ *   * Display engine fences.
+ *   * Extend driver data with context id / ring id.
+ */
+
+int i915_fence_ring_fill_driver_data(struct fence *fence, void *data, int size)
+{
+	struct drm_i915_gem_request *req = container_of(fence, typeof(*req),
+							fence);
+
+	if (size < sizeof(req->seqno))
+		return -ENOMEM;
+
+	memcpy(data, &req->seqno, sizeof(req->seqno));
+
+	return sizeof(req->seqno);
+}
+
+void i915_fence_ring_value_str(struct fence *fence, char *str, int size)
+{
+	struct drm_i915_gem_request *req = container_of(fence, typeof(*req),
+							fence);
+
+	snprintf(str, size, "%u", req->seqno);
+}
+
+void i915_fence_ring_timeline_value_str(struct fence *fence, char *str,
+					int size)
+{
+	struct drm_i915_gem_request *req = container_of(fence, typeof(*req),
+							fence);
+	struct intel_engine_cs *ring = req->ring;
+
+	snprintf(str, size, "%u", ring->get_seqno(ring, false));
+}
+
+int i915_create_sync_fence_ring(struct drm_i915_gem_request *req,
+				struct sync_fence **sync_fence, int *fence_fd)
+{
+	struct drm_device *dev = req->i915->dev;
+	struct intel_engine_cs *ring = req->ring;
+	struct sync_fence *sfence;
+	char ring_name[6] = "ring0";
+	int fd;
+
+	fd = get_unused_fd_flags(O_CLOEXEC);
+	if (fd < 0) {
+		DRM_DEBUG("No available file descriptors!\n");
+		return fd;
+	}
+
+	ring_name[4] += ring->id;
+	sfence = sync_fence_create_dma(ring_name, &req->fence,
+				       &dev->struct_mutex);
+	if (!sfence) {
+		put_unused_fd(fd);
+		return -ENOMEM;
+	}
+
+	*sync_fence = sfence;
+	*fence_fd = fd;
+
+	fence_get(&req->fence);
+
+	return 0;
+}
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 4851d66..2522f78 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -246,7 +246,7 @@  typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
-#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
 #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
 #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
 #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -722,7 +722,7 @@  struct drm_i915_gem_execbuffer2 {
 #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
 	__u64 flags;
 	__u64 rsvd1; /* now used for context info */
-	__u64 rsvd2;
+	__u64 rsvd2; /* now used for fence fd */
 };
 
 /** Resets the SO write offset registers for transform feedback on gen7. */
@@ -760,7 +760,9 @@  struct drm_i915_gem_execbuffer2 {
 #define I915_EXEC_BSD_RING1		(1<<13)
 #define I915_EXEC_BSD_RING2		(2<<13)
 
-#define __I915_EXEC_UNKNOWN_FLAGS -(1<<15)
+#define I915_EXEC_FENCE_OUT		(1<<15)
+
+#define __I915_EXEC_UNKNOWN_FLAGS -(1<<16)
 
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \