
[3/3] drm/doc/rfc: VM_BIND uapi definition

Message ID 20220610070711.32407-4-niranjana.vishwanathapura@intel.com (mailing list archive)
State New, archived
Series: drm/doc/rfc: i915 VM_BIND feature design + uapi

Commit Message

Niranjana Vishwanathapura June 10, 2022, 7:07 a.m. UTC
VM_BIND and related uapi definitions

Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
 Documentation/gpu/rfc/i915_vm_bind.h | 490 +++++++++++++++++++++++++++
 1 file changed, 490 insertions(+)
 create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h

Comments

Matthew Brost June 10, 2022, 8:53 a.m. UTC | #1
On Fri, Jun 10, 2022 at 12:07:11AM -0700, Niranjana Vishwanathapura wrote:
> VM_BIND and related uapi definitions
> 
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> ---
>  Documentation/gpu/rfc/i915_vm_bind.h | 490 +++++++++++++++++++++++++++
>  1 file changed, 490 insertions(+)
>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
> 
> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
> new file mode 100644
> index 000000000000..9fc854969cfb
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
> @@ -0,0 +1,490 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2022 Intel Corporation
> + */
> +
> +/**
> + * DOC: I915_PARAM_HAS_VM_BIND
> + *
> + * VM_BIND feature availability.
> + * See typedef drm_i915_getparam_t param.
> + * bit[0]: If set, VM_BIND is supported, otherwise not.
> + * bits[8-15]: VM_BIND implementation version.
> + * Version 0 will not have VM_BIND/UNBIND timeline fence array support.
> + */
> +#define I915_PARAM_HAS_VM_BIND		57
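
For illustration, userspace could probe this with the existing GETPARAM
ioctl roughly as follows (a sketch, not part of the patch; assumes an open
i915 DRM fd and the drm/i915_drm.h uapi header, error handling trimmed):

	/* Returns the VM_BIND version, or -1 if VM_BIND is unsupported. */
	int vm_bind_version(int fd)
	{
		int value = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_VM_BIND,
			.value = &value,
		};

		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) || !(value & 1))
			return -1;	/* bit[0] clear: not supported */

		return (value >> 8) & 0xff;	/* bits[8-15]: version */
	}
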
> +
> +/**
> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
> + *
> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
> + * See struct drm_i915_gem_vm_control flags.
> + *
> + * The older execbuf2 ioctl will not support VM_BIND mode of operation.
> + * For VM_BIND mode, we have a new execbuf3 ioctl which will not accept any
> + * execlist (See struct drm_i915_gem_execbuffer3 for more details).
> + *
> + */
> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
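
For illustration, opting a new VM into VM_BIND mode would look roughly like
this (a sketch against the existing VM_CREATE ioctl; fd is an open i915 DRM
fd, error handling omitted):

	struct drm_i915_gem_vm_control ctl = {
		.flags = I915_VM_CREATE_FLAGS_USE_VM_BIND,
	};
	__u32 vm_id;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl) == 0)
		vm_id = ctl.vm_id;	/* this VM only accepts VM_BIND mappings */
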
> +
> +/**
> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
> + *
> + * Flag to declare context as long running.
> + * See struct drm_i915_gem_context_create_ext flags.
> + *
> + * Usage of dma-fence expects that they complete in a reasonable amount of time.
> + * Compute, on the other hand, can be long running. Hence it is not appropriate
> + * for compute contexts to export request completion dma-fences to the user.
> + * The dma-fence usage will be limited to in-kernel consumption only.
> + * Compute contexts need to use user/memory fence.
> + *
> + * So, long running contexts do not support output fences. Hence,
> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) must not be
> + * used. The DRM_I915_GEM_WAIT ioctl call is also not supported for
> + * objects mapped to long running contexts.
> + */
> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
> +
> +/* VM_BIND related ioctls */
> +#define DRM_I915_GEM_VM_BIND		0x3d
> +#define DRM_I915_GEM_VM_UNBIND		0x3e
> +#define DRM_I915_GEM_EXECBUFFER3	0x3f
> +#define DRM_I915_GEM_WAIT_USER_FENCE	0x40
> +
> +#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
> +#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
> +#define DRM_IOCTL_I915_GEM_EXECBUFFER3		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
> +
> +/**
> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
> + *
> + * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
> + * virtual address (VA) range to the section of an object that should be bound
> + * in the device page table of the specified address space (VM).
> + * The VA range specified must be unique (i.e., not currently bound) and can
> + * be mapped to the whole object or a section of the object (partial binding).
> + * Multiple VA mappings can be created to the same section of the object
> + * (aliasing).
> + *
> + * The @queue_idx specifies the queue to use for binding. The same queue can be
> + * used for both VM_BIND and VM_UNBIND calls. All submitted bind and unbind
> + * operations in a queue are performed in the order of submission.
> + *
> + * The @start, @offset and @length should be 4K page aligned. However, DG2
> + * and XEHPSDV have a 64K page size for device local-memory and a compact page
> + * table. On those platforms, for binding device local-memory objects, the
> + * @start should be 2M aligned, and @offset and @length should be 64K aligned.
> + * Also, on those platforms, it is not allowed to bind a device local-memory
> + * object and a system memory object in a single 2M section of the VA range.
> + */
> +struct drm_i915_gem_vm_bind {
> +	/** @vm_id: VM (address space) id to bind */
> +	__u32 vm_id;
> +
> +	/** @queue_idx: Index of queue for binding */
> +	__u32 queue_idx;
> +
> +	/** @rsvd: Reserved, MBZ */
> +	__u32 rsvd;
> +
> +	/** @handle: Object handle */
> +	__u32 handle;
> +
> +	/** @start: Virtual Address start to bind */
> +	__u64 start;
> +
> +	/** @offset: Offset in object to bind */
> +	__u64 offset;
> +
> +	/** @length: Length of mapping to bind */
> +	__u64 length;

This probably isn't needed. We are never going to unbind a subset of a
VMA, are we? That being said, it can't hurt as a sanity check (e.g.
internal vma->length == user unbind length).

> +
> +	/**
> +	 * @flags: Supported flags are:
> +	 *
> +	 * I915_GEM_VM_BIND_READONLY:
> +	 * Mapping is read-only.
> +	 *
> +	 * I915_GEM_VM_BIND_CAPTURE:
> +	 * Capture this mapping in the dump upon GPU error.
> +	 */
> +	__u64 flags;
> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
> +
> +	/**
> +	 * @extensions: 0-terminated chain of extensions for this operation.
> +	 *
> +	 * I915_VM_BIND_EXT_TIMELINE_FENCES:
> +	 * Specifies an array of input or output timeline fences for this
> +	 * binding operation. See struct drm_i915_vm_bind_ext_timeline_fences.
> +	 *
> +	 * I915_VM_BIND_EXT_USER_FENCES:
> +	 * Specifies an array of input or output user fences for this
> +	 * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
> +	 * This is required for compute contexts.
> +	 */
> +	__u64 extensions;
> +#define I915_VM_BIND_EXT_TIMELINE_FENCES	0
> +#define I915_VM_BIND_EXT_USER_FENCES		1
> +};
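
As an illustration, a basic bind against this proposed uapi would look
roughly like the sketch below (vm_id, bo_handle, va and bo_size are assumed
to come from earlier steps; no fence extensions, error handling omitted):

	struct drm_i915_gem_vm_bind bind = {
		.vm_id = vm_id,
		.queue_idx = 0,		/* ordered with other ops on queue 0 */
		.handle = bo_handle,
		.start = va,		/* 4K aligned (2M for lmem on DG2/XEHPSDV) */
		.offset = 0,
		.length = bo_size,	/* 4K aligned (64K for lmem) */
	};

	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);
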
> +
> +/**
> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
> + *
> + * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
> + * address (VA) range that should be unbound from the device page table of the
> + * specified address space (VM). The specified VA range must match one of the
> + * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
> + * completion.
> + *
> + * The @queue_idx specifies the queue to use for unbinding.
> + * See struct drm_i915_gem_vm_bind for more information on @queue_idx.
> + *
> + * The @start and @length must specify a unique mapping bound with VM_BIND
> + * ioctl.
> + */
> +struct drm_i915_gem_vm_unbind {
> +	/** @vm_id: VM (address space) id to unbind */
> +	__u32 vm_id;
> +
> +	/** @queue_idx: Index of queue for unbinding */
> +	__u32 queue_idx;
> +
> +	/** @start: Virtual Address start to unbind */
> +	__u64 start;
> +
> +	/** @length: Length of mapping to unbind */
> +	__u64 length;
> +
> +	/** @flags: Reserved for future usage, currently MBZ */
> +	__u64 flags;
> +
> +	/**
> +	 * @extensions: 0-terminated chain of extensions for this operation.
> +	 *
> +	 * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
> +	 * Specifies an array of input or output timeline fences for this
> +	 * unbind operation.
> +	 * It has the same format as struct drm_i915_vm_bind_ext_timeline_fences.
> +	 *
> +	 * I915_VM_UNBIND_EXT_USER_FENCES:
> +	 * Specifies an array of input or output user fences for this
> +	 * unbind operation. This is required for compute contexts.
> +	 * It has the same format as struct drm_i915_vm_bind_ext_user_fence.
> +	 */
> +	__u64 extensions;
> +#define I915_VM_UNBIND_EXT_TIMELINE_FENCES	0
> +#define I915_VM_UNBIND_EXT_USER_FENCES		1
> +};
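
The matching unbind for the sketch above would be (again, illustrative only):

	struct drm_i915_gem_vm_unbind unbind = {
		.vm_id = vm_id,
		.queue_idx = 0,
		.start = va,		/* must match the mapping created above */
		.length = bo_size,
	};

	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind);
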
> +
> +/**
> + * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
> + * or the vm_unbind work.
> + *
> + * The vm_bind or vm_unbind async worker will wait for the input fence to signal
> + * before starting the binding or unbinding.
> + *
> + * The vm_bind or vm_unbind async worker will signal the returned output fence
> + * after the completion of binding or unbinding.
> + */
> +struct drm_i915_vm_bind_fence {
> +	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
> +	__u32 handle;
> +
> +	/**
> +	 * @flags: Supported flags are:
> +	 *
> +	 * I915_VM_BIND_FENCE_WAIT:
> +	 * Wait for the input fence before binding/unbinding
> +	 *
> +	 * I915_VM_BIND_FENCE_SIGNAL:
> +	 * Return bind/unbind completion fence as output
> +	 */
> +	__u32 flags;
> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
> + * and vm_unbind.
> + *
> + * This structure describes an array of timeline drm_syncobj and associated
> + * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
> + * can be input or output fences (See struct drm_i915_vm_bind_fence).
> + */
> +struct drm_i915_vm_bind_ext_timeline_fences {
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/**
> +	 * @fence_count: Number of elements in the @handles_ptr & @values_ptr
> +	 * arrays.
> +	 */
> +	__u64 fence_count;
> +
> +	/**
> +	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
> +	 * of length @fence_count.
> +	 */
> +	__u64 handles_ptr;
> +
> +	/**
> +	 * @values_ptr: Pointer to an array of u64 values of length
> +	 * @fence_count.
> +	 * Values must be 0 for a binary drm_syncobj. A value of 0 for a
> +	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
> +	 * binary one.
> +	 */
> +	__u64 values_ptr;
> +};
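
For illustration, requesting a bind completion fence through this extension
could look like the sketch below (syncobj is assumed to come from
DRM_IOCTL_SYNCOBJ_CREATE, and 'bind' is the drm_i915_gem_vm_bind from the
earlier sketch):

	struct drm_i915_vm_bind_fence out_fence = {
		.handle = syncobj,
		.flags = I915_VM_BIND_FENCE_SIGNAL,	/* out-fence */
	};
	__u64 point = 0;	/* 0: binary syncobj, not a timeline */
	struct drm_i915_vm_bind_ext_timeline_fences ext = {
		.base.name = I915_VM_BIND_EXT_TIMELINE_FENCES,
		.fence_count = 1,
		.handles_ptr = (__u64)(uintptr_t)&out_fence,
		.values_ptr = (__u64)(uintptr_t)&point,
	};

	bind.extensions = (__u64)(uintptr_t)&ext;	/* then issue VM_BIND */
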
> +
> +/**
> + * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
> + * vm_bind or the vm_unbind work.
> + *
> + * The vm_bind or vm_unbind async worker will wait for the input fence (value at
> + * @addr to become equal to @val) before starting the binding or unbinding.
> + *
> + * The vm_bind or vm_unbind async worker will signal the output fence after
> + * the completion of binding or unbinding by writing @val to the memory
> + * location at @addr.
> + */
> +struct drm_i915_vm_bind_user_fence {
> +	/** @addr: User/Memory fence qword aligned process virtual address */
> +	__u64 addr;
> +
> +	/** @val: User/Memory fence value to be written after bind completion */
> +	__u64 val;
> +
> +	/**
> +	 * @flags: Supported flags are:
> +	 *
> +	 * I915_VM_BIND_USER_FENCE_WAIT:
> +	 * Wait for the input fence before binding/unbinding
> +	 *
> +	 * I915_VM_BIND_USER_FENCE_SIGNAL:
> +	 * Return bind/unbind completion fence as output
> +	 */
> +	__u32 flags;
> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
> +	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
> + * and vm_unbind.
> + *
> + * These user fences can be input or output fences
> + * (See struct drm_i915_vm_bind_user_fence).
> + */
> +struct drm_i915_vm_bind_ext_user_fence {
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/** @fence_count: Number of elements in the @user_fence_ptr array. */
> +	__u64 fence_count;
> +
> +	/**
> +	 * @user_fence_ptr: Pointer to an array of
> +	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
> +	 */
> +	__u64 user_fence_ptr;
> +};
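
The equivalent with a user/memory fence, e.g. for compute contexts, might
look like this (fence_addr is an assumed qword-aligned process virtual
address; illustrative only):

	struct drm_i915_vm_bind_user_fence ufence = {
		.addr = fence_addr,
		.val = 1,
		.flags = I915_VM_BIND_USER_FENCE_SIGNAL,
	};
	struct drm_i915_vm_bind_ext_user_fence ext = {
		.base.name = I915_VM_BIND_EXT_USER_FENCES,
		.fence_count = 1,
		.user_fence_ptr = (__u64)(uintptr_t)&ufence,
	};

	bind.extensions = (__u64)(uintptr_t)&ext;
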
> +
> +/**
> + * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
> + * ioctl.
> + *
> + * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and VM_BIND mode
> + * only works with this ioctl for submission.
> + * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
> + */
> +struct drm_i915_gem_execbuffer3 {
> +	/**
> +	 * @ctx_id: Context id
> +	 *
> +	 * Only contexts with a user engine map are allowed.
> +	 */
> +	__u32 ctx_id;
> +
> +	/**
> +	 * @engine_idx: Engine index
> +	 *
> +	 * An index in the user engine map of the context specified by @ctx_id.
> +	 */
> +	__u32 engine_idx;
> +
> +	/** @rsvd1: Reserved, MBZ */
> +	__u32 rsvd1;
> +
> +	/**
> +	 * @batch_count: Number of batches in @batch_address array.
> +	 *
> +	 * 0 is invalid. For parallel submission, it should be equal to the
> +	 * number of (parallel) engines involved in that submission.
> +	 */
> +	__u32 batch_count;
> +
> +	/**
> +	 * @batch_address: Array of batch gpu virtual addresses.
> +	 *
> +	 * If @batch_count is 1, then it is the gpu virtual address of the
> +	 * batch buffer. If @batch_count > 1, then it is a pointer to an array
> +	 * of batch buffer gpu virtual addresses.
> +	 */
> +	__u64 batch_address;
> +
> +	/**
> +	 * @flags: Supported flags are:
> +	 *
> +	 * I915_EXEC3_SECURE:
> +	 * Request privileged ("secure") batch buffer(s).
> +	 * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
> +	 */
> +	__u64 flags;
> +#define I915_EXEC3_SECURE	(1<<0)
> +
> +	/** @rsvd2: Reserved, MBZ */
> +	__u64 rsvd2;
> +
> +	/**
> +	 * @extensions: Zero-terminated chain of extensions.
> +	 *
> +	 * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
> +	 * It has the same format as DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
> +	 * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
> +	 * 
> +	 * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
> +	 * First level batch completion signaling extension.
> +	 * See struct drm_i915_gem_execbuffer3_ext_user_fence.
> +	 */
> +	__u64 extensions;
> +#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES	0
> +#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE		1
> +};
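
A minimal submission through this proposed ioctl would then be roughly as
follows (ctx_id is a context created with a user engine map, batch_va a VA
bound earlier via VM_BIND; sketch only):

	struct drm_i915_gem_execbuffer3 exec = {
		.ctx_id = ctx_id,
		.engine_idx = 0,
		.batch_count = 1,
		.batch_address = batch_va,	/* no execlist, just a GPU VA */
	};

	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER3, &exec);
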
> +
> +/**
> + * struct drm_i915_gem_execbuffer3_ext_user_fence - First level batch completion
> + * signaling extension.
> + *
> + * This extension allows the user to attach a user fence (@addr, @value pair) to
> + * execbuf3, to be signaled by the command streamer after the completion of the
> + * first level batch, by writing the @value at the specified @addr and triggering
> + * an interrupt.
> + * The user can either poll for this user fence to signal or wait on it
> + * with the i915_gem_wait_user_fence ioctl.
> + * This is very useful for long running contexts where waiting on a dma-fence
> + * by the user (like the i915_gem_wait ioctl) is not supported.
> + */
> +struct drm_i915_gem_execbuffer3_ext_user_fence {
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/**
> +	 * @addr: User/Memory fence qword aligned GPU virtual address.
> +	 *
> +	 * Address has to be a valid GPU virtual address at the time of
> +	 * first level batch completion.
> +	 */
> +	__u64 addr;
> +
> +	/**
> +	 * @value: User/Memory fence value to be written to the above address
> +	 * after the first level batch completes.
> +	 */
> +	__u64 value;
> +
> +	/** @rsvd: Reserved, MBZ */
> +	__u64 rsvd;
> +};
> +

IMO all of these fence structs should be a generic sync interface shared
between both vm bind and exec3 rather than unique extensions.

Both vm bind and exec3 should have something like this:

__u64 syncs;	/* userptr to an array of generic syncs */
__u64 n_syncs;

Having an array of syncs lets the kernel do one user copy for all the
syncs rather than reading them in a chain.

A generic sync object encapsulates all possible syncs (in / out -
syncobj, syncobj timeline, ufence, future sync concepts).

e.g.

struct {
	__u32 user_ext;
	__u32 flag;	/* in / out, type, whatever else info we need */
	union {
		__u32 handle; 	/* to syncobj */
		__u64 addr; 	/* ufence address */
	};
	__u64 seqno;	/* syncobj timeline, ufence write value */
	...reserve enough bits for future...
}

This unifies binds and execs by using the same sync interface
instilling the concept that binds and execs are the same op (queue'd
operation /w in/out fences).

Matt

> +/**
> + * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
> + * private to the specified VM.
> + *
> + * See struct drm_i915_gem_create_ext.
> + */
> +struct drm_i915_gem_create_ext_vm_private {
> +#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/** @vm_id: Id of the VM to which the object is private */
> +	__u32 vm_id;
> +};
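
For illustration, creating a VM-private object with this extension could
look like the sketch below (against the existing GEM_CREATE_EXT ioctl;
error handling omitted):

	struct drm_i915_gem_create_ext_vm_private vm_priv = {
		.base.name = I915_GEM_CREATE_EXT_VM_PRIVATE,
		.vm_id = vm_id,
	};
	struct drm_i915_gem_create_ext create = {
		.size = 4096,
		.extensions = (__u64)(uintptr_t)&vm_priv,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create) == 0)
		bo_handle = create.handle;	/* only usable within vm_id */
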
> +
> +/**
> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
> + *
> + * User/Memory fence can be woken up either by:
> + *
> + * 1. GPU context indicated by @ctx_id, or,
> + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
> + *    @ctx_id is ignored when this flag is set.
> + *
> + * Wakeup condition is,
> + * ``((*addr & mask) op (value & mask))``
> + *
> + * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
> + */
> +struct drm_i915_gem_wait_user_fence {
> +	/** @extensions: Zero-terminated chain of extensions. */
> +	__u64 extensions;
> +
> +	/** @addr: User/Memory fence address */
> +	__u64 addr;
> +
> +	/** @ctx_id: Id of the Context which will signal the fence. */
> +	__u32 ctx_id;
> +
> +	/** @op: Wakeup condition operator */
> +	__u16 op;
> +#define I915_UFENCE_WAIT_EQ      0
> +#define I915_UFENCE_WAIT_NEQ     1
> +#define I915_UFENCE_WAIT_GT      2
> +#define I915_UFENCE_WAIT_GTE     3
> +#define I915_UFENCE_WAIT_LT      4
> +#define I915_UFENCE_WAIT_LTE     5
> +#define I915_UFENCE_WAIT_BEFORE  6
> +#define I915_UFENCE_WAIT_AFTER   7
> +
> +	/**
> +	 * @flags: Supported flags are:
> +	 *
> +	 * I915_UFENCE_WAIT_SOFT:
> +	 *
> +	 * To be woken up by i915 driver async worker (not by GPU).
> +	 *
> +	 * I915_UFENCE_WAIT_ABSTIME:
> +	 *
> +	 * Wait timeout specified as absolute time.
> +	 */
> +	__u16 flags;
> +#define I915_UFENCE_WAIT_SOFT    0x1
> +#define I915_UFENCE_WAIT_ABSTIME 0x2
> +
> +	/** @value: Wakeup value */
> +	__u64 value;
> +
> +	/** @mask: Wakeup mask */
> +	__u64 mask;
> +#define I915_UFENCE_WAIT_U8     0xffu
> +#define I915_UFENCE_WAIT_U16    0xffffu
> +#define I915_UFENCE_WAIT_U32    0xfffffffful
> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
> +
> +	/**
> +	 * @timeout: Wait timeout in nanoseconds.
> +	 *
> +	 * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
> +	 * absolute time in nsec.
> +	 */
> +	__s64 timeout;
> +};
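
Tying it together, waiting for a user fence such as the one signaled by the
execbuf3 extension above might look like this sketch (fence_addr is the
assumed fence address from before; a 1s relative timeout since ABSTIME is
not set):

	struct drm_i915_gem_wait_user_fence wait = {
		.addr = fence_addr,
		.ctx_id = ctx_id,	/* ignored with I915_UFENCE_WAIT_SOFT */
		.op = I915_UFENCE_WAIT_GTE,
		.value = 1,
		.mask = I915_UFENCE_WAIT_U64,
		.timeout = 1000000000,
	};

	int ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT_USER_FENCE, &wait);
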
> -- 
> 2.21.0.rc0.32.g243a4c7e27
>
Matthew Brost June 10, 2022, 8:56 a.m. UTC | #2
On Fri, Jun 10, 2022 at 01:53:40AM -0700, Matthew Brost wrote:
> On Fri, Jun 10, 2022 at 12:07:11AM -0700, Niranjana Vishwanathapura wrote:
> > VM_BIND and related uapi definitions
> > [...]
> > +
> > +	/** @length: Length of mapping to bind */
> > +	__u64 length;
> 
> This probably isn't needed. We are never going to unbind a subset of a
> VMA, are we? That being said, it can't hurt as a sanity check (e.g.
> internal vma->length == user unbind length).
> 

Ugh, I c/p this into the wrong place. This should be in the unbind struct.

> > [...]
Tvrtko Ursulin June 10, 2022, 10:37 a.m. UTC | #3
On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
> VM_BIND and related uapi definitions
> [...]
> +struct drm_i915_gem_vm_bind {
> +	/** @vm_id: VM (address space) id to bind */
> +	__u32 vm_id;
> +
> +	/** @queue_idx: Index of queue for binding */
> +	__u32 queue_idx;

I have a question here to which I did not find an answer by browsing the 
old threads.

Queue index appears to be an implicit synchronisation mechanism, right? 
Operations on the same index are executed/complete in order of ioctl 
submission?

Do we _have_ to implement this on the kernel side, or could we just allow
in/out fences and let userspace deal with it?

An arbitrary/on-demand number of queues will add complexity on the
kernel side, which should be avoided if possible.

Regards,

Tvrtko

> [...]
Lionel Landwerlin June 10, 2022, 2:48 p.m. UTC | #4
On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>
> On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>> VM_BIND and related uapi definitions
>>
>> Signed-off-by: Niranjana Vishwanathapura 
>> <niranjana.vishwanathapura@intel.com>
>> ---
>>   Documentation/gpu/rfc/i915_vm_bind.h | 490 +++++++++++++++++++++++++++
>>   1 file changed, 490 insertions(+)
>>   create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>
>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>> b/Documentation/gpu/rfc/i915_vm_bind.h
>> new file mode 100644
>> index 000000000000..9fc854969cfb
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>> @@ -0,0 +1,490 @@
>> +/* SPDX-License-Identifier: MIT */
>> +/*
>> + * Copyright © 2022 Intel Corporation
>> + */
>> +
>> +/**
>> + * DOC: I915_PARAM_HAS_VM_BIND
>> + *
>> + * VM_BIND feature availability.
>> + * See typedef drm_i915_getparam_t param.
>> + * bit[0]: If set, VM_BIND is supported, otherwise not.
>> + * bits[8-15]: VM_BIND implementation version.
>> + * version 0 will not have VM_BIND/UNBIND timeline fence array support.
>> + */
>> +#define I915_PARAM_HAS_VM_BIND        57
>> +
>> +/**
>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>> + *
>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>> + * See struct drm_i915_gem_vm_control flags.
>> + *
>> + * The older execbuf2 ioctl will not support VM_BIND mode of operation.
>> + * For VM_BIND mode, we have new execbuf3 ioctl which will not 
>> accept any
>> + * execlist (See struct drm_i915_gem_execbuffer3 for more details).
>> + *
>> + */
>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>> +
>> +/**
>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>> + *
>> + * Flag to declare context as long running.
>> + * See struct drm_i915_gem_context_create_ext flags.
>> + *
>> + * dma-fences are expected to complete in a reasonable amount of time.
>> + * Compute on the other hand can be long running. Hence it is not 
>> appropriate
>> + * for compute contexts to export request completion dma-fence to user.
>> + * The dma-fence usage will be limited to in-kernel consumption only.
>> + * Compute contexts need to use user/memory fence.
>> + *
>> + * So, long running contexts do not support output fences. Hence,
>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) is 
>> expected
>> + * to be not used. DRM_I915_GEM_WAIT ioctl call is also not 
>> supported for
>> + * objects mapped to long running contexts.
>> + */
>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>> +
>> +/* VM_BIND related ioctls */
>> +#define DRM_I915_GEM_VM_BIND        0x3d
>> +#define DRM_I915_GEM_VM_UNBIND        0x3e
>> +#define DRM_I915_GEM_EXECBUFFER3    0x3f
>> +#define DRM_I915_GEM_WAIT_USER_FENCE    0x40
>> +
>> +#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + 
>> DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE + 
>> DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>> +#define DRM_IOCTL_I915_GEM_EXECBUFFER3 DRM_IOWR(DRM_COMMAND_BASE + 
>> DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE 
>> + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
>> +
>> +/**
>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>> + *
>> + * This structure is passed to VM_BIND ioctl and specifies the 
>> mapping of GPU
>> + * virtual address (VA) range to the section of an object that 
>> should be bound
>> + * in the device page table of the specified address space (VM).
>> + * The VA range specified must be unique (i.e., not currently bound)
>> and can
>> + * be mapped to whole object or a section of the object (partial 
>> binding).
>> + * Multiple VA mappings can be created to the same section of the 
>> object
>> + * (aliasing).
>> + *
>> + * The @queue_idx specifies the queue to use for binding. Same queue 
>> can be
>> + * used for both VM_BIND and VM_UNBIND calls. All submitted bind and 
>> unbind
>> + * operations in a queue are performed in the order of submission.
>> + *
>> + * The @start, @offset and @length should be 4K page aligned. However the
>> + * DG2 and XEHPSDV have 64K page size for device local-memory and have a
>> + * compact page table. On those platforms, for binding device local-memory
>> + * objects, the @start should be 2M aligned, @offset and @length should be
>> + * 64K aligned. Also, on those platforms, it is not allowed to bind a
>> + * device local-memory object and a system memory object in a single 2M
>> + * section of VA range.
>> + */
>> +struct drm_i915_gem_vm_bind {
>> +    /** @vm_id: VM (address space) id to bind */
>> +    __u32 vm_id;
>> +
>> +    /** @queue_idx: Index of queue for binding */
>> +    __u32 queue_idx;
>
> I have a question here to which I did not find an answer by browsing 
> the old threads.
>
> Queue index appears to be an implicit synchronisation mechanism, 
> right? Operations on the same index are executed/complete in order of 
> ioctl submission?
>
> Do we _have_ to implement this on the kernel side and could just allow 
> in/out fence and let userspace deal with it?


It orders operations like in a queue, which is kind of what happens with
existing queues/engines.

If I understood correctly, it's going to be a kthread + a linked list, right?


-Lionel


>
> Arbitrary/on-demand number of queues will add the complexity on the 
> kernel side which should be avoided if possible.
>
> Regards,
>
> Tvrtko
>
>> +
>> +    /** @rsvd: Reserved, MBZ */
>> +    __u32 rsvd;
>> +
>> +    /** @handle: Object handle */
>> +    __u32 handle;
>> +
>> +    /** @start: Virtual Address start to bind */
>> +    __u64 start;
>> +
>> +    /** @offset: Offset in object to bind */
>> +    __u64 offset;
>> +
>> +    /** @length: Length of mapping to bind */
>> +    __u64 length;
>> +
>> +    /**
>> +     * @flags: Supported flags are:
>> +     *
>> +     * I915_GEM_VM_BIND_READONLY:
>> +     * Mapping is read-only.
>> +     *
>> +     * I915_GEM_VM_BIND_CAPTURE:
>> +     * Capture this mapping in the dump upon GPU error.
>> +     */
>> +    __u64 flags;
>> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>> +
>> +    /**
>> +     * @extensions: 0-terminated chain of extensions for this 
>> operation.
>> +     *
>> +     * I915_VM_BIND_EXT_TIMELINE_FENCES:
>> +     * Specifies an array of input or output timeline fences for this
>> +     * binding operation. See struct 
>> drm_i915_vm_bind_ext_timeline_fences.
>> +     *
>> +     * I915_VM_BIND_EXT_USER_FENCES:
>> +     * Specifies an array of input or output user fences for this
>> +     * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
>> +     * This is required for compute contexts.
>> +     */
>> +    __u64 extensions;
>> +#define I915_VM_BIND_EXT_TIMELINE_FENCES    0
>> +#define I915_VM_BIND_EXT_USER_FENCES        1
>> +};
>> +
>> +/**
>> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>> + *
>> + * This structure is passed to VM_UNBIND ioctl and specifies the GPU 
>> virtual
>> + * address (VA) range that should be unbound from the device page 
>> table of the
>> + * specified address space (VM). The specified VA range must match 
>> one of the
>> + * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>> + * completion.
>> + *
>> + * The @queue_idx specifies the queue to use for unbinding.
>> + * See struct drm_i915_gem_vm_bind for more information on @queue_idx.
>> + *
>> + * The @start and @length must specify a unique mapping bound with the
>> + * VM_BIND ioctl.
>> + */
>> +struct drm_i915_gem_vm_unbind {
>> +    /** @vm_id: VM (address space) id to bind */
>> +    __u32 vm_id;
>> +
>> +    /** @queue_idx: Index of queue for unbinding */
>> +    __u32 queue_idx;
>> +
>> +    /** @start: Virtual Address start to unbind */
>> +    __u64 start;
>> +
>> +    /** @length: Length of mapping to unbind */
>> +    __u64 length;
>> +
>> +    /** @flags: Reserved for future usage, currently MBZ */
>> +    __u64 flags;
>> +
>> +    /**
>> +     * @extensions: 0-terminated chain of extensions for this 
>> operation.
>> +     *
>> +     * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
>> +     * Specifies an array of input or output timeline fences for this
>> +     * unbind operation.
>> + * It has the same format as struct
>> drm_i915_vm_bind_ext_timeline_fences.
>> +     *
>> +     * I915_VM_UNBIND_EXT_USER_FENCES:
>> +     * Specifies an array of input or output user fences for this
>> +     * unbind operation. This is required for compute contexts.
>> + * It has the same format as struct drm_i915_vm_bind_ext_user_fence.
>> +     */
>> +    __u64 extensions;
>> +#define I915_VM_UNBIND_EXT_TIMELINE_FENCES    0
>> +#define I915_VM_UNBIND_EXT_USER_FENCES        1
>> +};
>> +
>> +/**
>> + * struct drm_i915_vm_bind_fence - An input or output fence for the 
>> vm_bind
>> + * or the vm_unbind work.
>> + *
>> + * The vm_bind or vm_unbind async worker will wait for the input fence
>> signal
>> + * before starting the binding or unbinding.
>> + *
>> + * The vm_bind or vm_unbind async worker will signal the returned 
>> output fence
>> + * after the completion of binding or unbinding.
>> + */
>> +struct drm_i915_vm_bind_fence {
>> +    /** @handle: User's handle for a drm_syncobj to wait on or 
>> signal. */
>> +    __u32 handle;
>> +
>> +    /**
>> +     * @flags: Supported flags are:
>> +     *
>> +     * I915_VM_BIND_FENCE_WAIT:
>> +     * Wait for the input fence before binding/unbinding
>> +     *
>> +     * I915_VM_BIND_FENCE_SIGNAL:
>> +     * Return bind/unbind completion fence as output
>> +     */
>> +    __u32 flags;
>> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>> (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>> +};
>> +
>> +/**
>> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for 
>> vm_bind
>> + * and vm_unbind.
>> + *
>> + * This structure describes an array of timeline drm_syncobj and 
>> associated
>> + * points for timeline variants of drm_syncobj. These timeline 
>> 'drm_syncobj's
>> + * can be input or output fences (See struct drm_i915_vm_bind_fence).
>> + */
>> +struct drm_i915_vm_bind_ext_timeline_fences {
>> +    /** @base: Extension link. See struct i915_user_extension. */
>> +    struct i915_user_extension base;
>> +
>> +    /**
>> +     * @fence_count: Number of elements in the @handles_ptr & 
>> @value_ptr
>> +     * arrays.
>> +     */
>> +    __u64 fence_count;
>> +
>> +    /**
>> +     * @handles_ptr: Pointer to an array of struct 
>> drm_i915_vm_bind_fence
>> +     * of length @fence_count.
>> +     */
>> +    __u64 handles_ptr;
>> +
>> +    /**
>> +     * @values_ptr: Pointer to an array of u64 values of length
>> +     * @fence_count.
>> +     * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>> +     * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
>> +     * binary one.
>> +     */
>> +    __u64 values_ptr;
>> +};
>> +
>> +/**
>> + * struct drm_i915_vm_bind_user_fence - An input or output user 
>> fence for the
>> + * vm_bind or the vm_unbind work.
>> + *
>> + * The vm_bind or vm_unbind async worker will wait for the input
>> fence (value at
>> + * @addr to become equal to @val) before starting the binding or 
>> unbinding.
>> + *
>> + * The vm_bind or vm_unbind async worker will signal the output 
>> fence after
>> + * the completion of binding or unbinding by writing @val to memory 
>> location at
>> + * @addr
>> + */
>> +struct drm_i915_vm_bind_user_fence {
>> +    /** @addr: User/Memory fence qword aligned process virtual 
>> address */
>> +    __u64 addr;
>> +
>> +    /** @val: User/Memory fence value to be written after bind 
>> completion */
>> +    __u64 val;
>> +
>> +    /**
>> +     * @flags: Supported flags are:
>> +     *
>> +     * I915_VM_BIND_USER_FENCE_WAIT:
>> +     * Wait for the input fence before binding/unbinding
>> +     *
>> +     * I915_VM_BIND_USER_FENCE_SIGNAL:
>> +     * Return bind/unbind completion fence as output
>> +     */
>> +    __u32 flags;
>> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>> +    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>> +};
>> +
>> +/**
>> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for 
>> vm_bind
>> + * and vm_unbind.
>> + *
>> + * These user fences can be input or output fences
>> + * (See struct drm_i915_vm_bind_user_fence).
>> + */
>> +struct drm_i915_vm_bind_ext_user_fence {
>> +    /** @base: Extension link. See struct i915_user_extension. */
>> +    struct i915_user_extension base;
>> +
>> +    /** @fence_count: Number of elements in the @user_fence_ptr 
>> array. */
>> +    __u64 fence_count;
>> +
>> +    /**
>> +     * @user_fence_ptr: Pointer to an array of
>> +     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>> +     */
>> +    __u64 user_fence_ptr;
>> +};
>> +
>> +/**
>> + * struct drm_i915_gem_execbuffer3 - Structure for 
>> DRM_I915_GEM_EXECBUFFER3
>> + * ioctl.
>> + *
>> + * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and 
>> VM_BIND mode
>> + * only works with this ioctl for submission.
>> + * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
>> + */
>> +struct drm_i915_gem_execbuffer3 {
>> +    /**
>> +     * @ctx_id: Context id
>> +     *
>> +     * Only contexts with user engine map are allowed.
>> +     */
>> +    __u32 ctx_id;
>> +
>> +    /**
>> +     * @engine_idx: Engine index
>> +     *
>> +     * An index in the user engine map of the context specified by 
>> @ctx_id.
>> +     */
>> +    __u32 engine_idx;
>> +
>> +    /** @rsvd1: Reserved, MBZ */
>> +    __u32 rsvd1;
>> +
>> +    /**
>> +     * @batch_count: Number of batches in @batch_address array.
>> +     *
>> +     * 0 is invalid. For parallel submission, it should be equal to the
>> +     * number of (parallel) engines involved in that submission.
>> +     */
>> +    __u32 batch_count;
>> +
>> +    /**
>> +     * @batch_address: Array of batch gpu virtual addresses.
>> +     *
>> +     * If @batch_count is 1, then it is the gpu virtual address of the
>> +     * batch buffer. If @batch_count > 1, then it is a pointer to an 
>> array
>> +     * of batch buffer gpu virtual addresses.
>> +     */
>> +    __u64 batch_address;
>> +
>> +    /**
>> +     * @flags: Supported flags are:
>> +     *
>> +     * I915_EXEC3_SECURE:
>> +     * Request a privileged ("secure") batch buffer/s.
>> +     * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
>> +     */
>> +    __u64 flags;
>> +#define I915_EXEC3_SECURE    (1<<0)
>> +
>> +    /** @rsvd2: Reserved, MBZ */
>> +    __u64 rsvd2;
>> +
>> +    /**
>> +     * @extensions: Zero-terminated chain of extensions.
>> +     *
>> +     * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
>> + * It has the same format as
>> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
>> +     * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
>> +     *
>> +     * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
>> +     * First level batch completion signaling extension.
>> +     * See struct drm_i915_gem_execbuffer3_ext_user_fence.
>> +     */
>> +    __u64 extensions;
>> +#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES    0
>> +#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE        1
>> +};
>> +
>> +/**
>> + * struct drm_i915_gem_execbuffer3_ext_user_fence - First level 
>> batch completion
>> + * signaling extension.
>> + *
>> + * This extension allows the user to attach a user fence (@addr, @value
>> + * pair) to execbuf3, to be signaled by the command streamer after the
>> + * completion of the first level batch, by writing the @value at the
>> + * specified @addr and triggering an interrupt.
>> + * User can either poll for this user fence to signal or wait on it
>> + * with the i915_gem_wait_user_fence ioctl.
>> + * This is very useful for long running contexts where waiting on dma-fence
>> + * by the user (like the i915_gem_wait ioctl) is not supported.
>> + */
>> +struct drm_i915_gem_execbuffer3_ext_user_fence {
>> +    /** @base: Extension link. See struct i915_user_extension. */
>> +    struct i915_user_extension base;
>> +
>> +    /**
>> +     * @addr: User/Memory fence qword aligned GPU virtual address.
>> +     *
>> +     * Address has to be a valid GPU virtual address at the time of
>> +     * first level batch completion.
>> +     */
>> +    __u64 addr;
>> +
>> +    /**
>> +     * @value: User/Memory fence value to be written to the above address
>> +     * after the first level batch completes.
>> +     */
>> +    __u64 value;
>> +
>> +    /** @rsvd: Reserved, MBZ */
>> +    __u64 rsvd;
>> +};
>> +
>> +/**
>> + * struct drm_i915_gem_create_ext_vm_private - Extension to make the 
>> object
>> + * private to the specified VM.
>> + *
>> + * See struct drm_i915_gem_create_ext.
>> + */
>> +struct drm_i915_gem_create_ext_vm_private {
>> +#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>> +    /** @base: Extension link. See struct i915_user_extension. */
>> +    struct i915_user_extension base;
>> +
>> +    /** @vm_id: Id of the VM to which the object is private */
>> +    __u32 vm_id;
>> +};
>> +
>> +/**
>> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>> + *
>> + * User/Memory fence can be woken up either by:
>> + *
>> + * 1. GPU context indicated by @ctx_id, or,
>> + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>> + *    @ctx_id is ignored when this flag is set.
>> + *
>> + * Wakeup condition is,
>> + * ``((*addr & mask) op (value & mask))``
>> + *
>> + * See :ref:`Documentation/driver-api/dma-buf.rst 
>> <indefinite_dma_fences>`
>> + */
>> +struct drm_i915_gem_wait_user_fence {
>> +    /** @extensions: Zero-terminated chain of extensions. */
>> +    __u64 extensions;
>> +
>> +    /** @addr: User/Memory fence address */
>> +    __u64 addr;
>> +
>> +    /** @ctx_id: Id of the Context which will signal the fence. */
>> +    __u32 ctx_id;
>> +
>> +    /** @op: Wakeup condition operator */
>> +    __u16 op;
>> +#define I915_UFENCE_WAIT_EQ      0
>> +#define I915_UFENCE_WAIT_NEQ     1
>> +#define I915_UFENCE_WAIT_GT      2
>> +#define I915_UFENCE_WAIT_GTE     3
>> +#define I915_UFENCE_WAIT_LT      4
>> +#define I915_UFENCE_WAIT_LTE     5
>> +#define I915_UFENCE_WAIT_BEFORE  6
>> +#define I915_UFENCE_WAIT_AFTER   7
>> +
>> +    /**
>> +     * @flags: Supported flags are:
>> +     *
>> +     * I915_UFENCE_WAIT_SOFT:
>> +     *
>> +     * To be woken up by i915 driver async worker (not by GPU).
>> +     *
>> +     * I915_UFENCE_WAIT_ABSTIME:
>> +     *
>> +     * Wait timeout specified as absolute time.
>> +     */
>> +    __u16 flags;
>> +#define I915_UFENCE_WAIT_SOFT    0x1
>> +#define I915_UFENCE_WAIT_ABSTIME 0x2
>> +
>> +    /** @value: Wakeup value */
>> +    __u64 value;
>> +
>> +    /** @mask: Wakeup mask */
>> +    __u64 mask;
>> +#define I915_UFENCE_WAIT_U8     0xffu
>> +#define I915_UFENCE_WAIT_U16    0xffffu
>> +#define I915_UFENCE_WAIT_U32    0xfffffffful
>> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>> +
>> +    /**
>> +     * @timeout: Wait timeout in nanoseconds.
>> +     *
>> +     * If the I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
>> +     * absolute time in nsec.
>> +     */
>> +    __s64 timeout;
>> +};
Niranjana Vishwanathapura June 10, 2022, 4:14 p.m. UTC | #5
On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
>On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>
>>On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>VM_BIND and related uapi definitions
>>>
>>>Signed-off-by: Niranjana Vishwanathapura 
>>><niranjana.vishwanathapura@intel.com>
>>>---
>>>  Documentation/gpu/rfc/i915_vm_bind.h | 490 +++++++++++++++++++++++++++
>>>  1 file changed, 490 insertions(+)
>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>
>>>diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>b/Documentation/gpu/rfc/i915_vm_bind.h
>>>new file mode 100644
>>>index 000000000000..9fc854969cfb
>>>--- /dev/null
>>>+++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>@@ -0,0 +1,490 @@
>>>+/* SPDX-License-Identifier: MIT */
>>>+/*
>>>+ * Copyright © 2022 Intel Corporation
>>>+ */
>>>+
>>>+/**
>>>+ * DOC: I915_PARAM_HAS_VM_BIND
>>>+ *
>>>+ * VM_BIND feature availability.
>>>+ * See typedef drm_i915_getparam_t param.
>>>+ * bit[0]: If set, VM_BIND is supported, otherwise not.
>>>+ * bits[8-15]: VM_BIND implementation version.
>>>+ * version 0 will not have VM_BIND/UNBIND timeline fence array support.
>>>+ */
>>>+#define I915_PARAM_HAS_VM_BIND        57
>>>+
>>>+/**
>>>+ * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>+ *
>>>+ * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>+ * See struct drm_i915_gem_vm_control flags.
>>>+ *
>>>+ * The older execbuf2 ioctl will not support VM_BIND mode of operation.
>>>+ * For VM_BIND mode, we have new execbuf3 ioctl which will not 
>>>accept any
>>>+ * execlist (See struct drm_i915_gem_execbuffer3 for more details).
>>>+ *
>>>+ */
>>>+#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>+
>>>+/**
>>>+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>+ *
>>>+ * Flag to declare context as long running.
>>>+ * See struct drm_i915_gem_context_create_ext flags.
>>>+ *
>>>+ * dma-fences are expected to complete in a reasonable amount of time.
>>>+ * Compute on the other hand can be long running. Hence it is not 
>>>appropriate
>>>+ * for compute contexts to export request completion dma-fence to user.
>>>+ * The dma-fence usage will be limited to in-kernel consumption only.
>>>+ * Compute contexts need to use user/memory fence.
>>>+ *
>>>+ * So, long running contexts do not support output fences. Hence,
>>>+ * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) is 
>>>expected
>>>+ * to be not used. DRM_I915_GEM_WAIT ioctl call is also not 
>>>supported for
>>>+ * objects mapped to long running contexts.
>>>+ */
>>>+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>+
>>>+/* VM_BIND related ioctls */
>>>+#define DRM_I915_GEM_VM_BIND        0x3d
>>>+#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>+#define DRM_I915_GEM_EXECBUFFER3    0x3f
>>>+#define DRM_I915_GEM_WAIT_USER_FENCE    0x40
>>>+
>>>+#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>+#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>>+#define DRM_IOCTL_I915_GEM_EXECBUFFER3 DRM_IOWR(DRM_COMMAND_BASE 
>>>+ DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
>>>+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct 
>>>drm_i915_gem_wait_user_fence)
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>+ *
>>>+ * This structure is passed to VM_BIND ioctl and specifies the 
>>>mapping of GPU
>>>+ * virtual address (VA) range to the section of an object that 
>>>should be bound
>>>+ * in the device page table of the specified address space (VM).
>>>+ * The VA range specified must be unique (i.e., not currently
>>>bound) and can
>>>+ * be mapped to whole object or a section of the object (partial 
>>>binding).
>>>+ * Multiple VA mappings can be created to the same section of the 
>>>object
>>>+ * (aliasing).
>>>+ *
>>>+ * The @queue_idx specifies the queue to use for binding. Same 
>>>queue can be
>>>+ * used for both VM_BIND and VM_UNBIND calls. All submitted bind 
>>>and unbind
>>>+ * operations in a queue are performed in the order of submission.
>>>+ *
>>>+ * The @start, @offset and @length should be 4K page aligned. However the
>>>+ * DG2 and XEHPSDV have 64K page size for device local-memory and have a
>>>+ * compact page table. On those platforms, for binding device local-memory
>>>+ * objects, the @start should be 2M aligned, @offset and @length should be
>>>+ * 64K aligned. Also, on those platforms, it is not allowed to bind a
>>>+ * device local-memory object and a system memory object in a single 2M
>>>+ * section of VA range.
>>>+ */
>>>+struct drm_i915_gem_vm_bind {
>>>+    /** @vm_id: VM (address space) id to bind */
>>>+    __u32 vm_id;
>>>+
>>>+    /** @queue_idx: Index of queue for binding */
>>>+    __u32 queue_idx;
>>
>>I have a question here to which I did not find an answer by browsing 
>>the old threads.
>>
>>Queue index appears to be an implicit synchronisation mechanism, 
>>right? Operations on the same index are executed/complete in order 
>>of ioctl submission?
>>
>>Do we _have_ to implement this on the kernel side and could just 
>>allow in/out fence and let userspace deal with it?
>
>
>It orders operations like in a queue, which is kind of what happens
>with existing queues/engines.
>
>If I understood correctly, it's going to be a kthread + a linked list, right?
>

Yes, that is correct.
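
Roughly along these lines, as an illustrative sketch only (all names are
made up, one such queue per VM per queue_idx; not the actual implementation):

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct bind_op {
	struct list_head link;
	/* vm_bind/vm_unbind payload plus its in/out fences */
};

struct bind_queue {
	spinlock_t lock;
	struct list_head ops;		/* pending ops, in submission order */
	wait_queue_head_t wq;		/* kicked by the ioctl on enqueue */
};

static int bind_queue_worker(void *data)
{
	struct bind_queue *q = data;

	while (!kthread_should_stop()) {
		struct bind_op *op;

		wait_event_interruptible(q->wq, !list_empty(&q->ops) ||
					 kthread_should_stop());

		spin_lock(&q->lock);
		op = list_first_entry_or_null(&q->ops, struct bind_op, link);
		if (op)
			list_del(&op->link);
		spin_unlock(&q->lock);

		if (!op)
			continue;

		/* wait for the in-fences, update the page table for this
		 * (un)bind, then signal its out-fences */
	}
	return 0;
}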

>
>-Lionel
>
>
>>
>>Arbitrary/on-demand number of queues will add the complexity on the 
>>kernel side which should be avoided if possible.
>>

It was discussed in the other thread. Jason prefers this over putting
an artificial limit on the number of queues (as the user can anyway
exhaust the memory). I think the complexity in the driver is manageable.

The other option being discussed is to have the user create those
queues (like creating an engine map) beforehand and use them in the
vm_bind and vm_unbind ioctls. This puts a limit on the number of queues,
but it is not clean either, and I am not sure it is worth making the
interface more complex.
https://www.spinics.net/lists/dri-devel/msg350448.html
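
For reference, that alternative would look roughly like the below
(hypothetical sketch only; neither this ioctl nor this struct is being
proposed):

/* Hypothetical: pre-create a bind/unbind queue on a VM. The returned
 * @queue_id would then be passed as @queue_idx to VM_BIND/VM_UNBIND. */
struct drm_i915_gem_vm_queue_create {
	/** @vm_id: VM (address space) id the queue belongs to */
	__u32 vm_id;

	/** @flags: MBZ */
	__u32 flags;

	/** @queue_id: Returned id of the created queue */
	__u32 queue_id;

	/** @rsvd: Reserved, MBZ */
	__u32 rsvd;
};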

Niranjana

>>Regards,
>>
>>Tvrtko
>>
>>>+
>>>+    /** @rsvd: Reserved, MBZ */
>>>+    __u32 rsvd;
>>>+
>>>+    /** @handle: Object handle */
>>>+    __u32 handle;
>>>+
>>>+    /** @start: Virtual Address start to bind */
>>>+    __u64 start;
>>>+
>>>+    /** @offset: Offset in object to bind */
>>>+    __u64 offset;
>>>+
>>>+    /** @length: Length of mapping to bind */
>>>+    __u64 length;
>>>+
>>>+    /**
>>>+     * @flags: Supported flags are:
>>>+     *
>>>+     * I915_GEM_VM_BIND_READONLY:
>>>+     * Mapping is read-only.
>>>+     *
>>>+     * I915_GEM_VM_BIND_CAPTURE:
>>>+     * Capture this mapping in the dump upon GPU error.
>>>+     */
>>>+    __u64 flags;
>>>+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>+
>>>+    /**
>>>+     * @extensions: 0-terminated chain of extensions for this 
>>>operation.
>>>+     *
>>>+     * I915_VM_BIND_EXT_TIMELINE_FENCES:
>>>+     * Specifies an array of input or output timeline fences for this
>>>+     * binding operation. See struct 
>>>drm_i915_vm_bind_ext_timeline_fences.
>>>+     *
>>>+     * I915_VM_BIND_EXT_USER_FENCES:
>>>+     * Specifies an array of input or output user fences for this
>>>+     * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
>>>+     * This is required for compute contexts.
>>>+     */
>>>+    __u64 extensions;
>>>+#define I915_VM_BIND_EXT_TIMELINE_FENCES    0
>>>+#define I915_VM_BIND_EXT_USER_FENCES        1
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>+ *
>>>+ * This structure is passed to VM_UNBIND ioctl and specifies the 
>>>GPU virtual
>>>+ * address (VA) range that should be unbound from the device page 
>>>table of the
>>>+ * specified address space (VM). The specified VA range must 
>>>match one of the
>>>+ * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>>>+ * completion.
>>>+ *
>>>+ * The @queue_idx specifies the queue to use for unbinding.
>>>+ * See struct drm_i915_gem_vm_bind for more information on @queue_idx.
>>>+ *
>>>+ * The @start and @length must specify a unique mapping bound with the
>>>+ * VM_BIND ioctl.
>>>+ */
>>>+struct drm_i915_gem_vm_unbind {
>>>+    /** @vm_id: VM (address space) id to bind */
>>>+    __u32 vm_id;
>>>+
>>>+    /** @queue_idx: Index of queue for unbinding */
>>>+    __u32 queue_idx;
>>>+
>>>+    /** @start: Virtual Address start to unbind */
>>>+    __u64 start;
>>>+
>>>+    /** @length: Length of mapping to unbind */
>>>+    __u64 length;
>>>+
>>>+    /** @flags: Reserved for future usage, currently MBZ */
>>>+    __u64 flags;
>>>+
>>>+    /**
>>>+     * @extensions: 0-terminated chain of extensions for this 
>>>operation.
>>>+     *
>>>+     * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
>>>+     * Specifies an array of input or output timeline fences for this
>>>+     * unbind operation.
>>>+ * It has the same format as struct
>>>drm_i915_vm_bind_ext_timeline_fences.
>>>+     *
>>>+     * I915_VM_UNBIND_EXT_USER_FENCES:
>>>+     * Specifies an array of input or output user fences for this
>>>+     * unbind operation. This is required for compute contexts.
>>>+ * It has the same format as struct drm_i915_vm_bind_ext_user_fence.
>>>+     */
>>>+    __u64 extensions;
>>>+#define I915_VM_UNBIND_EXT_TIMELINE_FENCES    0
>>>+#define I915_VM_UNBIND_EXT_USER_FENCES        1
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_vm_bind_fence - An input or output fence for 
>>>the vm_bind
>>>+ * or the vm_unbind work.
>>>+ *
>>>+ * The vm_bind or vm_unbind async worker will wait for the input fence
>>>to signal
>>>+ * before starting the binding or unbinding.
>>>+ *
>>>+ * The vm_bind or vm_unbind async worker will signal the returned 
>>>output fence
>>>+ * after the completion of binding or unbinding.
>>>+ */
>>>+struct drm_i915_vm_bind_fence {
>>>+    /** @handle: User's handle for a drm_syncobj to wait on or 
>>>signal. */
>>>+    __u32 handle;
>>>+
>>>+    /**
>>>+     * @flags: Supported flags are:
>>>+     *
>>>+     * I915_VM_BIND_FENCE_WAIT:
>>>+     * Wait for the input fence before binding/unbinding
>>>+     *
>>>+     * I915_VM_BIND_FENCE_SIGNAL:
>>>+     * Return bind/unbind completion fence as output
>>>+     */
>>>+    __u32 flags;
>>>+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>>(-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences 
>>>for vm_bind
>>>+ * and vm_unbind.
>>>+ *
>>>+ * This structure describes an array of timeline drm_syncobj and 
>>>associated
>>>+ * points for timeline variants of drm_syncobj. These timeline 
>>>'drm_syncobj's
>>>+ * can be input or output fences (See struct drm_i915_vm_bind_fence).
>>>+ */
>>>+struct drm_i915_vm_bind_ext_timeline_fences {
>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>+    struct i915_user_extension base;
>>>+
>>>+    /**
>>>+     * @fence_count: Number of elements in the @handles_ptr & 
>>>@value_ptr
>>>+     * arrays.
>>>+     */
>>>+    __u64 fence_count;
>>>+
>>>+    /**
>>>+     * @handles_ptr: Pointer to an array of struct 
>>>drm_i915_vm_bind_fence
>>>+     * of length @fence_count.
>>>+     */
>>>+    __u64 handles_ptr;
>>>+
>>>+    /**
>>>+     * @values_ptr: Pointer to an array of u64 values of length
>>>+     * @fence_count.
>>>+     * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>>>+     * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
>>>+     * binary one.
>>>+     */
>>>+    __u64 values_ptr;
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_vm_bind_user_fence - An input or output user 
>>>fence for the
>>>+ * vm_bind or the vm_unbind work.
>>>+ *
>>>+ * The vm_bind or vm_unbind async worker will wait for the input
>>>fence (value at
>>>+ * @addr to become equal to @val) before starting the binding or 
>>>unbinding.
>>>+ *
>>>+ * The vm_bind or vm_unbind async worker will signal the output 
>>>fence after
>>>+ * the completion of binding or unbinding by writing @val to 
>>>memory location at
>>>+ * @addr
>>>+ */
>>>+struct drm_i915_vm_bind_user_fence {
>>>+    /** @addr: User/Memory fence qword aligned process virtual 
>>>address */
>>>+    __u64 addr;
>>>+
>>>+    /** @val: User/Memory fence value to be written after bind 
>>>completion */
>>>+    __u64 val;
>>>+
>>>+    /**
>>>+     * @flags: Supported flags are:
>>>+     *
>>>+     * I915_VM_BIND_USER_FENCE_WAIT:
>>>+     * Wait for the input fence before binding/unbinding
>>>+     *
>>>+     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>+     * Return bind/unbind completion fence as output
>>>+     */
>>>+    __u32 flags;
>>>+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>+    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_vm_bind_ext_user_fence - User/memory fences 
>>>for vm_bind
>>>+ * and vm_unbind.
>>>+ *
>>>+ * These user fences can be input or output fences
>>>+ * (See struct drm_i915_vm_bind_user_fence).
>>>+ */
>>>+struct drm_i915_vm_bind_ext_user_fence {
>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>+    struct i915_user_extension base;
>>>+
>>>+    /** @fence_count: Number of elements in the @user_fence_ptr 
>>>array. */
>>>+    __u64 fence_count;
>>>+
>>>+    /**
>>>+     * @user_fence_ptr: Pointer to an array of
>>>+     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>+     */
>>>+    __u64 user_fence_ptr;
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_execbuffer3 - Structure for 
>>>DRM_I915_GEM_EXECBUFFER3
>>>+ * ioctl.
>>>+ *
>>>+ * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and 
>>>VM_BIND mode
>>>+ * only works with this ioctl for submission.
>>>+ * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
>>>+ */
>>>+struct drm_i915_gem_execbuffer3 {
>>>+    /**
>>>+     * @ctx_id: Context id
>>>+     *
>>>+     * Only contexts with user engine map are allowed.
>>>+     */
>>>+    __u32 ctx_id;
>>>+
>>>+    /**
>>>+     * @engine_idx: Engine index
>>>+     *
>>>+     * An index in the user engine map of the context specified 
>>>by @ctx_id.
>>>+     */
>>>+    __u32 engine_idx;
>>>+
>>>+    /** @rsvd1: Reserved, MBZ */
>>>+    __u32 rsvd1;
>>>+
>>>+    /**
>>>+     * @batch_count: Number of batches in @batch_address array.
>>>+     *
>>>+     * 0 is invalid. For parallel submission, it should be equal to the
>>>+     * number of (parallel) engines involved in that submission.
>>>+     */
>>>+    __u32 batch_count;
>>>+
>>>+    /**
>>>+     * @batch_address: Array of batch gpu virtual addresses.
>>>+     *
>>>+     * If @batch_count is 1, then it is the gpu virtual address of the
>>>+     * batch buffer. If @batch_count > 1, then it is a pointer to 
>>>an array
>>>+     * of batch buffer gpu virtual addresses.
>>>+     */
>>>+    __u64 batch_address;
>>>+
>>>+    /**
>>>+     * @flags: Supported flags are:
>>>+     *
>>>+     * I915_EXEC3_SECURE:
>>>+     * Request a privileged ("secure") batch buffer/s.
>>>+     * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
>>>+     */
>>>+    __u64 flags;
>>>+#define I915_EXEC3_SECURE    (1<<0)
>>>+
>>>+    /** @rsvd2: Reserved, MBZ */
>>>+    __u64 rsvd2;
>>>+
>>>+    /**
>>>+     * @extensions: Zero-terminated chain of extensions.
>>>+     *
>>>+     * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
>>>+ * It has the same format as
>>>DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
>>>+     * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
>>>+     *
>>>+     * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
>>>+     * First level batch completion signaling extension.
>>>+     * See struct drm_i915_gem_execbuffer3_ext_user_fence.
>>>+     */
>>>+    __u64 extensions;
>>>+#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES    0
>>>+#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE        1
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_execbuffer3_ext_user_fence - First level 
>>>batch completion
>>>+ * signaling extension.
>>>+ *
>>>+ * This extension allows the user to attach a user fence (@addr, @value
>>>+ * pair) to execbuf3, to be signaled by the command streamer after the
>>>+ * completion of the first level batch, by writing the @value at the
>>>+ * specified @addr and triggering an interrupt.
>>>+ * User can either poll for this user fence to signal or wait on it
>>>+ * with the i915_gem_wait_user_fence ioctl.
>>>+ * This is very useful for long running contexts where waiting on dma-fence
>>>+ * by the user (like the i915_gem_wait ioctl) is not supported.
>>>+ */
>>>+struct drm_i915_gem_execbuffer3_ext_user_fence {
>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>+    struct i915_user_extension base;
>>>+
>>>+    /**
>>>+     * @addr: User/Memory fence qword aligned GPU virtual address.
>>>+     *
>>>+     * Address has to be a valid GPU virtual address at the time of
>>>+     * first level batch completion.
>>>+     */
>>>+    __u64 addr;
>>>+
>>>+    /**
>>>+     * @value: User/Memory fence value to be written to the above address
>>>+     * after the first level batch completes.
>>>+     */
>>>+    __u64 value;
>>>+
>>>+    /** @rsvd: Reserved, MBZ */
>>>+    __u64 rsvd;
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_create_ext_vm_private - Extension to make 
>>>the object
>>>+ * private to the specified VM.
>>>+ *
>>>+ * See struct drm_i915_gem_create_ext.
>>>+ */
>>>+struct drm_i915_gem_create_ext_vm_private {
>>>+#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>+    struct i915_user_extension base;
>>>+
>>>+    /** @vm_id: Id of the VM to which the object is private */
>>>+    __u32 vm_id;
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>>+ *
>>>+ * User/Memory fence can be woken up either by:
>>>+ *
>>>+ * 1. GPU context indicated by @ctx_id, or,
>>>+ * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>+ *    @ctx_id is ignored when this flag is set.
>>>+ *
>>>+ * Wakeup condition is,
>>>+ * ``((*addr & mask) op (value & mask))``
>>>+ *
>>>+ * See :ref:`Documentation/driver-api/dma-buf.rst 
>>><indefinite_dma_fences>`
>>>+ */
>>>+struct drm_i915_gem_wait_user_fence {
>>>+    /** @extensions: Zero-terminated chain of extensions. */
>>>+    __u64 extensions;
>>>+
>>>+    /** @addr: User/Memory fence address */
>>>+    __u64 addr;
>>>+
>>>+    /** @ctx_id: Id of the Context which will signal the fence. */
>>>+    __u32 ctx_id;
>>>+
>>>+    /** @op: Wakeup condition operator */
>>>+    __u16 op;
>>>+#define I915_UFENCE_WAIT_EQ      0
>>>+#define I915_UFENCE_WAIT_NEQ     1
>>>+#define I915_UFENCE_WAIT_GT      2
>>>+#define I915_UFENCE_WAIT_GTE     3
>>>+#define I915_UFENCE_WAIT_LT      4
>>>+#define I915_UFENCE_WAIT_LTE     5
>>>+#define I915_UFENCE_WAIT_BEFORE  6
>>>+#define I915_UFENCE_WAIT_AFTER   7
>>>+
>>>+    /**
>>>+     * @flags: Supported flags are:
>>>+     *
>>>+     * I915_UFENCE_WAIT_SOFT:
>>>+     *
>>>+     * To be woken up by i915 driver async worker (not by GPU).
>>>+     *
>>>+     * I915_UFENCE_WAIT_ABSTIME:
>>>+     *
>>>+     * Wait timeout specified as absolute time.
>>>+     */
>>>+    __u16 flags;
>>>+#define I915_UFENCE_WAIT_SOFT    0x1
>>>+#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>+
>>>+    /** @value: Wakeup value */
>>>+    __u64 value;
>>>+
>>>+    /** @mask: Wakeup mask */
>>>+    __u64 mask;
>>>+#define I915_UFENCE_WAIT_U8     0xffu
>>>+#define I915_UFENCE_WAIT_U16    0xffffu
>>>+#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>+
>>>+    /**
>>>+     * @timeout: Wait timeout in nanoseconds.
>>>+     *
>>>+     * If the I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
>>>+     * absolute time in nsec.
>>>+     */
>>>+    __s64 timeout;
>>>+};
>
>
Niranjana Vishwanathapura June 10, 2022, 4:35 p.m. UTC | #6
On Fri, Jun 10, 2022 at 01:56:58AM -0700, Matthew Brost wrote:
>On Fri, Jun 10, 2022 at 01:53:40AM -0700, Matthew Brost wrote:
>> On Fri, Jun 10, 2022 at 12:07:11AM -0700, Niranjana Vishwanathapura wrote:
>> > VM_BIND and related uapi definitions
>> >
>> > Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
>> > ---
>> >  Documentation/gpu/rfc/i915_vm_bind.h | 490 +++++++++++++++++++++++++++
>> >  1 file changed, 490 insertions(+)
>> >  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>> >
>> > diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
>> > new file mode 100644
>> > index 000000000000..9fc854969cfb
>> > --- /dev/null
>> > +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>> > @@ -0,0 +1,490 @@
>> > +/* SPDX-License-Identifier: MIT */
>> > +/*
>> > + * Copyright © 2022 Intel Corporation
>> > + */
>> > +
>> > +/**
>> > + * DOC: I915_PARAM_HAS_VM_BIND
>> > + *
>> > + * VM_BIND feature availability.
>> > + * See typedef drm_i915_getparam_t param.
>> > + * bit[0]: If set, VM_BIND is supported, otherwise not.
>> > + * bits[8-15]: VM_BIND implementation version.
>> > + * version 0 will not have VM_BIND/UNBIND timeline fence array support.
>> > + */
>> > +#define I915_PARAM_HAS_VM_BIND		57
>> > +
>> > +/**
>> > + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>> > + *
>> > + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>> > + * See struct drm_i915_gem_vm_control flags.
>> > + *
>> > + * The older execbuf2 ioctl will not support VM_BIND mode of operation.
>> > + * For VM_BIND mode, we have new execbuf3 ioctl which will not accept any
>> > + * execlist (See struct drm_i915_gem_execbuffer3 for more details).
>> > + *
>> > + */
>> > +#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
>> > +
>> > +/**
>> > + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>> > + *
>> > + * Flag to declare context as long running.
>> > + * See struct drm_i915_gem_context_create_ext flags.
>> > + *
>> > + * dma-fences are expected to complete in a reasonable amount of time.
>> > + * Compute on the other hand can be long running. Hence it is not appropriate
>> > + * for compute contexts to export request completion dma-fence to user.
>> > + * The dma-fence usage will be limited to in-kernel consumption only.
>> > + * Compute contexts need to use user/memory fence.
>> > + *
>> > + * So, long running contexts do not support output fences. Hence,
>> > + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) is expected
>> > + * to be not used. DRM_I915_GEM_WAIT ioctl call is also not supported for
>> > + * objects mapped to long running contexts.
>> > + */
>> > +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>> > +
>> > +/* VM_BIND related ioctls */
>> > +#define DRM_I915_GEM_VM_BIND		0x3d
>> > +#define DRM_I915_GEM_VM_UNBIND		0x3e
>> > +#define DRM_I915_GEM_EXECBUFFER3	0x3f
>> > +#define DRM_I915_GEM_WAIT_USER_FENCE	0x40
>> > +
>> > +#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>> > +#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>> > +#define DRM_IOCTL_I915_GEM_EXECBUFFER3		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
>> > +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
>> > +
>> > +/**
>> > + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>> > + *
>> > + * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
>> > + * virtual address (VA) range to the section of an object that should be bound
>> > + * in the device page table of the specified address space (VM).
>> > + * The VA range specified must be unique (i.e., not currently bound) and can
>> > + * be mapped to whole object or a section of the object (partial binding).
>> > + * Multiple VA mappings can be created to the same section of the object
>> > + * (aliasing).
>> > + *
>> > + * The @queue_idx specifies the queue to use for binding. Same queue can be
>> > + * used for both VM_BIND and VM_UNBIND calls. All submitted bind and unbind
>> > + * operations in a queue are performed in the order of submission.
>> > + *
>> > + * The @start, @offset and @length should be 4K page aligned. However the
>> > + * DG2 and XEHPSDV have 64K page size for device local-memory and have a
>> > + * compact page table. On those platforms, for binding device local-memory
>> > + * objects, the @start should be 2M aligned, @offset and @length should be
>> > + * 64K aligned. Also, on those platforms, it is not allowed to bind a
>> > + * device local-memory object and a system memory object in a single 2M
>> > + * section of VA range.
>> > + */
>> > +struct drm_i915_gem_vm_bind {
>> > +	/** @vm_id: VM (address space) id to bind */
>> > +	__u32 vm_id;
>> > +
>> > +	/** @queue_idx: Index of queue for binding */
>> > +	__u32 queue_idx;
>> > +
>> > +	/** @rsvd: Reserved, MBZ */
>> > +	__u32 rsvd;
>> > +
>> > +	/** @handle: Object handle */
>> > +	__u32 handle;
>> > +
>> > +	/** @start: Virtual Address start to bind */
>> > +	__u64 start;
>> > +
>> > +	/** @offset: Offset in object to bind */
>> > +	__u64 offset;
>> > +
>> > +	/** @length: Length of mapping to bind */
>> > +	__u64 length;
>>
>> This probably isn't needed. We are never going to unbind a subset of a
>> VMA, are we? That being said, it can't hurt as a sanity check (e.g.
>> internal vma->length == user unbind length).
>>
>
>Ugh, I c/p this into the wrong place. This should be in the unbind struct.

Having the 'length' field for unbind helps if in the future we want to
allow unbinding of multiple mappings (vmas) in a single ioctl call,
i.e., all mappings that fall in the 'start' - 'start+length-1' range
can be unmapped. We don't support that today as it is somewhat tied to
operations like vma split/merge etc. which we are not supporting with
the initial vm_bind support.
So yeah, currently it helps as a sanity check to ensure the user is
correctly unbinding a mapping.
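
Something like this on the ioctl side (an illustrative sketch with made-up
names, not the actual code):

/* Unbind requires an exact match of an existing mapping. */
static int vm_unbind_sanity_check(struct vm *vm, u64 start, u64 length)
{
	struct mapping *m = vm_lookup_mapping(vm, start); /* hypothetical */

	if (!m || m->start != start)
		return -ENOENT;		/* no mapping starts at 'start' */
	if (m->length != length)
		return -EINVAL;		/* must match the original VM_BIND */

	return 0;			/* unbind exactly this one mapping */
}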

>
>> > +
>> > +	/**
>> > +	 * @flags: Supported flags are:
>> > +	 *
>> > +	 * I915_GEM_VM_BIND_READONLY:
>> > +	 * Mapping is read-only.
>> > +	 *
>> > +	 * I915_GEM_VM_BIND_CAPTURE:
>> > +	 * Capture this mapping in the dump upon GPU error.
>> > +	 */
>> > +	__u64 flags;
>> > +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>> > +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>> > +
>> > +	/**
>> > +	 * @extensions: 0-terminated chain of extensions for this operation.
>> > +	 *
>> > +	 * I915_VM_BIND_EXT_TIMELINE_FENCES:
>> > +	 * Specifies an array of input or output timeline fences for this
>> > +	 * binding operation. See struct drm_i915_vm_bind_ext_timeline_fences.
>> > +	 *
>> > +	 * I915_VM_BIND_EXT_USER_FENCES:
>> > +	 * Specifies an array of input or output user fences for this
>> > +	 * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
>> > +	 * This is required for compute contexts.
>> > +	 */
>> > +	__u64 extensions;
>> > +#define I915_VM_BIND_EXT_TIMELINE_FENCES	0
>> > +#define I915_VM_BIND_EXT_USER_FENCES		1
>> > +};
>> > +
>> > +/**
>> > + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>> > + *
>> > + * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
>> > + * address (VA) range that should be unbound from the device page table of the
>> > + * specified address space (VM). The specified VA range must match one of the
>> > + * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>> > + * completion.
>> > + *
>> > + * The @queue_idx specifies the queue to use for unbinding.
>> > + * See struct drm_i915_gem_vm_bind for more information on @queue_idx.
>> > + *
>> > + * The @start and @length must specify a unique mapping bound with the VM_BIND
>> > + * ioctl.
>> > + */
>> > +struct drm_i915_gem_vm_unbind {
>> > +	/** @vm_id: VM (address space) id to bind */
>> > +	__u32 vm_id;
>> > +
>> > +	/** @queue_idx: Index of queue for unbinding */
>> > +	__u32 queue_idx;
>> > +
>> > +	/** @start: Virtual Address start to unbind */
>> > +	__u64 start;
>> > +
>> > +	/** @length: Length of mapping to unbind */
>> > +	__u64 length;
>> > +
>> > +	/** @flags: Reserved for future usage, currently MBZ */
>> > +	__u64 flags;
>> > +
>> > +	/**
>> > +	 * @extensions: 0-terminated chain of extensions for this operation.
>> > +	 *
>> > +	 * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
>> > +	 * Specifies an array of input or output timeline fences for this
>> > +	 * unbind operation.
>> > + * It has the same format as struct drm_i915_vm_bind_ext_timeline_fences.
>> > +	 *
>> > +	 * I915_VM_UNBIND_EXT_USER_FENCES:
>> > +	 * Specifies an array of input or output user fences for this
>> > +	 * unbind operation. This is required for compute contexts.
>> > + * It has the same format as struct drm_i915_vm_bind_ext_user_fence.
>> > +	 */
>> > +	__u64 extensions;
>> > +#define I915_VM_UNBIND_EXT_TIMELINE_FENCES	0
>> > +#define I915_VM_UNBIND_EXT_USER_FENCES		1
>> > +};
>> > +
>> > +/**
>> > + * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
>> > + * or the vm_unbind work.
>> > + *
>> > + * The vm_bind or vm_unbind async worker will wait for the input fence to signal
>> > + * before starting the binding or unbinding.
>> > + *
>> > + * The vm_bind or vm_unbind async worker will signal the returned output fence
>> > + * after the completion of binding or unbinding.
>> > + */
>> > +struct drm_i915_vm_bind_fence {
>> > +	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
>> > +	__u32 handle;
>> > +
>> > +	/**
>> > +	 * @flags: Supported flags are:
>> > +	 *
>> > +	 * I915_VM_BIND_FENCE_WAIT:
>> > +	 * Wait for the input fence before binding/unbinding
>> > +	 *
>> > +	 * I915_VM_BIND_FENCE_SIGNAL:
>> > +	 * Return bind/unbind completion fence as output
>> > +	 */
>> > +	__u32 flags;
>> > +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>> > +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>> > +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>> > +};
>> > +
>> > +/**
>> > + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
>> > + * and vm_unbind.
>> > + *
>> > + * This structure describes an array of timeline drm_syncobj and associated
>> > + * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
>> > + * can be input or output fences (See struct drm_i915_vm_bind_fence).
>> > + */
>> > +struct drm_i915_vm_bind_ext_timeline_fences {
>> > +	/** @base: Extension link. See struct i915_user_extension. */
>> > +	struct i915_user_extension base;
>> > +
>> > +	/**
>> > +	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
>> > +	 * arrays.
>> > +	 */
>> > +	__u64 fence_count;
>> > +
>> > +	/**
>> > +	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
>> > +	 * of length @fence_count.
>> > +	 */
>> > +	__u64 handles_ptr;
>> > +
>> > +	/**
>> > +	 * @values_ptr: Pointer to an array of u64 values of length
>> > +	 * @fence_count.
>> > +	 * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>> > +	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
>> > +	 * binary one.
>> > +	 */
>> > +	__u64 values_ptr;
>> > +};
>> > +
>> > +/**
>> > + * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
>> > + * vm_bind or the vm_unbind work.
>> > + *
>> > + * The vm_bind or vm_unbind async worker will wait for the input fence (value at
>> > + * @addr to become equal to @val) before starting the binding or unbinding.
>> > + *
>> > + * The vm_bind or vm_unbind async worker will signal the output fence after
>> > + * the completion of binding or unbinding by writing @val to memory location at
>> > + * @addr
>> > + */
>> > +struct drm_i915_vm_bind_user_fence {
>> > +	/** @addr: User/Memory fence qword aligned process virtual address */
>> > +	__u64 addr;
>> > +
>> > +	/** @val: User/Memory fence value to be written after bind completion */
>> > +	__u64 val;
>> > +
>> > +	/**
>> > +	 * @flags: Supported flags are:
>> > +	 *
>> > +	 * I915_VM_BIND_USER_FENCE_WAIT:
>> > +	 * Wait for the input fence before binding/unbinding
>> > +	 *
>> > +	 * I915_VM_BIND_USER_FENCE_SIGNAL:
>> > +	 * Return bind/unbind completion fence as output
>> > +	 */
>> > +	__u32 flags;
>> > +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>> > +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>> > +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>> > +	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>> > +};
>> > +
>> > +/**
>> > + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
>> > + * and vm_unbind.
>> > + *
>> > + * These user fences can be input or output fences
>> > + * (See struct drm_i915_vm_bind_user_fence).
>> > + */
>> > +struct drm_i915_vm_bind_ext_user_fence {
>> > +	/** @base: Extension link. See struct i915_user_extension. */
>> > +	struct i915_user_extension base;
>> > +
>> > +	/** @fence_count: Number of elements in the @user_fence_ptr array. */
>> > +	__u64 fence_count;
>> > +
>> > +	/**
>> > +	 * @user_fence_ptr: Pointer to an array of
>> > +	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
>> > +	 */
>> > +	__u64 user_fence_ptr;
>> > +};
>> > +
>> > +/**
>> > + * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
>> > + * ioctl.
>> > + *
>> > + * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and VM_BIND mode
>> > + * only works with this ioctl for submission.
>> > + * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
>> > + */
>> > +struct drm_i915_gem_execbuffer3 {
>> > +	/**
>> > +	 * @ctx_id: Context id
>> > +	 *
>> > +	 * Only contexts with user engine map are allowed.
>> > +	 */
>> > +	__u32 ctx_id;
>> > +
>> > +	/**
>> > +	 * @engine_idx: Engine index
>> > +	 *
>> > +	 * An index in the user engine map of the context specified by @ctx_id.
>> > +	 */
>> > +	__u32 engine_idx;
>> > +
>> > +	/** @rsvd1: Reserved, MBZ */
>> > +	__u32 rsvd1;
>> > +
>> > +	/**
>> > +	 * @batch_count: Number of batches in @batch_address array.
>> > +	 *
>> > +	 * 0 is invalid. For parallel submission, it should be equal to the
>> > +	 * number of (parallel) engines involved in that submission.
>> > +	 */
>> > +	__u32 batch_count;
>> > +
>> > +	/**
>> > +	 * @batch_address: Array of batch gpu virtual addresses.
>> > +	 *
>> > +	 * If @batch_count is 1, then it is the gpu virtual address of the
>> > +	 * batch buffer. If @batch_count > 1, then it is a pointer to an array
>> > +	 * of batch buffer gpu virtual addresses.
>> > +	 */
>> > +	__u64 batch_address;
>> > +
>> > +	/**
>> > +	 * @flags: Supported flags are:
>> > +	 *
>> > +	 * I915_EXEC3_SECURE:
>> > +	 * Request a privileged ("secure") batch buffer/s.
>> > +	 * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
>> > +	 */
>> > +	__u64 flags;
>> > +#define I915_EXEC3_SECURE	(1<<0)
>> > +
>> > +	/** @rsvd2: Reserved, MBZ */
>> > +	__u64 rsvd2;
>> > +
>> > +	/**
>> > +	 * @extensions: Zero-terminated chain of extensions.
>> > +	 *
>> > +	 * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
>> > + * It has the same format as DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
>> > +	 * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
>> > +	 *
>> > +	 * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
>> > +	 * First level batch completion signaling extension.
>> > +	 * See struct drm_i915_gem_execbuffer3_ext_user_fence.
>> > +	 */
>> > +	__u64 extensions;
>> > +#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES	0
>> > +#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE		1
>> > +};
>> > +
>> > +/**
>> > + * struct drm_i915_gem_execbuffer3_ext_user_fence - First level batch completion
>> > + * signaling extension.
>> > + *
>> > + * This extension allows the user to attach a user fence (@addr, @value pair)
>> > + * to execbuf3, to be signaled by the command streamer after the completion
>> > + * of the first level batch, by writing the @value at the specified @addr and
>> > + * triggering an interrupt.
>> > + * User can either poll for this user fence to signal or wait on it
>> > + * with the i915_gem_wait_user_fence ioctl.
>> > + * This is very useful for long running contexts where waiting on dma-fence
>> > + * by the user (like the i915_gem_wait ioctl) is not supported.
>> > + */
>> > +struct drm_i915_gem_execbuffer3_ext_user_fence {
>> > +	/** @base: Extension link. See struct i915_user_extension. */
>> > +	struct i915_user_extension base;
>> > +
>> > +	/**
>> > +	 * @addr: User/Memory fence qword aligned GPU virtual address.
>> > +	 *
>> > +	 * Address has to be a valid GPU virtual address at the time of
>> > +	 * first level batch completion.
>> > +	 */
>> > +	__u64 addr;
>> > +
>> > +	/**
>> > +	 * @value: User/Memory fence value to be written to the above address
>> > +	 * after the first level batch completes.
>> > +	 */
>> > +	__u64 value;
>> > +
>> > +	/** @rsvd: Reserved, MBZ */
>> > +	__u64 rsvd;
>> > +};
>> > +
>>
>> IMO all of these fence structs should be a generic sync interface shared
>> between both vm bind and exec3 rather than unique extensions.
>>
>> Both vm bind and exec3 should have something like this:
>>
>> __u64 syncs;	/* userptr to an array of generic syncs */
>> __u64 n_syncs;
>>
>> Having an array of syncs lets the kernel do one user copy for all the
>> syncs rather than reading them in a chain.
>>
>> A generic sync object encapsulates all possible syncs (in / out -
>> syncobj, syncobj timeline, ufence, future sync concepts).
>>
>> e.g.
>>
>> struct {
>> 	__u32 user_ext;
>> 	__u32 flag;	/* in / out, type, whatever else info we need */
>> 	union {
>> 		__u32 handle; 	/* to syncobj */
>> 		__u64 addr; 	/* ufence address */
>> 	};
>> 	__u64 seqno;	/* syncobj timeline, ufence write value */
>> 	...reserve enough bits for future...
>> }
>>
>> This unifies binds and execs by using the same sync interface,
>> instilling the concept that binds and execs are the same op (a queued
>> operation with in/out fences).
>>
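
For reference, fleshing the proposal above out into a compilable uapi
sketch might give something like the following. The struct name, field
layout and flag values are invented here purely for illustration; they
are not part of this RFC:

#include <linux/types.h>

struct i915_gem_sync {
	/* 0-terminated extension chain, for future sync concepts */
	__u64 extensions;

	union {
		/* drm_syncobj handle, for syncobj/timeline types */
		__u32 handle;
		/* qword-aligned address, for user fence types */
		__u64 addr;
	};

	/* timeline point or user fence write value */
	__u64 seqno;

	/* direction (in/out) and type of the sync point */
	__u32 flags;
#define I915_SYNC_IN			(1 << 0)
#define I915_SYNC_OUT			(1 << 1)
#define I915_SYNC_TYPE_SYNCOBJ		(0 << 2)
#define I915_SYNC_TYPE_TIMELINE		(1 << 2)
#define I915_SYNC_TYPE_USER_FENCE	(2 << 2)

	/* MBZ, reserved for future use */
	__u32 rsvd;
};

The vm_bind/vm_unbind and execbuf3 structs would then each carry a
"__u64 syncs" userptr plus a "__u64 n_syncs" count instead of the
per-type extensions.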

Hmm... I am not sure we really need to merge all fence types
into a single structure.

Yes, the timeline fence array struct is the same between vm_bind and
execbuf. But the user fence array is not. For the compute use case,
VM_BIND/UNBIND can take a user fence array, while execbuf will only have
a single user fence as an out fence. Execbuf can take a timeline fence
array as 'in' fences for the compute use case.

Besides, for VM_BIND/UNBIND, the user will use either the timeline fence
array (mesa) or the user fence array (compute) and will never mix
them (i.e., having some fences as timeline fences and some as user
fences is not allowed). Hence, there is no multiple copy_from_user concern.
So, I am not sure having a generic fence is a good idea. It will
also be less confusing for the user if we define them separately.
But internally i915 can maintain them in a generic structure if needed.
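
As a rough illustration of the compute path with the uapi as defined
(a sketch only; the wrapper function and its parameters are invented for
this example, while the structs and flags come from the RFC header):

#include <stdint.h>
#include <sys/ioctl.h>
/* assumes the RFC's i915_vm_bind.h definitions are available */

static void bind_with_user_out_fence(int drm_fd,
				     struct drm_i915_gem_vm_bind *bind,
				     __u64 fence_addr, __u64 fence_val)
{
	struct drm_i915_vm_bind_user_fence ufence = {
		.addr = fence_addr,	/* qword aligned */
		.val = fence_val,
		.flags = I915_VM_BIND_USER_FENCE_SIGNAL,
	};
	struct drm_i915_vm_bind_ext_user_fence ext = {
		.base.name = I915_VM_BIND_EXT_USER_FENCES,
		.fence_count = 1,
		.user_fence_ptr = (__u64)(uintptr_t)&ufence,
	};

	bind->extensions = (__u64)(uintptr_t)&ext;
	ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_BIND, bind);
}

A mesa-style caller would instead hang a single
drm_i915_vm_bind_ext_timeline_fences extension off @extensions, but as
argued above it would never mix the two extension types in one call.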

Niranjana

>> Matt
>>
>> > +/**
>> > + * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
>> > + * private to the specified VM.
>> > + *
>> > + * See struct drm_i915_gem_create_ext.
>> > + */
>> > +struct drm_i915_gem_create_ext_vm_private {
>> > +#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
>> > +	/** @base: Extension link. See struct i915_user_extension. */
>> > +	struct i915_user_extension base;
>> > +
>> > +	/** @vm_id: Id of the VM to which the object is private */
>> > +	__u32 vm_id;
>> > +};
>> > +
>> > +/**
>> > + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>> > + *
>> > + * User/Memory fence can be woken up either by:
>> > + *
>> > + * 1. GPU context indicated by @ctx_id, or,
>> > + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>> > + *    @ctx_id is ignored when this flag is set.
>> > + *
>> > + * Wakeup condition is,
>> > + * ``((*addr & mask) op (value & mask))``
>> > + *
>> > + * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
>> > + */
>> > +struct drm_i915_gem_wait_user_fence {
>> > +	/** @extensions: Zero-terminated chain of extensions. */
>> > +	__u64 extensions;
>> > +
>> > +	/** @addr: User/Memory fence address */
>> > +	__u64 addr;
>> > +
>> > +	/** @ctx_id: Id of the Context which will signal the fence. */
>> > +	__u32 ctx_id;
>> > +
>> > +	/** @op: Wakeup condition operator */
>> > +	__u16 op;
>> > +#define I915_UFENCE_WAIT_EQ      0
>> > +#define I915_UFENCE_WAIT_NEQ     1
>> > +#define I915_UFENCE_WAIT_GT      2
>> > +#define I915_UFENCE_WAIT_GTE     3
>> > +#define I915_UFENCE_WAIT_LT      4
>> > +#define I915_UFENCE_WAIT_LTE     5
>> > +#define I915_UFENCE_WAIT_BEFORE  6
>> > +#define I915_UFENCE_WAIT_AFTER   7
>> > +
>> > +	/**
>> > +	 * @flags: Supported flags are:
>> > +	 *
>> > +	 * I915_UFENCE_WAIT_SOFT:
>> > +	 *
>> > +	 * To be woken up by i915 driver async worker (not by GPU).
>> > +	 *
>> > +	 * I915_UFENCE_WAIT_ABSTIME:
>> > +	 *
>> > +	 * Wait timeout specified as absolute time.
>> > +	 */
>> > +	__u16 flags;
>> > +#define I915_UFENCE_WAIT_SOFT    0x1
>> > +#define I915_UFENCE_WAIT_ABSTIME 0x2
>> > +
>> > +	/** @value: Wakeup value */
>> > +	__u64 value;
>> > +
>> > +	/** @mask: Wakeup mask */
>> > +	__u64 mask;
>> > +#define I915_UFENCE_WAIT_U8     0xffu
>> > +#define I915_UFENCE_WAIT_U16    0xffffu
>> > +#define I915_UFENCE_WAIT_U32    0xfffffffful
>> > +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>> > +
>> > +	/**
>> > +	 * @timeout: Wait timeout in nanoseconds.
>> > +	 *
>> > +	 * If the I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
>> > +	 * absolute time in nsec.
>> > +	 */
>> > +	__s64 timeout;
>> > +};
>> > --
>> > 2.21.0.rc0.32.g243a4c7e27
>> >
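
For illustration, waiting for a dword at a GPU virtual address to reach
an expected value with the ioctl defined above could look like this
(a sketch only; the wrapper and its arguments are invented here, while
the field and flag names come from the RFC header):

#include <stdint.h>
#include <sys/ioctl.h>
/* assumes the RFC's i915_vm_bind.h definitions are available */

static int wait_u32_eq(int drm_fd, __u32 ctx_id, __u64 gpu_addr,
		       __u32 expected, __s64 timeout_ns)
{
	struct drm_i915_gem_wait_user_fence wait = {
		.addr = gpu_addr,
		.ctx_id = ctx_id,
		.op = I915_UFENCE_WAIT_EQ,
		.flags = 0,			/* GPU wakeup, relative timeout */
		.value = expected,
		.mask = I915_UFENCE_WAIT_U32,	/* compare low 32 bits only */
		.timeout = timeout_ns,
	};

	/* kernel evaluates ((*addr & mask) op (value & mask)) as documented */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_WAIT_USER_FENCE, &wait);
}
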
Tvrtko Ursulin June 13, 2022, 8:24 a.m. UTC | #7
On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
> On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
>> On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>
>>> On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>> VM_BIND and related uapi definitions
>>>>
>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>> <niranjana.vishwanathapura@intel.com>
>>>> ---
>>>>   Documentation/gpu/rfc/i915_vm_bind.h | 490 
>>>> +++++++++++++++++++++++++++
>>>>   1 file changed, 490 insertions(+)
>>>>   create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>> new file mode 100644
>>>> index 000000000000..9fc854969cfb
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>> @@ -0,0 +1,490 @@
>>>> +/* SPDX-License-Identifier: MIT */
>>>> +/*
>>>> + * Copyright © 2022 Intel Corporation
>>>> + */
>>>> +
>>>> +/**
>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>> + *
>>>> + * VM_BIND feature availability.
>>>> + * See typedef drm_i915_getparam_t param.
>>>> + * bit[0]: If set, VM_BIND is supported, otherwise not.
>>>> + * bits[8-15]: VM_BIND implementation version.
>>>> + * version 0 will not have VM_BIND/UNBIND timeline fence array 
>>>> support.
>>>> + */
>>>> +#define I915_PARAM_HAS_VM_BIND        57
>>>> +
>>>> +/**
>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>> + *
>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>> + * See struct drm_i915_gem_vm_control flags.
>>>> + *
>>>> + * The older execbuf2 ioctl will not support VM_BIND mode of 
>>>> operation.
>>>> + * For VM_BIND mode, we have new execbuf3 ioctl which will not 
>>>> accept any
>>>> + * execlist (See struct drm_i915_gem_execbuffer3 for more details).
>>>> + *
>>>> + */
>>>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>> +
>>>> +/**
>>>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>> + *
>>>> + * Flag to declare context as long running.
>>>> + * See struct drm_i915_gem_context_create_ext flags.
>>>> + *
>>>> + * Usage of dma-fence expects that they complete in reasonable 
>>>> amount of time.
>>>> + * Compute on the other hand can be long running. Hence it is not 
>>>> appropriate
>>>> + * for compute contexts to export request completion dma-fence to 
>>>> user.
>>>> + * The dma-fence usage will be limited to in-kernel consumption only.
>>>> + * Compute contexts need to use user/memory fence.
>>>> + *
>>>> + * So, long running contexts do not support output fences. Hence,
>>>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) is 
>>>> expected
>>>> + * to be not used. DRM_I915_GEM_WAIT ioctl call is also not 
>>>> supported for
>>>> + * objects mapped to long running contexts.
>>>> + */
>>>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>> +
>>>> +/* VM_BIND related ioctls */
>>>> +#define DRM_I915_GEM_VM_BIND        0x3d
>>>> +#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>> +#define DRM_I915_GEM_EXECBUFFER3    0x3f
>>>> +#define DRM_I915_GEM_WAIT_USER_FENCE    0x40
>>>> +
>>>> +#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>> DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>> DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>>> +#define DRM_IOCTL_I915_GEM_EXECBUFFER3 DRM_IOWR(DRM_COMMAND_BASE + 
>>>> DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
>>>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct 
>>>> drm_i915_gem_wait_user_fence)
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>> + *
>>>> + * This structure is passed to VM_BIND ioctl and specifies the 
>>>> mapping of GPU
>>>> + * virtual address (VA) range to the section of an object that 
>>>> should be bound
>>>> + * in the device page table of the specified address space (VM).
>>>> + * The VA range specified must be unique (ie., not currently bound) 
>>>> and can
>>>> + * be mapped to whole object or a section of the object (partial 
>>>> binding).
>>>> + * Multiple VA mappings can be created to the same section of the 
>>>> object
>>>> + * (aliasing).
>>>> + *
>>>> + * The @queue_idx specifies the queue to use for binding. Same 
>>>> queue can be
>>>> + * used for both VM_BIND and VM_UNBIND calls. All submitted bind 
>>>> and unbind
>>>> + * operations in a queue are performed in the order of submission.
>>>> + *
>>>> + * The @start, @offset and @length should be 4K page aligned. 
>>>> However the DG2
>>>> + * and XEHPSDV has 64K page size for device local-memory and has 
>>>> compact page
>>>> + * table. On those platforms, for binding device local-memory 
>>>> objects, the
>>>> + * @start should be 2M aligned, @offset and @length should be 64K 
>>>> aligned.
>>>> + * Also, on those platforms, it is not allowed to bind a device 
>>>> local-memory
>>>> + * object and a system memory object in a single 2M section of VA 
>>>> range.
>>>> + */
>>>> +struct drm_i915_gem_vm_bind {
>>>> +    /** @vm_id: VM (address space) id to bind */
>>>> +    __u32 vm_id;
>>>> +
>>>> +    /** @queue_idx: Index of queue for binding */
>>>> +    __u32 queue_idx;
>>>
>>> I have a question here to which I did not find an answer by browsing 
>>> the old threads.
>>>
>>> Queue index appears to be an implicit synchronisation mechanism, 
>>> right? Operations on the same index are executed/complete in order of 
>>> ioctl submission?
>>>
>>> Do we _have_ to implement this on the kernel side and could just 
>>> allow in/out fence and let userspace deal with it?
>>
>>
>> It orders operations like in a queue, which is kind of what happens
>> with existing queues/engines.
>>
>> If I understood correctly, it's going to be a kthread + a linked list 
>> right?
>>
> 
> Yes, that is correct.
> 
>>
>> -Lionel
>>
>>
>>>
>>> Arbitrary/on-demand number of queues will add the complexity on the 
>>> kernel side which should be avoided if possible.
>>>
> 
It was discussed in the other thread. Jason prefers this over putting
an artificial limit on the number of queues (as the user can exhaust
memory anyway). I think the complexity in the driver is manageable.

You'll need to create tracking structures on demand, with atomic replace 
of last fence, ref counting and locking of some sort, more or less?

The other option being discussed is to have the user create those
queues (like creating an engine map) beforehand and use them in the
vm_bind and vm_unbind ioctls. This puts a limit on the number of queues,
but it is not clean either and I am not sure it is worth making the
interface more complex.
> https://www.spinics.net/lists/dri-devel/msg350448.html

What about the third option of a flag to return a fence (of some sort) 
and pass in a fence? That way userspace can imagine zero or N queues 
with very little effort on the kernel side. Was this considered?

Regards,

Tvrtko

>>> Regards,
>>>
>>> Tvrtko
>>>
>>>> +
>>>> +    /** @rsvd: Reserved, MBZ */
>>>> +    __u32 rsvd;
>>>> +
>>>> +    /** @handle: Object handle */
>>>> +    __u32 handle;
>>>> +
>>>> +    /** @start: Virtual Address start to bind */
>>>> +    __u64 start;
>>>> +
>>>> +    /** @offset: Offset in object to bind */
>>>> +    __u64 offset;
>>>> +
>>>> +    /** @length: Length of mapping to bind */
>>>> +    __u64 length;
>>>> +
>>>> +    /**
>>>> +     * @flags: Supported flags are:
>>>> +     *
>>>> +     * I915_GEM_VM_BIND_READONLY:
>>>> +     * Mapping is read-only.
>>>> +     *
>>>> +     * I915_GEM_VM_BIND_CAPTURE:
>>>> +     * Capture this mapping in the dump upon GPU error.
>>>> +     */
>>>> +    __u64 flags;
>>>> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>> +
>>>> +    /**
>>>> +     * @extensions: 0-terminated chain of extensions for this 
>>>> operation.
>>>> +     *
>>>> +     * I915_VM_BIND_EXT_TIMELINE_FENCES:
>>>> +     * Specifies an array of input or output timeline fences for this
>>>> +     * binding operation. See struct 
>>>> drm_i915_vm_bind_ext_timeline_fences.
>>>> +     *
>>>> +     * I915_VM_BIND_EXT_USER_FENCES:
>>>> +     * Specifies an array of input or output user fences for this
>>>> +     * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
>>>> +     * This is required for compute contexts.
>>>> +     */
>>>> +    __u64 extensions;
>>>> +#define I915_VM_BIND_EXT_TIMELINE_FENCES    0
>>>> +#define I915_VM_BIND_EXT_USER_FENCES        1
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>> + *
>>>> + * This structure is passed to VM_UNBIND ioctl and specifies the 
>>>> GPU virtual
>>>> + * address (VA) range that should be unbound from the device page 
>>>> table of the
>>>> + * specified address space (VM). The specified VA range must match 
>>>> one of the
>>>> + * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>>>> + * completion.
>>>> + *
>>>> + * The @queue_idx specifies the queue to use for unbinding.
>>>> + * See struct drm_i915_gem_vm_bind for more information on 
>>>> @queue_idx.
>>>> + *
>>>> + * The @start and @length must specify a unique mapping bound with 
>>>> VM_BIND
>>>> + * ioctl.
>>>> + */
>>>> +struct drm_i915_gem_vm_unbind {
>>>> +    /** @vm_id: VM (address space) id to bind */
>>>> +    __u32 vm_id;
>>>> +
>>>> +    /** @queue_idx: Index of queue for unbinding */
>>>> +    __u32 queue_idx;
>>>> +
>>>> +    /** @start: Virtual Address start to unbind */
>>>> +    __u64 start;
>>>> +
>>>> +    /** @length: Length of mapping to unbind */
>>>> +    __u64 length;
>>>> +
>>>> +    /** @flags: Reserved for future usage, currently MBZ */
>>>> +    __u64 flags;
>>>> +
>>>> +    /**
>>>> +     * @extensions: 0-terminated chain of extensions for this 
>>>> operation.
>>>> +     *
>>>> +     * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
>>>> +     * Specifies an array of input or output timeline fences for this
>>>> +     * unbind operation.
>>>> +     * It has same format as struct 
>>>> drm_i915_vm_bind_ext_timeline_fences.
>>>> +     *
>>>> +     * I915_VM_UNBIND_EXT_USER_FENCES:
>>>> +     * Specifies an array of input or output user fences for this
>>>> +     * unbind operation. This is required for compute contexts.
>>>> +     * It has same format as struct drm_i915_vm_bind_ext_user_fence.
>>>> +     */
>>>> +    __u64 extensions;
>>>> +#define I915_VM_UNBIND_EXT_TIMELINE_FENCES    0
>>>> +#define I915_VM_UNBIND_EXT_USER_FENCES        1
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_fence - An input or output fence for the 
>>>> vm_bind
>>>> + * or the vm_unbind work.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will wait for the input 
>>>> to signal
>>>> + * before starting the binding or unbinding.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will signal the returned 
>>>> output fence
>>>> + * after the completion of binding or unbinding.
>>>> + */
>>>> +struct drm_i915_vm_bind_fence {
>>>> +    /** @handle: User's handle for a drm_syncobj to wait on or 
>>>> signal. */
>>>> +    __u32 handle;
>>>> +
>>>> +    /**
>>>> +     * @flags: Supported flags are:
>>>> +     *
>>>> +     * I915_VM_BIND_FENCE_WAIT:
>>>> +     * Wait for the input fence before binding/unbinding
>>>> +     *
>>>> +     * I915_VM_BIND_FENCE_SIGNAL:
>>>> +     * Return bind/unbind completion fence as output
>>>> +     */
>>>> +    __u32 flags;
>>>> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>>> (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences 
>>>> for vm_bind
>>>> + * and vm_unbind.
>>>> + *
>>>> + * This structure describes an array of timeline drm_syncobj and 
>>>> associated
>>>> + * points for timeline variants of drm_syncobj. These timeline 
>>>> 'drm_syncobj's
>>>> + * can be input or output fences (See struct drm_i915_vm_bind_fence).
>>>> + */
>>>> +struct drm_i915_vm_bind_ext_timeline_fences {
>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>> +    struct i915_user_extension base;
>>>> +
>>>> +    /**
>>>> +     * @fence_count: Number of elements in the @handles_ptr & 
>>>> @value_ptr
>>>> +     * arrays.
>>>> +     */
>>>> +    __u64 fence_count;
>>>> +
>>>> +    /**
>>>> +     * @handles_ptr: Pointer to an array of struct 
>>>> drm_i915_vm_bind_fence
>>>> +     * of length @fence_count.
>>>> +     */
>>>> +    __u64 handles_ptr;
>>>> +
>>>> +    /**
>>>> +     * @values_ptr: Pointer to an array of u64 values of length
>>>> +     * @fence_count.
>>>> +     * Values must be 0 for a binary drm_syncobj. A value of 0 for a
>>>> +     * timeline drm_syncobj is invalid as it turns a drm_syncobj 
>>>> into a
>>>> +     * binary one.
>>>> +     */
>>>> +    __u64 values_ptr;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_user_fence - An input or output user 
>>>> fence for the
>>>> + * vm_bind or the vm_unbind work.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will wait for the 
>>>> fence (value at
>>>> + * @addr to become equal to @val) before starting the binding or 
>>>> unbinding.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will signal the output 
>>>> fence after
>>>> + * the completion of binding or unbinding by writing @val to memory 
>>>> location at
>>>> + * @addr
>>>> + */
>>>> +struct drm_i915_vm_bind_user_fence {
>>>> +    /** @addr: User/Memory fence qword aligned process virtual 
>>>> address */
>>>> +    __u64 addr;
>>>> +
>>>> +    /** @val: User/Memory fence value to be written after bind 
>>>> completion */
>>>> +    __u64 val;
>>>> +
>>>> +    /**
>>>> +     * @flags: Supported flags are:
>>>> +     *
>>>> +     * I915_VM_BIND_USER_FENCE_WAIT:
>>>> +     * Wait for the input fence before binding/unbinding
>>>> +     *
>>>> +     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>> +     * Return bind/unbind completion fence as output
>>>> +     */
>>>> +    __u32 flags;
>>>> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>> +    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for 
>>>> vm_bind
>>>> + * and vm_unbind.
>>>> + *
>>>> + * These user fences can be input or output fences
>>>> + * (See struct drm_i915_vm_bind_user_fence).
>>>> + */
>>>> +struct drm_i915_vm_bind_ext_user_fence {
>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>> +    struct i915_user_extension base;
>>>> +
>>>> +    /** @fence_count: Number of elements in the @user_fence_ptr 
>>>> array. */
>>>> +    __u64 fence_count;
>>>> +
>>>> +    /**
>>>> +     * @user_fence_ptr: Pointer to an array of
>>>> +     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>> +     */
>>>> +    __u64 user_fence_ptr;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_execbuffer3 - Structure for 
>>>> DRM_I915_GEM_EXECBUFFER3
>>>> + * ioctl.
>>>> + *
>>>> + * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and 
>>>> VM_BIND mode
>>>> + * only works with this ioctl for submission.
>>>> + * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
>>>> + */
>>>> +struct drm_i915_gem_execbuffer3 {
>>>> +    /**
>>>> +     * @ctx_id: Context id
>>>> +     *
>>>> +     * Only contexts with user engine map are allowed.
>>>> +     */
>>>> +    __u32 ctx_id;
>>>> +
>>>> +    /**
>>>> +     * @engine_idx: Engine index
>>>> +     *
>>>> +     * An index in the user engine map of the context specified by 
>>>> @ctx_id.
>>>> +     */
>>>> +    __u32 engine_idx;
>>>> +
>>>> +    /** @rsvd1: Reserved, MBZ */
>>>> +    __u32 rsvd1;
>>>> +
>>>> +    /**
>>>> +     * @batch_count: Number of batches in @batch_address array.
>>>> +     *
>>>> +     * 0 is invalid. For parallel submission, it should be equal to 
>>>> the
>>>> +     * number of (parallel) engines involved in that submission.
>>>> +     */
>>>> +    __u32 batch_count;
>>>> +
>>>> +    /**
>>>> +     * @batch_address: Array of batch gpu virtual addresses.
>>>> +     *
>>>> +     * If @batch_count is 1, then it is the gpu virtual address of the
>>>> +     * batch buffer. If @batch_count > 1, then it is a pointer to 
>>>> an array
>>>> +     * of batch buffer gpu virtual addresses.
>>>> +     */
>>>> +    __u64 batch_address;
>>>> +
>>>> +    /**
>>>> +     * @flags: Supported flags are:
>>>> +     *
>>>> +     * I915_EXEC3_SECURE:
>>>> +     * Request privileged ("secure") batch buffer(s).
>>>> +     * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
>>>> +     */
>>>> +    __u64 flags;
>>>> +#define I915_EXEC3_SECURE    (1<<0)
>>>> +
>>>> +    /** @rsvd2: Reserved, MBZ */
>>>> +    __u64 rsvd2;
>>>> +
>>>> +    /**
>>>> +     * @extensions: Zero-terminated chain of extensions.
>>>> +     *
>>>> +     * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
>>>> +     * It has same format as 
>>>> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
>>>> +     * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
>>>> +     *
>>>> +     * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
>>>> +     * First level batch completion signaling extension.
>>>> +     * See struct drm_i915_gem_execbuffer3_ext_user_fence.
>>>> +     */
>>>> +    __u64 extensions;
>>>> +#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES    0
>>>> +#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE        1
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_execbuffer3_ext_user_fence - First level 
>>>> batch completion
>>>> + * signaling extension.
>>>> + *
>>>> + * This extension allows user to attach a user fence (@addr, @value 
>>>> pair) to
>>>> + * execbuf3, to be signaled by the command streamer after the 
>>>> completion of first
>>>> + * level batch, by writing the @value at specified @addr and 
>>>> triggering an
>>>> + * interrupt.
>>>> + * User can either poll for this user fence to signal or can also 
>>>> wait on it
>>>> + * with i915_gem_wait_user_fence ioctl.
>>>> + * This is very useful for long running contexts where 
>>>> waiting on dma-fence
>>>> + * by user (like i915_gem_wait ioctl) is not supported.
>>>> + */
>>>> +struct drm_i915_gem_execbuffer3_ext_user_fence {
>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>> +    struct i915_user_extension base;
>>>> +
>>>> +    /**
>>>> +     * @addr: User/Memory fence qword aligned GPU virtual address.
>>>> +     *
>>>> +     * Address has to be a valid GPU virtual address at the time of
>>>> +     * first level batch completion.
>>>> +     */
>>>> +    __u64 addr;
>>>> +
>>>> +    /**
>>>> +     * @value: User/Memory fence Value to be written to above address
>>>> +     * after first level batch completes.
>>>> +     */
>>>> +    __u64 value;
>>>> +
>>>> +    /** @rsvd: Reserved, MBZ */
>>>> +    __u64 rsvd;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_create_ext_vm_private - Extension to make 
>>>> the object
>>>> + * private to the specified VM.
>>>> + *
>>>> + * See struct drm_i915_gem_create_ext.
>>>> + */
>>>> +struct drm_i915_gem_create_ext_vm_private {
>>>> +#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>> +    struct i915_user_extension base;
>>>> +
>>>> +    /** @vm_id: Id of the VM to which the object is private */
>>>> +    __u32 vm_id;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>>> + *
>>>> + * User/Memory fence can be woken up either by:
>>>> + *
>>>> + * 1. GPU context indicated by @ctx_id, or,
>>>> + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>> + *    @ctx_id is ignored when this flag is set.
>>>> + *
>>>> + * Wakeup condition is,
>>>> + * ``((*addr & mask) op (value & mask))``
>>>> + *
>>>> + * See :ref:`Documentation/driver-api/dma-buf.rst 
>>>> <indefinite_dma_fences>`
>>>> + */
>>>> +struct drm_i915_gem_wait_user_fence {
>>>> +    /** @extensions: Zero-terminated chain of extensions. */
>>>> +    __u64 extensions;
>>>> +
>>>> +    /** @addr: User/Memory fence address */
>>>> +    __u64 addr;
>>>> +
>>>> +    /** @ctx_id: Id of the Context which will signal the fence. */
>>>> +    __u32 ctx_id;
>>>> +
>>>> +    /** @op: Wakeup condition operator */
>>>> +    __u16 op;
>>>> +#define I915_UFENCE_WAIT_EQ      0
>>>> +#define I915_UFENCE_WAIT_NEQ     1
>>>> +#define I915_UFENCE_WAIT_GT      2
>>>> +#define I915_UFENCE_WAIT_GTE     3
>>>> +#define I915_UFENCE_WAIT_LT      4
>>>> +#define I915_UFENCE_WAIT_LTE     5
>>>> +#define I915_UFENCE_WAIT_BEFORE  6
>>>> +#define I915_UFENCE_WAIT_AFTER   7
>>>> +
>>>> +    /**
>>>> +     * @flags: Supported flags are:
>>>> +     *
>>>> +     * I915_UFENCE_WAIT_SOFT:
>>>> +     *
>>>> +     * To be woken up by i915 driver async worker (not by GPU).
>>>> +     *
>>>> +     * I915_UFENCE_WAIT_ABSTIME:
>>>> +     *
>>>> +     * Wait timeout specified as absolute time.
>>>> +     */
>>>> +    __u16 flags;
>>>> +#define I915_UFENCE_WAIT_SOFT    0x1
>>>> +#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>> +
>>>> +    /** @value: Wakeup value */
>>>> +    __u64 value;
>>>> +
>>>> +    /** @mask: Wakeup mask */
>>>> +    __u64 mask;
>>>> +#define I915_UFENCE_WAIT_U8     0xffu
>>>> +#define I915_UFENCE_WAIT_U16    0xffffu
>>>> +#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>> +
>>>> +    /**
>>>> +     * @timeout: Wait timeout in nanoseconds.
>>>> +     *
>>>> +     * If the I915_UFENCE_WAIT_ABSTIME flag is set, then the 
>>>> timeout is the
>>>> +     * absolute time in nsec.
>>>> +     */
>>>> +    __s64 timeout;
>>>> +};
>>
>>
Niranjana Vishwanathapura June 13, 2022, 3:05 p.m. UTC | #8
On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>
>On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
>>>On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>
>>>>On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>VM_BIND and related uapi definitions
>>>>>
>>>>>Signed-off-by: Niranjana Vishwanathapura 
>>>>><niranjana.vishwanathapura@intel.com>
>>>>>---
>>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 490 
>>>>>+++++++++++++++++++++++++++
>>>>>  1 file changed, 490 insertions(+)
>>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>
>>>>>diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>new file mode 100644
>>>>>index 000000000000..9fc854969cfb
>>>>>--- /dev/null
>>>>>+++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>@@ -0,0 +1,490 @@
>>>>>+/* SPDX-License-Identifier: MIT */
>>>>>+/*
>>>>>+ * Copyright © 2022 Intel Corporation
>>>>>+ */
>>>>>+
>>>>>+/**
>>>>>+ * DOC: I915_PARAM_HAS_VM_BIND
>>>>>+ *
>>>>>+ * VM_BIND feature availability.
>>>>>+ * See typedef drm_i915_getparam_t param.
>>>>>+ * bit[0]: If set, VM_BIND is supported, otherwise not.
>>>>>+ * bits[8-15]: VM_BIND implementation version.
>>>>>+ * version 0 will not have VM_BIND/UNBIND timeline fence 
>>>>>array support.
>>>>>+ */
>>>>>+#define I915_PARAM_HAS_VM_BIND        57
>>>>>+
>>>>>+/**
>>>>>+ * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>+ *
>>>>>+ * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>+ * See struct drm_i915_gem_vm_control flags.
>>>>>+ *
>>>>>+ * The older execbuf2 ioctl will not support VM_BIND mode of 
>>>>>operation.
>>>>>+ * For VM_BIND mode, we have new execbuf3 ioctl which will 
>>>>>not accept any
>>>>>+ * execlist (See struct drm_i915_gem_execbuffer3 for more details).
>>>>>+ *
>>>>>+ */
>>>>>+#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>>>+
>>>>>+/**
>>>>>+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>>>+ *
>>>>>+ * Flag to declare context as long running.
>>>>>+ * See struct drm_i915_gem_context_create_ext flags.
>>>>>+ *
>>>>>+ * Usage of dma-fence expects that they complete in 
>>>>>reasonable amount of time.
>>>>>+ * Compute on the other hand can be long running. Hence it is 
>>>>>not appropriate
>>>>>+ * for compute contexts to export request completion 
>>>>>dma-fence to user.
>>>>>+ * The dma-fence usage will be limited to in-kernel consumption only.
>>>>>+ * Compute contexts need to use user/memory fence.
>>>>>+ *
>>>>>+ * So, long running contexts do not support output fences. Hence,
>>>>>+ * I915_EXEC_FENCE_SIGNAL (See 
>>>>>&drm_i915_gem_exec_fence.flags) is expected
>>>>>+ * to be not used. DRM_I915_GEM_WAIT ioctl call is also not 
>>>>>supported for
>>>>>+ * objects mapped to long running contexts.
>>>>>+ */
>>>>>+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>>>+
>>>>>+/* VM_BIND related ioctls */
>>>>>+#define DRM_I915_GEM_VM_BIND        0x3d
>>>>>+#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>>>+#define DRM_I915_GEM_EXECBUFFER3    0x3f
>>>>>+#define DRM_I915_GEM_WAIT_USER_FENCE    0x40
>>>>>+
>>>>>+#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE 
>>>>>+ DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>>>+#define DRM_IOCTL_I915_GEM_VM_UNBIND 
>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct 
>>>>>drm_i915_gem_vm_bind)
>>>>>+#define DRM_IOCTL_I915_GEM_EXECBUFFER3 
>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct 
>>>>>drm_i915_gem_execbuffer3)
>>>>>+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, 
>>>>>struct drm_i915_gem_wait_user_fence)
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>>>+ *
>>>>>+ * This structure is passed to VM_BIND ioctl and specifies 
>>>>>the mapping of GPU
>>>>>+ * virtual address (VA) range to the section of an object 
>>>>>that should be bound
>>>>>+ * in the device page table of the specified address space (VM).
>>>>>+ * The VA range specified must be unique (ie., not currently 
>>>>>bound) and can
>>>>>+ * be mapped to whole object or a section of the object 
>>>>>(partial binding).
>>>>>+ * Multiple VA mappings can be created to the same section of 
>>>>>the object
>>>>>+ * (aliasing).
>>>>>+ *
>>>>>+ * The @queue_idx specifies the queue to use for binding. 
>>>>>Same queue can be
>>>>>+ * used for both VM_BIND and VM_UNBIND calls. All submitted 
>>>>>bind and unbind
>>>>>+ * operations in a queue are performed in the order of submission.
>>>>>+ *
>>>>>+ * The @start, @offset and @length should be 4K page aligned. 
>>>>>However the DG2
>>>>>+ * and XEHPSDV has 64K page size for device local-memory and 
>>>>>has compact page
>>>>>+ * table. On those platforms, for binding device local-memory 
>>>>>objects, the
>>>>>+ * @start should be 2M aligned, @offset and @length should be 
>>>>>64K aligned.
>>>>>+ * Also, on those platforms, it is not allowed to bind a 
>>>>>device local-memory
>>>>>+ * object and a system memory object in a single 2M section 
>>>>>of VA range.
>>>>>+ */
>>>>>+struct drm_i915_gem_vm_bind {
>>>>>+    /** @vm_id: VM (address space) id to bind */
>>>>>+    __u32 vm_id;
>>>>>+
>>>>>+    /** @queue_idx: Index of queue for binding */
>>>>>+    __u32 queue_idx;
>>>>
>>>>I have a question here to which I did not find an answer by 
>>>>browsing the old threads.
>>>>
>>>>Queue index appears to be an implicit synchronisation mechanism, 
>>>>right? Operations on the same index are executed/complete in 
>>>>order of ioctl submission?
>>>>
>>>>Do we _have_ to implement this on the kernel side and could just 
>>>>allow in/out fence and let userspace deal with it?
>>>
>>>
>>>It orders operations like in a queue, which is kind of what 
>>>happens with existing queues/engines.
>>>
>>>If I understood correctly, it's going to be a kthread + a linked 
>>>list right?
>>>
>>
>>Yes, that is correct.
>>
>>>
>>>-Lionel
>>>
>>>
>>>>
>>>>Arbitrary/on-demand number of queues will add the complexity on 
>>>>the kernel side which should be avoided if possible.
>>>>
>>
>>It was discussed in the other thread. Jason prefers this over putting
>>an artificial limit on the number of queues (as the user can exhaust
>>memory anyway). I think the complexity in the driver is manageable.
>
>You'll need to create tracking structures on demand, with atomic 
>replace of last fence, ref counting and locking of some sort, more or 
>less?
>

We will have a workqueue, a work item and a linked list per queue.
A VM_BIND/UNBIND call will add the mapping request to the specified queue's
linked list and schedule the work item on the workqueue of that queue.
I am not sure what you mean by the last fence and replacing it.
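
Roughly, per queue, it would be something like the kernel-side skeleton
below (purely illustrative; none of these structures or helper names
exist in i915 today, and vm_bind_request is assumed to carry a list link):

struct i915_vm_bind_queue {
	struct work_struct work;	/* runs on the VM's workqueue */
	struct list_head pending;	/* FIFO of queued (un)bind requests */
	spinlock_t lock;		/* protects @pending */
};

static void vm_bind_worker(struct work_struct *w)
{
	struct i915_vm_bind_queue *q = container_of(w, typeof(*q), work);
	struct vm_bind_request *req;

	for (;;) {
		spin_lock(&q->lock);
		req = list_first_entry_or_null(&q->pending,
					       typeof(*req), link);
		if (req)
			list_del(&req->link);
		spin_unlock(&q->lock);
		if (!req)
			break;

		/* wait for in-fences, (un)bind, then signal out-fences */
		perform_bind_or_unbind(req);
	}
}

The ioctl just appends the request to q->pending and queue_work()s the
item, which is what gives the in-order guarantee within a queue.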

>>The other option being discussed is to have the user create those
>>queues (like creating an engine map) beforehand and use them in the
>>vm_bind and vm_unbind ioctls. This puts a limit on the number of queues,
>>but it is not clean either and I am not sure it is worth making the
>>interface more complex.
>>https://www.spinics.net/lists/dri-devel/msg350448.html
>
>What about the third option of a flag to return a fence (of some sort) 
>and pass in a fence? That way userspace can imagine zero or N queues 
>with very little effort on the kernel side. Was this considered?
>

I am not clear on what fence you are talking about here and how it
helps with the number of vm_bind queues. Can you elaborate?

Niranjana

>Regards,
>
>Tvrtko
>
>>>>Regards,
>>>>
>>>>Tvrtko
>>>>
>>>>>+
>>>>>+    /** @rsvd: Reserved, MBZ */
>>>>>+    __u32 rsvd;
>>>>>+
>>>>>+    /** @handle: Object handle */
>>>>>+    __u32 handle;
>>>>>+
>>>>>+    /** @start: Virtual Address start to bind */
>>>>>+    __u64 start;
>>>>>+
>>>>>+    /** @offset: Offset in object to bind */
>>>>>+    __u64 offset;
>>>>>+
>>>>>+    /** @length: Length of mapping to bind */
>>>>>+    __u64 length;
>>>>>+
>>>>>+    /**
>>>>>+     * @flags: Supported flags are:
>>>>>+     *
>>>>>+     * I915_GEM_VM_BIND_READONLY:
>>>>>+     * Mapping is read-only.
>>>>>+     *
>>>>>+     * I915_GEM_VM_BIND_CAPTURE:
>>>>>+     * Capture this mapping in the dump upon GPU error.
>>>>>+     */
>>>>>+    __u64 flags;
>>>>>+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>>>+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>>>+
>>>>>+    /**
>>>>>+     * @extensions: 0-terminated chain of extensions for this 
>>>>>operation.
>>>>>+     *
>>>>>+     * I915_VM_BIND_EXT_TIMELINE_FENCES:
>>>>>+     * Specifies an array of input or output timeline fences for this
>>>>>+     * binding operation. See struct 
>>>>>drm_i915_vm_bind_ext_timeline_fences.
>>>>>+     *
>>>>>+     * I915_VM_BIND_EXT_USER_FENCES:
>>>>>+     * Specifies an array of input or output user fences for this
>>>>>+     * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
>>>>>+     * This is required for compute contexts.
>>>>>+     */
>>>>>+    __u64 extensions;
>>>>>+#define I915_VM_BIND_EXT_TIMELINE_FENCES    0
>>>>>+#define I915_VM_BIND_EXT_USER_FENCES        1
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>>>+ *
>>>>>+ * This structure is passed to VM_UNBIND ioctl and specifies 
>>>>>the GPU virtual
>>>>>+ * address (VA) range that should be unbound from the device 
>>>>>page table of the
>>>>>+ * specified address space (VM). The specified VA range must 
>>>>>match one of the
>>>>>+ * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>>>>>+ * completion.
>>>>>+ *
>>>>>+ * The @queue_idx specifies the queue to use for unbinding.
>>>>>+ * See struct drm_i915_gem_vm_bind for more information on 
>>>>>@queue_idx.
>>>>>+ *
>>>>>+ * The @start and @length must specify a unique mapping bound 
>>>>>with VM_BIND
>>>>>+ * ioctl.
>>>>>+ */
>>>>>+struct drm_i915_gem_vm_unbind {
>>>>>+    /** @vm_id: VM (address space) id to bind */
>>>>>+    __u32 vm_id;
>>>>>+
>>>>>+    /** @queue_idx: Index of queue for unbinding */
>>>>>+    __u32 queue_idx;
>>>>>+
>>>>>+    /** @start: Virtual Address start to unbind */
>>>>>+    __u64 start;
>>>>>+
>>>>>+    /** @length: Length of mapping to unbind */
>>>>>+    __u64 length;
>>>>>+
>>>>>+    /** @flags: Reserved for future usage, currently MBZ */
>>>>>+    __u64 flags;
>>>>>+
>>>>>+    /**
>>>>>+     * @extensions: 0-terminated chain of extensions for this 
>>>>>operation.
>>>>>+     *
>>>>>+     * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
>>>>>+     * Specifies an array of input or output timeline fences for this
>>>>>+     * unbind operation.
>>>>>+     * It has same format as struct 
>>>>>drm_i915_vm_bind_ext_timeline_fences.
>>>>>+     *
>>>>>+     * I915_VM_UNBIND_EXT_USER_FENCES:
>>>>>+     * Specifies an array of input or output user fences for this
>>>>>+     * unbind operation. This is required for compute contexts.
>>>>>+     * It has same format as struct drm_i915_vm_bind_ext_user_fence.
>>>>>+     */
>>>>>+    __u64 extensions;
>>>>>+#define I915_VM_UNBIND_EXT_TIMELINE_FENCES    0
>>>>>+#define I915_VM_UNBIND_EXT_USER_FENCES        1
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_vm_bind_fence - An input or output fence 
>>>>>for the vm_bind
>>>>>+ * or the vm_unbind work.
>>>>>+ *
>>>>>+ * The vm_bind or vm_unbind async worker will wait for the input 
>>>>>fence to signal
>>>>>+ * before starting the binding or unbinding.
>>>>>+ *
>>>>>+ * The vm_bind or vm_unbind async worker will signal the 
>>>>>returned output fence
>>>>>+ * after the completion of binding or unbinding.
>>>>>+ */
>>>>>+struct drm_i915_vm_bind_fence {
>>>>>+    /** @handle: User's handle for a drm_syncobj to wait on 
>>>>>or signal. */
>>>>>+    __u32 handle;
>>>>>+
>>>>>+    /**
>>>>>+     * @flags: Supported flags are:
>>>>>+     *
>>>>>+     * I915_VM_BIND_FENCE_WAIT:
>>>>>+     * Wait for the input fence before binding/unbinding
>>>>>+     *
>>>>>+     * I915_VM_BIND_FENCE_SIGNAL:
>>>>>+     * Return bind/unbind completion fence as output
>>>>>+     */
>>>>>+    __u32 flags;
>>>>>+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>>>+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>>>+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>>>>(-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline 
>>>>>fences for vm_bind
>>>>>+ * and vm_unbind.
>>>>>+ *
>>>>>+ * This structure describes an array of timeline drm_syncobj 
>>>>>and associated
>>>>>+ * points for timeline variants of drm_syncobj. These 
>>>>>timeline 'drm_syncobj's
>>>>>+ * can be input or output fences (See struct drm_i915_vm_bind_fence).
>>>>>+ */
>>>>>+struct drm_i915_vm_bind_ext_timeline_fences {
>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>+    struct i915_user_extension base;
>>>>>+
>>>>>+    /**
>>>>>+     * @fence_count: Number of elements in the @handles_ptr & 
>>>>>@value_ptr
>>>>>+     * arrays.
>>>>>+     */
>>>>>+    __u64 fence_count;
>>>>>+
>>>>>+    /**
>>>>>+     * @handles_ptr: Pointer to an array of struct 
>>>>>drm_i915_vm_bind_fence
>>>>>+     * of length @fence_count.
>>>>>+     */
>>>>>+    __u64 handles_ptr;
>>>>>+
>>>>>+    /**
>>>>>+     * @values_ptr: Pointer to an array of u64 values of length
>>>>>+     * @fence_count.
>>>>>+     * Values must be 0 for a binary drm_syncobj. A value of 0 for a
>>>>>+     * timeline drm_syncobj is invalid as it turns a 
>>>>>drm_syncobj into a
>>>>>+     * binary one.
>>>>>+     */
>>>>>+    __u64 values_ptr;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_vm_bind_user_fence - An input or output 
>>>>>user fence for the
>>>>>+ * vm_bind or the vm_unbind work.
>>>>>+ *
>>>>>+ * The vm_bind or vm_unbind async worker will wait for the 
>>>>>input fence (value at
>>>>>+ * @addr to become equal to @val) before starting the binding 
>>>>>or unbinding.
>>>>>+ *
>>>>>+ * The vm_bind or vm_unbind async worker will signal the 
>>>>>output fence after
>>>>>+ * the completion of binding or unbinding by writing @val to 
>>>>>memory location at
>>>>>+ * @addr
>>>>>+ */
>>>>>+struct drm_i915_vm_bind_user_fence {
>>>>>+    /** @addr: User/Memory fence qword aligned process 
>>>>>virtual address */
>>>>>+    __u64 addr;
>>>>>+
>>>>>+    /** @val: User/Memory fence value to be written after 
>>>>>bind completion */
>>>>>+    __u64 val;
>>>>>+
>>>>>+    /**
>>>>>+     * @flags: Supported flags are:
>>>>>+     *
>>>>>+     * I915_VM_BIND_USER_FENCE_WAIT:
>>>>>+     * Wait for the input fence before binding/unbinding
>>>>>+     *
>>>>>+     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>>>+     * Return bind/unbind completion fence as output
>>>>>+     */
>>>>>+    __u32 flags;
>>>>>+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>>>+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>>>+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>>>+    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_vm_bind_ext_user_fence - User/memory 
>>>>>fences for vm_bind
>>>>>+ * and vm_unbind.
>>>>>+ *
>>>>>+ * These user fences can be input or output fences
>>>>>+ * (See struct drm_i915_vm_bind_user_fence).
>>>>>+ */
>>>>>+struct drm_i915_vm_bind_ext_user_fence {
>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>+    struct i915_user_extension base;
>>>>>+
>>>>>+    /** @fence_count: Number of elements in the 
>>>>>@user_fence_ptr array. */
>>>>>+    __u64 fence_count;
>>>>>+
>>>>>+    /**
>>>>>+     * @user_fence_ptr: Pointer to an array of
>>>>>+     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>>>+     */
>>>>>+    __u64 user_fence_ptr;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_execbuffer3 - Structure for 
>>>>>DRM_I915_GEM_EXECBUFFER3
>>>>>+ * ioctl.
>>>>>+ *
>>>>>+ * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode 
>>>>>and VM_BIND mode
>>>>>+ * only works with this ioctl for submission.
>>>>>+ * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
>>>>>+ */
>>>>>+struct drm_i915_gem_execbuffer3 {
>>>>>+    /**
>>>>>+     * @ctx_id: Context id
>>>>>+     *
>>>>>+     * Only contexts with user engine map are allowed.
>>>>>+     */
>>>>>+    __u32 ctx_id;
>>>>>+
>>>>>+    /**
>>>>>+     * @engine_idx: Engine index
>>>>>+     *
>>>>>+     * An index in the user engine map of the context 
>>>>>specified by @ctx_id.
>>>>>+     */
>>>>>+    __u32 engine_idx;
>>>>>+
>>>>>+    /** @rsvd1: Reserved, MBZ */
>>>>>+    __u32 rsvd1;
>>>>>+
>>>>>+    /**
>>>>>+     * @batch_count: Number of batches in @batch_address array.
>>>>>+     *
>>>>>+     * 0 is invalid. For parallel submission, it should be 
>>>>>equal to the
>>>>>+     * number of (parallel) engines involved in that submission.
>>>>>+     */
>>>>>+    __u32 batch_count;
>>>>>+
>>>>>+    /**
>>>>>+     * @batch_address: Array of batch gpu virtual addresses.
>>>>>+     *
>>>>>+     * If @batch_count is 1, then it is the gpu virtual address of the
>>>>>+     * batch buffer. If @batch_count > 1, then it is a 
>>>>>pointer to an array
>>>>>+     * of batch buffer gpu virtual addresses.
>>>>>+     */
>>>>>+    __u64 batch_address;
>>>>>+
>>>>>+    /**
>>>>>+     * @flags: Supported flags are:
>>>>>+     *
>>>>>+     * I915_EXEC3_SECURE:
>>>>>+     * Request privileged ("secure") batch buffer(s).
>>>>>+     * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
>>>>>+     */
>>>>>+    __u64 flags;
>>>>>+#define I915_EXEC3_SECURE    (1<<0)
>>>>>+
>>>>>+    /** @rsvd2: Reserved, MBZ */
>>>>>+    __u64 rsvd2;
>>>>>+
>>>>>+    /**
>>>>>+     * @extensions: Zero-terminated chain of extensions.
>>>>>+     *
>>>>>+     * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
>>>>>+     * It has same format as 
>>>>>DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
>>>>>+     * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
>>>>>+     *
>>>>>+     * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
>>>>>+     * First level batch completion signaling extension.
>>>>>+     * See struct drm_i915_gem_execbuffer3_ext_user_fence.
>>>>>+     */
>>>>>+    __u64 extensions;
>>>>>+#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES    0
>>>>>+#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE        1
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_execbuffer3_ext_user_fence - First 
>>>>>level batch completion
>>>>>+ * signaling extension.
>>>>>+ *
>>>>>+ * This extension allows user to attach a user fence (@addr, 
>>>>>@value pair) to
>>>>>+ * execbuf3, to be signaled by the command streamer after the 
>>>>>completion of first
>>>>>+ * level batch, by writing the @value at specified @addr and 
>>>>>triggering an
>>>>>+ * interrupt.
>>>>>+ * User can either poll for this user fence to signal or can 
>>>>>also wait on it
>>>>>+ * with i915_gem_wait_user_fence ioctl.
>>>>>+ * This is very useful for long running contexts where 
>>>>>waiting on dma-fence
>>>>>+ * by user (like i915_gem_wait ioctl) is not supported.
>>>>>+ */
>>>>>+struct drm_i915_gem_execbuffer3_ext_user_fence {
>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>+    struct i915_user_extension base;
>>>>>+
>>>>>+    /**
>>>>>+     * @addr: User/Memory fence qword aligned GPU virtual address.
>>>>>+     *
>>>>>+     * Address has to be a valid GPU virtual address at the time of
>>>>>+     * first level batch completion.
>>>>>+     */
>>>>>+    __u64 addr;
>>>>>+
>>>>>+    /**
>>>>>+     * @value: User/Memory fence Value to be written to above address
>>>>>+     * after first level batch completes.
>>>>>+     */
>>>>>+    __u64 value;
>>>>>+
>>>>>+    /** @rsvd: Reserved, MBZ */
>>>>>+    __u64 rsvd;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_create_ext_vm_private - Extension to 
>>>>>make the object
>>>>>+ * private to the specified VM.
>>>>>+ *
>>>>>+ * See struct drm_i915_gem_create_ext.
>>>>>+ */
>>>>>+struct drm_i915_gem_create_ext_vm_private {
>>>>>+#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>+    struct i915_user_extension base;
>>>>>+
>>>>>+    /** @vm_id: Id of the VM to which the object is private */
>>>>>+    __u32 vm_id;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>>>>+ *
>>>>>+ * User/Memory fence can be woken up either by:
>>>>>+ *
>>>>>+ * 1. GPU context indicated by @ctx_id, or,
>>>>>+ * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>>>+ *    @ctx_id is ignored when this flag is set.
>>>>>+ *
>>>>>+ * Wakeup condition is,
>>>>>+ * ``((*addr & mask) op (value & mask))``
>>>>>+ *
>>>>>+ * See :ref:`Documentation/driver-api/dma-buf.rst 
>>>>><indefinite_dma_fences>`
>>>>>+ */
>>>>>+struct drm_i915_gem_wait_user_fence {
>>>>>+    /** @extensions: Zero-terminated chain of extensions. */
>>>>>+    __u64 extensions;
>>>>>+
>>>>>+    /** @addr: User/Memory fence address */
>>>>>+    __u64 addr;
>>>>>+
>>>>>+    /** @ctx_id: Id of the Context which will signal the fence. */
>>>>>+    __u32 ctx_id;
>>>>>+
>>>>>+    /** @op: Wakeup condition operator */
>>>>>+    __u16 op;
>>>>>+#define I915_UFENCE_WAIT_EQ      0
>>>>>+#define I915_UFENCE_WAIT_NEQ     1
>>>>>+#define I915_UFENCE_WAIT_GT      2
>>>>>+#define I915_UFENCE_WAIT_GTE     3
>>>>>+#define I915_UFENCE_WAIT_LT      4
>>>>>+#define I915_UFENCE_WAIT_LTE     5
>>>>>+#define I915_UFENCE_WAIT_BEFORE  6
>>>>>+#define I915_UFENCE_WAIT_AFTER   7
>>>>>+
>>>>>+    /**
>>>>>+     * @flags: Supported flags are:
>>>>>+     *
>>>>>+     * I915_UFENCE_WAIT_SOFT:
>>>>>+     *
>>>>>+     * To be woken up by i915 driver async worker (not by GPU).
>>>>>+     *
>>>>>+     * I915_UFENCE_WAIT_ABSTIME:
>>>>>+     *
>>>>>+     * Wait timeout specified as absolute time.
>>>>>+     */
>>>>>+    __u16 flags;
>>>>>+#define I915_UFENCE_WAIT_SOFT    0x1
>>>>>+#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>>>+
>>>>>+    /** @value: Wakeup value */
>>>>>+    __u64 value;
>>>>>+
>>>>>+    /** @mask: Wakeup mask */
>>>>>+    __u64 mask;
>>>>>+#define I915_UFENCE_WAIT_U8     0xffu
>>>>>+#define I915_UFENCE_WAIT_U16    0xffffu
>>>>>+#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>>>+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>>>+
>>>>>+    /**
>>>>>+     * @timeout: Wait timeout in nanoseconds.
>>>>>+     *
>>>>>+     * If the I915_UFENCE_WAIT_ABSTIME flag is set, then the 
>>>>>timeout is the
>>>>>+     * absolute time in nsec.
>>>>>+     */
>>>>>+    __s64 timeout;
>>>>>+};
>>>
>>>
Tvrtko Ursulin June 13, 2022, 4:22 p.m. UTC | #9
On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
> On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>>
>> On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>> On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
>>>> On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>>
>>>>> On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>> VM_BIND and related uapi definitions
>>>>>>
>>>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>> ---
>>>>>>   Documentation/gpu/rfc/i915_vm_bind.h | 490 
>>>>>> +++++++++++++++++++++++++++
>>>>>>   1 file changed, 490 insertions(+)
>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>
>>>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> new file mode 100644
>>>>>> index 000000000000..9fc854969cfb
>>>>>> --- /dev/null
>>>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> @@ -0,0 +1,490 @@
>>>>>> +/* SPDX-License-Identifier: MIT */
>>>>>> +/*
>>>>>> + * Copyright © 2022 Intel Corporation
>>>>>> + */
>>>>>> +
>>>>>> +/**
>>>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>> + *
>>>>>> + * VM_BIND feature availability.
>>>>>> + * See typedef drm_i915_getparam_t param.
>>>>>> + * bit[0]: If set, VM_BIND is supported, otherwise not.
>>>>>> + * bits[8-15]: VM_BIND implementation version.
>>>>>> + * version 0 will not have VM_BIND/UNBIND timeline fence array 
>>>>>> support.
>>>>>> + */
>>>>>> +#define I915_PARAM_HAS_VM_BIND        57
>>>>>> +
>>>>>> +/**
>>>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>> + *
>>>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>> + * See struct drm_i915_gem_vm_control flags.
>>>>>> + *
>>>>>> + * The older execbuf2 ioctl will not support VM_BIND mode of 
>>>>>> operation.
>>>>>> + * For VM_BIND mode, we have new execbuf3 ioctl which will not 
>>>>>> accept any
>>>>>> + * execlist (See struct drm_i915_gem_execbuffer3 for more details).
>>>>>> + *
>>>>>> + */
>>>>>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>>>> +
>>>>>> +/**
>>>>>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>>>> + *
>>>>>> + * Flag to declare context as long running.
>>>>>> + * See struct drm_i915_gem_context_create_ext flags.
>>>>>> + *
>>>>>> + * Usage of dma-fence expects that they complete in reasonable 
>>>>>> amount of time.
>>>>>> + * Compute on the other hand can be long running. Hence it is not 
>>>>>> appropriate
>>>>>> + * for compute contexts to export request completion dma-fence to 
>>>>>> user.
>>>>>> + * The dma-fence usage will be limited to in-kernel consumption 
>>>>>> only.
>>>>>> + * Compute contexts need to use user/memory fence.
>>>>>> + *
>>>>>> + * So, long running contexts do not support output fences. Hence,
>>>>>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) is 
>>>>>> expected
>>>>>> + * to be not used. DRM_I915_GEM_WAIT ioctl call is also not 
>>>>>> supported for
>>>>>> + * objects mapped to long running contexts.
>>>>>> + */
>>>>>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>>>> +
>>>>>> +/* VM_BIND related ioctls */
>>>>>> +#define DRM_I915_GEM_VM_BIND        0x3d
>>>>>> +#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>>>> +#define DRM_I915_GEM_EXECBUFFER3    0x3f
>>>>>> +#define DRM_I915_GEM_WAIT_USER_FENCE    0x40
>>>>>> +
>>>>>> +#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>> DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>>>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>> DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>>>>> +#define DRM_IOCTL_I915_GEM_EXECBUFFER3 DRM_IOWR(DRM_COMMAND_BASE 
>>>>>> + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
>>>>>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct 
>>>>>> drm_i915_gem_wait_user_fence)
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>>>> + *
>>>>>> + * This structure is passed to VM_BIND ioctl and specifies the 
>>>>>> mapping of GPU
>>>>>> + * virtual address (VA) range to the section of an object that 
>>>>>> should be bound
>>>>>> + * in the device page table of the specified address space (VM).
>>>>>> + * The VA range specified must be unique (ie., not currently 
>>>>>> bound) and can
>>>>>> + * be mapped to whole object or a section of the object (partial 
>>>>>> binding).
>>>>>> + * Multiple VA mappings can be created to the same section of the 
>>>>>> object
>>>>>> + * (aliasing).
>>>>>> + *
>>>>>> + * The @queue_idx specifies the queue to use for binding. Same 
>>>>>> queue can be
>>>>>> + * used for both VM_BIND and VM_UNBIND calls. All submitted bind 
>>>>>> and unbind
>>>>>> + * operations in a queue are performed in the order of submission.
>>>>>> + *
>>>>>> + * The @start, @offset and @length should be 4K page aligned. 
>>>>>> However the DG2
>>>>>> + * and XEHPSDV has 64K page size for device local-memory and has 
>>>>>> compact page
>>>>>> + * table. On those platforms, for binding device local-memory 
>>>>>> objects, the
>>>>>> + * @start should be 2M aligned, @offset and @length should be 64K 
>>>>>> aligned.
>>>>>> + * Also, on those platforms, it is not allowed to bind a device 
>>>>>> local-memory
>>>>>> + * object and a system memory object in a single 2M section of VA 
>>>>>> range.
>>>>>> + */
>>>>>> +struct drm_i915_gem_vm_bind {
>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>> +    __u32 vm_id;
>>>>>> +
>>>>>> +    /** @queue_idx: Index of queue for binding */
>>>>>> +    __u32 queue_idx;
>>>>>
>>>>> I have a question here to which I did not find an answer by 
>>>>> browsing the old threads.
>>>>>
>>>>> Queue index appears to be an implicit synchronisation mechanism, 
>>>>> right? Operations on the same index are executed/complete in order 
>>>>> of ioctl submission?
>>>>>
>>>>> Do we _have_ to implement this on the kernel side and could just 
>>>>> allow in/out fence and let userspace deal with it?
>>>>
>>>>
>>>> It orders operations like in a queue, which is kind of what happens 
>>>> with existing queues/engines.
>>>>
>>>> If I understood correctly, it's going to be a kthread + a linked 
>>>> list right?
>>>>
>>>
>>> Yes, that is correct.
>>>
>>>>
>>>> -Lionel
>>>>
>>>>
>>>>>
>>>>> Arbitrary/on-demand number of queues will add the complexity on the 
>>>>> kernel side which should be avoided if possible.
>>>>>
>>>
>>> It was discussed in the other thread. Jason prefers this over putting
>>> an artificial limit on the number of queues (as the user can exhaust
>>> memory anyway). I think the complexity in the driver is manageable.
>>
>> You'll need to create tracking structures on demand, with atomic 
>> replace of last fence, ref counting and locking of some sort, more or 
>> less?
>>
> 
> We will have a workqueue, a work item and a linked list per queue.
> A VM_BIND/UNBIND call will add the mapping request to the specified queue's
> linked list and schedule the work item on the workqueue of that queue.
> I am not sure what you mean by the last fence and replacing it.
> 
>>> The other option being discussed is to have the user create those
>>> queues (like creating an engine map) beforehand and use them in the
>>> vm_bind and vm_unbind ioctls. This puts a limit on the number of queues,
>>> but it is not clean either and I am not sure it is worth making the
>>> interface more complex.
>>> https://www.spinics.net/lists/dri-devel/msg350448.html
>>
>> What about the third option of a flag to return a fence (of some sort) 
>> and pass in a fence? That way userspace can imagine zero or N queues 
>> with very little effort on the kernel side. Was this considered?
>>
> 
> I am not clear what fence you are talking about here and how does that
> help with the number of vm_bind queues. Can you elaborate?

It is actually already documented that bind/unbind will support input 
and output fences - so what are these queues on top of what userspace 
can already achieve by using them? Purely a convenience, or is there more 
to it?

Regards,

Tvrtko
Niranjana Vishwanathapura June 13, 2022, 5:49 p.m. UTC | #10
On Mon, Jun 13, 2022 at 05:22:02PM +0100, Tvrtko Ursulin wrote:
>
>On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
>>On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>>>
>>>On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>>>On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
>>>>>On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>>>
>>>>>>On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>>>VM_BIND and related uapi definitions
>>>>>>>
>>>>>>>Signed-off-by: Niranjana Vishwanathapura 
>>>>>>><niranjana.vishwanathapura@intel.com>
>>>>>>>---
>>>>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 490 
>>>>>>>+++++++++++++++++++++++++++
>>>>>>>  1 file changed, 490 insertions(+)
>>>>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>
>>>>>>>diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>>b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>new file mode 100644
>>>>>>>index 000000000000..9fc854969cfb
>>>>>>>--- /dev/null
>>>>>>>+++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>@@ -0,0 +1,490 @@
>>>>>>>+/* SPDX-License-Identifier: MIT */
>>>>>>>+/*
>>>>>>>+ * Copyright © 2022 Intel Corporation
>>>>>>>+ */
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>+ *
>>>>>>>+ * VM_BIND feature availability.
>>>>>>>+ * See typedef drm_i915_getparam_t param.
>>>>>>>+ * bit[0]: If set, VM_BIND is supported, otherwise not.
>>>>>>>+ * bits[8-15]: VM_BIND implementation version.
>>>>>>>+ * version 0 will not have VM_BIND/UNBIND timeline fence 
>>>>>>>array support.
>>>>>>>+ */
>>>>>>>+#define I915_PARAM_HAS_VM_BIND        57
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>+ *
>>>>>>>+ * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>>>+ * See struct drm_i915_gem_vm_control flags.
>>>>>>>+ *
>>>>>>>+ * The older execbuf2 ioctl will not support VM_BIND mode 
>>>>>>>of operation.
>>>>>>>+ * For VM_BIND mode, we have new execbuf3 ioctl which 
>>>>>>>will not accept any
>>>>>>>+ * execlist (See struct drm_i915_gem_execbuffer3 for more details).
>>>>>>>+ *
>>>>>>>+ */
>>>>>>>+#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>>>>>+ *
>>>>>>>+ * Flag to declare context as long running.
>>>>>>>+ * See struct drm_i915_gem_context_create_ext flags.
>>>>>>>+ *
>>>>>>>+ * Usage of dma-fence expects that they complete in 
>>>>>>>reasonable amount of time.
>>>>>>>+ * Compute on the other hand can be long running. Hence 
>>>>>>>it is not appropriate
>>>>>>>+ * for compute contexts to export request completion 
>>>>>>>dma-fence to user.
>>>>>>>+ * The dma-fence usage will be limited to in-kernel 
>>>>>>>consumption only.
>>>>>>>+ * Compute contexts need to use user/memory fence.
>>>>>>>+ *
>>>>>>>+ * So, long running contexts do not support output fences. Hence,
>>>>>>>+ * I915_EXEC_FENCE_SIGNAL (See 
>>>>>>>&drm_i915_gem_exec_fence.flags) is expected
>>>>>>>+ * to be not used. DRM_I915_GEM_WAIT ioctl call is also 
>>>>>>>not supported for
>>>>>>>+ * objects mapped to long running contexts.
>>>>>>>+ */
>>>>>>>+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>>>>>+
>>>>>>>+/* VM_BIND related ioctls */
>>>>>>>+#define DRM_I915_GEM_VM_BIND        0x3d
>>>>>>>+#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>>>>>+#define DRM_I915_GEM_EXECBUFFER3    0x3f
>>>>>>>+#define DRM_I915_GEM_WAIT_USER_FENCE    0x40
>>>>>>>+
>>>>>>>+#define DRM_IOCTL_I915_GEM_VM_BIND 
>>>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct 
>>>>>>>drm_i915_gem_vm_bind)
>>>>>>>+#define DRM_IOCTL_I915_GEM_VM_UNBIND 
>>>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct 
>>>>>>>drm_i915_gem_vm_bind)
>>>>>>>+#define DRM_IOCTL_I915_GEM_EXECBUFFER3 
>>>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, 
>>>>>>>struct drm_i915_gem_execbuffer3)
>>>>>>>+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, 
>>>>>>>struct drm_i915_gem_wait_user_fence)
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>>>>>+ *
>>>>>>>+ * This structure is passed to VM_BIND ioctl and 
>>>>>>>specifies the mapping of GPU
>>>>>>>+ * virtual address (VA) range to the section of an object 
>>>>>>>that should be bound
>>>>>>>+ * in the device page table of the specified address space (VM).
>>>>>>>+ * The VA range specified must be unique (i.e., not 
>>>>>>>currently bound) and can
>>>>>>>+ * be mapped to the whole object or a section of the object 
>>>>>>>(partial binding).
>>>>>>>+ * Multiple VA mappings can be created to the same 
>>>>>>>section of the object
>>>>>>>+ * (aliasing).
>>>>>>>+ *
>>>>>>>+ * The @queue_idx specifies the queue to use for binding. 
>>>>>>>Same queue can be
>>>>>>>+ * used for both VM_BIND and VM_UNBIND calls. All 
>>>>>>>submitted bind and unbind
>>>>>>>+ * operations in a queue are performed in the order of submission.
>>>>>>>+ *
>>>>>>>+ * The @start, @offset and @length should be 4K page 
>>>>>>>aligned. However, DG2
>>>>>>>+ * and XEHPSDV have a 64K page size for device local-memory 
>>>>>>>and a compact page
>>>>>>>+ * table. On those platforms, for binding device 
>>>>>>>local-memory objects, the
>>>>>>>+ * @start should be 2M aligned, @offset and @length 
>>>>>>>should be 64K aligned.
>>>>>>>+ * Also, on those platforms, it is not allowed to bind a 
>>>>>>>device local-memory
>>>>>>>+ * object and a system memory object in a single 2M 
>>>>>>>section of VA range.
>>>>>>>+ */
>>>>>>>+struct drm_i915_gem_vm_bind {
>>>>>>>+    /** @vm_id: VM (address space) id to bind */
>>>>>>>+    __u32 vm_id;
>>>>>>>+
>>>>>>>+    /** @queue_idx: Index of queue for binding */
>>>>>>>+    __u32 queue_idx;
>>>>>>
>>>>>>I have a question here to which I did not find an answer by 
>>>>>>browsing the old threads.
>>>>>>
>>>>>>Queue index appears to be an implicit synchronisation 
>>>>>>mechanism, right? Operations on the same index are 
>>>>>>executed/complete in order of ioctl submission?
>>>>>>
>>>>>>Do we _have_ to implement this on the kernel side and could 
>>>>>>just allow in/out fence and let userspace deal with it?
>>>>>
>>>>>
>>>>>It orders operations like in a queue. Which is kind of what 
>>>>>happens with existing queues/engines.
>>>>>
>>>>>If I understood correctly, it's going to be a kthread + a 
>>>>>linked list right?
>>>>>
>>>>
>>>>Yes, that is correct.
>>>>
>>>>>
>>>>>-Lionel
>>>>>
>>>>>
>>>>>>
>>>>>>An arbitrary/on-demand number of queues will add complexity 
>>>>>>on the kernel side which should be avoided if possible.
>>>>>>
>>>>
>>>>It was discussed in the other thread. Jason prefers this over putting
>>>>an artificial limit on the number of queues (as the user can anyway exhaust
>>>>the memory). I think complexity in the driver is manageable.
>>>
>>>You'll need to create tracking structures on demand, with atomic 
>>>replace of last fence, ref counting and locking of some sort, more 
>>>or less?
>>>
>>
>>We will have a workqueue, a work item and a linked list per queue.
>>VM_BIND/UNBIND call will add the mapping request to the specified queue's
>>linked list and schedule the work item on the workqueue of that queue.
>>I am not sure what you mean by last fence and replacing it.
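To sketch the idea (a minimal sketch only; the structure and field names
are hypothetical, not from any posted patch):

	struct i915_vm_bind_queue {
		struct workqueue_struct *wq;	/* one ordered workqueue per queue_idx */
		struct work_struct work;	/* work item draining the list */
		struct list_head pending;	/* bind/unbind requests, submission order */
		spinlock_t lock;		/* protects @pending */
	};

The ioctl appends the request to @pending and queues @work; the worker pops
requests in FIFO order, which is what gives the per-queue in-order guarantee.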
>>
>>>>The other option being discussed is to have the user create those
>>>>queues (like creating engine map) before hand and use that in vm_bind
>>>>and vm_unbind ioctls. This puts a limit on the number of queues.
>>>>But it is not clean either and not sure it is worth making the interface
>>>>more complex.
>>>>https://www.spinics.net/lists/dri-devel/msg350448.html
>>>
>>>What about the third option of a flag to return a fence (of some 
>>>sort) and pass in a fence? That way userspace can imagine zero or 
>>>N queues with very little effort on the kernel side. Was this 
>>>considered?
>>>
>>
>>I am not clear what fence you are talking about here and how does that
>>help with the number of vm_bind queues. Can you elaborate?
>
>It is actually already documented that bind/unbind will support input 
>and output fences - so what are these queues on top of what userspace 
>can already achieve by using them? Purely a convenience, or is there 
>more to it?
>

Oh, the vm_bind queues are discussed in this thread.
https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html

Apparently Vulkan has a requirement for multiple queues, each queue
processing vm_bind/unbind calls in the order of submission.
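For example (a sketch only, queue numbering hypothetical):

queue 0: vm_bind A; vm_bind B	/* B starts only after A completes */
queue 1: vm_bind C		/* independent of queue 0 */

Each queue is FIFO in itself, but separate queues can progress in parallel.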

Niranjana

>Regards,
>
>Tvrtko
Niranjana Vishwanathapura June 13, 2022, 5:56 p.m. UTC | #11
On Fri, Jun 10, 2022 at 09:35:14AM -0700, Niranjana Vishwanathapura wrote:
>On Fri, Jun 10, 2022 at 01:56:58AM -0700, Matthew Brost wrote:
>>On Fri, Jun 10, 2022 at 01:53:40AM -0700, Matthew Brost wrote:
>>>On Fri, Jun 10, 2022 at 12:07:11AM -0700, Niranjana Vishwanathapura wrote:
>>>> VM_BIND and related uapi definitions
>>>>
>>>> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
>>>> ---
>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 490 +++++++++++++++++++++++++++
>>>>  1 file changed, 490 insertions(+)
>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
>>>> new file mode 100644
>>>> index 000000000000..9fc854969cfb
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>> @@ -0,0 +1,490 @@
>>>> +/* SPDX-License-Identifier: MIT */
>>>> +/*
>>>> + * Copyright © 2022 Intel Corporation
>>>> + */
>>>> +
>>>> +/**
>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>> + *
>>>> + * VM_BIND feature availability.
>>>> + * See typedef drm_i915_getparam_t param.
>>>> + * bit[0]: If set, VM_BIND is supported, otherwise not.
>>>> + * bits[8-15]: VM_BIND implementation version.
>>>> + * version 0 will not have VM_BIND/UNBIND timeline fence array support.
>>>> + */
>>>> +#define I915_PARAM_HAS_VM_BIND		57
>>>> +
>>>> +/**
>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>> + *
>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>> + * See struct drm_i915_gem_vm_control flags.
>>>> + *
>>>> + * The older execbuf2 ioctl will not support VM_BIND mode of operation.
>>>> + * For VM_BIND mode, we have new execbuf3 ioctl which will not accept any
>>>> + * execlist (See struct drm_i915_gem_execbuffer3 for more details).
>>>> + *
>>>> + */
>>>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
>>>> +
>>>> +/**
>>>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>> + *
>>>> + * Flag to declare context as long running.
>>>> + * See struct drm_i915_gem_context_create_ext flags.
>>>> + *
>>>> + * Usage of dma-fence expects that they complete in reasonable amount of time.
>>>> + * Compute on the other hand can be long running. Hence it is not appropriate
>>>> + * for compute contexts to export request completion dma-fence to user.
>>>> + * The dma-fence usage will be limited to in-kernel consumption only.
>>>> + * Compute contexts need to use user/memory fence.
>>>> + *
>>>> + * So, long running contexts do not support output fences. Hence,
>>>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) is expected
>>>> + * to be not used. DRM_I915_GEM_WAIT ioctl call is also not supported for
>>>> + * objects mapped to long running contexts.
>>>> + */
>>>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>> +
>>>> +/* VM_BIND related ioctls */
>>>> +#define DRM_I915_GEM_VM_BIND		0x3d
>>>> +#define DRM_I915_GEM_VM_UNBIND		0x3e
>>>> +#define DRM_I915_GEM_EXECBUFFER3	0x3f
>>>> +#define DRM_I915_GEM_WAIT_USER_FENCE	0x40
>>>> +
>>>> +#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>>> +#define DRM_IOCTL_I915_GEM_EXECBUFFER3		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
>>>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>> + *
>>>> + * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
>>>> + * virtual address (VA) range to the section of an object that should be bound
>>>> + * in the device page table of the specified address space (VM).
>>>> + * The VA range specified must be unique (i.e., not currently bound) and can
>>>> + * be mapped to the whole object or a section of the object (partial binding).
>>>> + * Multiple VA mappings can be created to the same section of the object
>>>> + * (aliasing).
>>>> + *
>>>> + * The @queue_idx specifies the queue to use for binding. Same queue can be
>>>> + * used for both VM_BIND and VM_UNBIND calls. All submitted bind and unbind
>>>> + * operations in a queue are performed in the order of submission.
>>>> + *
>>>> + * The @start, @offset and @length should be 4K page aligned. However, DG2
>>>> + * and XEHPSDV have a 64K page size for device local-memory and a compact page
>>>> + * table. On those platforms, for binding device local-memory objects, the
>>>> + * @start should be 2M aligned, @offset and @length should be 64K aligned.
>>>> + * Also, on those platforms, it is not allowed to bind a device local-memory
>>>> + * object and a system memory object in a single 2M section of VA range.
>>>> + */
>>>> +struct drm_i915_gem_vm_bind {
>>>> +	/** @vm_id: VM (address space) id to bind */
>>>> +	__u32 vm_id;
>>>> +
>>>> +	/** @queue_idx: Index of queue for binding */
>>>> +	__u32 queue_idx;
>>>> +
>>>> +	/** @rsvd: Reserved, MBZ */
>>>> +	__u32 rsvd;
>>>> +
>>>> +	/** @handle: Object handle */
>>>> +	__u32 handle;
>>>> +
>>>> +	/** @start: Virtual Address start to bind */
>>>> +	__u64 start;
>>>> +
>>>> +	/** @offset: Offset in object to bind */
>>>> +	__u64 offset;
>>>> +
>>>> +	/** @length: Length of mapping to bind */
>>>> +	__u64 length;
>>>
>>>This probably isn't needed. We are never going to unbind a subset of a
>>>VMA, are we? That being said, it can't hurt as a sanity check (e.g.
>>>internal vma->length == user unbind length).
>>>
>>
>>Ugh, I c/p this into the wrong place. This should be in the unbind struct.
>
>Having the 'length' field for unbind helps if in the future we want to allow
>unbinding of multiple mappings (vmas) in a single ioctl call. i.e., all
>mappings that fall in the 'start' - 'start+length-1' range can be
>unmapped. We don't support it today as it is somewhat tied to operations
>like vma split/merge etc. which we are not supporting with initial vm_bind
>support.
>So yeah, currently, it helps as a sanity check and ensures the user is
>correctly unbinding a mapping.
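For illustration (a sketch, addresses and handles hypothetical), such a
future range unbind could look like:

	bind   start=0x1000000 handle=3 offset=0 length=4096
	bind   start=0x1001000 handle=4 offset=0 length=4096
	unbind start=0x1000000 length=8192	/* would drop both mappings */

whereas today the unbind has to match one of the two binds exactly.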
>
>>
>>>> +
>>>> +	/**
>>>> +	 * @flags: Supported flags are:
>>>> +	 *
>>>> +	 * I915_GEM_VM_BIND_READONLY:
>>>> +	 * Mapping is read-only.
>>>> +	 *
>>>> +	 * I915_GEM_VM_BIND_CAPTURE:
>>>> +	 * Capture this mapping in the dump upon GPU error.
>>>> +	 */
>>>> +	__u64 flags;
>>>> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>> +
>>>> +	/**
>>>> +	 * @extensions: 0-terminated chain of extensions for this operation.
>>>> +	 *
>>>> +	 * I915_VM_BIND_EXT_TIMELINE_FENCES:
>>>> +	 * Specifies an array of input or output timeline fences for this
>>>> +	 * binding operation. See struct drm_i915_vm_bind_ext_timeline_fences.
>>>> +	 *
>>>> +	 * I915_VM_BIND_EXT_USER_FENCES:
>>>> +	 * Specifies an array of input or output user fences for this
>>>> +	 * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
>>>> +	 * This is required for compute contexts.
>>>> +	 */
>>>> +	__u64 extensions;
>>>> +#define I915_VM_BIND_EXT_TIMELINE_FENCES	0
>>>> +#define I915_VM_BIND_EXT_USER_FENCES		1
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>> + *
>>>> + * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
>>>> + * address (VA) range that should be unbound from the device page table of the
>>>> + * specified address space (VM). The specified VA range must match one of the
>>>> + * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>>>> + * completion.
>>>> + *
>>>> + * The @queue_idx specifies the queue to use for unbinding.
>>>> + * See struct drm_i915_gem_vm_bind for more information on @queue_idx.
>>>> + *
>>>> + * The @start and @length must specify a unique mapping bound with VM_BIND
>>>> + * ioctl.
>>>> + */
>>>> +struct drm_i915_gem_vm_unbind {
>>>> +	/** @vm_id: VM (address space) id to bind */
>>>> +	__u32 vm_id;
>>>> +
>>>> +	/** @queue_idx: Index of queue for unbinding */
>>>> +	__u32 queue_idx;
>>>> +
>>>> +	/** @start: Virtual Address start to unbind */
>>>> +	__u64 start;
>>>> +
>>>> +	/** @length: Length of mapping to unbind */
>>>> +	__u64 length;
>>>> +
>>>> +	/** @flags: Reserved for future usage, currently MBZ */
>>>> +	__u64 flags;
>>>> +
>>>> +	/**
>>>> +	 * @extensions: 0-terminated chain of extensions for this operation.
>>>> +	 *
>>>> +	 * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
>>>> +	 * Specifies an array of input or output timeline fences for this
>>>> +	 * unbind operation.
>>>> +	 * It has same format as struct drm_i915_vm_bind_ext_timeline_fences.
>>>> +	 *
>>>> +	 * I915_VM_UNBIND_EXT_USER_FENCES:
>>>> +	 * Specifies an array of input or output user fences for this
>>>> +	 * unbind operation. This is required for compute contexts.
>>>> +	 * It has same format as struct drm_i915_vm_bind_ext_user_fence.
>>>> +	 */
>>>> +	__u64 extensions;
>>>> +#define I915_VM_UNBIND_EXT_TIMELINE_FENCES	0
>>>> +#define I915_VM_UNBIND_EXT_USER_FENCES		1
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
>>>> + * or the vm_unbind work.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will wait for the input fence to signal
>>>> + * before starting the binding or unbinding.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will signal the returned output fence
>>>> + * after the completion of binding or unbinding.
>>>> + */
>>>> +struct drm_i915_vm_bind_fence {
>>>> +	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
>>>> +	__u32 handle;
>>>> +
>>>> +	/**
>>>> +	 * @flags: Supported flags are:
>>>> +	 *
>>>> +	 * I915_VM_BIND_FENCE_WAIT:
>>>> +	 * Wait for the input fence before binding/unbinding
>>>> +	 *
>>>> +	 * I915_VM_BIND_FENCE_SIGNAL:
>>>> +	 * Return bind/unbind completion fence as output
>>>> +	 */
>>>> +	__u32 flags;
>>>> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
>>>> + * and vm_unbind.
>>>> + *
>>>> + * This structure describes an array of timeline drm_syncobj and associated
>>>> + * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
>>>> + * can be input or output fences (See struct drm_i915_vm_bind_fence).
>>>> + */
>>>> +struct drm_i915_vm_bind_ext_timeline_fences {
>>>> +	/** @base: Extension link. See struct i915_user_extension. */
>>>> +	struct i915_user_extension base;
>>>> +
>>>> +	/**
>>>> +	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
>>>> +	 * arrays.
>>>> +	 */
>>>> +	__u64 fence_count;
>>>> +
>>>> +	/**
>>>> +	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
>>>> +	 * of length @fence_count.
>>>> +	 */
>>>> +	__u64 handles_ptr;
>>>> +
>>>> +	/**
>>>> +	 * @values_ptr: Pointer to an array of u64 values of length
>>>> +	 * @fence_count.
>>>> +	 * Values must be 0 for a binary drm_syncobj. A value of 0 for a
>>>> +	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
>>>> +	 * binary one.
>>>> +	 */
>>>> +	__u64 values_ptr;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
>>>> + * vm_bind or the vm_unbind work.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will wait for the input fence (value at
>>>> + * @addr to become equal to @val) before starting the binding or unbinding.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will signal the output fence after
>>>> + * the completion of binding or unbinding by writing @val to the memory location at
>>>> + * @addr.
>>>> + */
>>>> +struct drm_i915_vm_bind_user_fence {
>>>> +	/** @addr: User/Memory fence qword aligned process virtual address */
>>>> +	__u64 addr;
>>>> +
>>>> +	/** @val: User/Memory fence value to be written after bind completion */
>>>> +	__u64 val;
>>>> +
>>>> +	/**
>>>> +	 * @flags: Supported flags are:
>>>> +	 *
>>>> +	 * I915_VM_BIND_USER_FENCE_WAIT:
>>>> +	 * Wait for the input fence before binding/unbinding
>>>> +	 *
>>>> +	 * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>> +	 * Return bind/unbind completion fence as output
>>>> +	 */
>>>> +	__u32 flags;
>>>> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>> +	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
>>>> + * and vm_unbind.
>>>> + *
>>>> + * These user fences can be input or output fences
>>>> + * (See struct drm_i915_vm_bind_user_fence).
>>>> + */
>>>> +struct drm_i915_vm_bind_ext_user_fence {
>>>> +	/** @base: Extension link. See struct i915_user_extension. */
>>>> +	struct i915_user_extension base;
>>>> +
>>>> +	/** @fence_count: Number of elements in the @user_fence_ptr array. */
>>>> +	__u64 fence_count;
>>>> +
>>>> +	/**
>>>> +	 * @user_fence_ptr: Pointer to an array of
>>>> +	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>> +	 */
>>>> +	__u64 user_fence_ptr;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
>>>> + * ioctl.
>>>> + *
>>>> + * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and VM_BIND mode
>>>> + * only works with this ioctl for submission.
>>>> + * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
>>>> + */
>>>> +struct drm_i915_gem_execbuffer3 {
>>>> +	/**
>>>> +	 * @ctx_id: Context id
>>>> +	 *
>>>> +	 * Only contexts with user engine map are allowed.
>>>> +	 */
>>>> +	__u32 ctx_id;
>>>> +
>>>> +	/**
>>>> +	 * @engine_idx: Engine index
>>>> +	 *
>>>> +	 * An index in the user engine map of the context specified by @ctx_id.
>>>> +	 */
>>>> +	__u32 engine_idx;
>>>> +
>>>> +	/** @rsvd1: Reserved, MBZ */
>>>> +	__u32 rsvd1;
>>>> +
>>>> +	/**
>>>> +	 * @batch_count: Number of batches in @batch_address array.
>>>> +	 *
>>>> +	 * 0 is invalid. For parallel submission, it should be equal to the
>>>> +	 * number of (parallel) engines involved in that submission.
>>>> +	 */
>>>> +	__u32 batch_count;
>>>> +
>>>> +	/**
>>>> +	 * @batch_address: Array of batch gpu virtual addresses.
>>>> +	 *
>>>> +	 * If @batch_count is 1, then it is the gpu virtual address of the
>>>> +	 * batch buffer. If @batch_count > 1, then it is a pointer to an array
>>>> +	 * of batch buffer gpu virtual addresses.
>>>> +	 */
>>>> +	__u64 batch_address;
>>>> +
>>>> +	/**
>>>> +	 * @flags: Supported flags are:
>>>> +	 *
>>>> +	 * I915_EXEC3_SECURE:
>>>> +	 * Request a privileged ("secure") batch buffer/s.
>>>> +	 * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
>>>> +	 */
>>>> +	__u64 flags;
>>>> +#define I915_EXEC3_SECURE	(1<<0)
>>>> +
>>>> +	/** @rsvd2: Reserved, MBZ */
>>>> +	__u64 rsvd2;
>>>> +
>>>> +	/**
>>>> +	 * @extensions: Zero-terminated chain of extensions.
>>>> +	 *
>>>> +	 * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
>>>> +	 * It has same format as DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
>>>> +	 * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
>>>> +	 *
>>>> +	 * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
>>>> +	 * First level batch completion signaling extension.
>>>> +	 * See struct drm_i915_gem_execbuffer3_ext_user_fence.
>>>> +	 */
>>>> +	__u64 extensions;
>>>> +#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES	0
>>>> +#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE		1
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_execbuffer3_ext_user_fence - First level batch completion
>>>> + * signaling extension.
>>>> + *
>>>> + * This extension allows the user to attach a user fence (@addr, @value pair) to
>>>> + * execbuf3, to be signaled by the command streamer after the completion of the
>>>> + * first level batch, by writing @value at the specified @addr and triggering an
>>>> + * interrupt.
>>>> + * The user can either poll for this user fence to signal or wait on it
>>>> + * with the i915_gem_wait_user_fence ioctl.
>>>> + * This is very useful for long running contexts where waiting on dma-fence
>>>> + * by the user (like the i915_gem_wait ioctl) is not supported.
>>>> + */
>>>> +struct drm_i915_gem_execbuffer3_ext_user_fence {
>>>> +	/** @base: Extension link. See struct i915_user_extension. */
>>>> +	struct i915_user_extension base;
>>>> +
>>>> +	/**
>>>> +	 * @addr: User/Memory fence qword aligned GPU virtual address.
>>>> +	 *
>>>> +	 * Address has to be a valid GPU virtual address at the time of
>>>> +	 * first level batch completion.
>>>> +	 */
>>>> +	__u64 addr;
>>>> +
>>>> +	/**
>>>> +	 * @value: User/Memory fence Value to be written to above address
>>>> +	 * after first level batch completes.
>>>> +	 */
>>>> +	__u64 value;
>>>> +
>>>> +	/** @rsvd: Reserved, MBZ */
>>>> +	__u64 rsvd;
>>>> +};
>>>> +
>>>
>>>IMO all of these fence structs should be a generic sync interface shared
>>>between both vm bind and exec3 rather than unique extensions.
>>>
>>>Both vm bind and exec3 should have something like this:
>>>
>>>__u64 syncs;	/* userptr to an array of generic syncs */
>>>__u64 n_syncs;
>>>
>>>Having an array of syncs lets the kernel do one user copy for all the
>>>syncs rather than reading them in a chain.
>>>
>>>A generic sync object encapsulates all possible syncs (in / out -
>>>syncobj, syncobj timeline, ufence, future sync concepts).
>>>
>>>e.g.
>>>
>>>struct {
>>>	__u32 user_ext;
>>>	__u32 flag;	/* in / out, type, whatever else info we need */
>>>	union {
>>>		__u32 handle; 	/* to syncobj */
>>>		__u64 addr; 	/* ufence address */
>>>	};
>>>	__u64 seqno;	/* syncobj timeline, ufence write value */
>>>	...reserve enough bits for future...
>>>}
>>>
>>>This unifies binds and execs by using the same sync interface,
>>>instilling the concept that binds and execs are the same op (queued
>>>operation w/ in/out fences).
>>>
>
>Hmm...I am not sure whether we really need to merge all fence types
>together in a single structure.
>
>Yes, timeline fence array struct is same between vm_bind and execbuf.
>But user fence array is not. For compute use case, VM_BIND/UNBIND can
>have user fence array. But execbuf will only have one user fence as
>out fence. Execbuf can have timeline fence array as 'in' fences for
>compute use case.
>
>Besides, for VM_BIND/UNBIND, the user will use either the timeline fence
>array (mesa) or the user fence array (compute) and will never mix
>them (i.e., having some fences as timeline fences and some as user
>fences is not allowed). Hence, no multiple copy_from_user concern.
>So, not sure if having a generic fence is a good idea. It will also be
>less confusing for the user if we define them separately.
>But internally i915 can maintain them in a generic structure if needed.
>

Also, I don't think we will have a case where an array of fences
passed by the user will have a mix of timeline and user fences.
So, we don't need a 'type' in each element of the array. The
extension (timeline or user fence) will be the type.
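For example, attaching a timeline fence array to a bind would look roughly
like this (a userspace sketch only; variable names are hypothetical, struct
and field names are from the RFC header above):

	struct drm_i915_vm_bind_ext_timeline_fences ext = {
		.base.name = I915_VM_BIND_EXT_TIMELINE_FENCES,
		.fence_count = num_fences,
		/* array of struct drm_i915_vm_bind_fence */
		.handles_ptr = (__u64)(uintptr_t)fences,
		/* array of __u64 points, 0 for binary syncobjs */
		.values_ptr = (__u64)(uintptr_t)values,
	};

	/* chain ends here since base.next_extension is 0 */
	vm_bind.extensions = (__u64)(uintptr_t)&ext;

A compute user would chain I915_VM_BIND_EXT_USER_FENCES the same way, just
with struct drm_i915_vm_bind_ext_user_fence instead.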

Niranjana

>Niranjana
>
>>>Matt
>>>
>>>> +/**
>>>> + * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
>>>> + * private to the specified VM.
>>>> + *
>>>> + * See struct drm_i915_gem_create_ext.
>>>> + */
>>>> +struct drm_i915_gem_create_ext_vm_private {
>>>> +#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
>>>> +	/** @base: Extension link. See struct i915_user_extension. */
>>>> +	struct i915_user_extension base;
>>>> +
>>>> +	/** @vm_id: Id of the VM to which the object is private */
>>>> +	__u32 vm_id;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>>> + *
>>>> + * User/Memory fence can be woken up either by:
>>>> + *
>>>> + * 1. GPU context indicated by @ctx_id, or,
>>>> + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>> + *    @ctx_id is ignored when this flag is set.
>>>> + *
>>>> + * Wakeup condition is,
>>>> + * ``((*addr & mask) op (value & mask))``
>>>> + *
>>>> + * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
>>>> + */
>>>> +struct drm_i915_gem_wait_user_fence {
>>>> +	/** @extensions: Zero-terminated chain of extensions. */
>>>> +	__u64 extensions;
>>>> +
>>>> +	/** @addr: User/Memory fence address */
>>>> +	__u64 addr;
>>>> +
>>>> +	/** @ctx_id: Id of the Context which will signal the fence. */
>>>> +	__u32 ctx_id;
>>>> +
>>>> +	/** @op: Wakeup condition operator */
>>>> +	__u16 op;
>>>> +#define I915_UFENCE_WAIT_EQ      0
>>>> +#define I915_UFENCE_WAIT_NEQ     1
>>>> +#define I915_UFENCE_WAIT_GT      2
>>>> +#define I915_UFENCE_WAIT_GTE     3
>>>> +#define I915_UFENCE_WAIT_LT      4
>>>> +#define I915_UFENCE_WAIT_LTE     5
>>>> +#define I915_UFENCE_WAIT_BEFORE  6
>>>> +#define I915_UFENCE_WAIT_AFTER   7
>>>> +
>>>> +	/**
>>>> +	 * @flags: Supported flags are:
>>>> +	 *
>>>> +	 * I915_UFENCE_WAIT_SOFT:
>>>> +	 *
>>>> +	 * To be woken up by i915 driver async worker (not by GPU).
>>>> +	 *
>>>> +	 * I915_UFENCE_WAIT_ABSTIME:
>>>> +	 *
>>>> +	 * Wait timeout specified as absolute time.
>>>> +	 */
>>>> +	__u16 flags;
>>>> +#define I915_UFENCE_WAIT_SOFT    0x1
>>>> +#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>> +
>>>> +	/** @value: Wakeup value */
>>>> +	__u64 value;
>>>> +
>>>> +	/** @mask: Wakeup mask */
>>>> +	__u64 mask;
>>>> +#define I915_UFENCE_WAIT_U8     0xffu
>>>> +#define I915_UFENCE_WAIT_U16    0xffffu
>>>> +#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>> +
>>>> +	/**
>>>> +	 * @timeout: Wait timeout in nanoseconds.
>>>> +	 *
>>>> +	 * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
>>>> +	 * absolute time in nsec.
>>>> +	 */
>>>> +	__s64 timeout;
>>>> +};
>>>> --
>>>> 2.21.0.rc0.32.g243a4c7e27
>>>>
Tvrtko Ursulin June 13, 2022, 6:09 p.m. UTC | #12
On 13/06/2022 18:49, Niranjana Vishwanathapura wrote:
> On Mon, Jun 13, 2022 at 05:22:02PM +0100, Tvrtko Ursulin wrote:
>>
>> On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
>>> On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>>>>
>>>> On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>>>> On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
>>>>>> On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>>>>
>>>>>>> On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>
>>>>>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>>>> ---
>>>>>>>>   Documentation/gpu/rfc/i915_vm_bind.h | 490 
>>>>>>>> +++++++++++++++++++++++++++
>>>>>>>>   1 file changed, 490 insertions(+)
>>>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>
>>>>>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>> new file mode 100644
>>>>>>>> index 000000000000..9fc854969cfb
>>>>>>>> --- /dev/null
>>>>>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>> @@ -0,0 +1,490 @@
>>>>>>>> +/* SPDX-License-Identifier: MIT */
>>>>>>>> +/*
>>>>>>>> + * Copyright © 2022 Intel Corporation
>>>>>>>> + */
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>> + *
>>>>>>>> + * VM_BIND feature availability.
>>>>>>>> + * See typedef drm_i915_getparam_t param.
>>>>>>>> + * bit[0]: If set, VM_BIND is supported, otherwise not.
>>>>>>>> + * bits[8-15]: VM_BIND implementation version.
>>>>>>>> + * version 0 will not have VM_BIND/UNBIND timeline fence array 
>>>>>>>> support.
>>>>>>>> + */
>>>>>>>> +#define I915_PARAM_HAS_VM_BIND        57
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>> + *
>>>>>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>>>> + * See struct drm_i915_gem_vm_control flags.
>>>>>>>> + *
>>>>>>>> + * The older execbuf2 ioctl will not support VM_BIND mode of 
>>>>>>>> operation.
>>>>>>>> + * For VM_BIND mode, we have new execbuf3 ioctl which will not 
>>>>>>>> accept any
>>>>>>>> + * execlist (See struct drm_i915_gem_execbuffer3 for more 
>>>>>>>> details).
>>>>>>>> + *
>>>>>>>> + */
>>>>>>>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>>>>>> + *
>>>>>>>> + * Flag to declare context as long running.
>>>>>>>> + * See struct drm_i915_gem_context_create_ext flags.
>>>>>>>> + *
>>>>>>>> + * Usage of dma-fence expects that they complete in reasonable 
>>>>>>>> amount of time.
>>>>>>>> + * Compute on the other hand can be long running. Hence it is 
>>>>>>>> not appropriate
>>>>>>>> + * for compute contexts to export request completion dma-fence 
>>>>>>>> to user.
>>>>>>>> + * The dma-fence usage will be limited to in-kernel consumption 
>>>>>>>> only.
>>>>>>>> + * Compute contexts need to use user/memory fence.
>>>>>>>> + *
>>>>>>>> + * So, long running contexts do not support output fences. Hence,
>>>>>>>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) 
>>>>>>>> is expected
>>>>>>>> + * to be not used. DRM_I915_GEM_WAIT ioctl call is also not 
>>>>>>>> supported for
>>>>>>>> + * objects mapped to long running contexts.
>>>>>>>> + */
>>>>>>>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>>>>>> +
>>>>>>>> +/* VM_BIND related ioctls */
>>>>>>>> +#define DRM_I915_GEM_VM_BIND        0x3d
>>>>>>>> +#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>>>>>> +#define DRM_I915_GEM_EXECBUFFER3    0x3f
>>>>>>>> +#define DRM_I915_GEM_WAIT_USER_FENCE    0x40
>>>>>>>> +
>>>>>>>> +#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>>>> DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>>>>>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE 
>>>>>>>> + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>>>>>>> +#define DRM_IOCTL_I915_GEM_EXECBUFFER3 
>>>>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct 
>>>>>>>> drm_i915_gem_execbuffer3)
>>>>>>>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct 
>>>>>>>> drm_i915_gem_wait_user_fence)
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>>>>>> + *
>>>>>>>> + * This structure is passed to VM_BIND ioctl and specifies the 
>>>>>>>> mapping of GPU
>>>>>>>> + * virtual address (VA) range to the section of an object that 
>>>>>>>> should be bound
>>>>>>>> + * in the device page table of the specified address space (VM).
>>>>>>>> + * The VA range specified must be unique (i.e., not currently 
>>>>>>>> bound) and can
>>>>>>>> + * be mapped to the whole object or a section of the object 
>>>>>>>> (partial binding).
>>>>>>>> + * Multiple VA mappings can be created to the same section of 
>>>>>>>> the object
>>>>>>>> + * (aliasing).
>>>>>>>> + *
>>>>>>>> + * The @queue_idx specifies the queue to use for binding. Same 
>>>>>>>> queue can be
>>>>>>>> + * used for both VM_BIND and VM_UNBIND calls. All submitted 
>>>>>>>> bind and unbind
>>>>>>>> + * operations in a queue are performed in the order of submission.
>>>>>>>> + *
>>>>>>>> + * The @start, @offset and @length should be 4K page aligned. 
>>>>>>>> However, DG2
>>>>>>>> + * and XEHPSDV have a 64K page size for device local-memory and 
>>>>>>>> a compact page
>>>>>>>> + * table. On those platforms, for binding device local-memory 
>>>>>>>> objects, the
>>>>>>>> + * @start should be 2M aligned, @offset and @length should be 
>>>>>>>> 64K aligned.
>>>>>>>> + * Also, on those platforms, it is not allowed to bind a 
>>>>>>>> device local-memory
>>>>>>>> + * object and a system memory object in a single 2M section of 
>>>>>>>> VA range.
>>>>>>>> + */
>>>>>>>> +struct drm_i915_gem_vm_bind {
>>>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>>>> +    __u32 vm_id;
>>>>>>>> +
>>>>>>>> +    /** @queue_idx: Index of queue for binding */
>>>>>>>> +    __u32 queue_idx;
>>>>>>>
>>>>>>> I have a question here to which I did not find an answer by 
>>>>>>> browsing the old threads.
>>>>>>>
>>>>>>> Queue index appears to be an implicit synchronisation mechanism, 
>>>>>>> right? Operations on the same index are executed/complete in 
>>>>>>> order of ioctl submission?
>>>>>>>
>>>>>>> Do we _have_ to implement this on the kernel side and could just 
>>>>>>> allow in/out fence and let userspace deal with it?
>>>>>>
>>>>>>
>>>>>> It orders operations like in a queue. Which is kind of what 
>>>>>> happens with existing queues/engines.
>>>>>>
>>>>>> If I understood correctly, it's going to be a kthread + a linked 
>>>>>> list right?
>>>>>>
>>>>>
>>>>> Yes, that is correct.
>>>>>
>>>>>>
>>>>>> -Lionel
>>>>>>
>>>>>>
>>>>>>>
>>>>>>> An arbitrary/on-demand number of queues will add complexity on 
>>>>>>> the kernel side which should be avoided if possible.
>>>>>>>
>>>>>
>>>>> It was discussed in the other thread. Jason prefers this over putting
>>>>> an artificial limit on the number of queues (as the user can anyway 
>>>>> exhaust
>>>>> the memory). I think complexity in the driver is manageable.
>>>>
>>>> You'll need to create tracking structures on demand, with atomic 
>>>> replace of last fence, ref counting and locking of some sort, more 
>>>> or less?
>>>>
>>>
>>> We will have a workqueue, a work item and a linked list per queue.
>>> VM_BIND/UNBIND call will add the mapping request to the specified 
>>> queue's
>>> linked list and schedule the work item on the workqueue of that queue.
>>> I am not sure what you mean by last fence and replacing it.
>>>
>>>>> The other option being discussed is to have the user create those
>>>>> queues (like creating engine map) before hand and use that in vm_bind
>>>>> and vm_unbind ioctls. This puts a limit on the number of queues.
>>>>> But it is not clean either and not sure it is worth making the 
>>>>> interface
>>>>> more complex.
>>>>> https://www.spinics.net/lists/dri-devel/msg350448.html
>>>>
>>>> What about the third option of a flag to return a fence (of some 
>>>> sort) and pass in a fence? That way userspace can imagine zero or N 
>>>> queues with very little effort on the kernel side. Was this considered?
>>>>
>>>
>>> I am not clear what fence you are talking about here and how does that
>>> help with the number of vm_bind queues. Can you elaborate?
>>
>> It is actually already documented that bind/unbind will support input 
>> and output fences - so what are these queues on top of what userspace 
>> can already achieve by using them? Purely a convenience, or is there 
>> more to it?
>>
> 
> Oh, the vm_bind queues are discussed in this thread.
> https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html
> 
> Apparently Vulkan has a requirement for multiple queues, each queue
> processing vm_bind/unbind calls in the order of submission.

I don't see how that answers my question so I will take the liberty of 
repeating it. What are these queues on top of what userspace can already 
achieve by using in-out fences? Purely a convenience, or is there more to it?

Queue1:

out_fence_A = vm_bind A
out_fence_B = vm_bind B, in_fence=out_fence_A
execbuf(in_fence = out_fence_B)

Queue2:

out_fence_C = vm_bind C
out_fence_D = vm_bind D, in_fence=out_fence_C
execbuf(in_fence = out_fence_D)

Parallel bind:
out_fence_E = vm_bind E
out_fence_F = vm_bind F
merged_fence = fence_merge(out_fence_E, out_fence_F)
execbuf(in_fence = merged_fence)

Regards,

Tvrtko
Matthew Brost June 13, 2022, 11:39 p.m. UTC | #13
On Mon, Jun 13, 2022 at 07:09:06PM +0100, Tvrtko Ursulin wrote:
> 
> On 13/06/2022 18:49, Niranjana Vishwanathapura wrote:
> > On Mon, Jun 13, 2022 at 05:22:02PM +0100, Tvrtko Ursulin wrote:
> > > 
> > > On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
> > > > On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
> > > > > 
> > > > > On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
> > > > > > On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
> > > > > > > On 10/06/2022 13:37, Tvrtko Ursulin wrote:
> > > > > > > > 
> > > > > > > > On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
> > > > > > > > > VM_BIND and related uapi definitions
> > > > > > > > > 
> > > > > > > > > Signed-off-by: Niranjana Vishwanathapura
> > > > > > > > > <niranjana.vishwanathapura@intel.com>
> > > > > > > > > ---
> > > > > > > > >   Documentation/gpu/rfc/i915_vm_bind.h | 490
> > > > > > > > > +++++++++++++++++++++++++++
> > > > > > > > >   1 file changed, 490 insertions(+)
> > > > > > > > >   create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
> > > > > > > > > 
> > > > > > > > > diff --git
> > > > > > > > > a/Documentation/gpu/rfc/i915_vm_bind.h
> > > > > > > > > b/Documentation/gpu/rfc/i915_vm_bind.h
> > > > > > > > > new file mode 100644
> > > > > > > > > index 000000000000..9fc854969cfb
> > > > > > > > > --- /dev/null
> > > > > > > > > +++ b/Documentation/gpu/rfc/i915_vm_bind.h
> > > > > > > > > @@ -0,0 +1,490 @@
> > > > > > > > > +/* SPDX-License-Identifier: MIT */
> > > > > > > > > +/*
> > > > > > > > > + * Copyright © 2022 Intel Corporation
> > > > > > > > > + */
> > > > > > > > > +
> > > > > > > > > +/**
> > > > > > > > > + * DOC: I915_PARAM_HAS_VM_BIND
> > > > > > > > > + *
> > > > > > > > > + * VM_BIND feature availability.
> > > > > > > > > + * See typedef drm_i915_getparam_t param.
> > > > > > > > > + * bit[0]: If set, VM_BIND is supported, otherwise not.
> > > > > > > > > + * bits[8-15]: VM_BIND implementation version.
> > > > > > > > > + * version 0 will not have VM_BIND/UNBIND
> > > > > > > > > timeline fence array support.
> > > > > > > > > + */
> > > > > > > > > +#define I915_PARAM_HAS_VM_BIND        57
> > > > > > > > > +
> > > > > > > > > +/**
> > > > > > > > > + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
> > > > > > > > > + *
> > > > > > > > > + * Flag to opt-in for VM_BIND mode of binding during VM creation.
> > > > > > > > > + * See struct drm_i915_gem_vm_control flags.
> > > > > > > > > + *
> > > > > > > > > + * The older execbuf2 ioctl will not
> > > > > > > > > support VM_BIND mode of operation.
> > > > > > > > > + * For VM_BIND mode, we have new execbuf3
> > > > > > > > > ioctl which will not accept any
> > > > > > > > > + * execlist (See struct
> > > > > > > > > drm_i915_gem_execbuffer3 for more details).
> > > > > > > > > + *
> > > > > > > > > + */
> > > > > > > > > +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
> > > > > > > > > +
> > > > > > > > > +/**
> > > > > > > > > + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
> > > > > > > > > + *
> > > > > > > > > + * Flag to declare context as long running.
> > > > > > > > > + * See struct drm_i915_gem_context_create_ext flags.
> > > > > > > > > + *
> > > > > > > > > + * Usage of dma-fence expects that they
> > > > > > > > > complete in reasonable amount of time.
> > > > > > > > > + * Compute on the other hand can be long
> > > > > > > > > running. Hence it is not appropriate
> > > > > > > > > + * for compute contexts to export request
> > > > > > > > > completion dma-fence to user.
> > > > > > > > > + * The dma-fence usage will be limited to
> > > > > > > > > in-kernel consumption only.
> > > > > > > > > + * Compute contexts need to use user/memory fence.
> > > > > > > > > + *
> > > > > > > > > + * So, long running contexts do not support output fences. Hence,
> > > > > > > > > + * I915_EXEC_FENCE_SIGNAL (See
> > > > > > > > > &drm_i915_gem_exec_fence.flags) is expected
> > > > > > > > > + * to be not used. DRM_I915_GEM_WAIT ioctl
> > > > > > > > > call is also not supported for
> > > > > > > > > + * objects mapped to long running contexts.
> > > > > > > > > + */
> > > > > > > > > +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
> > > > > > > > > +
> > > > > > > > > +/* VM_BIND related ioctls */
> > > > > > > > > +#define DRM_I915_GEM_VM_BIND        0x3d
> > > > > > > > > +#define DRM_I915_GEM_VM_UNBIND        0x3e
> > > > > > > > > +#define DRM_I915_GEM_EXECBUFFER3    0x3f
> > > > > > > > > +#define DRM_I915_GEM_WAIT_USER_FENCE    0x40
> > > > > > > > > +
> > > > > > > > > +#define DRM_IOCTL_I915_GEM_VM_BIND
> > > > > > > > > DRM_IOWR(DRM_COMMAND_BASE +
> > > > > > > > > DRM_I915_GEM_VM_BIND, struct
> > > > > > > > > drm_i915_gem_vm_bind)
> > > > > > > > > +#define DRM_IOCTL_I915_GEM_VM_UNBIND
> > > > > > > > > DRM_IOWR(DRM_COMMAND_BASE +
> > > > > > > > > DRM_I915_GEM_VM_UNBIND, struct
> > > > > > > > > drm_i915_gem_vm_bind)
> > > > > > > > > +#define DRM_IOCTL_I915_GEM_EXECBUFFER3
> > > > > > > > > DRM_IOWR(DRM_COMMAND_BASE +
> > > > > > > > > DRM_I915_GEM_EXECBUFFER3, struct
> > > > > > > > > drm_i915_gem_execbuffer3)
> > > > > > > > > +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE
> > > > > > > > > DRM_IOWR(DRM_COMMAND_BASE +
> > > > > > > > > DRM_I915_GEM_WAIT_USER_FENCE, struct
> > > > > > > > > drm_i915_gem_wait_user_fence)
> > > > > > > > > +
> > > > > > > > > +/**
> > > > > > > > > + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
> > > > > > > > > + *
> > > > > > > > > + * This structure is passed to VM_BIND
> > > > > > > > > ioctl and specifies the mapping of GPU
> > > > > > > > > + * virtual address (VA) range to the
> > > > > > > > > section of an object that should be bound
> > > > > > > > > + * in the device page table of the specified address space (VM).
> > > > > > > > > + * The VA range specified must be unique
> > > > > > > > > (i.e., not currently bound) and can
> > > > > > > > > + * be mapped to the whole object or a section
> > > > > > > > > of the object (partial binding).
> > > > > > > > > + * Multiple VA mappings can be created to
> > > > > > > > > the same section of the object
> > > > > > > > > + * (aliasing).
> > > > > > > > > + *
> > > > > > > > > + * The @queue_idx specifies the queue to
> > > > > > > > > use for binding. Same queue can be
> > > > > > > > > + * used for both VM_BIND and VM_UNBIND
> > > > > > > > > calls. All submitted bind and unbind
> > > > > > > > > + * operations in a queue are performed in the order of submission.
> > > > > > > > > + *
> > > > > > > > > + * The @start, @offset and @length should
> > > > > > > > > be 4K page aligned. However, DG2
> > > > > > > > > + * and XEHPSDV have a 64K page size for device
> > > > > > > > > local-memory and a compact page
> > > > > > > > > + * table. On those platforms, for binding
> > > > > > > > > device local-memory objects, the
> > > > > > > > > + * @start should be 2M aligned, @offset and
> > > > > > > > > @length should be 64K aligned.
> > > > > > > > > + * Also, on those platforms, it is not
> > > > > > > > > allowed to bind a device local-memory
> > > > > > > > > + * object and a system memory object in a
> > > > > > > > > single 2M section of VA range.
> > > > > > > > > + */
> > > > > > > > > +struct drm_i915_gem_vm_bind {
> > > > > > > > > +    /** @vm_id: VM (address space) id to bind */
> > > > > > > > > +    __u32 vm_id;
> > > > > > > > > +
> > > > > > > > > +    /** @queue_idx: Index of queue for binding */
> > > > > > > > > +    __u32 queue_idx;
> > > > > > > > 
> > > > > > > > I have a question here to which I did not find
> > > > > > > > an answer by browsing the old threads.
> > > > > > > > 
> > > > > > > > Queue index appears to be an implicit
> > > > > > > > synchronisation mechanism, right? Operations on
> > > > > > > > the same index are executed/complete in order of
> > > > > > > > ioctl submission?
> > > > > > > > 
> > > > > > > > Do we _have_ to implement this on the kernel
> > > > > > > > side and could just allow in/out fence and let
> > > > > > > > userspace deal with it?
> > > > > > > 
> > > > > > > 
> > > > > > > It orders operations like in a queue. Which is kind
> > > > > > > of what happens with existing queues/engines.
> > > > > > > 
> > > > > > > If I understood correctly, it's going to be a
> > > > > > > kthread + a linked list right?
> > > > > > > 
> > > > > > 
> > > > > > Yes, that is correct.
> > > > > > 
> > > > > > > 
> > > > > > > -Lionel
> > > > > > > 
> > > > > > > 
> > > > > > > > 
> > > > > > > > An arbitrary/on-demand number of queues will add
> > > > > > > > complexity on the kernel side which should
> > > > > > > > be avoided if possible.
> > > > > > > > 
> > > > > > 
> > > > > > It was discussed in the other thread. Jason prefers this over putting
> > > > > > an artificial limit on the number of queues (as the user can
> > > > > > anyway exhaust
> > > > > > the memory). I think complexity in the driver is manageable.
> > > > > 
> > > > > You'll need to create tracking structures on demand, with
> > > > > atomic replace of last fence, ref counting and locking of
> > > > > some sort, more or less?
> > > > > 
> > > > 
> > > > We will have a workqueue, a work item and a linked list per queue.
> > > > VM_BIND/UNBIND call will add the mapping request to the
> > > > specified queue's
> > > > linked list and schedule the work item on the workqueue of that queue.
> > > > I am not sure what you mean by last fence and replacing it.
> > > > 
> > > > > > The other option being discussed is to have the user create those
> > > > > > queues (like creating engine map) before hand and use that in vm_bind
> > > > > > and vm_unbind ioctls. This puts a limit on the number of queues.
> > > > > > But it is not clean either and not sure it is worth
> > > > > > making the interface
> > > > > > more complex.
> > > > > > https://www.spinics.net/lists/dri-devel/msg350448.html
> > > > > 
> > > > > What about the third option of a flag to return a fence (of
> > > > > some sort) and pass in a fence? That way userspace can
> > > > > imagine zero or N queues with very little effort on the
> > > > > kernel side. Was this considered?
> > > > > 
> > > > 
> > > > I am not clear what fence you are talking about here and how does that
> > > > help with the number of vm_bind queues. Can you elaborate?
> > > 
> > > It is actually already documented that bind/unbind will support
> > > input and output fences - so what are these queues on top of what
> > > userspace can already achieve by using them? Purely a convenience,
> > > or is there more to it?
> > > 
> > 
> > Oh, the vm_bind queues are discussed in this thread.
> > https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html
> > 
> > Apparently Vulkan has a requirement for multiple queues, each queue
> > processing vm_bind/unbind calls in the order of submission.
> 
> I don't see how that answers my question so I will take the liberty of
> repeating it. What are these queues on top of what userspace can already
> achieve by using in-out fences? Purely a convenience, or is there more to it?
> 
> Queue1:
> 
> out_fence_A = vm_bind A
> out_fence_B = vm_bind B, in_fence=out_fence_A
> execbuf(in_fence = out_fence_B)
> 
> Queue2:
> 
> out_fence_C = vm_bind C
> out_fence_D = vm_bind D, in_fence=out_fence_C
> execbuf(in_fence = out_fence_D)
> 
> Parallel bind:
> out_fence_E = vm_bind E
> out_fence_F = vm_bind F
> merged_fence = fence_merge(out_fence_E, out_fence_F)
> execbuf(in_fence = merged_fence)
> 

Let's say you do this with only 1 queue:

VM_BIND_A (in_fence=fence_A)
VM_BIND_B (in_fence=NULL)

With 1 queue, VM_BIND_B is blocked on fence_A, hence the need for more than
1 queue.

e.g.
 
VM_BIND_A (queue_id=0, in_fence=fence_A)
VM_BIND_B (queue_id=1, in_fence=NULL)

Now VM_BIND_B can immediately be executed regardless of fence_A status.
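In terms of the uapi in this RFC that is just (a sketch; vm_id, bo_b, va_b
and bo_b_size are hypothetical values, the struct and ioctl are from the
header above):

	struct drm_i915_gem_vm_bind bind_b = {
		.vm_id = vm_id,
		.queue_idx = 1,		/* not serialized behind queue 0 */
		.handle = bo_b,
		.start = va_b,
		.offset = 0,
		.length = bo_b_size,
		/* no in-fence extension chained, so nothing to wait for */
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind_b);

so B's queue never stalls on fence_A.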

Matt

> Regards,
> 
> Tvrtko
Lionel Landwerlin June 14, 2022, 6:27 a.m. UTC | #14
On 10/06/2022 11:53, Matthew Brost wrote:
> On Fri, Jun 10, 2022 at 12:07:11AM -0700, Niranjana Vishwanathapura wrote:
>> VM_BIND and related uapi definitions
>>
>> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
>> ---
>>   Documentation/gpu/rfc/i915_vm_bind.h | 490 +++++++++++++++++++++++++++
>>   1 file changed, 490 insertions(+)
>>   create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>
>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
>> new file mode 100644
>> index 000000000000..9fc854969cfb
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>> @@ -0,0 +1,490 @@
>> +/* SPDX-License-Identifier: MIT */
>> +/*
>> + * Copyright © 2022 Intel Corporation
>> + */
>> +
>> +/**
>> + * DOC: I915_PARAM_HAS_VM_BIND
>> + *
>> + * VM_BIND feature availability.
>> + * See typedef drm_i915_getparam_t param.
>> + * bit[0]: If set, VM_BIND is supported, otherwise not.
>> + * bits[8-15]: VM_BIND implementation version.
>> + * version 0 will not have VM_BIND/UNBIND timeline fence array support.
>> + */
>> +#define I915_PARAM_HAS_VM_BIND		57
>> +
>> +/**
>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>> + *
>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>> + * See struct drm_i915_gem_vm_control flags.
>> + *
>> + * The older execbuf2 ioctl will not support VM_BIND mode of operation.
>> + * For VM_BIND mode, we have new execbuf3 ioctl which will not accept any
>> + * execlist (See struct drm_i915_gem_execbuffer3 for more details).
>> + *
>> + */
>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
>> +
>> +/**
>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>> + *
>> + * Flag to declare context as long running.
>> + * See struct drm_i915_gem_context_create_ext flags.
>> + *
>> + * Usage of dma-fence expects that they complete in reasonable amount of time.
>> + * Compute on the other hand can be long running. Hence it is not appropriate
>> + * for compute contexts to export request completion dma-fence to user.
>> + * The dma-fence usage will be limited to in-kernel consumption only.
>> + * Compute contexts need to use user/memory fence.
>> + *
>> + * So, long running contexts do not support output fences. Hence,
>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) is expected
>> + * to be not used. DRM_I915_GEM_WAIT ioctl call is also not supported for
>> + * objects mapped to long running contexts.
>> + */
>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>> +
>> +/* VM_BIND related ioctls */
>> +#define DRM_I915_GEM_VM_BIND		0x3d
>> +#define DRM_I915_GEM_VM_UNBIND		0x3e
>> +#define DRM_I915_GEM_EXECBUFFER3	0x3f
>> +#define DRM_I915_GEM_WAIT_USER_FENCE	0x40
>> +
>> +#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>> +#define DRM_IOCTL_I915_GEM_EXECBUFFER3		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
>> +
>> +/**
>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>> + *
>> + * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
>> + * virtual address (VA) range to the section of an object that should be bound
>> + * in the device page table of the specified address space (VM).
>> + * The VA range specified must be unique (i.e., not currently bound) and can
>> + * be mapped to the whole object or a section of the object (partial binding).
>> + * Multiple VA mappings can be created to the same section of the object
>> + * (aliasing).
>> + *
>> + * The @queue_idx specifies the queue to use for binding. Same queue can be
>> + * used for both VM_BIND and VM_UNBIND calls. All submitted bind and unbind
>> + * operations in a queue are performed in the order of submission.
>> + *
>> + * The @start, @offset and @length should be 4K page aligned. However, DG2
>> + * and XEHPSDV have a 64K page size for device local-memory and a compact page
>> + * table. On those platforms, for binding device local-memory objects, the
>> + * @start should be 2M aligned, @offset and @length should be 64K aligned.
>> + * Also, on those platforms, it is not allowed to bind an device local-memory
>> + * object and a system memory object in a single 2M section of VA range.
>> + */
>> +struct drm_i915_gem_vm_bind {
>> +	/** @vm_id: VM (address space) id to bind */
>> +	__u32 vm_id;
>> +
>> +	/** @queue_idx: Index of queue for binding */
>> +	__u32 queue_idx;
>> +
>> +	/** @rsvd: Reserved, MBZ */
>> +	__u32 rsvd;
>> +
>> +	/** @handle: Object handle */
>> +	__u32 handle;
>> +
>> +	/** @start: Virtual Address start to bind */
>> +	__u64 start;
>> +
>> +	/** @offset: Offset in object to bind */
>> +	__u64 offset;
>> +
>> +	/** @length: Length of mapping to bind */
>> +	__u64 length;
> This probably isn't needed. We are never going to unbind a subset of a
> VMA are we? That being said it can't hurt as a sanity check (e.g.
> internal vma->length == user unbind length).


Not sure what you mean by that.

Vulkan can unbind a whole range of addresses and it seems like there is
no restriction on doing something like this:

bind vma=0x1000000 GEMBO=3 offset=0 range=8192
unbind vma=0x1001000 range=4096

You would be left with a single 4k page of GEMBO=3 bound at vma=0x1000000.

Or:

bind vma=0x1000000 GEMBO=3 offset=0 range=4096
bind vma=0x1001000 GEMBO=4 offset=0 range=4096
unbind vma=0x1000000 range=8192

You're unbinding 2 bindings with a single operation.

-Lionel


>
>> +
>> +	/**
>> +	 * @flags: Supported flags are:
>> +	 *
>> +	 * I915_GEM_VM_BIND_READONLY:
>> +	 * Mapping is read-only.
>> +	 *
>> +	 * I915_GEM_VM_BIND_CAPTURE:
>> +	 * Capture this mapping in the dump upon GPU error.
>> +	 */
>> +	__u64 flags;
>> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>> +
>> +	/**
>> +	 * @extensions: 0-terminated chain of extensions for this operation.
>> +	 *
>> +	 * I915_VM_BIND_EXT_TIMELINE_FENCES:
>> +	 * Specifies an array of input or output timeline fences for this
>> +	 * binding operation. See struct drm_i915_vm_bind_ext_timeline_fences.
>> +	 *
>> +	 * I915_VM_BIND_EXT_USER_FENCES:
>> +	 * Specifies an array of input or output user fences for this
>> +	 * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
>> +	 * This is required for compute contexts.
>> +	 */
>> +	__u64 extensions;
>> +#define I915_VM_BIND_EXT_TIMELINE_FENCES	0
>> +#define I915_VM_BIND_EXT_USER_FENCES		1
>> +};
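
For illustration, a minimal userspace bind using the ioctl above might look
like the following sketch (error handling omitted; fd, vm_id, bo_handle and
bo_size are assumed to come from prior DRM calls):

	struct drm_i915_gem_vm_bind bind = {
		.vm_id = vm_id,       /* VM created with I915_VM_CREATE_FLAGS_USE_VM_BIND */
		.queue_idx = 0,       /* bind queue 0 */
		.handle = bo_handle,  /* GEM object to map */
		.start = 0x100000,    /* GPU VA, 4K aligned */
		.offset = 0,          /* offset into the object, 4K aligned */
		.length = bo_size,    /* length of the mapping, 4K aligned */
		.flags = 0,
		.extensions = 0,      /* no fence extensions chained */
	};

	ret = ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);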
>> +
>> +/**
>> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>> + *
>> + * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
>> + * address (VA) range that should be unbound from the device page table of the
>> + * specified address space (VM). The specified VA range must match one of the
>> + * mappings created with the VM_BIND ioctl. The TLB is flushed upon unbind
>> + * completion.
>> + *
>> + * The @queue_idx specifies the queue to use for unbinding.
>> + * See struct drm_i915_gem_vm_bind for more information on @queue_idx.
>> + *
>> + * The @start and @length must specify a unique mapping bound with the VM_BIND
>> + * ioctl.
>> + */
>> +struct drm_i915_gem_vm_unbind {
>> +	/** @vm_id: VM (address space) id to unbind */
>> +	__u32 vm_id;
>> +
>> +	/** @queue_idx: Index of queue for unbinding */
>> +	__u32 queue_idx;
>> +
>> +	/** @start: Virtual Address start to unbind */
>> +	__u64 start;
>> +
>> +	/** @length: Length of mapping to unbind */
>> +	__u64 length;
>> +
>> +	/** @flags: Reserved for future usage, currently MBZ */
>> +	__u64 flags;
>> +
>> +	/**
>> +	 * @extensions: 0-terminated chain of extensions for this operation.
>> +	 *
>> +	 * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
>> +	 * Specifies an array of input or output timeline fences for this
>> +	 * unbind operation.
>> +	 * It has the same format as struct drm_i915_vm_bind_ext_timeline_fences.
>> +	 *
>> +	 * I915_VM_UNBIND_EXT_USER_FENCES:
>> +	 * Specifies an array of input or output user fences for this
>> +	 * unbind operation. This is required for compute contexts.
>> +	 * It has the same format as struct drm_i915_vm_bind_ext_user_fence.
>> +	 */
>> +	__u64 extensions;
>> +#define I915_VM_UNBIND_EXT_TIMELINE_FENCES	0
>> +#define I915_VM_UNBIND_EXT_USER_FENCES		1
>> +};
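
A matching unbind of the mapping created in the bind sketch above might look
like this (@start and @length must exactly match the earlier bind):

	struct drm_i915_gem_vm_unbind unbind = {
		.vm_id = vm_id,
		.queue_idx = 0,
		.start = 0x100000,    /* must match the bound mapping */
		.length = bo_size,    /* must match the bound mapping */
	};

	ret = ioctl(fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind);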
>> +
>> +/**
>> + * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
>> + * or the vm_unbind work.
>> + *
>> + * The vm_bind or vm_unbind async worker will wait for the input fence to
>> + * signal before starting the binding or unbinding.
>> + *
>> + * The vm_bind or vm_unbind async worker will signal the returned output fence
>> + * after the completion of binding or unbinding.
>> + */
>> +struct drm_i915_vm_bind_fence {
>> +	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
>> +	__u32 handle;
>> +
>> +	/**
>> +	 * @flags: Supported flags are:
>> +	 *
>> +	 * I915_VM_BIND_FENCE_WAIT:
>> +	 * Wait for the input fence before binding/unbinding
>> +	 *
>> +	 * I915_VM_BIND_FENCE_SIGNAL:
>> +	 * Return bind/unbind completion fence as output
>> +	 */
>> +	__u32 flags;
>> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>> +};
>> +
>> +/**
>> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
>> + * and vm_unbind.
>> + *
>> + * This structure describes an array of timeline drm_syncobj and associated
>> + * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
>> + * can be input or output fences (See struct drm_i915_vm_bind_fence).
>> + */
>> +struct drm_i915_vm_bind_ext_timeline_fences {
>> +	/** @base: Extension link. See struct i915_user_extension. */
>> +	struct i915_user_extension base;
>> +
>> +	/**
>> +	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
>> +	 * arrays.
>> +	 */
>> +	__u64 fence_count;
>> +
>> +	/**
>> +	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
>> +	 * of length @fence_count.
>> +	 */
>> +	__u64 handles_ptr;
>> +
>> +	/**
>> +	 * @values_ptr: Pointer to an array of u64 values of length
>> +	 * @fence_count.
>> +	 * Values must be 0 for a binary drm_syncobj. A value of 0 for a
>> +	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
>> +	 * binary one.
>> +	 */
>> +	__u64 values_ptr;
>> +};
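
As a sketch, requesting a bind completion fence through this extension could
look as follows; out_syncobj is assumed to be a binary drm_syncobj created
earlier with DRM_IOCTL_SYNCOBJ_CREATE, and bind is the vm_bind structure from
the earlier sketch, filled in before issuing the ioctl:

	struct drm_i915_vm_bind_fence fence = {
		.handle = out_syncobj,
		.flags = I915_VM_BIND_FENCE_SIGNAL,  /* output fence */
	};
	__u64 value = 0;                             /* 0 => binary syncobj */
	struct drm_i915_vm_bind_ext_timeline_fences ext = {
		.base.name = I915_VM_BIND_EXT_TIMELINE_FENCES,
		.fence_count = 1,
		.handles_ptr = (__u64)(uintptr_t)&fence,
		.values_ptr = (__u64)(uintptr_t)&value,
	};

	bind.extensions = (__u64)(uintptr_t)&ext;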
>> +
>> +/**
>> + * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
>> + * vm_bind or the vm_unbind work.
>> + *
>> + * The vm_bind or vm_unbind async worker will wait for the input fence (the
>> + * value at @addr to become equal to @val) before starting the binding or
>> + * unbinding.
>> + *
>> + * The vm_bind or vm_unbind async worker will signal the output fence after
>> + * the completion of binding or unbinding by writing @val to the memory
>> + * location at @addr.
>> + */
>> +struct drm_i915_vm_bind_user_fence {
>> +	/** @addr: User/Memory fence qword aligned process virtual address */
>> +	__u64 addr;
>> +
>> +	/** @val: User/Memory fence value to be written after bind completion */
>> +	__u64 val;
>> +
>> +	/**
>> +	 * @flags: Supported flags are:
>> +	 *
>> +	 * I915_VM_BIND_USER_FENCE_WAIT:
>> +	 * Wait for the input fence before binding/unbinding
>> +	 *
>> +	 * I915_VM_BIND_USER_FENCE_SIGNAL:
>> +	 * Return bind/unbind completion fence as output
>> +	 */
>> +	__u32 flags;
>> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>> +	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>> +};
>> +
>> +/**
>> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
>> + * and vm_unbind.
>> + *
>> + * These user fences can be input or output fences
>> + * (See struct drm_i915_vm_bind_user_fence).
>> + */
>> +struct drm_i915_vm_bind_ext_user_fence {
>> +	/** @base: Extension link. See struct i915_user_extension. */
>> +	struct i915_user_extension base;
>> +
>> +	/** @fence_count: Number of elements in the @user_fence_ptr array. */
>> +	__u64 fence_count;
>> +
>> +	/**
>> +	 * @user_fence_ptr: Pointer to an array of
>> +	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
>> +	 */
>> +	__u64 user_fence_ptr;
>> +};
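
A sketch of the compute-context variant, attaching a single user fence to a
bind; fence_addr is assumed to be a qword aligned address in the process
address space:

	struct drm_i915_vm_bind_user_fence ufence = {
		.addr = fence_addr,
		.val = 1,                                /* written on bind completion */
		.flags = I915_VM_BIND_USER_FENCE_SIGNAL, /* output fence */
	};
	struct drm_i915_vm_bind_ext_user_fence ext = {
		.base.name = I915_VM_BIND_EXT_USER_FENCES,
		.fence_count = 1,
		.user_fence_ptr = (__u64)(uintptr_t)&ufence,
	};

	bind.extensions = (__u64)(uintptr_t)&ext;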
>> +
>> +/**
>> + * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
>> + * ioctl.
>> + *
>> + * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and VM_BIND mode
>> + * only works with this ioctl for submission.
>> + * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
>> + */
>> +struct drm_i915_gem_execbuffer3 {
>> +	/**
>> +	 * @ctx_id: Context id
>> +	 *
>> +	 * Only contexts with a user engine map are allowed.
>> +	 */
>> +	__u32 ctx_id;
>> +
>> +	/**
>> +	 * @engine_idx: Engine index
>> +	 *
>> +	 * An index in the user engine map of the context specified by @ctx_id.
>> +	 */
>> +	__u32 engine_idx;
>> +
>> +	/** @rsvd1: Reserved, MBZ */
>> +	__u32 rsvd1;
>> +
>> +	/**
>> +	 * @batch_count: Number of batches in @batch_address array.
>> +	 *
>> +	 * 0 is invalid. For parallel submission, it should be equal to the
>> +	 * number of (parallel) engines involved in that submission.
>> +	 */
>> +	__u32 batch_count;
>> +
>> +	/**
>> +	 * @batch_address: Array of batch gpu virtual addresses.
>> +	 *
>> +	 * If @batch_count is 1, then it is the gpu virtual address of the
>> +	 * batch buffer. If @batch_count > 1, then it is a pointer to an array
>> +	 * of batch buffer gpu virtual addresses.
>> +	 */
>> +	__u64 batch_address;
>> +
>> +	/**
>> +	 * @flags: Supported flags are:
>> +	 *
>> +	 * I915_EXEC3_SECURE:
>> +	 * Request privileged ("secure") batch buffer(s).
>> +	 * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
>> +	 */
>> +	__u64 flags;
>> +#define I915_EXEC3_SECURE	(1<<0)
>> +
>> +	/** @rsvd2: Reserved, MBZ */
>> +	__u64 rsvd2;
>> +
>> +	/**
>> +	 * @extensions: Zero-terminated chain of extensions.
>> +	 *
>> +	 * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
>> +	 * It has the same format as DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
>> +	 * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
>> +	 *
>> +	 * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
>> +	 * First level batch completion signaling extension.
>> +	 * See struct drm_i915_gem_execbuffer3_ext_user_fence.
>> +	 */
>> +	__u64 extensions;
>> +#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES	0
>> +#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE		1
>> +};
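
A minimal submission through this ioctl might look like the following sketch
(the batch is assumed to already be bound at batch_va via VM_BIND, and ctx_id
to be a VM_BIND mode context with a user engine map):

	struct drm_i915_gem_execbuffer3 eb = {
		.ctx_id = ctx_id,
		.engine_idx = 0,           /* index into the user engine map */
		.batch_count = 1,
		.batch_address = batch_va, /* GPU VA of the batch buffer */
	};

	ret = ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER3, &eb);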
>> +
>> +/**
>> + * struct drm_i915_gem_execbuffer3_ext_user_fence - First level batch completion
>> + * signaling extension.
>> + *
>> + * This extension allows the user to attach a user fence (@addr, @value pair)
>> + * to execbuf3, to be signaled by the command streamer after the completion of
>> + * the first level batch, by writing @value at the specified @addr and
>> + * triggering an interrupt.
>> + * The user can either poll for this user fence to signal or wait on it with
>> + * the i915_gem_wait_user_fence ioctl.
>> + * This is especially useful for long running contexts, where user space
>> + * waiting on a dma-fence (like the i915_gem_wait ioctl) is not supported.
>> + */
>> +struct drm_i915_gem_execbuffer3_ext_user_fence {
>> +	/** @base: Extension link. See struct i915_user_extension. */
>> +	struct i915_user_extension base;
>> +
>> +	/**
>> +	 * @addr: User/Memory fence qword aligned GPU virtual address.
>> +	 *
>> +	 * Address has to be a valid GPU virtual address at the time of
>> +	 * first level batch completion.
>> +	 */
>> +	__u64 addr;
>> +
>> +	/**
>> +	 * @value: User/Memory fence value to be written to the above address
>> +	 * after the first level batch completes.
>> +	 */
>> +	__u64 value;
>> +
>> +	/** @rsvd: Reserved, MBZ */
>> +	__u64 rsvd;
>> +};
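
For illustration, attaching this extension to the execbuf3 sketch above;
fence_va is assumed to be a qword aligned GPU VA that is still bound when the
first level batch completes:

	struct drm_i915_gem_execbuffer3_ext_user_fence ufence_ext = {
		.base.name = DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE,
		.addr = fence_va,
		.value = seqno,  /* written when the first level batch completes */
	};

	eb.extensions = (__u64)(uintptr_t)&ufence_ext;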
>> +
> IMO all of these fence structs should be a generic sync interface shared
> between both vm bind and exec3 rather than unique extenisons.
>
> Both vm bind and exec3 should have something like this:
>
> __u64 syncs;	/* userptr to an array of generic syncs */
> __u64 n_syncs;
>
> Having an array of syncs lets the kernel do one user copy for all the
> syncs rather than reading them in a a chain.
>
> A generic sync object encapsulates all possible syncs (in / out -
> syncobj, syncobj timeline, ufence, future sync concepts).
>
> e.g.
>
> struct {
> 	__u32 user_ext;
> 	__u32 flags;	/* in / out, type, whatever else info we need */
> 	union {
> 		__u32 handle; 	/* to syncobj */
> 		__u64 addr; 	/* ufence address */
> 	};
> 	__u64 seqno;	/* syncobj timeline, ufence write value */
> 	/* ...reserve enough bits for future... */
> };
>
> This unifies binds and execs by using the same sync interface
> instilling the concept that binds and execs are the same op (queue'd
> operation /w in/out fences).
>
> Matt
>
>> +/**
>> + * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
>> + * private to the specified VM.
>> + *
>> + * See struct drm_i915_gem_create_ext.
>> + */
>> +struct drm_i915_gem_create_ext_vm_private {
>> +#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
>> +	/** @base: Extension link. See struct i915_user_extension. */
>> +	struct i915_user_extension base;
>> +
>> +	/** @vm_id: Id of the VM to which the object is private */
>> +	__u32 vm_id;
>> +};
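
A sketch of creating a VM-private object with this extension, via the
existing gem_create_ext ioctl:

	struct drm_i915_gem_create_ext_vm_private priv = {
		.base.name = I915_GEM_CREATE_EXT_VM_PRIVATE,
		.vm_id = vm_id,  /* the object will be private to this VM */
	};
	struct drm_i915_gem_create_ext create = {
		.size = 4096,
		.extensions = (__u64)(uintptr_t)&priv,
	};

	ret = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);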
>> +
>> +/**
>> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>> + *
>> + * A wait on a user/memory fence can be woken up either by:
>> + *
>> + * 1. The GPU context indicated by @ctx_id, or,
>> + * 2. The kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>> + *    @ctx_id is ignored when this flag is set.
>> + *
>> + * Wakeup condition is,
>> + * ``((*addr & mask) op (value & mask))``
>> + *
>> + * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
>> + */
>> +struct drm_i915_gem_wait_user_fence {
>> +	/** @extensions: Zero-terminated chain of extensions. */
>> +	__u64 extensions;
>> +
>> +	/** @addr: User/Memory fence address */
>> +	__u64 addr;
>> +
>> +	/** @ctx_id: Id of the Context which will signal the fence. */
>> +	__u32 ctx_id;
>> +
>> +	/** @op: Wakeup condition operator */
>> +	__u16 op;
>> +#define I915_UFENCE_WAIT_EQ      0
>> +#define I915_UFENCE_WAIT_NEQ     1
>> +#define I915_UFENCE_WAIT_GT      2
>> +#define I915_UFENCE_WAIT_GTE     3
>> +#define I915_UFENCE_WAIT_LT      4
>> +#define I915_UFENCE_WAIT_LTE     5
>> +#define I915_UFENCE_WAIT_BEFORE  6
>> +#define I915_UFENCE_WAIT_AFTER   7
>> +
>> +	/**
>> +	 * @flags: Supported flags are:
>> +	 *
>> +	 * I915_UFENCE_WAIT_SOFT:
>> +	 *
>> +	 * To be woken up by i915 driver async worker (not by GPU).
>> +	 *
>> +	 * I915_UFENCE_WAIT_ABSTIME:
>> +	 *
>> +	 * Wait timeout specified as absolute time.
>> +	 */
>> +	__u16 flags;
>> +#define I915_UFENCE_WAIT_SOFT    0x1
>> +#define I915_UFENCE_WAIT_ABSTIME 0x2
>> +
>> +	/** @value: Wakeup value */
>> +	__u64 value;
>> +
>> +	/** @mask: Wakeup mask */
>> +	__u64 mask;
>> +#define I915_UFENCE_WAIT_U8     0xffu
>> +#define I915_UFENCE_WAIT_U16    0xffffu
>> +#define I915_UFENCE_WAIT_U32    0xfffffffful
>> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>> +
>> +	/**
>> +	 * @timeout: Wait timeout in nanoseconds.
>> +	 *
>> +	 * If the I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
>> +	 * absolute time in nsec.
>> +	 */
>> +	__s64 timeout;
>> +};
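
A sketch of waiting for a user fence to reach a sequence number (relative
timeout, woken by the GPU context that signals the fence; fence_addr and
seqno are illustrative):

	struct drm_i915_gem_wait_user_fence wait = {
		.addr = fence_addr,          /* qword aligned */
		.ctx_id = ctx_id,            /* signalling context */
		.op = I915_UFENCE_WAIT_GTE,  /* wake when (*addr & mask) >= (value & mask) */
		.flags = 0,
		.value = seqno,
		.mask = I915_UFENCE_WAIT_U64,
		.timeout = 1000000000,       /* 1 second, in nsec */
	};

	ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT_USER_FENCE, &wait);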
>> -- 
>> 2.21.0.rc0.32.g243a4c7e27
>>
Tvrtko Ursulin June 14, 2022, 7:16 a.m. UTC | #15
On 14/06/2022 00:39, Matthew Brost wrote:
> On Mon, Jun 13, 2022 at 07:09:06PM +0100, Tvrtko Ursulin wrote:
>>
>> On 13/06/2022 18:49, Niranjana Vishwanathapura wrote:
>>> On Mon, Jun 13, 2022 at 05:22:02PM +0100, Tvrtko Ursulin wrote:
>>>>
>>>> On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
>>>>> On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>>>>>>
>>>>>> On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>>>>>> On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
>>>>>>>> On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>>>>>>
>>>>>>>>> On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>>>
>>>>>>>>>> [snip: quoted uapi header, duplicated in full earlier in the thread]
>>>>>>>>>> +struct drm_i915_gem_vm_bind {
>>>>>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>>>>>> +    __u32 vm_id;
>>>>>>>>>> +
>>>>>>>>>> +    /** @queue_idx: Index of queue for binding */
>>>>>>>>>> +    __u32 queue_idx;
>>>>>>>>>
>>>>>>>>> I have a question here to which I did not find
>>>>>>>>> an answer by browsing the old threads.
>>>>>>>>>
>>>>>>>>> Queue index appears to be an implicit
>>>>>>>>> synchronisation mechanism, right? Operations on
>>>>>>>>> the same index are executed/complete in order of
>>>>>>>>> ioctl submission?
>>>>>>>>>
>>>>>>>>> Do we _have_ to implement this on the kernel
>>>>>>>>> side and could just allow in/out fence and let
>>>>>>>>> userspace deal with it?
>>>>>>>>
>>>>>>>>
>>>>>>>> It orders operations like in a queue. Which is kind
>>>>>>>> of what happens with existing queues/engines.
>>>>>>>>
>>>>>>>> If I understood correctly, it's going to be a
>>>>>>>> kthread + a linked list right?
>>>>>>>>
>>>>>>>
>>>>>>> Yes, that is correct.
>>>>>>>
>>>>>>>>
>>>>>>>> -Lionel
>>>>>>>>
>>>>>>>>
>>>>>>>>>
>>>>>>>>> An arbitrary/on-demand number of queues will add
>>>>>>>>> complexity on the kernel side, which should
>>>>>>>>> be avoided if possible.
>>>>>>>>>
>>>>>>>
>>>>>>> It was discussed in the other thread. Jason prefers this over putting
>>>>>>> an artificial limit on the number of queues (as the user can
>>>>>>> anyway exhaust
>>>>>>> the memory). I think complexity in the driver is manageable.
>>>>>>
>>>>>> You'll need to create tracking structures on demand, with
>>>>>> atomic replace of last fence, ref counting and locking of
>>>>>> some sort, more or less?
>>>>>>
>>>>>
>>>>> We will have a workqueue, a work item and a linked list per queue.
>>>>> VM_BIND/UNBIND call will add the mapping request to the
>>>>> specified queue's
>>>>> linked list and schedule the work item on the workqueue of that queue.
>>>>> I am not sure what you mean by last fence and replacing it.
>>>>>
>>>>>>> The other option being discussed is to have the user create those
>>>>>>> queues (like creating engine map) before hand and use that in vm_bind
>>>>>>> and vm_unbind ioctls. This puts a limit on the number of queues.
>>>>>>> But it is not clean either and not sure it is worth
>>>>>>> making the interface
>>>>>>> more complex.
>>>>>>> https://www.spinics.net/lists/dri-devel/msg350448.html
>>>>>>
>>>>>> What about the third option of a flag to return a fence (of
>>>>>> some sort) and pass in a fence? That way userspace can
>>>>>> imagine zero or N queues with very little effort on the
>>>>>> kernel side. Was this considered?
>>>>>>
>>>>>
>>>>> I am not clear what fence you are talking about here and how does that
>>>>> help with the number of vm_bind queues. Can you elaborate?
>>>>
>>>> It is actually already documented that bind/unbind will support
>>>> input and output fences - so what are these queues on top of what
>>>> userspace can already achieve by using them? Purely a convenience or
>>>> there is more to it?
>>>>
>>>
>>> Oh, the vm_bind queues are discussed in this thread.
>>> https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html
>>>
>>> Apparently Vulkan has a requirement for multiple queues, each queue
>>> processing vm_bind/unbind calls in the order of submission.
>>
>> I don't see how that answers my question so I will take the freedom to
>> repeat it. What are these queues on top of what userspace can already
>> achieve by using in-out fences? Purely a convenience or there is more to it?
>>
>> Queue1:
>>
>> out_fence_A = vm_bind A
>> out_fence_B = vm_bind B, in_fence=out_fence_A
>> execbuf(in_fence = out_fence_B)
>>
>> Queue2:
>>
>> out_fence_C = vm_bind C
>> out_fence_D = vm_bind D, in_fence=out_fence_C
>> execbuf(in_fence = out_fence_D)
>>
>> Parallel bind:
>> out_fence_E = vm_bind E
>> out_fence_F = vm_bind F
>> merged_fence = fence_merge(out_fence_E, out_fence_F)
>> execbuf(in_fence = merged_fence)
>>
> 
> Let's say you do this and only 1 queue:
> 
> VM_BIND_A (in_fence=fence_A)
> VM_BIND_B (in_fence=NULL)
> 
> With 1 queue VM_BIND_B is blocked on fence_A, hence the need for more
> than 1 queue.

I don't follow - there isn't a concept of a queue exposed in the uapi in
what I have described, so the above two run in parallel there. And
fence_A in your example doesn't even exist before you pass it to
bind A, so something is not right.

> e.g.
>   
> VM_BIND_A (queue_id=0, in_fence=fence_A)
> VM_BIND_B (queue_id=1, in_fence=NULL)
> 
> Now VM_BIND_B can immediately be executed regardless of fence_A status.

In my examples userspace can serialise or not as it sees fit using
fences. In the "parallel bind" example, the two binds run in parallel.
Userspace can create multiple such parallel "queues" if it wanted.

Parallel bind 1 and 2 interleaved:
out_fence_A = vm_bind A
out_fence_B = vm_bind B
out_fence_C = vm_bind C
out_fence_D = vm_bind D
// all binds can run in parallel
merged_fence_1 = fence_merge(out_fence_A, out_fence_B)
merged_fence_2 = fence_merge(out_fence_C, out_fence_D)
execbuf(in_fence = merged_fence_1) // after A&B finish
execbuf(in_fence = merged_fence_2) // after C&D finish

There is a huge disconnect somewhere but I don't know where.

Regards,

Tvrtko
Niranjana Vishwanathapura June 14, 2022, 3:43 p.m. UTC | #16
On Tue, Jun 14, 2022 at 08:16:41AM +0100, Tvrtko Ursulin wrote:
>
>On 14/06/2022 00:39, Matthew Brost wrote:
>>On Mon, Jun 13, 2022 at 07:09:06PM +0100, Tvrtko Ursulin wrote:
>>>
>>>On 13/06/2022 18:49, Niranjana Vishwanathapura wrote:
>>>>On Mon, Jun 13, 2022 at 05:22:02PM +0100, Tvrtko Ursulin wrote:
>>>>>
>>>>>On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
>>>>>>On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>>>>>>>
>>>>>>>On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>>>>>>>On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
>>>>>>>>>On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>>>>>>>
>>>>>>>>>>On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>>>>>>>VM_BIND and related uapi definitions
>>>>>>>>>>>
>>>>>>>>>>>[snip: quoted uapi header, duplicated in full earlier in the thread]
>>>>>>>>>>>+struct drm_i915_gem_vm_bind {
>>>>>>>>>>>+    /** @vm_id: VM (address space) id to bind */
>>>>>>>>>>>+    __u32 vm_id;
>>>>>>>>>>>+
>>>>>>>>>>>+    /** @queue_idx: Index of queue for binding */
>>>>>>>>>>>+    __u32 queue_idx;
>>>>>>>>>>
>>>>>>>>>>I have a question here to which I did not find
>>>>>>>>>>an answer by browsing the old threads.
>>>>>>>>>>
>>>>>>>>>>Queue index appears to be an implicit
>>>>>>>>>>synchronisation mechanism, right? Operations on
>>>>>>>>>>the same index are executed/complete in order of
>>>>>>>>>>ioctl submission?
>>>>>>>>>>
>>>>>>>>>>Do we _have_ to implement this on the kernel
>>>>>>>>>>side and could just allow in/out fence and let
>>>>>>>>>>userspace deal with it?
>>>>>>>>>
>>>>>>>>>
>>>>>>>>>It orders operations like in a queue. Which is kind
>>>>>>>>>of what happens with existing queues/engines.
>>>>>>>>>
>>>>>>>>>If I understood correctly, it's going to be a
>>>>>>>>>kthread + a linked list right?
>>>>>>>>>
>>>>>>>>
>>>>>>>>Yes, that is correct.
>>>>>>>>
>>>>>>>>>
>>>>>>>>>-Lionel
>>>>>>>>>
>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>>An arbitrary/on-demand number of queues will add
>>>>>>>>>>complexity on the kernel side, which should
>>>>>>>>>>be avoided if possible.
>>>>>>>>>>
>>>>>>>>
>>>>>>>>It was discussed in the other thread. Jason prefers this over putting
>>>>>>>>an artificial limit on the number of queues (as the user can
>>>>>>>>anyway exhaust
>>>>>>>>the memory). I think complexity in the driver is manageable.
>>>>>>>
>>>>>>>You'll need to create tracking structures on demand, with
>>>>>>>atomic replace of last fence, ref counting and locking of
>>>>>>>some sort, more or less?
>>>>>>>
>>>>>>
>>>>>>We will have a workqueue, a work item and a linked list per queue.
>>>>>>VM_BIND/UNBIND call will add the mapping request to the
>>>>>>specified queue's
>>>>>>linked list and schedule the work item on the workqueue of that queue.
>>>>>>I am not sure what you mean by last fence and replacing it.
>>>>>>
>>>>>>>>The other option being discussed is to have the user create those
>>>>>>>>queues (like creating engine map) before hand and use that in vm_bind
>>>>>>>>and vm_unbind ioctls. This puts a limit on the number of queues.
>>>>>>>>But it is not clean either and not sure it is worth
>>>>>>>>making the interface
>>>>>>>>more complex.
>>>>>>>>https://www.spinics.net/lists/dri-devel/msg350448.html
>>>>>>>
>>>>>>>What about the third option of a flag to return a fence (of
>>>>>>>some sort) and pass in a fence? That way userspace can
>>>>>>>imagine zero or N queues with very little effort on the
>>>>>>>kernel side. Was this considered?
>>>>>>>
>>>>>>
>>>>>>I am not clear what fence you are talking about here and how does that
>>>>>>help with the number of vm_bind queues. Can you elaborate?
>>>>>
>>>>>It is actually already documented that bind/unbind will support
>>>>>input and output fences - so what are these queues on top of what
>>>>>userspace can already achieve by using them? Purely a convenience or
>>>>>there is more to it?
>>>>>
>>>>
>>>>Oh, the vm_bind queues are discussed in this thread.
>>>>https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html
>>>>
>>>>Apparently Vulkan has a requirement for multiple queues, each queue
>>>>processing vm_bind/unbind calls in the order of submission.
>>>
>>>I don't see how that answers my question so I will take the freedom to
>>>repeat it. What are these queues on top of what userspace can already
>>>achieve by using in-out fences? Purely a convenience or there is more to it?
>>>
>>>Queue1:
>>>
>>>out_fence_A = vm_bind A
>>>out_fence_B = vm_bind B, in_fence=out_fence_A
>>>execbuf(in_fence = out_fence_B)
>>>
>>>Queue2:
>>>
>>>out_fence_C = vm_bind C
>>>out_fence_D = vm_bind D, in_fence=out_fence_C
>>>execbuf(in_fence = out_fence_D)
>>>
>>>Parallel bind:
>>>out_fence_E = vm_bind E
>>>out_fence_F = vm_bind F
>>>merged_fence = fence_merge(out_fence_E, out_fence_F)
>>>execbuf(in_fence = merged_fence)
>>>
>>
>>Let's say you do this and only 1 queue:
>>
>>VM_BIND_A (in_fence=fence_A)
>>VM_BIND_B (in_fence=NULL)
>>
>>With 1 queue VM_BIND_B is blocked on fence_A, hence the need for more
>>than 1 queue.
>
>I don't follow - there isn't a concept of a queue exposed in the uapi in
>what I have described, so the above two run in parallel there. And
>fence_A in your example doesn't even exist before you pass it
>to bind A, so something is not right.
>
>>e.g.
>>VM_BIND_A (queue_id=0, in_fence=fence_A)
>>VM_BIND_B (queue_id=1, in_fence=NULL)
>>
>>Now VM_BIND_B can immediately be executed regardless of fence_A status.
>
>In my examples userspace can serialise or not as it sees fit using
>fences. In the "parallel bind" example, the two binds run in parallel.
>Userspace can create multiple such parallel "queues" if it wanted.
>
>Parallel bind 1 and 2 interleaved:
>out_fence_A = vm_bind A
>out_fence_B = vm_bind B
>out_fence_C = vm_bind C
>out_fence_D = vm_bind D
>// all binds can run in parallel
>merged_fence_1 = fence_merge(out_fence_A, out_fence_B)
>merged_fence_2 = fence_merge(out_fence_C, out_fence_D)
>execbuf(in_fence = merged_fence_1) // after A&B finish
>execbuf(in_fence = merged_fence_2) // after C&D finish
>
>There is a huge disconnect somewhere but I don't know where.
>

Note that Vulkan has a requirement that VM_BIND and VM_UNBIND
operations will also have 'in' fences associated with them
and not just the 'out' fences (which your example above shows).

Yes, one of the solutions discussed was not to have any queue_idx
at all (assume a single queue) and let the submitted vm_bind/unbind
operations run and complete out of submission order. That way
a vm_bind/unbind submitted later will not be blocked by a vm_bind/unbind
submitted earlier.
But removing the ordering here comes at a cost. Having the operations
run in submission order has some benefits. These are discussed in the
other thread.
https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html

Hence having multiple queues gives the benefit of both worlds and gives
user more options.

Niranjana

>Regards,
>
>Tvrtko
Tvrtko Ursulin June 14, 2022, 4:02 p.m. UTC | #17
On 14/06/2022 16:43, Niranjana Vishwanathapura wrote:
> On Tue, Jun 14, 2022 at 08:16:41AM +0100, Tvrtko Ursulin wrote:
>>
>> On 14/06/2022 00:39, Matthew Brost wrote:
>>> On Mon, Jun 13, 2022 at 07:09:06PM +0100, Tvrtko Ursulin wrote:
>>>>
>>>> On 13/06/2022 18:49, Niranjana Vishwanathapura wrote:
>>>>> On Mon, Jun 13, 2022 at 05:22:02PM +0100, Tvrtko Ursulin wrote:
>>>>>>
>>>>>> On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
>>>>>>> On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>>>>>>>>
>>>>>>>> On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>>>>>>>> On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin wrote:
>>>>>>>>>> On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>>>>>>>>
>>>>>>>>>>> On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>>>>>
>>>>>>>>>>>> [snip: quoted uapi header, duplicated in full earlier in the thread]
>>>>>>>>>>>> +struct drm_i915_gem_vm_bind {
>>>>>>>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>>>>>>>> +    __u32 vm_id;
>>>>>>>>>>>> +
>>>>>>>>>>>> +    /** @queue_idx: Index of queue for binding */
>>>>>>>>>>>> +    __u32 queue_idx;
>>>>>>>>>>>
>>>>>>>>>>> I have a question here to which I did not find
>>>>>>>>>>> an answer by browsing the old threads.
>>>>>>>>>>>
>>>>>>>>>>> Queue index appears to be an implicit
>>>>>>>>>>> synchronisation mechanism, right? Operations on
>>>>>>>>>>> the same index are executed/complete in order of
>>>>>>>>>>> ioctl submission?
>>>>>>>>>>>
>>>>>>>>>>> Do we _have_ to implement this on the kernel
>>>>>>>>>>> side and could just allow in/out fence and let
>>>>>>>>>>> userspace deal with it?
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> It orders operations like in a queue. Which is kind
>>>>>>>>>> of what happens with existing queues/engines.
>>>>>>>>>>
>>>>>>>>>> If I understood correctly, it's going to be a
>>>>>>>>>> kthread + a linked list right?
>>>>>>>>>>
>>>>>>>>>
>>>>>>>>> Yes, that is correct.
>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> -Lionel
>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> An arbitrary/on-demand number of queues will add
>>>>>>>>>>> complexity on the kernel side, which should
>>>>>>>>>>> be avoided if possible.
>>>>>>>>>>>
>>>>>>>>>
>>>>>>>>> It was discussed in the other thread. Jason prefers this over 
>>>>>>>>> putting
>>>>>>>>> an artificial limit on the number of queues (as the user can
>>>>>>>>> anyway exhaust
>>>>>>>>> the memory). I think complexity in the driver is manageable.
>>>>>>>>
>>>>>>>> You'll need to create tracking structures on demand, with
>>>>>>>> atomic replace of last fence, ref counting and locking of
>>>>>>>> some sort, more or less?
>>>>>>>>
>>>>>>>
>>>>>>> We will have a workqueue, a work item and a linked list per queue.
>>>>>>> VM_BIND/UNBIND call will add the mapping request to the
>>>>>>> specified queue's
>>>>>>> linked list and schedule the work item on the workqueue of that 
>>>>>>> queue.
>>>>>>> I am not sure what you mean by last fence and replacing it.
>>>>>>>
>>>>>>>>> The other option being discussed is to have the user create those
>>>>>>>>> queues (like creating engine map) before hand and use that in 
>>>>>>>>> vm_bind
>>>>>>>>> and vm_unbind ioctls. This puts a limit on the number of queues.
>>>>>>>>> But it is not clean either and not sure it is worth
>>>>>>>>> making the interface
>>>>>>>>> more complex.
>>>>>>>>> https://www.spinics.net/lists/dri-devel/msg350448.html
>>>>>>>>
>>>>>>>> What about the third option of a flag to return a fence (of
>>>>>>>> some sort) and pass in a fence? That way userspace can
>>>>>>>> imagine zero or N queues with very little effort on the
>>>>>>>> kernel side. Was this considered?
>>>>>>>>
>>>>>>>
>>>>>>> I am not clear what fence you are talking about here and how does 
>>>>>>> that
>>>>>>> help with the number of vm_bind queues. Can you elaborate?
>>>>>>
>>>>>> It is actually already documented that bind/unbind will support
>>>>>> input and output fences - so what are these queues on top of what
>>>>>> userspace can already achieve by using them? Purely a convenience or
>>>>>> there is more to it?
>>>>>>
>>>>>
>>>>> Oh, the vm_bind queues are discussed in this thread.
>>>>> https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html
>>>>>
>>>>> Apparently Vulkan has a requirement for multiple queues, each queue
>>>>> processing vm_bind/unbind calls in the order of submission.
>>>>
>>>> I don't see how that answers my question so I will take the freedom to
>>>> repeat it. What are these queues on top of what userspace can already
>>>> achieve by using in-out fences? Purely a convenience or there is 
>>>> more to it?
>>>>
>>>> Queue1:
>>>>
>>>> out_fence_A = vm_bind A
>>>> out_fence_B = vm_bind B, in_fence=out_fence_A
>>>> execbuf(in_fence = out_fence_B)
>>>>
>>>> Queue2:
>>>>
>>>> out_fence_C = vm_bind C
>>>> out_fence_D = vm_bind D, in_fence=out_fence_C
>>>> execbuf(in_fence = out_fence_D)
>>>>
>>>> Parallel bind:
>>>> out_fence_E = vm_bind E
>>>> out_fence_F = vm_bind F
>>>> merged_fence = fence_merge(out_fence_E, out_fence_F)
>>>> execbuf(in_fence = merged_fence)
>>>>
>>>
>>> Let's say you do this and only 1 queue:
>>>
>>> VM_BIND_A (in_fence=fence_A)
>>> VM_BIND_B (in_fence=NULL)
>>>
>>> With 1 queue VM_BIND_B is blocked on fence_A, hence the need for more
>>> than 1 queue.
>>
>> I don't follow - there isn't a concept of a queue exposed in the uapi in
>> what I have described, so the above two run in parallel there. And
>> fence_A in your example doesn't even exist before you pass it
>> to bind A, so something is not right.
>>
>>> e.g.
>>> VM_BIND_A (queue_id=0, in_fence=fence_A)
>>> VM_BIND_B (queue_id=1, in_fence=NULL)
>>>
>>> Now VM_BIND_B can immediately be executed regardless of fence_A status.
>>
>> In my examples userspace can serialise or not as it sees fit using
>> fences. In the "parallel bind" example, the two binds run in parallel.
>> Userspace can create multiple such parallel "queues" if it wanted.
>>
>> Parallel bind 1 and 2 interleaved:
>> out_fence_A = vm_bind A
>> out_fence_B = vm_bind B
>> out_fence_C = vm_bind C
>> out_fence_D = vm_bind D
>> // all binds can run in parallel
>> merged_fence_1 = fence_merge(out_fence_A, out_fence_B)
>> merged_fence_2 = fence_merge(out_fence_C, out_fence_D)
>> execbuf(in_fence = merged_fence_1) // after A&B finish
>> execbuf(in_fence = merged_fence_2) // after C&D finish
>>
>> There is a huge disconnect somewhere but I don't know where.
>>
> 
> Note that Vulkan has a requirement that VM_BIND and VM_UNBIND
> operations will also have 'in' fences associated with them
> and not just the 'out' fences (which your example above shows).

I gave more examples earlier:

"""
Queue1:

out_fence_A = vm_bind A
out_fence_B = vm_bind B, in_fence=out_fence_A
execbuf(in_fence = out_fence_B)
"""

Clearly I showed both in and out fence.

> Yes, one of the solutions discussed was not to have any queue_idx
> at all (assume a single queue) and let the submitted vm_bind/unbind
> operations run and complete out of submission order. That way
> a vm_bind/unbind submitted later will not be blocked by a vm_bind/unbind
> submitted earlier.
> But removing the ordering here comes at a cost. Having the operations
> run in submission order has some benefits. These are discussed in the
> other thread.
> https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html

That is some messed up deep quoting in that link. Could you please
summarize the cost which the queues in the uapi are intended to avoid?

In any case it is not just for me. A significant addition is proposed 
for the driver so there should be a clear summary of cost vs benefit 
rather than a messy thread.

> Hence having multiple queues gives the benefit of both worlds and gives
> user more options.

Maybe, but lets be specific.

Regards,

Tvrtko
Tvrtko Ursulin June 14, 2022, 4:07 p.m. UTC | #18
On 14/06/2022 17:02, Tvrtko Ursulin wrote:
> 
> On 14/06/2022 16:43, Niranjana Vishwanathapura wrote:
>> On Tue, Jun 14, 2022 at 08:16:41AM +0100, Tvrtko Ursulin wrote:
>>>
>>> On 14/06/2022 00:39, Matthew Brost wrote:
>>>> On Mon, Jun 13, 2022 at 07:09:06PM +0100, Tvrtko Ursulin wrote:
>>>>>
>>>>> On 13/06/2022 18:49, Niranjana Vishwanathapura wrote:
>>>>>> On Mon, Jun 13, 2022 at 05:22:02PM +0100, Tvrtko Ursulin wrote:
>>>>>>>
>>>>>>> On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
>>>>>>>> On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>>>>>>>>>
>>>>>>>>> On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>>>>>>>>> On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin 
>>>>>>>>>> wrote:
>>>>>>>>>>> On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>>>>>>>>>
>>>>>>>>>>>> On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>>>>>>
>>>>>>>>>>>>> Signed-off-by: Niranjana Vishwanathapura
>>>>>>>>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>>>>>>>>> [snip]
>>>>>>>>>>>>> + * The @queue_idx specifies the queue to
>>>>>>>>>>>>> use for binding. Same queue can be
>>>>>>>>>>>>> + * used for both VM_BIND and VM_UNBIND
>>>>>>>>>>>>> calls. All submitted bind and unbind
>>>>>>>>>>>>> + * operations in a queue are performed in the order of 
>>>>>>>>>>>>> submission.
>>>>>>>>>>>>> + *
>>>>>>>>>>>>> + * The @start, @offset and @length should
>>>>>>>>>>>>> be 4K page aligned. However the DG2
>>>>>>>>>>>>> + * and XEHPSDV have a 64K page size for device
>>>>>>>>>>>>> local-memory and a compact page
>>>>>>>>>>>>> + * table. On those platforms, for binding
>>>>>>>>>>>>> device local-memory objects, the
>>>>>>>>>>>>> + * @start should be 2M aligned, @offset and
>>>>>>>>>>>>> @length should be 64K aligned.
>>>>>>>>>>>>> + * Also, on those platforms, it is not
>>>>>>>>>>>>> allowed to bind a device local-memory
>>>>>>>>>>>>> + * object and a system memory object in a
>>>>>>>>>>>>> single 2M section of VA range.
>>>>>>>>>>>>> + */
>>>>>>>>>>>>> +struct drm_i915_gem_vm_bind {
>>>>>>>>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>>>>>>>>> +    __u32 vm_id;
>>>>>>>>>>>>> +
>>>>>>>>>>>>> +    /** @queue_idx: Index of queue for binding */
>>>>>>>>>>>>> +    __u32 queue_idx;
>>>>>>>>>>>>
>>>>>>>>>>>> I have a question here to which I did not find
>>>>>>>>>>>> an answer by browsing the old threads.
>>>>>>>>>>>>
>>>>>>>>>>>> Queue index appears to be an implicit
>>>>>>>>>>>> synchronisation mechanism, right? Operations on
>>>>>>>>>>>> the same index are executed/complete in order of
>>>>>>>>>>>> ioctl submission?
>>>>>>>>>>>>
>>>>>>>>>>>> Do we _have_ to implement this on the kernel
>>>>>>>>>>>> side and could just allow in/out fence and let
>>>>>>>>>>>> userspace deal with it?
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> It orders operations like in a queue. Which is kind
>>>>>>>>>>> of what happens with existing queues/engines.
>>>>>>>>>>>
>>>>>>>>>>> If I understood correctly, it's going to be a
>>>>>>>>>>> kthread + a linked list right?
>>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> Yes, that is correct.
>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>> -Lionel
>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>> Arbitrary/on-demand number of queues will add
>>>>>>>>>>>> the complexity on the kernel side which should
>>>>>>>>>>>> be avoided if possible.
>>>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> It was discussed in the other thread. Jason prefers this over 
>>>>>>>>>> putting
>>>>>>>>>> an artificial limit on the number of queues (as the user can
>>>>>>>>>> anyway exhaust
>>>>>>>>>> the memory). I think complexity in the driver is manageable.
>>>>>>>>>
>>>>>>>>> You'll need to create tracking structures on demand, with
>>>>>>>>> atomic replace of last fence, ref counting and locking of
>>>>>>>>> some sort, more or less?
>>>>>>>>>
>>>>>>>>
>>>>>>>> We will have a workqueue, a work item and a linked list per queue.
>>>>>>>> VM_BIND/UNBIND call will add the mapping request to the
>>>>>>>> specified queue's
>>>>>>>> linked list and schedule the work item on the workqueue of that 
>>>>>>>> queue.
>>>>>>>> I am not sure what you mean by last fence and replacing it.
>>>>>>>>
>>>>>>>>>> The other option being discussed is to have the user create those
>>>>>>>>>> queues (like creating engine map) before hand and use that in 
>>>>>>>>>> vm_bind
>>>>>>>>>> and vm_unbind ioctls. This puts a limit on the number of queues.
>>>>>>>>>> But it is not clean either and not sure it is worth
>>>>>>>>>> making the interface
>>>>>>>>>> more complex.
>>>>>>>>>> https://www.spinics.net/lists/dri-devel/msg350448.html
>>>>>>>>>
>>>>>>>>> What about the third option of a flag to return a fence (of
>>>>>>>>> some sort) and pass in a fence? That way userspace can
>>>>>>>>> imagine zero or N queues with very little effort on the
>>>>>>>>> kernel side. Was this considered?
>>>>>>>>>
>>>>>>>>
>>>>>>>> I am not clear what fence you are talking about here and how 
>>>>>>>> does that
>>>>>>>> help with the number of vm_bind queues. Can you elaborate?
>>>>>>>
>>>>>>> It is actually already documented that bind/unbind will support
>>>>>>> input and output fences - so what are these queues on top of what
>>>>>>> userspace can already achieve by using them? Purely a convenience or
>>>>>>> there is more to it?
>>>>>>>
>>>>>>
>>>>>> Oh, the vm_bind queues are discussed in this thread.
>>>>>> https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html 
>>>>>>
>>>>>>
>>>>>> Apparently Vulkan has requirement for multiple queues, each queue
>>>>>> processing vm_bind/unbind calls in the order of submission.
>>>>>
>>>>> I don't see how that answers my question so I will take the freedom to
>>>>> repeat it. What are these queues on top of what userspace can already
>>>>> achieve by using in-out fences? Purely a convenience or there is 
>>>>> more to it?
>>>>>
>>>>> Queue1:
>>>>>
>>>>> out_fence_A = vm_bind A
>>>>> out_fence_B = vm_bind B, in_fence=out_fence_A
>>>>> execbuf(in_fence = out_fence_B)
>>>>>
>>>>> Queue2:
>>>>>
>>>>> out_fence_C = vm_bind C
>>>>> out_fence_D = vm_bind D, in_fence=out_fence_C
>>>>> execbuf(in_fence = out_fence_D)
>>>>>
>>>>> Parallel bind:
>>>>> out_fence_E = vm_bind E
>>>>> out_fence_F = vm_bind F
>>>>> merged_fence = fence_merge(out_fence_E, out_fence_F)
>>>>> execbuf(in_fence = merged_fence)
>>>>>
>>>>
>>>> Let's say you do this with only 1 queue:
>>>>
>>>> VM_BIND_A (in_fence=fence_A)
>>>> VM_BIND_B (in_fence=NULL)
>>>>
>>>> With 1 queue, VM_BIND_B is blocked on fence_A, hence the need for more
>>>> than 1 queue.
>>>
>>> I don't follow - there isn't a concept of a queue exposed in uapi in
>>> what I have described, so the above two run in parallel there. Besides,
>>> fence_A in your example doesn't even exist before you pass it
>>> to bind A, so something is not right.
>>>
>>>> e.g.
>>>> VM_BIND_A (queue_id=0, in_fence=fence_A)
>>>> VM_BIND_B (queue_id=1, in_fence=NULL)
>>>>
>>>> Now VM_BIND_B can immediately be executed regardless of fence_A status.
>>>
>>> In my examples userspace can serialise or not as it sees fit using 
>>> fences. In the "parallel bind" example the two binds run in parallel.
>>> Userspace can create multiple such parallel "queues" if it wanted.
>>>
>>> Parallel bind 1 and 2 interleaved:
>>> out_fence_A = vm_bind A
>>> out_fence_B = vm_bind B
>>> out_fence_C = vm_bind C
>>> out_fence_D = vm_bind D
>>> // all binds can run in parallel
>>> merged_fence_1 = fence_merge(out_fence_A, out_fence_B)
>>> merged_fence_2 = fence_merge(out_fence_C, out_fence_D)
>>> execbuf(in_fence = merged_fence_1) // after A&B finish
>>> execbuf(in_fence = merged_fence_2) // after C&D finish
>>>
>>> There is a huge disconnect somewhere but I don't know where.
>>>
>>
>> Note that Vulkan has requirement that VM_BIND and VM_UNBIND
>> operations will also have 'in' fences associated with them
>> and not just the 'out' fences (which your example above shows).
> 
> I gave more examples earlier:
> 
> """
> Queue1:
> 
> out_fence_A = vm_bind A
> out_fence_B = vm_bind B, in_fence=out_fence_A
> execbuf(in_fence = out_fence_B)
> """
> 
> Clearly I showed both in and out fence.
> 
>> Yes, one of the solutions discussed was not to have any queue_idx
>> at all (assume single queue) and let the vm_bind/unbind operations
>> submitted run and complete out of submission order. That way
>> a vm_bind/unbind submitted later will not be blocked by a vm_bind/unbind
>> submitted earlier.
>> But removing the ordering here comes at a cost. Having the operations
>> run in submission order has some benefits. These are discussed in the
>> other thread.
>> https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html
> 
> That is some messed up deep quoting in that link. Could you please
> summarize the cost which the queues in the uapi are intended to avoid?
> 
> In any case it is not just for me. A significant addition is proposed 
> for the driver so there should be a clear summary of cost vs benefit 
> rather than a messy thread.
> 
>> Hence having multiple queues gives the benefit of both worlds and gives
>> user more options.
> 
> Maybe, but lets be specific.

Also, it can't really give the benefit of both worlds. You can't go 
fully async with the queue_idx scheme without i915 having to create N 
internal queues (where N = number of bind operations submitted).

Therefore I suspect it's a trade-off between cost and convenience, but
I'd like things clearly summarized so a decision can be made.
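
To make the comparison concrete, here is a minimal userspace sketch of
the fence-only scheme, assuming the RFC definitions above are used
as-is (queue_idx left at 0, error handling omitted). The returned
syncobj is the completion fence the examples call out_fence_*:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include "i915_vm_bind.h"	/* the proposed header from this series */

static uint32_t vm_bind_async(int fd, uint32_t vm_id, uint32_t handle,
			      uint64_t va, uint64_t size)
{
	struct drm_syncobj_create sc = { 0 };
	struct drm_i915_vm_bind_fence out_fence = { 0 };
	struct drm_i915_vm_bind_ext_timeline_fences ext = { 0 };
	struct drm_i915_gem_vm_bind bind = { 0 };
	uint64_t point = 0;			/* 0 == binary syncobj */

	/* Syncobj that will carry the bind completion fence. */
	ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &sc);

	out_fence.handle = sc.handle;
	out_fence.flags = I915_VM_BIND_FENCE_SIGNAL;	/* out fence only */

	ext.base.name = I915_VM_BIND_EXT_TIMELINE_FENCES;
	ext.fence_count = 1;
	ext.handles_ptr = (uintptr_t)&out_fence;
	ext.values_ptr = (uintptr_t)&point;

	bind.vm_id = vm_id;		/* bind.queue_idx stays 0 */
	bind.handle = handle;
	bind.start = va;
	bind.length = size;
	bind.extensions = (uintptr_t)&ext;

	ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);
	return sc.handle;		/* out_fence_* in the examples */
}

Two such calls give out_fence_A and out_fence_B, and execbuf3 can then
list both as wait fences in its own timeline fences extension, so the
ordering lives entirely in userspace.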

Regards,

Tvrtko
Niranjana Vishwanathapura June 14, 2022, 4:14 p.m. UTC | #19
On Tue, Jun 14, 2022 at 09:27:05AM +0300, Lionel Landwerlin wrote:
>On 10/06/2022 11:53, Matthew Brost wrote:
>>On Fri, Jun 10, 2022 at 12:07:11AM -0700, Niranjana Vishwanathapura wrote:
>>>VM_BIND and related uapi definitions
>>>
>>>Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
>>>---
>>>  Documentation/gpu/rfc/i915_vm_bind.h | 490 +++++++++++++++++++++++++++
>>>  1 file changed, 490 insertions(+)
>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>
>>>[snip]
>>>+/**
>>>+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>+ *
>>>+ * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
>>>+ * virtual address (VA) range to the section of an object that should be bound
>>>+ * in the device page table of the specified address space (VM).
>>>+ * The VA range specified must be unique (i.e., not currently bound) and can
>>>+ * be mapped to whole object or a section of the object (partial binding).
>>>+ * Multiple VA mappings can be created to the same section of the object
>>>+ * (aliasing).
>>>+ *
>>>+ * The @queue_idx specifies the queue to use for binding. Same queue can be
>>>+ * used for both VM_BIND and VM_UNBIND calls. All submitted bind and unbind
>>>+ * operations in a queue are performed in the order of submission.
>>>+ *
>>>+ * The @start, @offset and @length should be 4K page aligned. However, the DG2
>>>+ * and XEHPSDV have a 64K page size for device local-memory and a compact page
>>>+ * table. On those platforms, for binding device local-memory objects, the
>>>+ * @start should be 2M aligned, and @offset and @length should be 64K aligned.
>>>+ * Also, on those platforms, it is not allowed to bind a device local-memory
>>>+ * object and a system memory object in a single 2M section of VA range.
>>>+ */
>>>+struct drm_i915_gem_vm_bind {
>>>+	/** @vm_id: VM (address space) id to bind */
>>>+	__u32 vm_id;
>>>+
>>>+	/** @queue_idx: Index of queue for binding */
>>>+	__u32 queue_idx;
>>>+
>>>+	/** @rsvd: Reserved, MBZ */
>>>+	__u32 rsvd;
>>>+
>>>+	/** @handle: Object handle */
>>>+	__u32 handle;
>>>+
>>>+	/** @start: Virtual Address start to bind */
>>>+	__u64 start;
>>>+
>>>+	/** @offset: Offset in object to bind */
>>>+	__u64 offset;
>>>+
>>>+	/** @length: Length of mapping to bind */
>>>+	__u64 length;
>>This probably isn't needed. We are never going to unbind a subset of a
>>VMA, are we? That being said, it can't hurt as a sanity check (e.g.
>>internal vma->length == user unbind length).
>
>
>Not sure what you mean by that.
>
>
>Vulkan can unbind a whole range of addresses and it seems like there
>is no restriction on doing something like this:
>
>
>bind vma=0x1000000 GEMBO=3 offset=0 range=8192
>
>unbind vma=0x1001000 range=4096
>

The VM_BIND interface proposed here does not support it, specifically
the vma merge and split operations.
In the VM_UNBIND documentation I have mentioned that the specified
mapping should uniquely identify the mapping bound through the VM_BIND call.
UMDs should handle it.

>
>You would be left with a single 4k page of GEMBO=3 bound at vma=0x1000000
>
>
>Or :
>
>bind vma=0x1000000 GEMBO=3 offset=0 range=4096
>
>bind vma=0x1001000 GEMBO=4 offset=0 range=4096
>
>unbind vma=0x1000000 range=8192
>
>
>You're unbinding 2 bindings with a single operation.
>

This was also considered, but this is also part of the vma merge and split
story, hence it is not supported currently.

Maybe in the future the KMD can support it. But for now UMDs should handle it.
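
For example (a sketch against the structs in this patch; fd, vm_id and
the bo handle are placeholders), after:

	struct drm_i915_gem_vm_bind bind = {
		.vm_id = vm_id,
		.handle = gembo3,	/* GEMBO=3 in the example above */
		.start = 0x1000000,
		.offset = 0,
		.length = 8192,
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);

the only valid unbind is the one that echoes that mapping exactly:

	struct drm_i915_gem_vm_unbind unbind = {
		.vm_id = vm_id,
		.start = 0x1000000,
		.length = 8192,	/* 4096 would not match any mapping */
	};
	ioctl(fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind);

Unbinding only the second 4K page, or two mappings with one call, is
what the UMD has to decompose itself.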

Niranjana

>
>-Lionel
>
>
>>
>>>+
>>>+	/**
>>>+	 * @flags: Supported flags are:
>>>+	 *
>>>+	 * I915_GEM_VM_BIND_READONLY:
>>>+	 * Mapping is read-only.
>>>+	 *
>>>+	 * I915_GEM_VM_BIND_CAPTURE:
>>>+	 * Capture this mapping in the dump upon GPU error.
>>>+	 */
>>>+	__u64 flags;
>>>+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>+
>>>+	/**
>>>+	 * @extensions: 0-terminated chain of extensions for this operation.
>>>+	 *
>>>+	 * I915_VM_BIND_EXT_TIMELINE_FENCES:
>>>+	 * Specifies an array of input or output timeline fences for this
>>>+	 * binding operation. See struct drm_i915_vm_bind_ext_timeline_fences.
>>>+	 *
>>>+	 * I915_VM_BIND_EXT_USER_FENCES:
>>>+	 * Specifies an array of input or output user fences for this
>>>+	 * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
>>>+	 * This is required for compute contexts.
>>>+	 */
>>>+	__u64 extensions;
>>>+#define I915_VM_BIND_EXT_TIMELINE_FENCES	0
>>>+#define I915_VM_BIND_EXT_USER_FENCES		1
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>+ *
>>>+ * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
>>>+ * address (VA) range that should be unbound from the device page table of the
>>>+ * specified address space (VM). The specified VA range must match one of the
>>>+ * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>>>+ * completion.
>>>+ *
>>>+ * The @queue_idx specifies the queue to use for unbinding.
>>>+ * See struct drm_i915_gem_vm_bind for more information on @queue_idx.
>>>+ *
>>>+ * The @start and @length must specify a unique mapping bound with VM_BIND
>>>+ * ioctl.
>>>+ */
>>>+struct drm_i915_gem_vm_unbind {
>>>+	/** @vm_id: VM (address space) id to unbind */
>>>+	__u32 vm_id;
>>>+
>>>+	/** @queue_idx: Index of queue for unbinding */
>>>+	__u32 queue_idx;
>>>+
>>>+	/** @start: Virtual Address start to unbind */
>>>+	__u64 start;
>>>+
>>>+	/** @length: Length of mapping to unbind */
>>>+	__u64 length;
>>>+
>>>+	/** @flags: Reserved for future usage, currently MBZ */
>>>+	__u64 flags;
>>>+
>>>+	/**
>>>+	 * @extensions: 0-terminated chain of extensions for this operation.
>>>+	 *
>>>+	 * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
>>>+	 * Specifies an array of input or output timeline fences for this
>>>+	 * unbind operation.
>>>+	 * It has same format as struct drm_i915_vm_bind_ext_timeline_fences.
>>>+	 *
>>>+	 * I915_VM_UNBIND_EXT_USER_FENCES:
>>>+	 * Specifies an array of input or output user fences for this
>>>+	 * unbind operation. This is required for compute contexts.
>>>+	 * It has same format as struct drm_i915_vm_bind_ext_user_fence.
>>>+	 */
>>>+	__u64 extensions;
>>>+#define I915_VM_UNBIND_EXT_TIMELINE_FENCES	0
>>>+#define I915_VM_UNBIND_EXT_USER_FENCES		1
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
>>>+ * or the vm_unbind work.
>>>+ *
>>>+ * The vm_bind or vm_unbind async worker will wait for the input fence to signal
>>>+ * before starting the binding or unbinding.
>>>+ *
>>>+ * The vm_bind or vm_unbind async worker will signal the returned output fence
>>>+ * after the completion of binding or unbinding.
>>>+ */
>>>+struct drm_i915_vm_bind_fence {
>>>+	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
>>>+	__u32 handle;
>>>+
>>>+	/**
>>>+	 * @flags: Supported flags are:
>>>+	 *
>>>+	 * I915_VM_BIND_FENCE_WAIT:
>>>+	 * Wait for the input fence before binding/unbinding
>>>+	 *
>>>+	 * I915_VM_BIND_FENCE_SIGNAL:
>>>+	 * Return bind/unbind completion fence as output
>>>+	 */
>>>+	__u32 flags;
>>>+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
>>>+ * and vm_unbind.
>>>+ *
>>>+ * This structure describes an array of timeline drm_syncobj and associated
>>>+ * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
>>>+ * can be input or output fences (See struct drm_i915_vm_bind_fence).
>>>+ */
>>>+struct drm_i915_vm_bind_ext_timeline_fences {
>>>+	/** @base: Extension link. See struct i915_user_extension. */
>>>+	struct i915_user_extension base;
>>>+
>>>+	/**
>>>+	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
>>>+	 * arrays.
>>>+	 */
>>>+	__u64 fence_count;
>>>+
>>>+	/**
>>>+	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
>>>+	 * of length @fence_count.
>>>+	 */
>>>+	__u64 handles_ptr;
>>>+
>>>+	/**
>>>+	 * @values_ptr: Pointer to an array of u64 values of length
>>>+	 * @fence_count.
>>>+	 * Values must be 0 for a binary drm_syncobj. A value of 0 for a
>>>+	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
>>>+	 * binary one.
>>>+	 */
>>>+	__u64 values_ptr;
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
>>>+ * vm_bind or the vm_unbind work.
>>>+ *
>>>+ * The vm_bind or vm_unbind async worker will wait for the input fence (value at
>>>+ * @addr to become equal to @val) before starting the binding or unbinding.
>>>+ *
>>>+ * The vm_bind or vm_unbind async worker will signal the output fence after
>>>+ * the completion of binding or unbinding by writing @val to the memory
>>>+ * location at @addr.
>>>+ */
>>>+struct drm_i915_vm_bind_user_fence {
>>>+	/** @addr: User/Memory fence qword aligned process virtual address */
>>>+	__u64 addr;
>>>+
>>>+	/** @val: User/Memory fence value to be written after bind completion */
>>>+	__u64 val;
>>>+
>>>+	/**
>>>+	 * @flags: Supported flags are:
>>>+	 *
>>>+	 * I915_VM_BIND_USER_FENCE_WAIT:
>>>+	 * Wait for the input fence before binding/unbinding
>>>+	 *
>>>+	 * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>+	 * Return bind/unbind completion fence as output
>>>+	 */
>>>+	__u32 flags;
>>>+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>+	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
>>>+ * and vm_unbind.
>>>+ *
>>>+ * These user fences can be input or output fences
>>>+ * (See struct drm_i915_vm_bind_user_fence).
>>>+ */
>>>+struct drm_i915_vm_bind_ext_user_fence {
>>>+	/** @base: Extension link. See struct i915_user_extension. */
>>>+	struct i915_user_extension base;
>>>+
>>>+	/** @fence_count: Number of elements in the @user_fence_ptr array. */
>>>+	__u64 fence_count;
>>>+
>>>+	/**
>>>+	 * @user_fence_ptr: Pointer to an array of
>>>+	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>+	 */
>>>+	__u64 user_fence_ptr;
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
>>>+ * ioctl.
>>>+ *
>>>+ * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and VM_BIND mode
>>>+ * only works with this ioctl for submission.
>>>+ * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
>>>+ */
>>>+struct drm_i915_gem_execbuffer3 {
>>>+	/**
>>>+	 * @ctx_id: Context id
>>>+	 *
>>>+	 * Only contexts with user engine map are allowed.
>>>+	 */
>>>+	__u32 ctx_id;
>>>+
>>>+	/**
>>>+	 * @engine_idx: Engine index
>>>+	 *
>>>+	 * An index in the user engine map of the context specified by @ctx_id.
>>>+	 */
>>>+	__u32 engine_idx;
>>>+
>>>+	/** @rsvd1: Reserved, MBZ */
>>>+	__u32 rsvd1;
>>>+
>>>+	/**
>>>+	 * @batch_count: Number of batches in @batch_address array.
>>>+	 *
>>>+	 * 0 is invalid. For parallel submission, it should be equal to the
>>>+	 * number of (parallel) engines involved in that submission.
>>>+	 */
>>>+	__u32 batch_count;
>>>+
>>>+	/**
>>>+	 * @batch_address: Array of batch gpu virtual addresses.
>>>+	 *
>>>+	 * If @batch_count is 1, then it is the gpu virtual address of the
>>>+	 * batch buffer. If @batch_count > 1, then it is a pointer to an array
>>>+	 * of batch buffer gpu virtual addresses.
>>>+	 */
>>>+	__u64 batch_address;
>>>+
>>>+	/**
>>>+	 * @flags: Supported flags are:
>>>+	 *
>>>+	 * I915_EXEC3_SECURE:
>>>+	 * Request a privileged ("secure") batch buffer/s.
>>>+	 * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
>>>+	 */
>>>+	__u64 flags;
>>>+#define I915_EXEC3_SECURE	(1<<0)
>>>+
>>>+	/** @rsvd2: Reserved, MBZ */
>>>+	__u64 rsvd2;
>>>+
>>>+	/**
>>>+	 * @extensions: Zero-terminated chain of extensions.
>>>+	 *
>>>+	 * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
>>>+	 * It has same format as DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
>>>+	 * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
>>>+	 *
>>>+	 * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
>>>+	 * First level batch completion signaling extension.
>>>+	 * See struct drm_i915_gem_execbuffer3_ext_user_fence.
>>>+	 */
>>>+	__u64 extensions;
>>>+#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES	0
>>>+#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE		1
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_execbuffer3_ext_user_fence - First level batch completion
>>>+ * signaling extension.
>>>+ *
>>>+ * This extension allows user to attach a user fence (@addr, @value pair) to
>>>+ * execbuf3, to be signaled by the command streamer after the completion of first
>>>+ * level batch, by writing the @value at specified @addr and triggering an
>>>+ * interrupt.
>>>+ * User can either poll for this user fence to signal or can also wait on it
>>>+ * with i915_gem_wait_user_fence ioctl.
>>>+ * This is very useful for long running contexts where waiting on dma-fence
>>>+ * by the user (like the i915_gem_wait ioctl) is not supported.
>>>+ */
>>>+struct drm_i915_gem_execbuffer3_ext_user_fence {
>>>+	/** @base: Extension link. See struct i915_user_extension. */
>>>+	struct i915_user_extension base;
>>>+
>>>+	/**
>>>+	 * @addr: User/Memory fence qword aligned GPU virtual address.
>>>+	 *
>>>+	 * Address has to be a valid GPU virtual address at the time of
>>>+	 * first level batch completion.
>>>+	 */
>>>+	__u64 addr;
>>>+
>>>+	/**
>>>+	 * @value: User/Memory fence Value to be written to above address
>>>+	 * after first level batch completes.
>>>+	 */
>>>+	__u64 value;
>>>+
>>>+	/** @rsvd: Reserved, MBZ */
>>>+	__u64 rsvd;
>>>+};
>>>+
>>IMO all of these fence structs should be a generic sync interface shared
>>between both vm bind and exec3 rather than unique extensions.
>>
>>Both vm bind and exec3 should have something like this:
>>
>>__u64 syncs;	/* userptr to an array of generic syncs */
>>__u64 n_syncs;
>>
>>Having an array of syncs lets the kernel do one user copy for all the
>>syncs rather than reading them in a chain.
>>
>>A generic sync object encapsulates all possible syncs (in / out -
>>syncobj, syncobj timeline, ufence, future sync concepts).
>>
>>e.g.
>>
>>struct {
>>	__u32 user_ext;
>>	__u32 flag;	/* in / out, type, whatever else info we need */
>>	union {
>>		__u32 handle; 	/* to syncobj */
>>		__u64 addr; 	/* ufence address */
>>	};
>>	__u64 seqno;	/* syncobj timeline, ufence write value */
>>	...reserve enough bits for future...
>>}
>>
>>This unifies binds and execs by using the same sync interface
>>instilling the concept that binds and execs are the same op (queued
>>operation w/ in/out fences).
>>
>>Matt
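
Rendered as a compilable sketch, that proposal could look like the
below; every name here is illustrative only and not part of this RFC:

struct i915_gem_sync {				/* hypothetical */
	/** @flags: direction and type of the sync point */
	__u32 flags;
#define I915_GEM_SYNC_IN		(1 << 0)
#define I915_GEM_SYNC_OUT		(1 << 1)
#define I915_GEM_SYNC_TYPE_SYNCOBJ	(1 << 8)
#define I915_GEM_SYNC_TYPE_UFENCE	(2 << 8)
	__u32 pad;
	union {
		__u32 handle;			/* drm_syncobj */
		__u64 addr;			/* ufence address */
	};
	/** @seqno: syncobj timeline point or ufence write value */
	__u64 seqno;
	__u64 rsvd[2];				/* future sync concepts */
};

Both drm_i915_gem_vm_bind and drm_i915_gem_execbuffer3 would then carry
the same pair of fields instead of per-type extension chains:

	__u64 syncs;	/* userptr to an array of struct i915_gem_sync */
	__u64 n_syncs;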
>>
>>>+/**
>>>+ * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
>>>+ * private to the specified VM.
>>>+ *
>>>+ * See struct drm_i915_gem_create_ext.
>>>+ */
>>>+struct drm_i915_gem_create_ext_vm_private {
>>>+#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
>>>+	/** @base: Extension link. See struct i915_user_extension. */
>>>+	struct i915_user_extension base;
>>>+
>>>+	/** @vm_id: Id of the VM to which the object is private */
>>>+	__u32 vm_id;
>>>+};
>>>+
>>>+/**
>>>+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>>+ *
>>>+ * User/Memory fence can be woken up either by:
>>>+ *
>>>+ * 1. GPU context indicated by @ctx_id, or,
>>>+ * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>+ *    @ctx_id is ignored when this flag is set.
>>>+ *
>>>+ * Wakeup condition is,
>>>+ * ``((*addr & mask) op (value & mask))``
>>>+ *
>>>+ * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
>>>+ */
>>>+struct drm_i915_gem_wait_user_fence {
>>>+	/** @extensions: Zero-terminated chain of extensions. */
>>>+	__u64 extensions;
>>>+
>>>+	/** @addr: User/Memory fence address */
>>>+	__u64 addr;
>>>+
>>>+	/** @ctx_id: Id of the Context which will signal the fence. */
>>>+	__u32 ctx_id;
>>>+
>>>+	/** @op: Wakeup condition operator */
>>>+	__u16 op;
>>>+#define I915_UFENCE_WAIT_EQ      0
>>>+#define I915_UFENCE_WAIT_NEQ     1
>>>+#define I915_UFENCE_WAIT_GT      2
>>>+#define I915_UFENCE_WAIT_GTE     3
>>>+#define I915_UFENCE_WAIT_LT      4
>>>+#define I915_UFENCE_WAIT_LTE     5
>>>+#define I915_UFENCE_WAIT_BEFORE  6
>>>+#define I915_UFENCE_WAIT_AFTER   7
>>>+
>>>+	/**
>>>+	 * @flags: Supported flags are:
>>>+	 *
>>>+	 * I915_UFENCE_WAIT_SOFT:
>>>+	 *
>>>+	 * To be woken up by i915 driver async worker (not by GPU).
>>>+	 *
>>>+	 * I915_UFENCE_WAIT_ABSTIME:
>>>+	 *
>>>+	 * Wait timeout specified as absolute time.
>>>+	 */
>>>+	__u16 flags;
>>>+#define I915_UFENCE_WAIT_SOFT    0x1
>>>+#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>+
>>>+	/** @value: Wakeup value */
>>>+	__u64 value;
>>>+
>>>+	/** @mask: Wakeup mask */
>>>+	__u64 mask;
>>>+#define I915_UFENCE_WAIT_U8     0xffu
>>>+#define I915_UFENCE_WAIT_U16    0xffffu
>>>+#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>+
>>>+	/**
>>>+	 * @timeout: Wait timeout in nanoseconds.
>>>+	 *
>>>+	 * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
>>>+	 * absolute time in nsec.
>>>+	 */
>>>+	__s64 timeout;
>>>+};
>>>-- 
>>>2.21.0.rc0.32.g243a4c7e27
>>>
>
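
As a reading aid, the wakeup condition documented above amounts to the
following host-side check (a sketch; the wrap-around aware BEFORE/AFTER
operators are left out):

#include <stdbool.h>
#include <stdint.h>
#include "i915_vm_bind.h"	/* I915_UFENCE_WAIT_* from this series */

static bool ufence_signaled(const volatile uint64_t *addr, uint16_t op,
			    uint64_t value, uint64_t mask)
{
	uint64_t v = *addr & mask;
	uint64_t ref = value & mask;

	switch (op) {
	case I915_UFENCE_WAIT_EQ:	return v == ref;
	case I915_UFENCE_WAIT_NEQ:	return v != ref;
	case I915_UFENCE_WAIT_GT:	return v > ref;
	case I915_UFENCE_WAIT_GTE:	return v >= ref;
	case I915_UFENCE_WAIT_LT:	return v < ref;
	case I915_UFENCE_WAIT_LTE:	return v <= ref;
	default:			return false;	/* BEFORE/AFTER omitted */
	}
}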
Niranjana Vishwanathapura June 14, 2022, 4:42 p.m. UTC | #20
On Tue, Jun 14, 2022 at 05:07:37PM +0100, Tvrtko Ursulin wrote:
>
>On 14/06/2022 17:02, Tvrtko Ursulin wrote:
>>
>>On 14/06/2022 16:43, Niranjana Vishwanathapura wrote:
>>>On Tue, Jun 14, 2022 at 08:16:41AM +0100, Tvrtko Ursulin wrote:
>>>>
>>>>On 14/06/2022 00:39, Matthew Brost wrote:
>>>>>On Mon, Jun 13, 2022 at 07:09:06PM +0100, Tvrtko Ursulin wrote:
>>>>>>
>>>>>>On 13/06/2022 18:49, Niranjana Vishwanathapura wrote:
>>>>>>>On Mon, Jun 13, 2022 at 05:22:02PM +0100, Tvrtko Ursulin wrote:
>>>>>>>>
>>>>>>>>On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
>>>>>>>>>On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>>>>>>>>>>
>>>>>>>>>>On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>>>>>>>>>>On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel 
>>>>>>>>>>>Landwerlin wrote:
>>>>>>>>>>>>On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>>>>>>>>>>
>>>>>>>>>>>>>On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>>>>>>>>>>VM_BIND and related uapi definitions
>>>>>>>>>>>>>>
>>>>>>>>>>>>>>Signed-off-by: Niranjana Vishwanathapura
>>>>>>>>>>>>>><niranjana.vishwanathapura@intel.com>
>>>>>>>>>>>>>>[snip]
>>>>>>>>>>>>>>+ * The @queue_idx specifies the queue to
>>>>>>>>>>>>>>use for binding. Same queue can be
>>>>>>>>>>>>>>+ * used for both VM_BIND and VM_UNBIND
>>>>>>>>>>>>>>calls. All submitted bind and unbind
>>>>>>>>>>>>>>+ * operations in a queue are performed in 
>>>>>>>>>>>>>>the order of submission.
>>>>>>>>>>>>>>+ *
>>>>>>>>>>>>>>+ * The @start, @offset and @length should
>>>>>>>>>>>>>>be 4K page aligned. However the DG2
>>>>>>>>>>>>>>+ * and XEHPSDV has 64K page size for device
>>>>>>>>>>>>>>local-memory and has compact page
>>>>>>>>>>>>>>+ * table. On those platforms, for binding
>>>>>>>>>>>>>>device local-memory objects, the
>>>>>>>>>>>>>>+ * @start should be 2M aligned, @offset and
>>>>>>>>>>>>>>@length should be 64K aligned.
>>>>>>>>>>>>>>+ * Also, on those platforms, it is not
>>>>>>>>>>>>>>allowed to bind an device local-memory
>>>>>>>>>>>>>>+ * object and a system memory object in a
>>>>>>>>>>>>>>single 2M section of VA range.
>>>>>>>>>>>>>>+ */
>>>>>>>>>>>>>>+struct drm_i915_gem_vm_bind {
>>>>>>>>>>>>>>+    /** @vm_id: VM (address space) id to bind */
>>>>>>>>>>>>>>+    __u32 vm_id;
>>>>>>>>>>>>>>+
>>>>>>>>>>>>>>+    /** @queue_idx: Index of queue for binding */
>>>>>>>>>>>>>>+    __u32 queue_idx;
>>>>>>>>>>>>>
>>>>>>>>>>>>>I have a question here to which I did not find
>>>>>>>>>>>>>an answer by browsing the old threads.
>>>>>>>>>>>>>
>>>>>>>>>>>>>Queue index appears to be an implicit
>>>>>>>>>>>>>synchronisation mechanism, right? Operations on
>>>>>>>>>>>>>the same index are executed/complete in order of
>>>>>>>>>>>>>ioctl submission?
>>>>>>>>>>>>>
>>>>>>>>>>>>>Do we _have_ to implement this on the kernel
>>>>>>>>>>>>>side and could just allow in/out fence and let
>>>>>>>>>>>>>userspace deal with it?
>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>>It orders operations like in a queue. Which is kind
>>>>>>>>>>>>of what happens with existing queues/engines.
>>>>>>>>>>>>
>>>>>>>>>>>>If I understood correctly, it's going to be a
>>>>>>>>>>>>kthread + a linked list right?
>>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>Yes, that is correct.
>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>>-Lionel
>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>>>
>>>>>>>>>>>>>Arbitrary/on-demand number of queues will add
>>>>>>>>>>>>>the complexity on the kernel side which should
>>>>>>>>>>>>>be avoided if possible.
>>>>>>>>>>>>>
>>>>>>>>>>>
>>>>>>>>>>>It was discussed in the other thread. Jason 
>>>>>>>>>>>prefers this over putting
>>>>>>>>>>>an artificial limit on the number of queues (as the user can
>>>>>>>>>>>anyway exhaust
>>>>>>>>>>>the memory). I think complexity in the driver is manageable.
>>>>>>>>>>
>>>>>>>>>>You'll need to create tracking structures on demand, with
>>>>>>>>>>atomic replace of last fence, ref counting and locking of
>>>>>>>>>>some sort, more or less?
>>>>>>>>>>
>>>>>>>>>
>>>>>>>>>We will have a workqueue, a work item and a linked list per queue.
>>>>>>>>>VM_BIND/UNBIND call will add the mapping request to the
>>>>>>>>>specified queue's
>>>>>>>>>linked list and schedule the work item on the 
>>>>>>>>>workqueue of that queue.
>>>>>>>>>I am not sure what you mean by last fence and replacing it.
>>>>>>>>>
>>>>>>>>>>>The other option being discussed is to have the user create those
>>>>>>>>>>>queues (like creating engine map) before hand and 
>>>>>>>>>>>use that in vm_bind
>>>>>>>>>>>and vm_unbind ioctls. This puts a limit on the number of queues.
>>>>>>>>>>>But it is not clean either and not sure it is worth
>>>>>>>>>>>making the interface
>>>>>>>>>>>more complex.
>>>>>>>>>>>https://www.spinics.net/lists/dri-devel/msg350448.html
>>>>>>>>>>
>>>>>>>>>>What about the third option of a flag to return a fence (of
>>>>>>>>>>some sort) and pass in a fence? That way userspace can
>>>>>>>>>>imagine zero or N queues with very little effort on the
>>>>>>>>>>kernel side. Was this considered?
>>>>>>>>>>
>>>>>>>>>
>>>>>>>>>I am not clear what fence you are talking about here 
>>>>>>>>>and how does that
>>>>>>>>>help with the number of vm_bind queues. Can you elaborate?
>>>>>>>>
>>>>>>>>It is actually already documented that bind/unbind will support
>>>>>>>>input and output fences - so what are these queues on top of what
>>>>>>>>userspace can already achieve by using them? Purely a convenience or
>>>>>>>>there is more to it?
>>>>>>>>
>>>>>>>
>>>>>>>Oh, the vm_bind queues are discussed in this thread.
>>>>>>>https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html
>>>>>>>
>>>>>>>
>>>>>>>Apparently Vulkan has requirement for multiple queues, each queue
>>>>>>>processing vm_bind/unbind calls in the order of submission.
>>>>>>
>>>>>>I don't see how that answers my question so I will take the freedom to
>>>>>>repeat it. What are these queues on top of what userspace can already
>>>>>>achieve by using in-out fences? Purely a convenience or 
>>>>>>there is more to it?
>>>>>>
>>>>>>Queue1:
>>>>>>
>>>>>>out_fence_A = vm_bind A
>>>>>>out_fence_B = vm_bind B, in_fence=out_fence_A
>>>>>>execbuf(in_fence = out_fence_B)
>>>>>>
>>>>>>Queue2:
>>>>>>
>>>>>>out_fence_C = vm_bind C
>>>>>>out_fence_D = vm_bind D, in_fence=out_fence_C
>>>>>>execbuf(in_fence = out_fence_D)
>>>>>>
>>>>>>Parallel bind:
>>>>>>out_fence_E = vm_bind E
>>>>>>out_fence_F = vm_bind F
>>>>>>merged_fence = fence_merge(out_fence_E, out_fence_F)
>>>>>>execbuf(in_fence = merged_fence)
>>>>>>
>>>>>
>>>>>Let's say you do this with only 1 queue:
>>>>>
>>>>>VM_BIND_A (in_fence=fence_A)
>>>>>VM_BIND_B (in_fence=NULL)
>>>>>
>>>>>With 1 queue, VM_BIND_B is blocked on fence_A, hence the need for more
>>>>>than 1 queue.
>>>>
>>>>I don't follow - there isn't a concept of a queue exposed in 
>>>>uapi in what I have described so the above two run in parallel 
>>>>there, if we ignore fence_A in your example doesn't even exist 
>>>>before you pass it to bind A so something is not right.
>>>>
>>>>>e.g.
>>>>>VM_BIND_A (queue_id=0, in_fence=fence_A)
>>>>>VM_BIND_B (queue_id=1, in_fence=NULL)
>>>>>
>>>>>Now VM_BIND_B can immediately be executed regardless of fence_A status.
>>>>
>>>>In my examples userspace can serialise or not as it sees fit 
>>>>using fences. In the "parallel bind" example the two binds run in
>>>>parallel. Userspace can create multiple such parallel "queues" 
>>>>if it wanted.
>>>>
>>>>Parallel bind 1 and 2 interleaved:
>>>>out_fence_A = vm_bind A
>>>>out_fence_B = vm_bind B
>>>>out_fence_C = vm_bind C
>>>>out_fence_D = vm_bind D
>>>>// all binds can run in parallel
>>>>merged_fence_1 = fence_merge(out_fence_A, out_fence_B)
>>>>merged_fence_2 = fence_merge(out_fence_C, out_fence_D)
>>>>execbuf(in_fence = merged_fence_1) // after A&B finish
>>>>execbuf(in_fence = merged_fence_2) // after C&D finish
>>>>
>>>>There is a huge disconnect somewhere but I don't know where.
>>>>
>>>
>>>Note that Vulkan has requirement that VM_BIND and VM_UNBIND
>>>operations will also have 'in' fences associated with them
>>>and not just the 'out' fences (which your example above shows).
>>
>>I gave more examples earlier:
>>
>>"""
>>Queue1:
>>
>>out_fence_A = vm_bind A
>>out_fence_B = vm_bind B, in_fence=out_fence_A
>>execbuf(in_fence = out_fence_B)
>>"""
>>
>>Clearly I showed both in and out fence.
>>

Ok, guess I missed that.

>>>Yes, one of the solutions discussed was not to have any queue_idx
>>>at all (assume single queue) and let the vm_bind/unbind operations
>>>submitted run and complete out of submission order. That way
>>>a vm_bind/unbind submitted later will not be blocked by a vm_bind/unbind
>>>submitted earlier.
>>>But removing the ordering here comes at a cost. Having the operations
>>>run in submission order has some benefits. These are discussed in the
>>>other thread.
>>>https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html
>>
>>That is some messed up deep quoting in that link. Could you please 
>>summarize the cost which the queues in the uapi are intended to avoid?
>>
>>In any case it is not just for me. A significant addition is 
>>proposed for the driver so there should be a clear summary of cost 
>>vs benefit rather than a messy thread.
>>

Say the user has a bunch of mappings to bind or unbind which must be done
in submission order. If we have only one queue which runs the
operations out of submission order, then the user has to insert in and out
fences for each of the operations in the bunch. But with in-order
processing queues, the user needs to insert an 'in' fence only for the first
submission and an 'out' fence only for the last submission in that bunch.

Also, having in-order processing queues allows the user to unbind a VA
mapping and re-use the same VA in a subsequent bind operation without
any explicit dependency (the dependency is met by the fact that they are
processed in submission order).
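
In ioctl terms (a sketch; binds[] is a pre-filled array of struct
drm_i915_gem_vm_bind entries all targeting the same queue_idx, and the
fence extension chains are placeholders), only the edges of the bunch
carry fences:

	/* 'in' fence on the first op; the queue orders everything after. */
	binds[0].extensions = (uintptr_t)&in_fence_ext;
	ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &binds[0]);

	for (i = 1; i < n - 1; i++)	/* no fences in the middle */
		ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &binds[i]);

	/* 'out' fence on the last op signals completion of the bunch. */
	binds[n - 1].extensions = (uintptr_t)&out_fence_ext;
	ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &binds[n - 1]);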

>>>Hence having multiple queues gives the benefit of both worlds and gives
>>>user more options.
>>
>>Maybe, but lets be specific.
>
>Also, it can't really give the benefit of both worlds. You can't go 
>fully async with the queue_idx scheme without i915 having to create N 
>internal queues (where N = number of bind operations submitted).
>

We will have 1 internal queue for each queue_idx used by the user, as
each queue is an ordered queue (processed in submission order).
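
Roughly, the per-queue bookkeeping being described would have this
shape (a sketch only; none of these names exist in i915 today):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct i915_vm_bind_queue {			/* hypothetical */
	struct workqueue_struct *wq;
	struct work_struct work;	/* scheduled on @wq when work arrives */
	struct list_head pending;	/* bind/unbind requests, FIFO */
	spinlock_t lock;		/* protects @pending */
};

The VM_BIND/UNBIND ioctl appends the request to @pending of the queue
selected by queue_idx and kicks @work; the worker drains the list in
submission order, waiting for each request's in fences and signalling
its out fences as it goes.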

Niranjana

>Therefore I suspect it's a trade-off between cost and convenience but 
>I'd like things clearly summarized so decision can be made.
>
>Regards,
>
>Tvrtko
Tvrtko Ursulin June 15, 2022, 7:22 a.m. UTC | #21
On 14/06/2022 17:42, Niranjana Vishwanathapura wrote:
> On Tue, Jun 14, 2022 at 05:07:37PM +0100, Tvrtko Ursulin wrote:
>>
>> On 14/06/2022 17:02, Tvrtko Ursulin wrote:
>>>
>>> On 14/06/2022 16:43, Niranjana Vishwanathapura wrote:
>>>> On Tue, Jun 14, 2022 at 08:16:41AM +0100, Tvrtko Ursulin wrote:
>>>>>
>>>>> On 14/06/2022 00:39, Matthew Brost wrote:
>>>>>> On Mon, Jun 13, 2022 at 07:09:06PM +0100, Tvrtko Ursulin wrote:
>>>>>>>
>>>>>>> On 13/06/2022 18:49, Niranjana Vishwanathapura wrote:
>>>>>>>> On Mon, Jun 13, 2022 at 05:22:02PM +0100, Tvrtko Ursulin wrote:
>>>>>>>>>
>>>>>>>>> On 13/06/2022 16:05, Niranjana Vishwanathapura wrote:
>>>>>>>>>> On Mon, Jun 13, 2022 at 09:24:18AM +0100, Tvrtko Ursulin wrote:
>>>>>>>>>>>
>>>>>>>>>>> On 10/06/2022 17:14, Niranjana Vishwanathapura wrote:
>>>>>>>>>>>> On Fri, Jun 10, 2022 at 05:48:39PM +0300, Lionel Landwerlin 
>>>>>>>>>>>> wrote:
>>>>>>>>>>>>> On 10/06/2022 13:37, Tvrtko Ursulin wrote:
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> On 10/06/2022 08:07, Niranjana Vishwanathapura wrote:
>>>>>>>>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>>>>>>>>
>>>>>>>>>>>>>>> Signed-off-by: Niranjana Vishwanathapura
>>>>>>>>>>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>>>>>>>>>>> [snip]
>>>>>>>>>>>>>>> + * The @queue_idx specifies the queue to
>>>>>>>>>>>>>>> use for binding. Same queue can be
>>>>>>>>>>>>>>> + * used for both VM_BIND and VM_UNBIND
>>>>>>>>>>>>>>> calls. All submitted bind and unbind
>>>>>>>>>>>>>>> + * operations in a queue are performed in the order of 
>>>>>>>>>>>>>>> submission.
>>>>>>>>>>>>>>> + *
>>>>>>>>>>>>>>> + * The @start, @offset and @length should
>>>>>>>>>>>>>>> be 4K page aligned. However the DG2
>>>>>>>>>>>>>>> + * and XEHPSDV have a 64K page size for device
>>>>>>>>>>>>>>> local-memory and a compact page
>>>>>>>>>>>>>>> + * table. On those platforms, for binding
>>>>>>>>>>>>>>> device local-memory objects, the
>>>>>>>>>>>>>>> + * @start should be 2M aligned, @offset and
>>>>>>>>>>>>>>> @length should be 64K aligned.
>>>>>>>>>>>>>>> + * Also, on those platforms, it is not
>>>>>>>>>>>>>>> allowed to bind a device local-memory
>>>>>>>>>>>>>>> + * object and a system memory object in a
>>>>>>>>>>>>>>> single 2M section of VA range.
>>>>>>>>>>>>>>> + */
>>>>>>>>>>>>>>> +struct drm_i915_gem_vm_bind {
>>>>>>>>>>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>>>>>>>>>>> +    __u32 vm_id;
>>>>>>>>>>>>>>> +
>>>>>>>>>>>>>>> +    /** @queue_idx: Index of queue for binding */
>>>>>>>>>>>>>>> +    __u32 queue_idx;
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> I have a question here to which I did not find
>>>>>>>>>>>>>> an answer by browsing the old threads.
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> Queue index appears to be an implicit
>>>>>>>>>>>>>> synchronisation mechanism, right? Operations on
>>>>>>>>>>>>>> the same index are executed/complete in order of
>>>>>>>>>>>>>> ioctl submission?
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> Do we _have_ to implement this on the kernel
>>>>>>>>>>>>>> side and could just allow in/out fence and let
>>>>>>>>>>>>>> userspace deal with it?
>>>>>>>>>>>>>
>>>>>>>>>>>>>
>>>>>>>>>>>>> It orders operations like in a queue. Which is kind
>>>>>>>>>>>>> of what happens with existing queues/engines.
>>>>>>>>>>>>>
>>>>>>>>>>>>> If I understood correctly, it's going to be a
>>>>>>>>>>>>> kthread + a linked list right?
>>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>> Yes, that is correct.
>>>>>>>>>>>>
>>>>>>>>>>>>>
>>>>>>>>>>>>> -Lionel
>>>>>>>>>>>>>
>>>>>>>>>>>>>
>>>>>>>>>>>>>>
>>>>>>>>>>>>>> Arbitrary/on-demand number of queues will add
>>>>>>>>>>>>>> the complexity on the kernel side which should
>>>>>>>>>>>>>> be avoided if possible.
>>>>>>>>>>>>>>
>>>>>>>>>>>>
>>>>>>>>>>>> It was discussed in the other thread. Jason prefers this 
>>>>>>>>>>>> over putting
>>>>>>>>>>>> an artificial limit on the number of queues (as the user can
>>>>>>>>>>>> anyway exhaust
>>>>>>>>>>>> the memory). I think complexity in the driver is manageable.
>>>>>>>>>>>
>>>>>>>>>>> You'll need to create tracking structures on demand, with
>>>>>>>>>>> atomic replace of last fence, ref counting and locking of
>>>>>>>>>>> some sort, more or less?
>>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> We will have a workqueue, a work item and a linked list per
>>>>>>>>>> queue.
>>>>>>>>>> VM_BIND/UNBIND call will add the mapping request to the
>>>>>>>>>> specified queue's
>>>>>>>>>> linked list and schedule the work item on the workqueue of 
>>>>>>>>>> that queue.
>>>>>>>>>> I am not sure what you mean by last fence and replacing it.
>>>>>>>>>>
>>>>>>>>>>>> The other option being discussed is to have the user create
>>>>>>>>>>>> those
>>>>>>>>>>>> queues (like creating engine map) before hand and use that 
>>>>>>>>>>>> in vm_bind
>>>>>>>>>>>> and vm_unbind ioctls. This puts a limit on the number of 
>>>>>>>>>>>> queues.
>>>>>>>>>>>> But it is not clean either and not sure it is worth
>>>>>>>>>>>> making the interface
>>>>>>>>>>>> more complex.
>>>>>>>>>>>> https://www.spinics.net/lists/dri-devel/msg350448.html
>>>>>>>>>>>
>>>>>>>>>>> What about the third option of a flag to return a fence (of
>>>>>>>>>>> some sort) and pass in a fence? That way userspace can
>>>>>>>>>>> imagine zero or N queues with very little effort on the
>>>>>>>>>>> kernel side. Was this considered?
>>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> I am not clear what fence you are talking about here and how 
>>>>>>>>>> does that
>>>>>>>>>> help with the number of vm_bind queues. Can you elaborate?
>>>>>>>>>
>>>>>>>>> It is actually already documented that bind/unbind will support
>>>>>>>>> input and output fences - so what are these queues on top of what
>>>>>>>>> userspace can already achieve by using them? Purely a 
>>>>>>>>> convenience, or
>>>>>>>>> is there more to it?
>>>>>>>>>
>>>>>>>>
>>>>>>>> Oh, the vm_bind queues are discussed in this thread.
>>>>>>>> https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html 
>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>>> Apparently Vulkan has a requirement for multiple queues, each queue
>>>>>>>> processing vm_bind/unbind calls in the order of submission.
>>>>>>>
>>>>>>> I don't see how that answers my question so I will take the 
>>>>>>> liberty to
>>>>>>> repeat it. What are these queues on top of what userspace can 
>>>>>>> already
>>>>>>> achieve by using in-out fences? Purely a convenience, or is there 
>>>>>>> more to it?
>>>>>>>
>>>>>>> Queue1:
>>>>>>>
>>>>>>> out_fence_A = vm_bind A
>>>>>>> out_fence_B = vm_bind B, in_fence=out_fence_A
>>>>>>> execbuf(in_fence = out_fence_B)
>>>>>>>
>>>>>>> Queue2:
>>>>>>>
>>>>>>> out_fence_C = vm_bind C
>>>>>>> out_fence_D = vm_bind D, in_fence=out_fence_C
>>>>>>> execbuf(in_fence = out_fence_D)
>>>>>>>
>>>>>>> Parallel bind:
>>>>>>> out_fence_E = vm_bind E
>>>>>>> out_fence_F = vm_bind F
>>>>>>> merged_fence = fence_merge(out_fence_E, out_fence_F)
>>>>>>> execbuf(in_fence = merged_fence)
>>>>>>>
>>>>>>
>>>>>> Let's say you do this with only 1 queue:
>>>>>>
>>>>>> VM_BIND_A (in_fence=fence_A)
>>>>>> VM_BIND_B (in_fence=NULL)
>>>>>>
>>>>>> With 1 queue VM_BIND_B is blocked on fence_A, hence the need for 
>>>>>> more than 1
>>>>>> queue.
>>>>>
>>>>> I don't follow - there isn't a concept of a queue exposed in uapi 
>>>>> in what I have described, so the above two run in parallel there. 
>>>>> And that is ignoring that fence_A in your example doesn't even 
>>>>> exist before you pass it to bind A, so something is not right.
>>>>>
>>>>>> e.g.
>>>>>> VM_BIND_A (queue_id=0, in_fence=fence_A)
>>>>>> VM_BIND_B (queue_id=1, in_fence=NULL)
>>>>>>
>>>>>> Now VM_BIND_B can immediately be executed regardless of fence_A 
>>>>>> status.
>>>>>
>>>>> In my examples userspace can serialise or not as it sees fit using 
>>>>> fences. The "parallel bind" examples two binds run in parallel. 
>>>>> Userspace can create multiple such parallel "queues" if it wanted.
>>>>>
>>>>> Parallel bind 1 and 2 interleaved:
>>>>> out_fence_A = vm_bind A
>>>>> out_fence_B = vm_bind B
>>>>> out_fence_C = vm_bind C
>>>>> out_fence_D = vm_bind D
>>>>> // all binds can run in parallel
>>>>> merged_fence_1 = fence_merge(out_fence_A, out_fence_B)
>>>>> merged_fence_2 = fence_merge(out_fence_C, out_fence_D)
>>>>> execbuf(in_fence = merged_fence_1) // after A&B finish
>>>>> execbuf(in_fence = merged_fence_2) // after C&D finish
>>>>>
>>>>> There is a huge disconnect somewhere but I don't know where.
>>>>>
>>>>
>>>> Note that Vulkan has a requirement that VM_BIND and VM_UNBIND
>>>> operations will also have 'in' fences associated with them
>>>> and not just the 'out' fences (which your example above shows).
>>>
>>> I gave more examples earlier:
>>>
>>> """
>>> Queue1:
>>>
>>> out_fence_A = vm_bind A
>>> out_fence_B = vm_bind B, in_fence=out_fence_A
>>> execbuf(in_fence = out_fence_B)
>>> """
>>>
>>> Clearly I showed both in and out fence.
>>>
> 
> Ok, guess I missed that.
> 
>>>> Yes, one of the solutions discussed was not to have any queue_idx
>>>> at all (assume a single queue) and let the vm_bind/unbind operations
>>>> submitted run and complete out of submission order. That way
>>>> a vm_bind/unbind submitted later will not be blocked by a vm_bind/unbind
>>>> submitted earlier.
>>>> But removing the ordering here comes at a cost. Having the operations
>>>> run in submission order has some benefits. These are discussed in the
>>>> other thread.
>>>> https://lists.freedesktop.org/archives/intel-gfx/2022-June/299217.html
>>>
>>> That is some messed up deep quoting in that link. Could you please 
>>> summarize the cost which the queues in the uapi are intended to avoid?
>>>
>>> In any case it is not just for me. A significant addition is proposed 
>>> for the driver so there should be a clear summary of cost vs benefit 
>>> rather than a messy thread.
>>>
> 
> Say the user has a bunch of mappings to bind or unbind which must be done
> in submission order. If we have only one queue which runs the
> operations out of submission order, then the user has to insert in and out
> fences for each operation in the bunch. But with in-order
> processing queues, the user needs to insert an 'in' fence only for the first
> submission and an 'out' fence only for the last submission in that bunch.
> 
> Also, having in-order processing queues allows the user to unbind a VA
> mapping and re-use the same VA in a subsequent bind operation without
> any explicit dependency (the dependency is met by the fact that they are
> processed in submission order).

Okay, so it is a convenience thing and maybe more performance-efficient.

Has a) the performance impact of requiring fences with every bind/unbind 
been looked at, so we know if it is worth adding code to the driver to 
handle queues, and b) do you have the queued implementation sketched out, 
so the amount of kernel code required can be judged?

Regards,

Tvrtko
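
For concreteness, the in/out fence plumbing being debated above would look
roughly as follows with the structures proposed in this patch. This is a
minimal sketch, assuming the usual i915_drm.h definitions (struct
i915_user_extension, a binary drm_syncobj created beforehand with
DRM_IOCTL_SYNCOBJ_CREATE); the helper name is illustrative, and setup and
error handling are elided.

#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_vm_bind.h"	/* the header proposed in this patch */

/* Bind one mapping on queue 0 and request a completion fence through the
 * I915_VM_BIND_EXT_TIMELINE_FENCES extension. @syncobj is a binary
 * drm_syncobj the caller created beforehand.
 */
static int vm_bind_with_out_fence(int fd, __u32 vm_id, __u32 bo,
				  __u64 va, __u64 offset, __u64 len,
				  __u32 syncobj)
{
	struct drm_i915_vm_bind_fence fence = {
		.handle = syncobj,
		.flags  = I915_VM_BIND_FENCE_SIGNAL,
	};
	__u64 point = 0;	/* 0 = binary syncobj, per the kernel-doc */
	struct drm_i915_vm_bind_ext_timeline_fences ext = {
		.base.name   = I915_VM_BIND_EXT_TIMELINE_FENCES,
		.fence_count = 1,
		.handles_ptr = (uintptr_t)&fence,
		.values_ptr  = (uintptr_t)&point,
	};
	struct drm_i915_gem_vm_bind bind = {
		.vm_id      = vm_id,
		.queue_idx  = 0,	/* ops on one queue complete in order */
		.handle     = bo,
		.start      = va,
		.offset     = offset,
		.length     = len,
		.extensions = (uintptr_t)&ext,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);
}

An input fence would be expressed the same way with I915_VM_BIND_FENCE_WAIT,
and chaining the out fence of one operation into the in fence of the next
reproduces by hand the single-queue ordering discussed above.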
Niranjana Vishwanathapura June 15, 2022, 3:20 p.m. UTC | #22
On Wed, Jun 15, 2022 at 08:22:23AM +0100, Tvrtko Ursulin wrote:
>
>[snip: full re-quote of the discussion above]
>
>Okay, so it is a convenience thing and maybe more performance-efficient.
>
>Has a) the performance impact of requiring fences with every
>bind/unbind been looked at, so we know if it is worth adding code to
>the driver to handle queues, and b) do you have the queued
>implementation sketched out, so the amount of kernel code required can
>be judged?

No, this sparse requirement (in/out fences) is fairly new and the
queue request came up during this review. I will prototype this
once I post the existing set of vm_bind features in i915 for review.

Niranjana

Tvrtko Ursulin June 16, 2022, 8:53 a.m. UTC | #23
On 15/06/2022 16:20, Niranjana Vishwanathapura wrote:
> On Wed, Jun 15, 2022 at 08:22:23AM +0100, Tvrtko Ursulin wrote:
>> [snip: full re-quote of the discussion above]
>>
>> Okay, so it is a convenience thing and maybe more performance-efficient.
>>
>> Has a) the performance impact of requiring fences with every
>> bind/unbind been looked at, so we know if it is worth adding code to
>> the driver to handle queues, and b) do you have the queued
>> implementation sketched out, so the amount of kernel code required can
>> be judged?
> 
> No, this sparse requirement (in/out fences) is fairly new and the
> queue request came up during this review. I will prototype this
> once I post the existing set of vm_bind features in i915 for review.

If I understand right, there isn't an implementation for either in/out 
fences or queues? I am thinking, if you at least had in/out fences you 
could easily measure the overhead with a tight (un)bind loop, with and 
without in/out fences.

Or maybe even the execbuf2 path could be used. It's a much heavier path 
in general, but w/ and w/o fences might be representative of the 
absolute cost of dealing with them (fences). And evaluate that number 
together with the plain (un)bind ioctl cost from the implementation you 
have.

If it shows that chaining in-out fences adds a significant overhead then 
queues in the uapi would be acceptable.

Regards,

Tvrtko
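
The measurement loop suggested above might look roughly like this. A sketch
only, assuming the ioctls proposed in this patch: the harness and helper name
are illustrative, fd/vm/bo setup is elided, and the with-fences variant would
add per-iteration syncobj plumbing as in the earlier sketch.

#include <time.h>
#include <sys/ioctl.h>
#include "i915_vm_bind.h"

/* Time a tight bind/unbind loop over one VA range, without fences.
 * Returns total nanoseconds; divide by 2 * iters for per-ioctl cost.
 */
static double bind_unbind_loop_ns(int fd, __u32 vm_id, __u32 bo,
				  __u64 va, __u64 len, int iters)
{
	struct timespec t0, t1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (int i = 0; i < iters; i++) {
		struct drm_i915_gem_vm_bind bind = {
			.vm_id = vm_id, .handle = bo,
			.start = va, .length = len,
		};
		struct drm_i915_gem_vm_unbind unbind = {
			.vm_id = vm_id, .start = va, .length = len,
		};

		ioctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind);
		ioctl(fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind);
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);

	return (t1.tv_sec - t0.tv_sec) * 1e9 + (t1.tv_nsec - t0.tv_nsec);
}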
Patch

diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
new file mode 100644
index 000000000000..9fc854969cfb
--- /dev/null
+++ b/Documentation/gpu/rfc/i915_vm_bind.h
@@ -0,0 +1,490 @@ 
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+/**
+ * DOC: I915_PARAM_HAS_VM_BIND
+ *
+ * VM_BIND feature availability.
+ * See typedef drm_i915_getparam_t param.
+ * bit[0]: If set, VM_BIND is supported, otherwise not.
+ * bits[8-15]: VM_BIND implementation version.
+ * version 0 will not have VM_BIND/UNBIND timeline fence array support.
+ */
+#define I915_PARAM_HAS_VM_BIND		57
+
+/**
+ * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
+ *
+ * Flag to opt-in for VM_BIND mode of binding during VM creation.
+ * See struct drm_i915_gem_vm_control flags.
+ *
+ * The older execbuf2 ioctl will not support VM_BIND mode of operation.
+ * For VM_BIND mode, we have a new execbuf3 ioctl which will not accept any
+ * execlist (See struct drm_i915_gem_execbuffer3 for more details).
+ *
+ */
+#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
+
+/**
+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
+ *
+ * Flag to declare context as long running.
+ * See struct drm_i915_gem_context_create_ext flags.
+ *
+ * Usage of dma-fences expects that they complete in a reasonable amount of time.
+ * Compute on the other hand can be long running. Hence it is not appropriate
+ * for compute contexts to export request completion dma-fence to user.
+ * The dma-fence usage will be limited to in-kernel consumption only.
+ * Compute contexts need to use user/memory fence.
+ *
+ * So, long running contexts do not support output fences. Hence,
+ * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) is expected
+ * not to be used. The DRM_I915_GEM_WAIT ioctl call is also not supported for
+ * objects mapped to long running contexts.
+ */
+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
+
+/* VM_BIND related ioctls */
+#define DRM_I915_GEM_VM_BIND		0x3d
+#define DRM_I915_GEM_VM_UNBIND		0x3e
+#define DRM_I915_GEM_EXECBUFFER3	0x3f
+#define DRM_I915_GEM_WAIT_USER_FENCE	0x40
+
+#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
+#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_unbind)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER3		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)
+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
+
+/**
+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
+ *
+ * This structure is passed to the VM_BIND ioctl and specifies the mapping of
+ * a GPU virtual address (VA) range to the section of an object that should be
+ * bound in the device page table of the specified address space (VM).
+ * The VA range specified must be unique (i.e., not currently bound) and can
+ * be mapped to the whole object or to a section of the object (partial
+ * binding). Multiple VA mappings can be created to the same section of the
+ * object (aliasing).
+ *
+ * The @queue_idx specifies the queue to use for binding. The same queue can
+ * be used for both VM_BIND and VM_UNBIND calls. All submitted bind and unbind
+ * operations in a queue are performed in the order of submission.
+ *
+ * The @start, @offset and @length should be 4K page aligned. However DG2
+ * and XEHPSDV have a 64K page size for device local-memory and have a compact
+ * page table. On those platforms, for binding device local-memory objects, the
+ * @start should be 2M aligned, and @offset and @length should be 64K aligned.
+ * Also, on those platforms, it is not allowed to bind a device local-memory
+ * object and a system memory object in a single 2M section of the VA range.
+ */
+struct drm_i915_gem_vm_bind {
+	/** @vm_id: VM (address space) id to bind */
+	__u32 vm_id;
+
+	/** @queue_idx: Index of queue for binding */
+	__u32 queue_idx;
+
+	/** @rsvd: Reserved, MBZ */
+	__u32 rsvd;
+
+	/** @handle: Object handle */
+	__u32 handle;
+
+	/** @start: Virtual Address start to bind */
+	__u64 start;
+
+	/** @offset: Offset in object to bind */
+	__u64 offset;
+
+	/** @length: Length of mapping to bind */
+	__u64 length;
+
+	/**
+	 * @flags: Supported flags are:
+	 *
+	 * I915_GEM_VM_BIND_READONLY:
+	 * Mapping is read-only.
+	 *
+	 * I915_GEM_VM_BIND_CAPTURE:
+	 * Capture this mapping in the dump upon GPU error.
+	 */
+	__u64 flags;
+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
+
+	/**
+	 * @extensions: 0-terminated chain of extensions for this operation.
+	 *
+	 * I915_VM_BIND_EXT_TIMELINE_FENCES:
+	 * Specifies an array of input or output timeline fences for this
+	 * binding operation. See struct drm_i915_vm_bind_ext_timeline_fences.
+	 *
+	 * I915_VM_BIND_EXT_USER_FENCES:
+	 * Specifies an array of input or output user fences for this
+	 * binding operation. See struct drm_i915_vm_bind_ext_user_fence.
+	 * This is required for compute contexts.
+	 */
+	__u64 extensions;
+#define I915_VM_BIND_EXT_TIMELINE_FENCES	0
+#define I915_VM_BIND_EXT_USER_FENCES		1
+};
+
+/**
+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
+ *
+ * This structure is passed to the VM_UNBIND ioctl and specifies the GPU
+ * virtual address (VA) range that should be unbound from the device page
+ * table of the specified address space (VM). The specified VA range must
+ * match one of the mappings created with the VM_BIND ioctl. The TLB is
+ * flushed upon unbind completion.
+ *
+ * The @queue_idx specifies the queue to use for unbinding.
+ * See struct drm_i915_gem_vm_bind for more information on @queue_idx.
+ *
+ * The @start and @length must specify a unique mapping bound with the VM_BIND
+ * ioctl.
+ */
+struct drm_i915_gem_vm_unbind {
+	/** @vm_id: VM (address space) id to unbind */
+	__u32 vm_id;
+
+	/** @queue_idx: Index of queue for unbinding */
+	__u32 queue_idx;
+
+	/** @start: Virtual Address start to unbind */
+	__u64 start;
+
+	/** @length: Length of mapping to unbind */
+	__u64 length;
+
+	/** @flags: Reserved for future usage, currently MBZ */
+	__u64 flags;
+
+	/**
+	 * @extensions: 0-terminated chain of extensions for this operation.
+	 *
+	 * I915_VM_UNBIND_EXT_TIMELINE_FENCES:
+	 * Specifies an array of input or output timeline fences for this
+	 * unbind operation.
+	 * It has the same format as struct drm_i915_vm_bind_ext_timeline_fences.
+	 *
+	 * I915_VM_UNBIND_EXT_USER_FENCES:
+	 * Specifies an array of input or output user fences for this
+	 * unbind operation. This is required for compute contexts.
+	 * It has the same format as struct drm_i915_vm_bind_ext_user_fence.
+	 */
+	__u64 extensions;
+#define I915_VM_UNBIND_EXT_TIMELINE_FENCES	0
+#define I915_VM_UNBIND_EXT_USER_FENCES		1
+};
+
+/**
+ * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
+ * or the vm_unbind work.
+ *
+ * The vm_bind or vm_unbind async worker will wait for the input fence to
+ * signal before starting the binding or unbinding.
+ *
+ * The vm_bind or vm_unbind async worker will signal the returned output fence
+ * after the completion of binding or unbinding.
+ */
+struct drm_i915_vm_bind_fence {
+	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
+	__u32 handle;
+
+	/**
+	 * @flags: Supported flags are:
+	 *
+	 * I915_VM_BIND_FENCE_WAIT:
+	 * Wait for the input fence before binding/unbinding
+	 *
+	 * I915_VM_BIND_FENCE_SIGNAL:
+	 * Return bind/unbind completion fence as output
+	 */
+	__u32 flags;
+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
+};
+
+/**
+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
+ * and vm_unbind.
+ *
+ * This structure describes an array of timeline drm_syncobj and associated
+ * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
+ * can be input or output fences (See struct drm_i915_vm_bind_fence).
+ */
+struct drm_i915_vm_bind_ext_timeline_fences {
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/**
+	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
+	 * arrays.
+	 */
+	__u64 fence_count;
+
+	/**
+	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
+	 * of length @fence_count.
+	 */
+	__u64 handles_ptr;
+
+	/**
+	 * @values_ptr: Pointer to an array of u64 values of length
+	 * @fence_count.
+	 * Values must be 0 for a binary drm_syncobj. A value of 0 for a
+	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
+	 * binary one.
+	 */
+	__u64 values_ptr;
+};
+
+/**
+ * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
+ * vm_bind or the vm_unbind work.
+ *
+ * The vm_bind or vm_unbind async worker will wait for the input fence (the
+ * value at @addr to become equal to @val) before starting the binding or
+ * unbinding.
+ *
+ * The vm_bind or vm_unbind async worker will signal the output fence after
+ * the completion of binding or unbinding by writing @val to the memory
+ * location at @addr.
+ */
+struct drm_i915_vm_bind_user_fence {
+	/** @addr: User/Memory fence qword aligned process virtual address */
+	__u64 addr;
+
+	/** @val: User/Memory fence value to be written after bind/unbind completion */
+	__u64 val;
+
+	/**
+	 * @flags: Supported flags are:
+	 *
+	 * I915_VM_BIND_USER_FENCE_WAIT:
+	 * Wait for the input fence before binding/unbinding
+	 *
+	 * I915_VM_BIND_USER_FENCE_SIGNAL:
+	 * Return bind/unbind completion fence as output
+	 */
+	__u32 flags;
+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
+	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
+};
+
+/**
+ * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
+ * and vm_unbind.
+ *
+ * These user fences can be input or output fences
+ * (See struct drm_i915_vm_bind_user_fence).
+ */
+struct drm_i915_vm_bind_ext_user_fence {
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/** @fence_count: Number of elements in the @user_fence_ptr array. */
+	__u64 fence_count;
+
+	/**
+	 * @user_fence_ptr: Pointer to an array of
+	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
+	 */
+	__u64 user_fence_ptr;
+};
+
+/**
+ * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
+ * ioctl.
+ *
+ * DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode and VM_BIND mode
+ * only works with this ioctl for submission.
+ * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
+ */
+struct drm_i915_gem_execbuffer3 {
+	/**
+	 * @ctx_id: Context id
+	 *
+	 * Only contexts with a user engine map are allowed.
+	 */
+	__u32 ctx_id;
+
+	/**
+	 * @engine_idx: Engine index
+	 *
+	 * An index in the user engine map of the context specified by @ctx_id.
+	 */
+	__u32 engine_idx;
+
+	/** @rsvd1: Reserved, MBZ */
+	__u32 rsvd1;
+
+	/**
+	 * @batch_count: Number of batches in @batch_address array.
+	 *
+	 * 0 is invalid. For parallel submission, it should be equal to the
+	 * number of (parallel) engines involved in that submission.
+	 */
+	__u32 batch_count;
+
+	/**
+	 * @batch_address: Array of batch gpu virtual addresses.
+	 *
+	 * If @batch_count is 1, then it is the gpu virtual address of the
+	 * batch buffer. If @batch_count > 1, then it is a pointer to an array
+	 * of batch buffer gpu virtual addresses.
+	 */
+	__u64 batch_address;
+
+	/**
+	 * @flags: Supported flags are:
+	 *
+	 * I915_EXEC3_SECURE:
+	 * Request privileged ("secure") batch buffer(s).
+	 * It is only available for DRM_ROOT_ONLY | DRM_MASTER processes.
+	 */
+	__u64 flags;
+#define I915_EXEC3_SECURE	(1<<0)
+
+	/** @rsvd2: Reserved, MBZ */
+	__u64 rsvd2;
+
+	/**
+	 * @extensions: Zero-terminated chain of extensions.
+	 *
+	 * DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES:
+	 * It has the same format as DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES.
+	 * See struct drm_i915_gem_execbuffer_ext_timeline_fences.
+	 * 
+	 * DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE:
+	 * First level batch completion signaling extension.
+	 * See struct drm_i915_gem_execbuffer3_ext_user_fence.
+	 */
+	__u64 extensions;
+#define DRM_I915_GEM_EXECBUFFER3_EXT_TIMELINE_FENCES	0
+#define DRM_I915_GEM_EXECBUFFER3_EXT_USER_FENCE		1
+};
+
+/**
+ * struct drm_i915_gem_execbuffer3_ext_user_fence - First level batch completion
+ * signaling extension.
+ *
+ * This extension allows the user to attach a user fence (@addr, @value pair)
+ * to execbuf3, to be signaled by the command streamer after the completion of
+ * the first level batch by writing @value to the specified @addr and
+ * triggering an interrupt.
+ * The user can either poll for this user fence to signal or wait on it with
+ * the i915_gem_wait_user_fence ioctl.
+ * This is very useful for long running contexts, where waiting on a dma-fence
+ * by the user (like the i915_gem_wait ioctl) is not supported.
+ */
+struct drm_i915_gem_execbuffer3_ext_user_fence {
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/**
+	 * @addr: User/Memory fence qword aligned GPU virtual address.
+	 *
+	 * Address has to be a valid GPU virtual address at the time of
+	 * first level batch completion.
+	 */
+	__u64 addr;
+
+	/**
+	 * @value: User/Memory fence Value to be written to above address
+	 * after first level batch completes.
+	 */
+	__u64 value;
+
+	/** @rsvd: Reserved, MBZ */
+	__u64 rsvd;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
+ * private to the specified VM.
+ *
+ * See struct drm_i915_gem_create_ext.
+ */
+struct drm_i915_gem_create_ext_vm_private {
+#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/** @vm_id: Id of the VM to which the object is private */
+	__u32 vm_id;
+};
+
+/**
+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
+ *
+ * A User/Memory fence can be woken up either by:
+ *
+ * 1. The GPU context indicated by @ctx_id, or,
+ * 2. The kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
+ *    @ctx_id is ignored when this flag is set.
+ *
+ * Wakeup condition is,
+ * ``((*addr & mask) op (value & mask))``
+ *
+ * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
+ */
+struct drm_i915_gem_wait_user_fence {
+	/** @extensions: Zero-terminated chain of extensions. */
+	__u64 extensions;
+
+	/** @addr: User/Memory fence address */
+	__u64 addr;
+
+	/** @ctx_id: Id of the Context which will signal the fence. */
+	__u32 ctx_id;
+
+	/** @op: Wakeup condition operator */
+	__u16 op;
+#define I915_UFENCE_WAIT_EQ      0
+#define I915_UFENCE_WAIT_NEQ     1
+#define I915_UFENCE_WAIT_GT      2
+#define I915_UFENCE_WAIT_GTE     3
+#define I915_UFENCE_WAIT_LT      4
+#define I915_UFENCE_WAIT_LTE     5
+#define I915_UFENCE_WAIT_BEFORE  6
+#define I915_UFENCE_WAIT_AFTER   7
+
+	/**
+	 * @flags: Supported flags are:
+	 *
+	 * I915_UFENCE_WAIT_SOFT:
+	 *
+	 * To be woken up by i915 driver async worker (not by GPU).
+	 *
+	 * I915_UFENCE_WAIT_ABSTIME:
+	 *
+	 * Wait timeout specified as absolute time.
+	 */
+	__u16 flags;
+#define I915_UFENCE_WAIT_SOFT    0x1
+#define I915_UFENCE_WAIT_ABSTIME 0x2
+
+	/** @value: Wakeup value */
+	__u64 value;
+
+	/** @mask: Wakeup mask */
+	__u64 mask;
+#define I915_UFENCE_WAIT_U8     0xffu
+#define I915_UFENCE_WAIT_U16    0xffffu
+#define I915_UFENCE_WAIT_U32    0xfffffffful
+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
+
+	/**
+	 * @timeout: Wait timeout in nanoseconds.
+	 *
+	 * If the I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
+	 * absolute time in nsec.
+	 */
+	__s64 timeout;
+};
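
To illustrate the wakeup condition documented above,
``((*addr & mask) op (value & mask))``, here is a minimal sketch of a soft
wait on a 64-bit user fence using only the definitions from this patch; the
helper name and the 1s timeout are illustrative.

#include <sys/ioctl.h>
#include "i915_vm_bind.h"

/* Sleep until the qword at @fence_addr becomes >= @wanted. The wait is
 * serviced by the driver's async worker (I915_UFENCE_WAIT_SOFT), so
 * @ctx_id is left as zero and ignored.
 */
static int wait_user_fence_gte(int fd, __u64 fence_addr, __u64 wanted)
{
	struct drm_i915_gem_wait_user_fence wait = {
		.addr    = fence_addr,
		.op      = I915_UFENCE_WAIT_GTE,
		.flags   = I915_UFENCE_WAIT_SOFT,
		.value   = wanted,
		.mask    = I915_UFENCE_WAIT_U64,
		.timeout = 1000000000ll,	/* 1s, relative */
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT_USER_FENCE, &wait);
}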