diff mbox series

[RFC,v3,3/3] drm/doc/rfc: VM_BIND uapi definition

Message ID 20220517183212.20274-4-niranjana.vishwanathapura@intel.com (mailing list archive)
State New, archived
Headers show
Series drm/doc/rfc: i915 VM_BIND feature design + uapi | expand

Commit Message

Niranjana Vishwanathapura May 17, 2022, 6:32 p.m. UTC
VM_BIND and related uapi definitions

v2: Ensure proper kernel-doc formatting with cross references.
    Also add new uapi and documentation as per review comments
    from Daniel.

Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
 Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
 1 file changed, 399 insertions(+)
 create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h

Comments

Zanoni, Paulo R May 19, 2022, 11:07 p.m. UTC | #1
On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
> VM_BIND and related uapi definitions
> 
> v2: Ensure proper kernel-doc formatting with cross references.
>     Also add new uapi and documentation as per review comments
>     from Daniel.
> 
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> ---
>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>  1 file changed, 399 insertions(+)
>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
> 
> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
> new file mode 100644
> index 000000000000..589c0a009107
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
> @@ -0,0 +1,399 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2022 Intel Corporation
> + */
> +
> +/**
> + * DOC: I915_PARAM_HAS_VM_BIND
> + *
> + * VM_BIND feature availability.
> + * See typedef drm_i915_getparam_t param.
> + */
> +#define I915_PARAM_HAS_VM_BIND		57
> +
> +/**
> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
> + *
> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
> + * See struct drm_i915_gem_vm_control flags.
> + *
> + * A VM in VM_BIND mode will not support the older execbuff mode of binding.
> + * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
> + * to pass in the batch buffer addresses.
> + *
> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
> + */

From that description, it seems we have:

struct drm_i915_gem_execbuffer2 {
	__u64 buffers_ptr;		-> must be 0 (new)
	__u32 buffer_count;		-> must be 0 (new)
	__u32 batch_start_offset;	-> must be 0 (new)
	__u32 batch_len;		-> must be 0 (new)
	__u32 DR1;			-> must be 0 (old)
	__u32 DR4;			-> must be 0 (old)
	__u32 num_cliprects; (fences)	-> must be 0 since using extensions
	__u64 cliprects_ptr; (fences, extensions) -> contains an actual pointer!
	__u64 flags;			-> some flags must be 0 (new)
	__u64 rsvd1; (context info)	-> repurposed field (old)
	__u64 rsvd2;			-> unused
};

Based on that, why can't we just get drm_i915_gem_execbuffer3 instead
of adding even more complexity to an already abused interface? While
the Vulkan-like extension thing is really nice, I don't think what
we're doing here is extending the ioctl usage, we're completely
changing how the base struct should be interpreted based on how the VM
was created (which is an entirely different ioctl).

From Rusty Russell's API Design grading, drm_i915_gem_execbuffer2 is
already at -6 without these changes. I think after vm_bind we'll need
to create a -11 entry just to deal with this ioctl.


+#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
+
+/**
+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
+ *
+ * Flag to declare context as long running.
+ * See struct drm_i915_gem_context_create_ext flags.
+ *
+ * Usage of dma-fence expects that they complete in reasonable amount of time.
+ * Compute on the other hand can be long running. Hence it is not appropriate
+ * for compute contexts to export request completion dma-fence to user.
+ * The dma-fence usage will be limited to in-kernel consumption only.
+ * Compute contexts need to use user/memory fence.
+ *
+ * So, long running contexts do not support output fences. Hence,
+ * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags) and
+ * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) are expected
+ * to be not used.
+ *
+ * DRM_I915_GEM_WAIT ioctl call is also not supported for objects mapped
+ * to long running contexts.
+ */
+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
+
+/* VM_BIND related ioctls */
+#define DRM_I915_GEM_VM_BIND		0x3d
+#define DRM_I915_GEM_VM_UNBIND		0x3e
+#define DRM_I915_GEM_WAIT_USER_FENCE	0x3f
+
+#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
+/* VM_UNBIND takes struct drm_i915_gem_vm_unbind, not the bind struct. */
+#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_unbind)
+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
+
+/**
+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
+ *
+ * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
+ * virtual address (VA) range to the section of an object that should be bound
+ * in the device page table of the specified address space (VM).
+ * The VA range specified must be unique (i.e., not currently bound) and can
+ * be mapped to whole object or a section of the object (partial binding).
+ * Multiple VA mappings can be created to the same section of the object
+ * (aliasing).
+ */
+struct drm_i915_gem_vm_bind {
+	/** @vm_id: VM (address space) id to bind */
+	__u32 vm_id;
+
+	/** @handle: Object handle */
+	__u32 handle;
+
+	/** @start: Virtual Address start to bind */
+	__u64 start;
+
+	/** @offset: Offset in object to bind */
+	__u64 offset;
+
+	/** @length: Length of mapping to bind */
+	__u64 length;
+
+	/**
+	 * @flags: Supported flags are,
+	 *
+	 * I915_GEM_VM_BIND_READONLY:
+	 * Mapping is read-only.
+	 *
+	 * I915_GEM_VM_BIND_CAPTURE:
+	 * Capture this mapping in the dump upon GPU error.
+	 */
+	__u64 flags;
+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
+
+	/** @extensions: 0-terminated chain of extensions for this mapping. */
+	__u64 extensions;
+};
+
+/**
+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
+ *
+ * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
+ * address (VA) range that should be unbound from the device page table of the
+ * specified address space (VM). The specified VA range must match one of the
+ * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
+ * completion.
+ */
+struct drm_i915_gem_vm_unbind {
+	/** @vm_id: VM (address space) id to unbind */
+	__u32 vm_id;
+
+	/** @rsvd: Reserved for future use; must be zero. */
+	__u32 rsvd;
+
+	/** @start: Virtual Address start to unbind */
+	__u64 start;
+
+	/** @length: Length of mapping to unbind */
+	__u64 length;
+
+	/** @flags: reserved for future usage, currently MBZ */
+	__u64 flags;
+
+	/** @extensions: 0-terminated chain of extensions for this mapping. */
+	__u64 extensions;
+};
+
+/**
+ * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
+ * or the vm_unbind work.
+ *
+ * The vm_bind or vm_unbind async worker will wait for input fence to signal
+ * before starting the binding or unbinding.
+ *
+ * The vm_bind or vm_unbind async worker will signal the returned output fence
+ * after the completion of binding or unbinding.
+ */
+struct drm_i915_vm_bind_fence {
+	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
+	__u32 handle;
+
+	/**
+	 * @flags: Supported flags are,
+	 *
+	 * I915_VM_BIND_FENCE_WAIT:
+	 * Wait for the input fence before binding/unbinding
+	 *
+	 * I915_VM_BIND_FENCE_SIGNAL:
+	 * Return bind/unbind completion fence as output
+	 */
+	__u32 flags;
+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
+};
+
+/**
+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
+ * and vm_unbind.
+ *
+ * This structure describes an array of timeline drm_syncobj and associated
+ * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
+ * can be input or output fences (See struct drm_i915_vm_bind_fence).
+ */
+struct drm_i915_vm_bind_ext_timeline_fences {
+/* Uppercased for consistency with I915_VM_BIND_EXT_USER_FENCES. */
+#define I915_VM_BIND_EXT_TIMELINE_FENCES	0
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/**
+	 * @fence_count: Number of elements in the @handles_ptr & @values_ptr
+	 * arrays.
+	 */
+	__u64 fence_count;
+
+	/**
+	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
+	 * of length @fence_count.
+	 */
+	__u64 handles_ptr;
+
+	/**
+	 * @values_ptr: Pointer to an array of u64 values of length
+	 * @fence_count.
+	 * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
+	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
+	 * binary one.
+	 */
+	__u64 values_ptr;
+};
+
+/**
+ * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
+ * vm_bind or the vm_unbind work.
+ *
+ * The vm_bind or vm_unbind async worker will wait for the input fence (value at
+ * @addr to become equal to @val) before starting the binding or unbinding.
+ *
+ * The vm_bind or vm_unbind async worker will signal the output fence after
+ * the completion of binding or unbinding by writing @val to memory location at
+ * @addr.
+ */
+struct drm_i915_vm_bind_user_fence {
+	/** @addr: User/Memory fence qword aligned process virtual address */
+	__u64 addr;
+
+	/** @val: User/Memory fence value to be written after bind completion */
+	__u64 val;
+
+	/**
+	 * @flags: Supported flags are,
+	 *
+	 * I915_VM_BIND_USER_FENCE_WAIT:
+	 * Wait for the input fence before binding/unbinding
+	 *
+	 * I915_VM_BIND_USER_FENCE_SIGNAL:
+	 * Return bind/unbind completion fence as output
+	 */
+	__u32 flags;
+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
+	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
+};
+
+/**
+ * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
+ * and vm_unbind.
+ *
+ * These user fences can be input or output fences
+ * (See struct drm_i915_vm_bind_user_fence).
+ * The extension is attached to the vm_bind or vm_unbind ioctl by linking
+ * @base into the request's @extensions chain.
+ */
+struct drm_i915_vm_bind_ext_user_fence {
+#define I915_VM_BIND_EXT_USER_FENCES	1
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/** @fence_count: Number of elements in the @user_fence_ptr array. */
+	__u64 fence_count;
+
+	/**
+	 * @user_fence_ptr: Pointer to an array of
+	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
+	 */
+	__u64 user_fence_ptr;
+};
+
+/**
+ * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array of batch buffer
+ * gpu virtual addresses.
+ *
+ * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), this extension
+ * must always be appended in the VM_BIND mode and it will be an error to
+ * append this extension in older non-VM_BIND mode.
+ */
+struct drm_i915_gem_execbuffer_ext_batch_addresses {
+#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES	1
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/** @count: Number of addresses in the addr array. */
+	__u32 count;
+
+	/** @addr: An array of batch gpu virtual addresses. */
+	/* C99 flexible array member; zero-length arrays are deprecated. */
+	__u64 addr[];
+};
+
+/**
+ * struct drm_i915_gem_execbuffer_ext_user_fence - First level batch completion
+ * signaling extension.
+ *
+ * This extension allows user to attach a user fence (@addr, @value pair) to an
+ * execbuf to be signaled by the command streamer after the completion of first
+ * level batch, by writing the @value at specified @addr and triggering an
+ * interrupt.
+ * User can either poll for this user fence to signal or can also wait on it
+ * with i915_gem_wait_user_fence ioctl.
+ * This is very much useful for long running contexts where waiting on dma-fence
+ * by user (like i915_gem_wait ioctl) is not supported.
+ */
+struct drm_i915_gem_execbuffer_ext_user_fence {
+#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE		2
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/**
+	 * @addr: User/Memory fence qword aligned GPU virtual address.
+	 *
+	 * Address has to be a valid GPU virtual address at the time of
+	 * first level batch completion.
+	 */
+	__u64 addr;
+
+	/**
+	 * @value: User/Memory fence Value to be written to above address
+	 * after first level batch completes.
+	 */
+	__u64 value;
+
+	/** @rsvd: Reserved for future extensions, MBZ */
+	__u64 rsvd;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
+ * private to the specified VM.
+ *
+ * See struct drm_i915_gem_create_ext.
+ */
+struct drm_i915_gem_create_ext_vm_private {
+#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/** @vm_id: Id of the VM to which the object is private */
+	/*
+	 * NOTE(review): struct ends on a lone __u32 after the extension
+	 * header; a __u32 pad/rsvd member is likely needed to keep the
+	 * uapi struct size 64-bit aligned — confirm before freezing.
+	 */
+	__u32 vm_id;
+};
+
+/**
+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
+ *
+ * User/Memory fence can be woken up either by:
+ *
+ * 1. GPU context indicated by @ctx_id, or,
+ * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
+ *    @ctx_id is ignored when this flag is set.
+ *
+ * Wakeup condition is,
+ * ``((*addr & mask) op (value & mask))``
+ *
+ * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
+ */
+struct drm_i915_gem_wait_user_fence {
+	/** @extensions: Zero-terminated chain of extensions. */
+	__u64 extensions;
+
+	/** @addr: User/Memory fence address */
+	__u64 addr;
+
+	/** @ctx_id: Id of the Context which will signal the fence. */
+	__u32 ctx_id;
+
+	/** @op: Wakeup condition operator */
+	__u16 op;
+#define I915_UFENCE_WAIT_EQ      0
+#define I915_UFENCE_WAIT_NEQ     1
+#define I915_UFENCE_WAIT_GT      2
+#define I915_UFENCE_WAIT_GTE     3
+#define I915_UFENCE_WAIT_LT      4
+#define I915_UFENCE_WAIT_LTE     5
+#define I915_UFENCE_WAIT_BEFORE  6
+#define I915_UFENCE_WAIT_AFTER   7
+
+	/**
+	 * @flags: Supported flags are,
+	 *
+	 * I915_UFENCE_WAIT_SOFT:
+	 *
+	 * To be woken up by i915 driver async worker (not by GPU).
+	 *
+	 * I915_UFENCE_WAIT_ABSTIME:
+	 *
+	 * Wait timeout specified as absolute time.
+	 */
+	__u16 flags;
+#define I915_UFENCE_WAIT_SOFT    0x1
+#define I915_UFENCE_WAIT_ABSTIME 0x2
+
+	/** @value: Wakeup value */
+	__u64 value;
+
+	/** @mask: Wakeup mask */
+	__u64 mask;
+#define I915_UFENCE_WAIT_U8     0xffu
+#define I915_UFENCE_WAIT_U16    0xffffu
+#define I915_UFENCE_WAIT_U32    0xfffffffful
+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
+
+	/**
+	 * @timeout: Wait timeout in nanoseconds.
+	 *
+	 * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
+	 * absolute time in nsec.
+	 */
+	__s64 timeout;
+};
Niranjana Vishwanathapura May 23, 2022, 7:19 p.m. UTC | #2
On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>> VM_BIND and related uapi definitions
>>
>> v2: Ensure proper kernel-doc formatting with cross references.
>>     Also add new uapi and documentation as per review comments
>>     from Daniel.
>>
>> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
>> ---
>>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>>  1 file changed, 399 insertions(+)
>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>
>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
>> new file mode 100644
>> index 000000000000..589c0a009107
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>> @@ -0,0 +1,399 @@
>> +/* SPDX-License-Identifier: MIT */
>> +/*
>> + * Copyright © 2022 Intel Corporation
>> + */
>> +
>> +/**
>> + * DOC: I915_PARAM_HAS_VM_BIND
>> + *
>> + * VM_BIND feature availability.
>> + * See typedef drm_i915_getparam_t param.
>> + */
>> +#define I915_PARAM_HAS_VM_BIND               57
>> +
>> +/**
>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>> + *
>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>> + * See struct drm_i915_gem_vm_control flags.
>> + *
>> + * A VM in VM_BIND mode will not support the older execbuff mode of binding.
>> + * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
>> + * to pass in the batch buffer addresses.
>> + *
>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
>> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
>> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
>> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
>> + */
>
>From that description, it seems we have:
>
>struct drm_i915_gem_execbuffer2 {
>        __u64 buffers_ptr;              -> must be 0 (new)
>        __u32 buffer_count;             -> must be 0 (new)
>        __u32 batch_start_offset;       -> must be 0 (new)
>        __u32 batch_len;                -> must be 0 (new)
>        __u32 DR1;                      -> must be 0 (old)
>        __u32 DR4;                      -> must be 0 (old)
>        __u32 num_cliprects; (fences)   -> must be 0 since using extensions
>        __u64 cliprects_ptr; (fences, extensions) -> contains an actual pointer!
>        __u64 flags;                    -> some flags must be 0 (new)
>        __u64 rsvd1; (context info)     -> repurposed field (old)
>        __u64 rsvd2;                    -> unused
>};
>
>Based on that, why can't we just get drm_i915_gem_execbuffer3 instead
>of adding even more complexity to an already abused interface? While
>the Vulkan-like extension thing is really nice, I don't think what
>we're doing here is extending the ioctl usage, we're completely
>changing how the base struct should be interpreted based on how the VM
>was created (which is an entirely different ioctl).
>
>From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>already at -6 without these changes. I think after vm_bind we'll need
>to create a -11 entry just to deal with this ioctl.
>

The only change here is removing the execlist support for VM_BIND
mode (other than natural extensions).
Adding a new execbuffer3 was considered, but I think we need to be careful
with that as that goes beyond the VM_BIND support, including any future
requirements (as we don't want an execbuffer4 after VM_BIND).

Niranjana

>
>+#define I915_VM_CREATE_FLAGS_USE_VM_BIND       (1 << 0)
>+
>+/**
>+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>+ *
>+ * Flag to declare context as long running.
>+ * See struct drm_i915_gem_context_create_ext flags.
>+ *
>+ * Usage of dma-fence expects that they complete in reasonable amount of time.
>+ * Compute on the other hand can be long running. Hence it is not appropriate
>+ * for compute contexts to export request completion dma-fence to user.
>+ * The dma-fence usage will be limited to in-kernel consumption only.
>+ * Compute contexts need to use user/memory fence.
>+ *
>+ * So, long running contexts do not support output fences. Hence,
>+ * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
>+ * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) are expected
>+ * to be not used.
>+ *
>+ * DRM_I915_GEM_WAIT ioctl call is also not supported for objects mapped
>+ * to long running contexts.
>+ */
>+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>+
>+/* VM_BIND related ioctls */
>+#define DRM_I915_GEM_VM_BIND           0x3d
>+#define DRM_I915_GEM_VM_UNBIND         0x3e
>+#define DRM_I915_GEM_WAIT_USER_FENCE   0x3f
>+
>+#define DRM_IOCTL_I915_GEM_VM_BIND             DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>+#define DRM_IOCTL_I915_GEM_VM_UNBIND           DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
>+
>+/**
>+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>+ *
>+ * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
>+ * virtual address (VA) range to the section of an object that should be bound
>+ * in the device page table of the specified address space (VM).
>+ * The VA range specified must be unique (ie., not currently bound) and can
>+ * be mapped to whole object or a section of the object (partial binding).
>+ * Multiple VA mappings can be created to the same section of the object
>+ * (aliasing).
>+ */
>+struct drm_i915_gem_vm_bind {
>+       /** @vm_id: VM (address space) id to bind */
>+       __u32 vm_id;
>+
>+       /** @handle: Object handle */
>+       __u32 handle;
>+
>+       /** @start: Virtual Address start to bind */
>+       __u64 start;
>+
>+       /** @offset: Offset in object to bind */
>+       __u64 offset;
>+
>+       /** @length: Length of mapping to bind */
>+       __u64 length;
>+
>+       /**
>+        * @flags: Supported flags are,
>+        *
>+        * I915_GEM_VM_BIND_READONLY:
>+        * Mapping is read-only.
>+        *
>+        * I915_GEM_VM_BIND_CAPTURE:
>+        * Capture this mapping in the dump upon GPU error.
>+        */
>+       __u64 flags;
>+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>+
>+       /** @extensions: 0-terminated chain of extensions for this mapping. */
>+       __u64 extensions;
>+};
>+
>+/**
>+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>+ *
>+ * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
>+ * address (VA) range that should be unbound from the device page table of the
>+ * specified address space (VM). The specified VA range must match one of the
>+ * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>+ * completion.
>+ */
>+struct drm_i915_gem_vm_unbind {
>+       /** @vm_id: VM (address space) id to bind */
>+       __u32 vm_id;
>+
>+       /** @rsvd: Reserved for future use; must be zero. */
>+       __u32 rsvd;
>+
>+       /** @start: Virtual Address start to unbind */
>+       __u64 start;
>+
>+       /** @length: Length of mapping to unbind */
>+       __u64 length;
>+
>+       /** @flags: reserved for future usage, currently MBZ */
>+       __u64 flags;
>+
>+       /** @extensions: 0-terminated chain of extensions for this mapping. */
>+       __u64 extensions;
>+};
>+
>+/**
>+ * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
>+ * or the vm_unbind work.
>+ *
>+ * The vm_bind or vm_unbind aync worker will wait for input fence to signal
>+ * before starting the binding or unbinding.
>+ *
>+ * The vm_bind or vm_unbind async worker will signal the returned output fence
>+ * after the completion of binding or unbinding.
>+ */
>+struct drm_i915_vm_bind_fence {
>+       /** @handle: User's handle for a drm_syncobj to wait on or signal. */
>+       __u32 handle;
>+
>+       /**
>+        * @flags: Supported flags are,
>+        *
>+        * I915_VM_BIND_FENCE_WAIT:
>+        * Wait for the input fence before binding/unbinding
>+        *
>+        * I915_VM_BIND_FENCE_SIGNAL:
>+        * Return bind/unbind completion fence as output
>+        */
>+       __u32 flags;
>+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>+};
>+
>+/**
>+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
>+ * and vm_unbind.
>+ *
>+ * This structure describes an array of timeline drm_syncobj and associated
>+ * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
>+ * can be input or output fences (See struct drm_i915_vm_bind_fence).
>+ */
>+struct drm_i915_vm_bind_ext_timeline_fences {
>+#define I915_VM_BIND_EXT_timeline_FENCES       0
>+       /** @base: Extension link. See struct i915_user_extension. */
>+       struct i915_user_extension base;
>+
>+       /**
>+        * @fence_count: Number of elements in the @handles_ptr & @value_ptr
>+        * arrays.
>+        */
>+       __u64 fence_count;
>+
>+       /**
>+        * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
>+        * of length @fence_count.
>+        */
>+       __u64 handles_ptr;
>+
>+       /**
>+        * @values_ptr: Pointer to an array of u64 values of length
>+        * @fence_count.
>+        * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>+        * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
>+        * binary one.
>+        */
>+       __u64 values_ptr;
>+};
>+
>+/**
>+ * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
>+ * vm_bind or the vm_unbind work.
>+ *
>+ * The vm_bind or vm_unbind aync worker will wait for the input fence (value at
>+ * @addr to become equal to @val) before starting the binding or unbinding.
>+ *
>+ * The vm_bind or vm_unbind async worker will signal the output fence after
>+ * the completion of binding or unbinding by writing @val to memory location at
>+ * @addr
>+ */
>+struct drm_i915_vm_bind_user_fence {
>+       /** @addr: User/Memory fence qword aligned process virtual address */
>+       __u64 addr;
>+
>+       /** @val: User/Memory fence value to be written after bind completion */
>+       __u64 val;
>+
>+       /**
>+        * @flags: Supported flags are,
>+        *
>+        * I915_VM_BIND_USER_FENCE_WAIT:
>+        * Wait for the input fence before binding/unbinding
>+        *
>+        * I915_VM_BIND_USER_FENCE_SIGNAL:
>+        * Return bind/unbind completion fence as output
>+        */
>+       __u32 flags;
>+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>+       (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>+};
>+
>+/**
>+ * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
>+ * and vm_unbind.
>+ *
>+ * These user fences can be input or output fences
>+ * (See struct drm_i915_vm_bind_user_fence).
>+ */
>+struct drm_i915_vm_bind_ext_user_fence {
>+#define I915_VM_BIND_EXT_USER_FENCES   1
>+       /** @base: Extension link. See struct i915_user_extension. */
>+       struct i915_user_extension base;
>+
>+       /** @fence_count: Number of elements in the @user_fence_ptr array. */
>+       __u64 fence_count;
>+
>+       /**
>+        * @user_fence_ptr: Pointer to an array of
>+        * struct drm_i915_vm_bind_user_fence of length @fence_count.
>+        */
>+       __u64 user_fence_ptr;
>+};
>+
>+/**
>+ * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array of batch buffer
>+ * gpu virtual addresses.
>+ *
>+ * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), this extension
>+ * must always be appended in the VM_BIND mode and it will be an error to
>+ * append this extension in older non-VM_BIND mode.
>+ */
>+struct drm_i915_gem_execbuffer_ext_batch_addresses {
>+#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES    1
>+       /** @base: Extension link. See struct i915_user_extension. */
>+       struct i915_user_extension base;
>+
>+       /** @count: Number of addresses in the addr array. */
>+       __u32 count;
>+
>+       /** @addr: An array of batch gpu virtual addresses. */
>+       __u64 addr[0];
>+};
>+
>+/**
>+ * struct drm_i915_gem_execbuffer_ext_user_fence - First level batch completion
>+ * signaling extension.
>+ *
>+ * This extension allows user to attach a user fence (@addr, @value pair) to an
>+ * execbuf to be signaled by the command streamer after the completion of first
>+ * level batch, by writing the @value at specified @addr and triggering an
>+ * interrupt.
>+ * User can either poll for this user fence to signal or can also wait on it
>+ * with i915_gem_wait_user_fence ioctl.
>+ * This is very much usefaul for long running contexts where waiting on dma-fence
>+ * by user (like i915_gem_wait ioctl) is not supported.
>+ */
>+struct drm_i915_gem_execbuffer_ext_user_fence {
>+#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE         2
>+       /** @base: Extension link. See struct i915_user_extension. */
>+       struct i915_user_extension base;
>+
>+       /**
>+        * @addr: User/Memory fence qword aligned GPU virtual address.
>+        *
>+        * Address has to be a valid GPU virtual address at the time of
>+        * first level batch completion.
>+        */
>+       __u64 addr;
>+
>+       /**
>+        * @value: User/Memory fence Value to be written to above address
>+        * after first level batch completes.
>+        */
>+       __u64 value;
>+
>+       /** @rsvd: Reserved for future extensions, MBZ */
>+       __u64 rsvd;
>+};
>+
>+/**
>+ * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
>+ * private to the specified VM.
>+ *
>+ * See struct drm_i915_gem_create_ext.
>+ */
>+struct drm_i915_gem_create_ext_vm_private {
>+#define I915_GEM_CREATE_EXT_VM_PRIVATE         2
>+       /** @base: Extension link. See struct i915_user_extension. */
>+       struct i915_user_extension base;
>+
>+       /** @vm_id: Id of the VM to which the object is private */
>+       __u32 vm_id;
>+};
>+
>+/**
>+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>+ *
>+ * User/Memory fence can be woken up either by:
>+ *
>+ * 1. GPU context indicated by @ctx_id, or,
>+ * 2. Kerrnel driver async worker upon I915_UFENCE_WAIT_SOFT.
>+ *    @ctx_id is ignored when this flag is set.
>+ *
>+ * Wakeup condition is,
>+ * ``((*addr & mask) op (value & mask))``
>+ *
>+ * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
>+ */
>+struct drm_i915_gem_wait_user_fence {
>+       /** @extensions: Zero-terminated chain of extensions. */
>+       __u64 extensions;
>+
>+       /** @addr: User/Memory fence address */
>+       __u64 addr;
>+
>+       /** @ctx_id: Id of the Context which will signal the fence. */
>+       __u32 ctx_id;
>+
>+       /** @op: Wakeup condition operator */
>+       __u16 op;
>+#define I915_UFENCE_WAIT_EQ      0
>+#define I915_UFENCE_WAIT_NEQ     1
>+#define I915_UFENCE_WAIT_GT      2
>+#define I915_UFENCE_WAIT_GTE     3
>+#define I915_UFENCE_WAIT_LT      4
>+#define I915_UFENCE_WAIT_LTE     5
>+#define I915_UFENCE_WAIT_BEFORE  6
>+#define I915_UFENCE_WAIT_AFTER   7
>+
>+       /**
>+        * @flags: Supported flags are,
>+        *
>+        * I915_UFENCE_WAIT_SOFT:
>+        *
>+        * To be woken up by i915 driver async worker (not by GPU).
>+        *
>+        * I915_UFENCE_WAIT_ABSTIME:
>+        *
>+        * Wait timeout specified as absolute time.
>+        */
>+       __u16 flags;
>+#define I915_UFENCE_WAIT_SOFT    0x1
>+#define I915_UFENCE_WAIT_ABSTIME 0x2
>+
>+       /** @value: Wakeup value */
>+       __u64 value;
>+
>+       /** @mask: Wakeup mask */
>+       __u64 mask;
>+#define I915_UFENCE_WAIT_U8     0xffu
>+#define I915_UFENCE_WAIT_U16    0xffffu
>+#define I915_UFENCE_WAIT_U32    0xfffffffful
>+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>+
>+       /**
>+        * @timeout: Wait timeout in nanoseconds.
>+        *
>+        * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
>+        * absolute time in nsec.
>+        */
>+       __s64 timeout;
>+};
>
Dave Airlie June 1, 2022, 9:02 a.m. UTC | #3
On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
<niranjana.vishwanathapura@intel.com> wrote:
>
> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
> >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
> >> VM_BIND and related uapi definitions
> >>
> >> v2: Ensure proper kernel-doc formatting with cross references.
> >>     Also add new uapi and documentation as per review comments
> >>     from Daniel.
> >>
> >> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> >> ---
> >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
> >>  1 file changed, 399 insertions(+)
> >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
> >>
> >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
> >> new file mode 100644
> >> index 000000000000..589c0a009107
> >> --- /dev/null
> >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
> >> @@ -0,0 +1,399 @@
> >> +/* SPDX-License-Identifier: MIT */
> >> +/*
> >> + * Copyright © 2022 Intel Corporation
> >> + */
> >> +
> >> +/**
> >> + * DOC: I915_PARAM_HAS_VM_BIND
> >> + *
> >> + * VM_BIND feature availability.
> >> + * See typedef drm_i915_getparam_t param.
> >> + */
> >> +#define I915_PARAM_HAS_VM_BIND               57
> >> +
> >> +/**
> >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
> >> + *
> >> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
> >> + * See struct drm_i915_gem_vm_control flags.
> >> + *
> >> + * A VM in VM_BIND mode will not support the older execbuff mode of binding.
> >> + * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
> >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
> >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
> >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
> >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
> >> + * to pass in the batch buffer addresses.
> >> + *
> >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
> >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
> >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
> >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
> >> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
> >> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
> >> + */
> >
> >From that description, it seems we have:
> >
> >struct drm_i915_gem_execbuffer2 {
> >        __u64 buffers_ptr;              -> must be 0 (new)
> >        __u32 buffer_count;             -> must be 0 (new)
> >        __u32 batch_start_offset;       -> must be 0 (new)
> >        __u32 batch_len;                -> must be 0 (new)
> >        __u32 DR1;                      -> must be 0 (old)
> >        __u32 DR4;                      -> must be 0 (old)
> >        __u32 num_cliprects; (fences)   -> must be 0 since using extensions
> >        __u64 cliprects_ptr; (fences, extensions) -> contains an actual pointer!
> >        __u64 flags;                    -> some flags must be 0 (new)
> >        __u64 rsvd1; (context info)     -> repurposed field (old)
> >        __u64 rsvd2;                    -> unused
> >};
> >
> >Based on that, why can't we just get drm_i915_gem_execbuffer3 instead
> >of adding even more complexity to an already abused interface? While
> >the Vulkan-like extension thing is really nice, I don't think what
> >we're doing here is extending the ioctl usage, we're completely
> >changing how the base struct should be interpreted based on how the VM
> >was created (which is an entirely different ioctl).
> >
> >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
> >already at -6 without these changes. I think after vm_bind we'll need
> >to create a -11 entry just to deal with this ioctl.
> >
>
> The only change here is removing the execlist support for VM_BIND
> mode (other than natual extensions).
> Adding a new execbuffer3 was considered, but I think we need to be careful
> with that as that goes beyond the VM_BIND support, including any future
> requirements (as we don't want an execbuffer4 after VM_BIND).

Why not? it's not like adding extensions here is really that different
than adding new ioctls.

I definitely think this deserves an execbuffer3 without even
considering future requirements. Just to burn down the old
requirements and pointless fields.

Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
older sw on execbuf2 forever.

Dave.
Daniel Vetter June 1, 2022, 9:27 a.m. UTC | #4
On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>
> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
> <niranjana.vishwanathapura@intel.com> wrote:
> >
> > On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
> > >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
> > >> VM_BIND and related uapi definitions
> > >>
> > >> v2: Ensure proper kernel-doc formatting with cross references.
> > >>     Also add new uapi and documentation as per review comments
> > >>     from Daniel.
> > >>
> > >> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> > >> ---
> > >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
> > >>  1 file changed, 399 insertions(+)
> > >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
> > >>
> > >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
> > >> new file mode 100644
> > >> index 000000000000..589c0a009107
> > >> --- /dev/null
> > >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
> > >> @@ -0,0 +1,399 @@
> > >> +/* SPDX-License-Identifier: MIT */
> > >> +/*
> > >> + * Copyright © 2022 Intel Corporation
> > >> + */
> > >> +
> > >> +/**
> > >> + * DOC: I915_PARAM_HAS_VM_BIND
> > >> + *
> > >> + * VM_BIND feature availability.
> > >> + * See typedef drm_i915_getparam_t param.
> > >> + */
> > >> +#define I915_PARAM_HAS_VM_BIND               57
> > >> +
> > >> +/**
> > >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
> > >> + *
> > >> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
> > >> + * See struct drm_i915_gem_vm_control flags.
> > >> + *
> > >> + * A VM in VM_BIND mode will not support the older execbuff mode of binding.
> > >> + * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
> > >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
> > >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
> > >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
> > >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
> > >> + * to pass in the batch buffer addresses.
> > >> + *
> > >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
> > >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
> > >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
> > >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
> > >> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
> > >> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
> > >> + */
> > >
> > >From that description, it seems we have:
> > >
> > >struct drm_i915_gem_execbuffer2 {
> > >        __u64 buffers_ptr;              -> must be 0 (new)
> > >        __u32 buffer_count;             -> must be 0 (new)
> > >        __u32 batch_start_offset;       -> must be 0 (new)
> > >        __u32 batch_len;                -> must be 0 (new)
> > >        __u32 DR1;                      -> must be 0 (old)
> > >        __u32 DR4;                      -> must be 0 (old)
> > >        __u32 num_cliprects; (fences)   -> must be 0 since using extensions
> > >        __u64 cliprects_ptr; (fences, extensions) -> contains an actual pointer!
> > >        __u64 flags;                    -> some flags must be 0 (new)
> > >        __u64 rsvd1; (context info)     -> repurposed field (old)
> > >        __u64 rsvd2;                    -> unused
> > >};
> > >
> > >Based on that, why can't we just get drm_i915_gem_execbuffer3 instead
> > >of adding even more complexity to an already abused interface? While
> > >the Vulkan-like extension thing is really nice, I don't think what
> > >we're doing here is extending the ioctl usage, we're completely
> > >changing how the base struct should be interpreted based on how the VM
> > >was created (which is an entirely different ioctl).
> > >
> > >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
> > >already at -6 without these changes. I think after vm_bind we'll need
> > >to create a -11 entry just to deal with this ioctl.
> > >
> >
> > The only change here is removing the execlist support for VM_BIND
> > mode (other than natual extensions).
> > Adding a new execbuffer3 was considered, but I think we need to be careful
> > with that as that goes beyond the VM_BIND support, including any future
> > requirements (as we don't want an execbuffer4 after VM_BIND).
>
> Why not? it's not like adding extensions here is really that different
> than adding new ioctls.
>
> I definitely think this deserves an execbuffer3 without even
> considering future requirements. Just  to burn down the old
> requirements and pointless fields.
>
> Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
> older sw on execbuf2 for ever.

I guess another point in favour of execbuf3 would be that it's less
midlayer. If we share the entry point then there's quite a few vfuncs
needed to cleanly split out the vm_bind paths from the legacy
reloc/softpin paths.

If we invert this and do execbuf3, then there's the existing ioctl
vfunc, and then we share code (where it even makes sense, probably
request setup/submit need to be shared, anything else is probably
cleaner to just copypaste) with the usual helper approach.

Also that would guarantee that really none of the old concepts like
i915_active on the vma or vma open counts and all that stuff leaks
into the new vm_bind execbuf.

Finally I also think that copypasting would make backporting easier,
or at least more flexible, since it should make it easier to have the
upstream vm_bind co-exist with all the other things we have. Without
huge amounts of conflicts (or at least much less) that pushing a pile
of vfuncs into the existing code would cause.

So maybe we should do this?
-Daniel
Niranjana Vishwanathapura June 2, 2022, 5:08 a.m. UTC | #5
On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>
>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>> <niranjana.vishwanathapura@intel.com> wrote:
>> >
>> > On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>> > >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>> > >> VM_BIND and related uapi definitions
>> > >>
>> > >> v2: Ensure proper kernel-doc formatting with cross references.
>> > >>     Also add new uapi and documentation as per review comments
>> > >>     from Daniel.
>> > >>
>> > >> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
>> > >> ---
>> > >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>> > >>  1 file changed, 399 insertions(+)
>> > >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>> > >>
>> > >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
>> > >> new file mode 100644
>> > >> index 000000000000..589c0a009107
>> > >> --- /dev/null
>> > >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>> > >> @@ -0,0 +1,399 @@
>> > >> +/* SPDX-License-Identifier: MIT */
>> > >> +/*
>> > >> + * Copyright © 2022 Intel Corporation
>> > >> + */
>> > >> +
>> > >> +/**
>> > >> + * DOC: I915_PARAM_HAS_VM_BIND
>> > >> + *
>> > >> + * VM_BIND feature availability.
>> > >> + * See typedef drm_i915_getparam_t param.
>> > >> + */
>> > >> +#define I915_PARAM_HAS_VM_BIND               57
>> > >> +
>> > >> +/**
>> > >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>> > >> + *
>> > >> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>> > >> + * See struct drm_i915_gem_vm_control flags.
>> > >> + *
>> > >> + * A VM in VM_BIND mode will not support the older execbuff mode of binding.
>> > >> + * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
>> > >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>> > >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>> > >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>> > >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
>> > >> + * to pass in the batch buffer addresses.
>> > >> + *
>> > >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>> > >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
>> > >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
>> > >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>> > >> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
>> > >> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
>> > >> + */
>> > >
>> > >From that description, it seems we have:
>> > >
>> > >struct drm_i915_gem_execbuffer2 {
>> > >        __u64 buffers_ptr;              -> must be 0 (new)
>> > >        __u32 buffer_count;             -> must be 0 (new)
>> > >        __u32 batch_start_offset;       -> must be 0 (new)
>> > >        __u32 batch_len;                -> must be 0 (new)
>> > >        __u32 DR1;                      -> must be 0 (old)
>> > >        __u32 DR4;                      -> must be 0 (old)
>> > >        __u32 num_cliprects; (fences)   -> must be 0 since using extensions
>> > >        __u64 cliprects_ptr; (fences, extensions) -> contains an actual pointer!
>> > >        __u64 flags;                    -> some flags must be 0 (new)
>> > >        __u64 rsvd1; (context info)     -> repurposed field (old)
>> > >        __u64 rsvd2;                    -> unused
>> > >};
>> > >
>> > >Based on that, why can't we just get drm_i915_gem_execbuffer3 instead
>> > >of adding even more complexity to an already abused interface? While
>> > >the Vulkan-like extension thing is really nice, I don't think what
>> > >we're doing here is extending the ioctl usage, we're completely
>> > >changing how the base struct should be interpreted based on how the VM
>> > >was created (which is an entirely different ioctl).
>> > >
>> > >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>> > >already at -6 without these changes. I think after vm_bind we'll need
>> > >to create a -11 entry just to deal with this ioctl.
>> > >
>> >
>> > The only change here is removing the execlist support for VM_BIND
>> > mode (other than natual extensions).
>> > Adding a new execbuffer3 was considered, but I think we need to be careful
>> > with that as that goes beyond the VM_BIND support, including any future
>> > requirements (as we don't want an execbuffer4 after VM_BIND).
>>
>> Why not? it's not like adding extensions here is really that different
>> than adding new ioctls.
>>
>> I definitely think this deserves an execbuffer3 without even
>> considering future requirements. Just  to burn down the old
>> requirements and pointless fields.
>>
>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
>> older sw on execbuf2 for ever.
>
>I guess another point in favour of execbuf3 would be that it's less
>midlayer. If we share the entry point then there's quite a few vfuncs
>needed to cleanly split out the vm_bind paths from the legacy
>reloc/softping paths.
>
>If we invert this and do execbuf3, then there's the existing ioctl
>vfunc, and then we share code (where it even makes sense, probably
>request setup/submit need to be shared, anything else is probably
>cleaner to just copypaste) with the usual helper approach.
>
>Also that would guarantee that really none of the old concepts like
>i915_active on the vma or vma open counts and all that stuff leaks
>into the new vm_bind execbuf.
>
>Finally I also think that copypasting would make backporting easier,
>or at least more flexible, since it should make it easier to have the
>upstream vm_bind co-exist with all the other things we have. Without
>huge amounts of conflicts (or at least much less) that pushing a pile
>of vfuncs into the existing code would cause.
>
>So maybe we should do this?

Thanks Dave, Daniel.
There are a few things that will be common between execbuf2 and
execbuf3, like request setup/submit (as you said), fence handling 
(timeline fences, fence array, composite fences), engine selection,
etc. Also, many of the 'flags' will be there in execbuf3 also (but
bit position will differ).
But I guess these should be fine as the suggestion here is to
copy-paste the execbuff code and having a shared code where possible.
Besides, we can stop supporting some older features in execbuff3
(like fence array in favor of newer timeline fences), which will
further reduce common code.

Ok, I will update this series by adding execbuf3 and send out soon.

Niranjana

>-Daniel
>-- 
>Daniel Vetter
>Software Engineer, Intel Corporation
>http://blog.ffwll.ch
Niranjana Vishwanathapura June 3, 2022, 6:53 a.m. UTC | #6
On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura wrote:
>On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>
>>>On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>><niranjana.vishwanathapura@intel.com> wrote:
>>>>
>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>> >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>>>> >> VM_BIND and related uapi definitions
>>>> >>
>>>> >> v2: Ensure proper kernel-doc formatting with cross references.
>>>> >>     Also add new uapi and documentation as per review comments
>>>> >>     from Daniel.
>>>> >>
>>>> >> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
>>>> >> ---
>>>> >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>>>> >>  1 file changed, 399 insertions(+)
>>>> >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>> >>
>>>> >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
>>>> >> new file mode 100644
>>>> >> index 000000000000..589c0a009107
>>>> >> --- /dev/null
>>>> >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>> >> @@ -0,0 +1,399 @@
>>>> >> +/* SPDX-License-Identifier: MIT */
>>>> >> +/*
>>>> >> + * Copyright © 2022 Intel Corporation
>>>> >> + */
>>>> >> +
>>>> >> +/**
>>>> >> + * DOC: I915_PARAM_HAS_VM_BIND
>>>> >> + *
>>>> >> + * VM_BIND feature availability.
>>>> >> + * See typedef drm_i915_getparam_t param.
>>>> >> + */
>>>> >> +#define I915_PARAM_HAS_VM_BIND               57
>>>> >> +
>>>> >> +/**
>>>> >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>> >> + *
>>>> >> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>> >> + * See struct drm_i915_gem_vm_control flags.
>>>> >> + *
>>>> >> + * A VM in VM_BIND mode will not support the older execbuff mode of binding.
>>>> >> + * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
>>>> >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>> >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>> >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>> >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
>>>> >> + * to pass in the batch buffer addresses.
>>>> >> + *
>>>> >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>> >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
>>>> >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
>>>> >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>> >> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
>>>> >> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
>>>> >> + */
>>>> >
>>>> >From that description, it seems we have:
>>>> >
>>>> >struct drm_i915_gem_execbuffer2 {
>>>> >        __u64 buffers_ptr;              -> must be 0 (new)
>>>> >        __u32 buffer_count;             -> must be 0 (new)
>>>> >        __u32 batch_start_offset;       -> must be 0 (new)
>>>> >        __u32 batch_len;                -> must be 0 (new)
>>>> >        __u32 DR1;                      -> must be 0 (old)
>>>> >        __u32 DR4;                      -> must be 0 (old)
>>>> >        __u32 num_cliprects; (fences)   -> must be 0 since using extensions
>>>> >        __u64 cliprects_ptr; (fences, extensions) -> contains an actual pointer!
>>>> >        __u64 flags;                    -> some flags must be 0 (new)
>>>> >        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>> >        __u64 rsvd2;                    -> unused
>>>> >};
>>>> >
>>>> >Based on that, why can't we just get drm_i915_gem_execbuffer3 instead
>>>> >of adding even more complexity to an already abused interface? While
>>>> >the Vulkan-like extension thing is really nice, I don't think what
>>>> >we're doing here is extending the ioctl usage, we're completely
>>>> >changing how the base struct should be interpreted based on how the VM
>>>> >was created (which is an entirely different ioctl).
>>>> >
>>>> >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>>>> >already at -6 without these changes. I think after vm_bind we'll need
>>>> >to create a -11 entry just to deal with this ioctl.
>>>> >
>>>>
>>>> The only change here is removing the execlist support for VM_BIND
>>>> mode (other than natual extensions).
>>>> Adding a new execbuffer3 was considered, but I think we need to be careful
>>>> with that as that goes beyond the VM_BIND support, including any future
>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>
>>>Why not? it's not like adding extensions here is really that different
>>>than adding new ioctls.
>>>
>>>I definitely think this deserves an execbuffer3 without even
>>>considering future requirements. Just  to burn down the old
>>>requirements and pointless fields.
>>>
>>>Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
>>>older sw on execbuf2 for ever.
>>
>>I guess another point in favour of execbuf3 would be that it's less
>>midlayer. If we share the entry point then there's quite a few vfuncs
>>needed to cleanly split out the vm_bind paths from the legacy
>>reloc/softping paths.
>>
>>If we invert this and do execbuf3, then there's the existing ioctl
>>vfunc, and then we share code (where it even makes sense, probably
>>request setup/submit need to be shared, anything else is probably
>>cleaner to just copypaste) with the usual helper approach.
>>
>>Also that would guarantee that really none of the old concepts like
>>i915_active on the vma or vma open counts and all that stuff leaks
>>into the new vm_bind execbuf.
>>
>>Finally I also think that copypasting would make backporting easier,
>>or at least more flexible, since it should make it easier to have the
>>upstream vm_bind co-exist with all the other things we have. Without
>>huge amounts of conflicts (or at least much less) that pushing a pile
>>of vfuncs into the existing code would cause.
>>
>>So maybe we should do this?
>
>Thanks Dave, Daniel.
>There are a few things that will be common between execbuf2 and
>execbuf3, like request setup/submit (as you said), fence handling 
>(timeline fences, fence array, composite fences), engine selection,
>etc. Also, many of the 'flags' will be there in execbuf3 also (but
>bit position will differ).
>But I guess these should be fine as the suggestion here is to
>copy-paste the execbuff code and having a shared code where possible.
>Besides, we can stop supporting some older feature in execbuff3
>(like fence array in favor of newer timeline fences), which will
>further reduce common code.
>
>Ok, I will update this series by adding execbuf3 and send out soon.
>

Does this sound reasonable?

struct drm_i915_gem_execbuffer3 {
        __u32 ctx_id;		/* previously execbuffer2.rsvd1 */

        __u32 batch_count;
        __u64 batch_addr_ptr;	/* Pointer to an array of batch gpu virtual addresses */

        __u64 flags;
#define I915_EXEC3_RING_MASK              (0x3f)
#define I915_EXEC3_DEFAULT                (0<<0)
#define I915_EXEC3_RENDER                 (1<<0)
#define I915_EXEC3_BSD                    (2<<0)
#define I915_EXEC3_BLT                    (3<<0)
#define I915_EXEC3_VEBOX                  (4<<0)

#define I915_EXEC3_SECURE               (1<<6)
#define I915_EXEC3_IS_PINNED            (1<<7)

#define I915_EXEC3_BSD_SHIFT     (8)
#define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
#define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
#define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
#define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)

#define I915_EXEC3_FENCE_IN             (1<<10)
#define I915_EXEC3_FENCE_OUT            (1<<11)
#define I915_EXEC3_FENCE_SUBMIT         (1<<12)

        __u64 in_out_fence;		/* previously execbuffer2.rsvd2 */

        __u64 extensions;		/* currently only for DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
};

With this, user can pass in batch addresses and count directly,
instead of as an extension (as this rfc series was proposing).

I have removed many of the flags which were either legacy or not
applicable to VM_BIND mode.
I have also removed fence array support (execbuffer2.cliprects_ptr)
as we have timeline fence array support. Is that fine?
Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?

Anything else that needs to be added or removed?

Niranjana

>Niranjana
>
>>-Daniel
>>-- 
>>Daniel Vetter
>>Software Engineer, Intel Corporation
>>http://blog.ffwll.ch
Tvrtko Ursulin June 7, 2022, 10:27 a.m. UTC | #7
On 17/05/2022 19:32, Niranjana Vishwanathapura wrote:
> VM_BIND and related uapi definitions
> 
> v2: Ensure proper kernel-doc formatting with cross references.
>      Also add new uapi and documentation as per review comments
>      from Daniel.
> 
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> ---
>   Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>   1 file changed, 399 insertions(+)
>   create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
> 
> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
> new file mode 100644
> index 000000000000..589c0a009107
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
> @@ -0,0 +1,399 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2022 Intel Corporation
> + */
> +
> +/**
> + * DOC: I915_PARAM_HAS_VM_BIND
> + *
> + * VM_BIND feature availability.
> + * See typedef drm_i915_getparam_t param.
> + */
> +#define I915_PARAM_HAS_VM_BIND		57
> +
> +/**
> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
> + *
> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
> + * See struct drm_i915_gem_vm_control flags.
> + *
> + * A VM in VM_BIND mode will not support the older execbuff mode of binding.
> + * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
> + * to pass in the batch buffer addresses.
> + *
> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
> + */
> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
> +
> +/**
> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
> + *
> + * Flag to declare context as long running.
> + * See struct drm_i915_gem_context_create_ext flags.
> + *
> + * Usage of dma-fence expects that they complete in reasonable amount of time.
> + * Compute on the other hand can be long running. Hence it is not appropriate
> + * for compute contexts to export request completion dma-fence to user.
> + * The dma-fence usage will be limited to in-kernel consumption only.
> + * Compute contexts need to use user/memory fence.
> + *
> + * So, long running contexts do not support output fences. Hence,
> + * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) are expected
> + * to be not used.
> + *
> + * DRM_I915_GEM_WAIT ioctl call is also not supported for objects mapped
> + * to long running contexts.
> + */
> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
> +
> +/* VM_BIND related ioctls */
> +#define DRM_I915_GEM_VM_BIND		0x3d
> +#define DRM_I915_GEM_VM_UNBIND		0x3e
> +#define DRM_I915_GEM_WAIT_USER_FENCE	0x3f
> +
> +#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
> +#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
> +
> +/**
> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
> + *
> + * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
> + * virtual address (VA) range to the section of an object that should be bound
> + * in the device page table of the specified address space (VM).
> + * The VA range specified must be unique (ie., not currently bound) and can
> + * be mapped to whole object or a section of the object (partial binding).
> + * Multiple VA mappings can be created to the same section of the object
> + * (aliasing).
> + */
> +struct drm_i915_gem_vm_bind {
> +	/** @vm_id: VM (address space) id to bind */
> +	__u32 vm_id;
> +
> +	/** @handle: Object handle */
> +	__u32 handle;
> +
> +	/** @start: Virtual Address start to bind */
> +	__u64 start;
> +
> +	/** @offset: Offset in object to bind */
> +	__u64 offset;
> +
> +	/** @length: Length of mapping to bind */
> +	__u64 length;

Does it support, or should it, equivalent of EXEC_OBJECT_PAD_TO_SIZE? Or 
if not userspace is expected to map the remainder of the space to a 
dummy object? In which case would there be any alignment/padding issues 
preventing the two bind to be placed next to each other?

I ask because someone from the compute side asked me about a problem 
with their strategy of dealing with overfetch and I suggested pad to size.

Regards,

Tvrtko

> +
> +	/**
> +	 * @flags: Supported flags are,
> +	 *
> +	 * I915_GEM_VM_BIND_READONLY:
> +	 * Mapping is read-only.
> +	 *
> +	 * I915_GEM_VM_BIND_CAPTURE:
> +	 * Capture this mapping in the dump upon GPU error.
> +	 */
> +	__u64 flags;
> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
> +
> +	/** @extensions: 0-terminated chain of extensions for this mapping. */
> +	__u64 extensions;
> +};
> +
> +/**
> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
> + *
> + * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
> + * address (VA) range that should be unbound from the device page table of the
> + * specified address space (VM). The specified VA range must match one of the
> + * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
> + * completion.
> + */
> +struct drm_i915_gem_vm_unbind {
> +	/** @vm_id: VM (address space) id to bind */
> +	__u32 vm_id;
> +
> +	/** @rsvd: Reserved for future use; must be zero. */
> +	__u32 rsvd;
> +
> +	/** @start: Virtual Address start to unbind */
> +	__u64 start;
> +
> +	/** @length: Length of mapping to unbind */
> +	__u64 length;
> +
> +	/** @flags: reserved for future usage, currently MBZ */
> +	__u64 flags;
> +
> +	/** @extensions: 0-terminated chain of extensions for this mapping. */
> +	__u64 extensions;
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
> + * or the vm_unbind work.
> + *
> + * The vm_bind or vm_unbind async worker will wait for input fence to signal
> + * before starting the binding or unbinding.
> + *
> + * The vm_bind or vm_unbind async worker will signal the returned output fence
> + * after the completion of binding or unbinding.
> + */
> +struct drm_i915_vm_bind_fence {
> +	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
> +	__u32 handle;
> +
> +	/**
> +	 * @flags: Supported flags are,
> +	 *
> +	 * I915_VM_BIND_FENCE_WAIT:
> +	 * Wait for the input fence before binding/unbinding
> +	 *
> +	 * I915_VM_BIND_FENCE_SIGNAL:
> +	 * Return bind/unbind completion fence as output
> +	 */
> +	__u32 flags;
> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
> + * and vm_unbind.
> + *
> + * This structure describes an array of timeline drm_syncobj and associated
> + * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
> + * can be input or output fences (See struct drm_i915_vm_bind_fence).
> + */
> +struct drm_i915_vm_bind_ext_timeline_fences {
> +#define I915_VM_BIND_EXT_timeline_FENCES	0
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/**
> +	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
> +	 * arrays.
> +	 */
> +	__u64 fence_count;
> +
> +	/**
> +	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
> +	 * of length @fence_count.
> +	 */
> +	__u64 handles_ptr;
> +
> +	/**
> +	 * @values_ptr: Pointer to an array of u64 values of length
> +	 * @fence_count.
> +	 * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
> +	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
> +	 * binary one.
> +	 */
> +	__u64 values_ptr;
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
> + * vm_bind or the vm_unbind work.
> + *
> + * The vm_bind or vm_unbind async worker will wait for the input fence (value at
> + * @addr to become equal to @val) before starting the binding or unbinding.
> + *
> + * The vm_bind or vm_unbind async worker will signal the output fence after
> + * the completion of binding or unbinding by writing @val to memory location at
> + * @addr
> + */
> +struct drm_i915_vm_bind_user_fence {
> +	/** @addr: User/Memory fence qword aligned process virtual address */
> +	__u64 addr;
> +
> +	/** @val: User/Memory fence value to be written after bind completion */
> +	__u64 val;
> +
> +	/**
> +	 * @flags: Supported flags are,
> +	 *
> +	 * I915_VM_BIND_USER_FENCE_WAIT:
> +	 * Wait for the input fence before binding/unbinding
> +	 *
> +	 * I915_VM_BIND_USER_FENCE_SIGNAL:
> +	 * Return bind/unbind completion fence as output
> +	 */
> +	__u32 flags;
> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
> +	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
> + * and vm_unbind.
> + *
> + * These user fences can be input or output fences
> + * (See struct drm_i915_vm_bind_user_fence).
> + */
> +struct drm_i915_vm_bind_ext_user_fence {
> +#define I915_VM_BIND_EXT_USER_FENCES	1
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/** @fence_count: Number of elements in the @user_fence_ptr array. */
> +	__u64 fence_count;
> +
> +	/**
> +	 * @user_fence_ptr: Pointer to an array of
> +	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
> +	 */
> +	__u64 user_fence_ptr;
> +};
> +
> +/**
> + * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array of batch buffer
> + * gpu virtual addresses.
> + *
> + * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), this extension
> + * must always be appended in the VM_BIND mode and it will be an error to
> + * append this extension in older non-VM_BIND mode.
> + */
> +struct drm_i915_gem_execbuffer_ext_batch_addresses {
> +#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES	1
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/** @count: Number of addresses in the addr array. */
> +	__u32 count;
> +
> +	/** @addr: An array of batch gpu virtual addresses. */
> +	__u64 addr[0];
> +};
> +
> +/**
> + * struct drm_i915_gem_execbuffer_ext_user_fence - First level batch completion
> + * signaling extension.
> + *
> + * This extension allows user to attach a user fence (@addr, @value pair) to an
> + * execbuf to be signaled by the command streamer after the completion of first
> + * level batch, by writing the @value at specified @addr and triggering an
> + * interrupt.
> + * User can either poll for this user fence to signal or can also wait on it
> + * with i915_gem_wait_user_fence ioctl.
> + * This is very much useful for long running contexts where waiting on dma-fence
> + * by user (like i915_gem_wait ioctl) is not supported.
> + */
> +struct drm_i915_gem_execbuffer_ext_user_fence {
> +#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE		2
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/**
> +	 * @addr: User/Memory fence qword aligned GPU virtual address.
> +	 *
> +	 * Address has to be a valid GPU virtual address at the time of
> +	 * first level batch completion.
> +	 */
> +	__u64 addr;
> +
> +	/**
> +	 * @value: User/Memory fence Value to be written to above address
> +	 * after first level batch completes.
> +	 */
> +	__u64 value;
> +
> +	/** @rsvd: Reserved for future extensions, MBZ */
> +	__u64 rsvd;
> +};
> +
> +/**
> + * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
> + * private to the specified VM.
> + *
> + * See struct drm_i915_gem_create_ext.
> + */
> +struct drm_i915_gem_create_ext_vm_private {
> +#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/** @vm_id: Id of the VM to which the object is private */
> +	__u32 vm_id;
> +};
> +
> +/**
> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
> + *
> + * User/Memory fence can be woken up either by:
> + *
> + * 1. GPU context indicated by @ctx_id, or,
> + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
> + *    @ctx_id is ignored when this flag is set.
> + *
> + * Wakeup condition is,
> + * ``((*addr & mask) op (value & mask))``
> + *
> + * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
> + */
> +struct drm_i915_gem_wait_user_fence {
> +	/** @extensions: Zero-terminated chain of extensions. */
> +	__u64 extensions;
> +
> +	/** @addr: User/Memory fence address */
> +	__u64 addr;
> +
> +	/** @ctx_id: Id of the Context which will signal the fence. */
> +	__u32 ctx_id;
> +
> +	/** @op: Wakeup condition operator */
> +	__u16 op;
> +#define I915_UFENCE_WAIT_EQ      0
> +#define I915_UFENCE_WAIT_NEQ     1
> +#define I915_UFENCE_WAIT_GT      2
> +#define I915_UFENCE_WAIT_GTE     3
> +#define I915_UFENCE_WAIT_LT      4
> +#define I915_UFENCE_WAIT_LTE     5
> +#define I915_UFENCE_WAIT_BEFORE  6
> +#define I915_UFENCE_WAIT_AFTER   7
> +
> +	/**
> +	 * @flags: Supported flags are,
> +	 *
> +	 * I915_UFENCE_WAIT_SOFT:
> +	 *
> +	 * To be woken up by i915 driver async worker (not by GPU).
> +	 *
> +	 * I915_UFENCE_WAIT_ABSTIME:
> +	 *
> +	 * Wait timeout specified as absolute time.
> +	 */
> +	__u16 flags;
> +#define I915_UFENCE_WAIT_SOFT    0x1
> +#define I915_UFENCE_WAIT_ABSTIME 0x2
> +
> +	/** @value: Wakeup value */
> +	__u64 value;
> +
> +	/** @mask: Wakeup mask */
> +	__u64 mask;
> +#define I915_UFENCE_WAIT_U8     0xffu
> +#define I915_UFENCE_WAIT_U16    0xffffu
> +#define I915_UFENCE_WAIT_U32    0xfffffffful
> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
> +
> +	/**
> +	 * @timeout: Wait timeout in nanoseconds.
> +	 *
> +	 * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
> +	 * absolute time in nsec.
> +	 */
> +	__s64 timeout;
> +};
Tvrtko Ursulin June 7, 2022, 10:42 a.m. UTC | #8
On 03/06/2022 07:53, Niranjana Vishwanathapura wrote:
> On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura wrote:
>> On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>> On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>
>>>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>> <niranjana.vishwanathapura@intel.com> wrote:
>>>>>
>>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>> >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>>>>> >> VM_BIND and related uapi definitions
>>>>> >>
>>>>> >> v2: Ensure proper kernel-doc formatting with cross references.
>>>>> >>     Also add new uapi and documentation as per review comments
>>>>> >>     from Daniel.
>>>>> >>
>>>>> >> Signed-off-by: Niranjana Vishwanathapura 
>>>>> <niranjana.vishwanathapura@intel.com>
>>>>> >> ---
>>>>> >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>> +++++++++++++++++++++++++++
>>>>> >>  1 file changed, 399 insertions(+)
>>>>> >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>> >>
>>>>> >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>> >> new file mode 100644
>>>>> >> index 000000000000..589c0a009107
>>>>> >> --- /dev/null
>>>>> >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>> >> @@ -0,0 +1,399 @@
>>>>> >> +/* SPDX-License-Identifier: MIT */
>>>>> >> +/*
>>>>> >> + * Copyright © 2022 Intel Corporation
>>>>> >> + */
>>>>> >> +
>>>>> >> +/**
>>>>> >> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>> >> + *
>>>>> >> + * VM_BIND feature availability.
>>>>> >> + * See typedef drm_i915_getparam_t param.
>>>>> >> + */
>>>>> >> +#define I915_PARAM_HAS_VM_BIND               57
>>>>> >> +
>>>>> >> +/**
>>>>> >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>> >> + *
>>>>> >> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>> >> + * See struct drm_i915_gem_vm_control flags.
>>>>> >> + *
>>>>> >> + * A VM in VM_BIND mode will not support the older execbuff 
>>>>> mode of binding.
>>>>> >> + * In VM_BIND mode, execbuff ioctl will not accept any execlist 
>>>>> (ie., the
>>>>> >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>> >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>> >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>> >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must 
>>>>> be provided
>>>>> >> + * to pass in the batch buffer addresses.
>>>>> >> + *
>>>>> >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>> >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags 
>>>>> must be 0
>>>>> >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>> must always be
>>>>> >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>> >> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>> batch_len fields
>>>>> >> + * of struct drm_i915_gem_execbuffer2 are also not used and 
>>>>> must be 0.
>>>>> >> + */
>>>>> >
>>>>> >From that description, it seems we have:
>>>>> >
>>>>> >struct drm_i915_gem_execbuffer2 {
>>>>> >        __u64 buffers_ptr;              -> must be 0 (new)
>>>>> >        __u32 buffer_count;             -> must be 0 (new)
>>>>> >        __u32 batch_start_offset;       -> must be 0 (new)
>>>>> >        __u32 batch_len;                -> must be 0 (new)
>>>>> >        __u32 DR1;                      -> must be 0 (old)
>>>>> >        __u32 DR4;                      -> must be 0 (old)
>>>>> >        __u32 num_cliprects; (fences)   -> must be 0 since using 
>>>>> extensions
>>>>> >        __u64 cliprects_ptr; (fences, extensions) -> contains an 
>>>>> actual pointer!
>>>>> >        __u64 flags;                    -> some flags must be 0 (new)
>>>>> >        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>> >        __u64 rsvd2;                    -> unused
>>>>> >};
>>>>> >
>>>>> >Based on that, why can't we just get drm_i915_gem_execbuffer3 instead
>>>>> >of adding even more complexity to an already abused interface? While
>>>>> >the Vulkan-like extension thing is really nice, I don't think what
>>>>> >we're doing here is extending the ioctl usage, we're completely
>>>>> >changing how the base struct should be interpreted based on how 
>>>>> the VM
>>>>> >was created (which is an entirely different ioctl).
>>>>> >
>>>>> >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>>>>> >already at -6 without these changes. I think after vm_bind we'll need
>>>>> >to create a -11 entry just to deal with this ioctl.
>>>>> >
>>>>>
>>>>> The only change here is removing the execlist support for VM_BIND
>>>>> mode (other than natual extensions).
>>>>> Adding a new execbuffer3 was considered, but I think we need to be 
>>>>> careful
>>>>> with that as that goes beyond the VM_BIND support, including any 
>>>>> future
>>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>
>>>> Why not? it's not like adding extensions here is really that different
>>>> than adding new ioctls.
>>>>
>>>> I definitely think this deserves an execbuffer3 without even
>>>> considering future requirements. Just  to burn down the old
>>>> requirements and pointless fields.
>>>>
>>>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
>>>> older sw on execbuf2 for ever.
>>>
>>> I guess another point in favour of execbuf3 would be that it's less
>>> midlayer. If we share the entry point then there's quite a few vfuncs
>>> needed to cleanly split out the vm_bind paths from the legacy
>>> reloc/softping paths.
>>>
>>> If we invert this and do execbuf3, then there's the existing ioctl
>>> vfunc, and then we share code (where it even makes sense, probably
>>> request setup/submit need to be shared, anything else is probably
>>> cleaner to just copypaste) with the usual helper approach.
>>>
>>> Also that would guarantee that really none of the old concepts like
>>> i915_active on the vma or vma open counts and all that stuff leaks
>>> into the new vm_bind execbuf.
>>>
>>> Finally I also think that copypasting would make backporting easier,
>>> or at least more flexible, since it should make it easier to have the
>>> upstream vm_bind co-exist with all the other things we have. Without
>>> huge amounts of conflicts (or at least much less) that pushing a pile
>>> of vfuncs into the existing code would cause.
>>>
>>> So maybe we should do this?
>>
>> Thanks Dave, Daniel.
>> There are a few things that will be common between execbuf2 and
>> execbuf3, like request setup/submit (as you said), fence handling 
>> (timeline fences, fence array, composite fences), engine selection,
>> etc. Also, many of the 'flags' will be there in execbuf3 also (but
>> bit position will differ).
>> But I guess these should be fine as the suggestion here is to
>> copy-paste the execbuff code and having a shared code where possible.
>> Besides, we can stop supporting some older feature in execbuff3
>> (like fence array in favor of newer timeline fences), which will
>> further reduce common code.
>>
>> Ok, I will update this series by adding execbuf3 and send out soon.
>>
> 
> Does this sound reasonable?
> 
> struct drm_i915_gem_execbuffer3 {
>         __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
> 
>         __u32 batch_count;
>         __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu 
> virtual addresses */

Casual stumble upon..

Alternatively you could embed N pointers to make life a bit easier for 
both userspace and kernel side. Yes, but then "N batch buffers should be 
enough for everyone" problem.. :)

> 
>         __u64 flags;
> #define I915_EXEC3_RING_MASK              (0x3f)
> #define I915_EXEC3_DEFAULT                (0<<0)
> #define I915_EXEC3_RENDER                 (1<<0)
> #define I915_EXEC3_BSD                    (2<<0)
> #define I915_EXEC3_BLT                    (3<<0)
> #define I915_EXEC3_VEBOX                  (4<<0)
> 
> #define I915_EXEC3_SECURE               (1<<6)
> #define I915_EXEC3_IS_PINNED            (1<<7)
> 
> #define I915_EXEC3_BSD_SHIFT     (8)
> #define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
> #define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
> #define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
> #define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)

I'd suggest legacy engine selection is unwanted, especially not with the 
convoluted BSD1/2 flags. Can we just require context with engine map and 
index? Or if default context has to be supported then I'd suggest 
...class_instance for that mode.

> #define I915_EXEC3_FENCE_IN             (1<<10)
> #define I915_EXEC3_FENCE_OUT            (1<<11)
> #define I915_EXEC3_FENCE_SUBMIT         (1<<12)

People are likely to object to submit fence since generic mechanism to 
align submissions was rejected.

> 
>         __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */

New ioctl you can afford dedicated fields.

In any case I suggest you involve UMD folks in designing it.

Regards,

Tvrtko

> 
>         __u64 extensions;        /* currently only for 
> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
> };
> 
> With this, user can pass in batch addresses and count directly,
> instead of as an extension (as this rfc series was proposing).
> 
> I have removed many of the flags which were either legacy or not
> applicable to BM_BIND mode.
> I have also removed fence array support (execbuffer2.cliprects_ptr)
> as we have timeline fence array support. Is that fine?
> Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
> 
> Any thing else needs to be added or removed?
> 
> Niranjana
> 
>> Niranjana
>>
>>> -Daniel
>>> -- 
>>> Daniel Vetter
>>> Software Engineer, Intel Corporation
>>> http://blog.ffwll.ch
Niranjana Vishwanathapura June 7, 2022, 7:37 p.m. UTC | #9
On Tue, Jun 07, 2022 at 11:27:14AM +0100, Tvrtko Ursulin wrote:
>
>On 17/05/2022 19:32, Niranjana Vishwanathapura wrote:
>>VM_BIND and related uapi definitions
>>
>>v2: Ensure proper kernel-doc formatting with cross references.
>>     Also add new uapi and documentation as per review comments
>>     from Daniel.
>>
>>Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
>>---
>>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>>  1 file changed, 399 insertions(+)
>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>
>>diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
>>new file mode 100644
>>index 000000000000..589c0a009107
>>--- /dev/null
>>+++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>@@ -0,0 +1,399 @@
>>+/* SPDX-License-Identifier: MIT */
>>+/*
>>+ * Copyright © 2022 Intel Corporation
>>+ */
>>+
>>+/**
>>+ * DOC: I915_PARAM_HAS_VM_BIND
>>+ *
>>+ * VM_BIND feature availability.
>>+ * See typedef drm_i915_getparam_t param.
>>+ */
>>+#define I915_PARAM_HAS_VM_BIND		57
>>+
>>+/**
>>+ * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>+ *
>>+ * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>+ * See struct drm_i915_gem_vm_control flags.
>>+ *
>>+ * A VM in VM_BIND mode will not support the older execbuff mode of binding.
>>+ * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
>>+ * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>+ * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>+ * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>+ * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
>>+ * to pass in the batch buffer addresses.
>>+ *
>>+ * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>+ * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
>>+ * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
>>+ * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>+ * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
>>+ * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
>>+ */
>>+#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
>>+
>>+/**
>>+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>+ *
>>+ * Flag to declare context as long running.
>>+ * See struct drm_i915_gem_context_create_ext flags.
>>+ *
>>+ * Usage of dma-fence expects that they complete in reasonable amount of time.
>>+ * Compute on the other hand can be long running. Hence it is not appropriate
>>+ * for compute contexts to export request completion dma-fence to user.
>>+ * The dma-fence usage will be limited to in-kernel consumption only.
>>+ * Compute contexts need to use user/memory fence.
>>+ *
>>+ * So, long running contexts do not support output fences. Hence,
>>+ * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
>>+ * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) are expected
>>+ * to be not used.
>>+ *
>>+ * DRM_I915_GEM_WAIT ioctl call is also not supported for objects mapped
>>+ * to long running contexts.
>>+ */
>>+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>+
>>+/* VM_BIND related ioctls */
>>+#define DRM_I915_GEM_VM_BIND		0x3d
>>+#define DRM_I915_GEM_VM_UNBIND		0x3e
>>+#define DRM_I915_GEM_WAIT_USER_FENCE	0x3f
>>+
>>+#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>+#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
>>+
>>+/**
>>+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>+ *
>>+ * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
>>+ * virtual address (VA) range to the section of an object that should be bound
>>+ * in the device page table of the specified address space (VM).
>>+ * The VA range specified must be unique (ie., not currently bound) and can
>>+ * be mapped to whole object or a section of the object (partial binding).
>>+ * Multiple VA mappings can be created to the same section of the object
>>+ * (aliasing).
>>+ */
>>+struct drm_i915_gem_vm_bind {
>>+	/** @vm_id: VM (address space) id to bind */
>>+	__u32 vm_id;
>>+
>>+	/** @handle: Object handle */
>>+	__u32 handle;
>>+
>>+	/** @start: Virtual Address start to bind */
>>+	__u64 start;
>>+
>>+	/** @offset: Offset in object to bind */
>>+	__u64 offset;
>>+
>>+	/** @length: Length of mapping to bind */
>>+	__u64 length;
>
>Does it support, or should it, equivalent of EXEC_OBJECT_PAD_TO_SIZE? 
>Or if not userspace is expected to map the remainder of the space to a 
>dummy object? In which case would there be any alignment/padding 
>issues preventing the two bind to be placed next to each other?
>
>I ask because someone from the compute side asked me about a problem 
>with their strategy of dealing with overfetch and I suggested pad to 
>size.
>

Thanks Tvrtko,
I think we shouldn't be needing it. As with VM_BIND VA assignment
is completely pushed to userspace, no padding should be necessary
once the 'start' and 'size' alignment conditions are met.

I will add some documentation on alignment requirement here.
Generally, 'start' and 'size' should be 4K aligned. But, I think
when we have 64K lmem page sizes (dg2 and xehpsdv), they need to
be 64K aligned.

Niranjana

>Regards,
>
>Tvrtko
>
>>+
>>+	/**
>>+	 * @flags: Supported flags are,
>>+	 *
>>+	 * I915_GEM_VM_BIND_READONLY:
>>+	 * Mapping is read-only.
>>+	 *
>>+	 * I915_GEM_VM_BIND_CAPTURE:
>>+	 * Capture this mapping in the dump upon GPU error.
>>+	 */
>>+	__u64 flags;
>>+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>+
>>+	/** @extensions: 0-terminated chain of extensions for this mapping. */
>>+	__u64 extensions;
>>+};
>>+
>>+/**
>>+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>+ *
>>+ * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
>>+ * address (VA) range that should be unbound from the device page table of the
>>+ * specified address space (VM). The specified VA range must match one of the
>>+ * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>>+ * completion.
>>+ */
>>+struct drm_i915_gem_vm_unbind {
>>+	/** @vm_id: VM (address space) id to bind */
>>+	__u32 vm_id;
>>+
>>+	/** @rsvd: Reserved for future use; must be zero. */
>>+	__u32 rsvd;
>>+
>>+	/** @start: Virtual Address start to unbind */
>>+	__u64 start;
>>+
>>+	/** @length: Length of mapping to unbind */
>>+	__u64 length;
>>+
>>+	/** @flags: reserved for future usage, currently MBZ */
>>+	__u64 flags;
>>+
>>+	/** @extensions: 0-terminated chain of extensions for this mapping. */
>>+	__u64 extensions;
>>+};
>>+
>>+/**
>>+ * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
>>+ * or the vm_unbind work.
>>+ *
>>+ * The vm_bind or vm_unbind aync worker will wait for input fence to signal
>>+ * before starting the binding or unbinding.
>>+ *
>>+ * The vm_bind or vm_unbind async worker will signal the returned output fence
>>+ * after the completion of binding or unbinding.
>>+ */
>>+struct drm_i915_vm_bind_fence {
>>+	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
>>+	__u32 handle;
>>+
>>+	/**
>>+	 * @flags: Supported flags are,
>>+	 *
>>+	 * I915_VM_BIND_FENCE_WAIT:
>>+	 * Wait for the input fence before binding/unbinding
>>+	 *
>>+	 * I915_VM_BIND_FENCE_SIGNAL:
>>+	 * Return bind/unbind completion fence as output
>>+	 */
>>+	__u32 flags;
>>+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>+};
>>+
>>+/**
>>+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
>>+ * and vm_unbind.
>>+ *
>>+ * This structure describes an array of timeline drm_syncobj and associated
>>+ * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
>>+ * can be input or output fences (See struct drm_i915_vm_bind_fence).
>>+ */
>>+struct drm_i915_vm_bind_ext_timeline_fences {
>>+#define I915_VM_BIND_EXT_timeline_FENCES	0
>>+	/** @base: Extension link. See struct i915_user_extension. */
>>+	struct i915_user_extension base;
>>+
>>+	/**
>>+	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
>>+	 * arrays.
>>+	 */
>>+	__u64 fence_count;
>>+
>>+	/**
>>+	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
>>+	 * of length @fence_count.
>>+	 */
>>+	__u64 handles_ptr;
>>+
>>+	/**
>>+	 * @values_ptr: Pointer to an array of u64 values of length
>>+	 * @fence_count.
>>+	 * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>>+	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
>>+	 * binary one.
>>+	 */
>>+	__u64 values_ptr;
>>+};
>>+
>>+/**
>>+ * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
>>+ * vm_bind or the vm_unbind work.
>>+ *
>>+ * The vm_bind or vm_unbind async worker will wait for the input fence (value at
>>+ * @addr to become equal to @val) before starting the binding or unbinding.
>>+ *
>>+ * The vm_bind or vm_unbind async worker will signal the output fence after
>>+ * the completion of binding or unbinding by writing @val to memory location at
>>+ * @addr
>>+ */
>>+struct drm_i915_vm_bind_user_fence {
>>+	/** @addr: User/Memory fence qword aligned process virtual address */
>>+	__u64 addr;
>>+
>>+	/** @val: User/Memory fence value to be written after bind completion */
>>+	__u64 val;
>>+
>>+	/**
>>+	 * @flags: Supported flags are,
>>+	 *
>>+	 * I915_VM_BIND_USER_FENCE_WAIT:
>>+	 * Wait for the input fence before binding/unbinding
>>+	 *
>>+	 * I915_VM_BIND_USER_FENCE_SIGNAL:
>>+	 * Return bind/unbind completion fence as output
>>+	 */
>>+	__u32 flags;
>>+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>+	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>+};
>>+
>>+/**
>>+ * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
>>+ * and vm_unbind.
>>+ *
>>+ * These user fences can be input or output fences
>>+ * (See struct drm_i915_vm_bind_user_fence).
>>+ */
>>+struct drm_i915_vm_bind_ext_user_fence {
>>+#define I915_VM_BIND_EXT_USER_FENCES	1
>>+	/** @base: Extension link. See struct i915_user_extension. */
>>+	struct i915_user_extension base;
>>+
>>+	/** @fence_count: Number of elements in the @user_fence_ptr array. */
>>+	__u64 fence_count;
>>+
>>+	/**
>>+	 * @user_fence_ptr: Pointer to an array of
>>+	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>+	 */
>>+	__u64 user_fence_ptr;
>>+};
>>+
>>+/**
>>+ * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array of batch buffer
>>+ * gpu virtual addresses.
>>+ *
>>+ * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), this extension
>>+ * must always be appended in the VM_BIND mode and it will be an error to
>>+ * append this extension in older non-VM_BIND mode.
>>+ */
>>+struct drm_i915_gem_execbuffer_ext_batch_addresses {
>>+#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES	1
>>+	/** @base: Extension link. See struct i915_user_extension. */
>>+	struct i915_user_extension base;
>>+
>>+	/** @count: Number of addresses in the addr array. */
>>+	__u32 count;
>>+
>>+	/** @addr: An array of batch gpu virtual addresses. */
>>+	__u64 addr[0];
>>+};
>>+
>>+/**
>>+ * struct drm_i915_gem_execbuffer_ext_user_fence - First level batch completion
>>+ * signaling extension.
>>+ *
>>+ * This extension allows user to attach a user fence (@addr, @value pair) to an
>>+ * execbuf to be signaled by the command streamer after the completion of first
>>+ * level batch, by writing the @value at specified @addr and triggering an
>>+ * interrupt.
>>+ * User can either poll for this user fence to signal or can also wait on it
>>+ * with i915_gem_wait_user_fence ioctl.
>>+ * This is very useful for long-running contexts where waiting on a dma-fence
>>+ * by the user (like the i915_gem_wait ioctl) is not supported.
>>+ */
>>+struct drm_i915_gem_execbuffer_ext_user_fence {
>>+#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE		2
>>+	/** @base: Extension link. See struct i915_user_extension. */
>>+	struct i915_user_extension base;
>>+
>>+	/**
>>+	 * @addr: User/Memory fence qword aligned GPU virtual address.
>>+	 *
>>+	 * Address has to be a valid GPU virtual address at the time of
>>+	 * first level batch completion.
>>+	 */
>>+	__u64 addr;
>>+
>>+	/**
>>+	 * @value: User/Memory fence Value to be written to above address
>>+	 * after first level batch completes.
>>+	 */
>>+	__u64 value;
>>+
>>+	/** @rsvd: Reserved for future extensions, MBZ */
>>+	__u64 rsvd;
>>+};
>>+
>>+/**
>>+ * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
>>+ * private to the specified VM.
>>+ *
>>+ * See struct drm_i915_gem_create_ext.
>>+ */
>>+struct drm_i915_gem_create_ext_vm_private {
>>+#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
>>+	/** @base: Extension link. See struct i915_user_extension. */
>>+	struct i915_user_extension base;
>>+
>>+	/** @vm_id: Id of the VM to which the object is private */
>>+	__u32 vm_id;
>>+};
>>+
>>+/**
>>+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>+ *
>>+ * User/Memory fence can be woken up either by:
>>+ *
>>+ * 1. GPU context indicated by @ctx_id, or,
>>+ * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>+ *    @ctx_id is ignored when this flag is set.
>>+ *
>>+ * Wakeup condition is,
>>+ * ``((*addr & mask) op (value & mask))``
>>+ *
>>+ * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
>>+ */
>>+struct drm_i915_gem_wait_user_fence {
>>+	/** @extensions: Zero-terminated chain of extensions. */
>>+	__u64 extensions;
>>+
>>+	/** @addr: User/Memory fence address */
>>+	__u64 addr;
>>+
>>+	/** @ctx_id: Id of the Context which will signal the fence. */
>>+	__u32 ctx_id;
>>+
>>+	/** @op: Wakeup condition operator */
>>+	__u16 op;
>>+#define I915_UFENCE_WAIT_EQ      0
>>+#define I915_UFENCE_WAIT_NEQ     1
>>+#define I915_UFENCE_WAIT_GT      2
>>+#define I915_UFENCE_WAIT_GTE     3
>>+#define I915_UFENCE_WAIT_LT      4
>>+#define I915_UFENCE_WAIT_LTE     5
>>+#define I915_UFENCE_WAIT_BEFORE  6
>>+#define I915_UFENCE_WAIT_AFTER   7
>>+
>>+	/**
>>+	 * @flags: Supported flags are,
>>+	 *
>>+	 * I915_UFENCE_WAIT_SOFT:
>>+	 *
>>+	 * To be woken up by i915 driver async worker (not by GPU).
>>+	 *
>>+	 * I915_UFENCE_WAIT_ABSTIME:
>>+	 *
>>+	 * Wait timeout specified as absolute time.
>>+	 */
>>+	__u16 flags;
>>+#define I915_UFENCE_WAIT_SOFT    0x1
>>+#define I915_UFENCE_WAIT_ABSTIME 0x2
>>+
>>+	/** @value: Wakeup value */
>>+	__u64 value;
>>+
>>+	/** @mask: Wakeup mask */
>>+	__u64 mask;
>>+#define I915_UFENCE_WAIT_U8     0xffu
>>+#define I915_UFENCE_WAIT_U16    0xffffu
>>+#define I915_UFENCE_WAIT_U32    0xfffffffful
>>+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>+
>>+	/**
>>+	 * @timeout: Wait timeout in nanoseconds.
>>+	 *
>>+	 * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
>>+	 * absolute time in nsec.
>>+	 */
>>+	__s64 timeout;
>>+};
Niranjana Vishwanathapura June 7, 2022, 9:25 p.m. UTC | #10
On Tue, Jun 07, 2022 at 11:42:08AM +0100, Tvrtko Ursulin wrote:
>
>On 03/06/2022 07:53, Niranjana Vishwanathapura wrote:
>>On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura wrote:
>>>On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>>>On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>>
>>>>>On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>>><niranjana.vishwanathapura@intel.com> wrote:
>>>>>>
>>>>>>On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>>>>On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>
>>>>>>>> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>>>     Also add new uapi and documentation as per review comments
>>>>>>>>     from Daniel.
>>>>>>>>
>>>>>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>>>><niranjana.vishwanathapura@intel.com>
>>>>>>>> ---
>>>>>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>>+++++++++++++++++++++++++++
>>>>>>>>  1 file changed, 399 insertions(+)
>>>>>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>
>>>>>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>> new file mode 100644
>>>>>>>> index 000000000000..589c0a009107
>>>>>>>> --- /dev/null
>>>>>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>> @@ -0,0 +1,399 @@
>>>>>>>> +/* SPDX-License-Identifier: MIT */
>>>>>>>> +/*
>>>>>>>> + * Copyright © 2022 Intel Corporation
>>>>>>>> + */
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>> + *
>>>>>>>> + * VM_BIND feature availability.
>>>>>>>> + * See typedef drm_i915_getparam_t param.
>>>>>>>> + */
>>>>>>>> +#define I915_PARAM_HAS_VM_BIND               57
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>> + *
>>>>>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>>>> + * See struct drm_i915_gem_vm_control flags.
>>>>>>>> + *
>>>>>>>> + * A VM in VM_BIND mode will not support the older 
>>>>>>execbuff mode of binding.
>>>>>>>> + * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>>>execlist (ie., the
>>>>>>>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension 
>>>>>>must be provided
>>>>>>>> + * to pass in the batch buffer addresses.
>>>>>>>> + *
>>>>>>>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>>> + * I915_EXEC_BATCH_FIRST of 
>>>>>>&drm_i915_gem_execbuffer2.flags must be 0
>>>>>>>> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS 
>>>>>>flag must always be
>>>>>>>> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>>> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>>batch_len fields
>>>>>>>> + * of struct drm_i915_gem_execbuffer2 are also not used 
>>>>>>and must be 0.
>>>>>>>> + */
>>>>>>>
>>>>>>>From that description, it seems we have:
>>>>>>>
>>>>>>>struct drm_i915_gem_execbuffer2 {
>>>>>>>        __u64 buffers_ptr;              -> must be 0 (new)
>>>>>>>        __u32 buffer_count;             -> must be 0 (new)
>>>>>>>        __u32 batch_start_offset;       -> must be 0 (new)
>>>>>>>        __u32 batch_len;                -> must be 0 (new)
>>>>>>>        __u32 DR1;                      -> must be 0 (old)
>>>>>>>        __u32 DR4;                      -> must be 0 (old)
>>>>>>>        __u32 num_cliprects; (fences)   -> must be 0 since 
>>>>>>using extensions
>>>>>>>        __u64 cliprects_ptr; (fences, extensions) -> 
>>>>>>contains an actual pointer!
>>>>>>>        __u64 flags;                    -> some flags must be 0 (new)
>>>>>>>        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>>>>        __u64 rsvd2;                    -> unused
>>>>>>>};
>>>>>>>
>>>>>>>Based on that, why can't we just get drm_i915_gem_execbuffer3 instead
>>>>>>>of adding even more complexity to an already abused interface? While
>>>>>>>the Vulkan-like extension thing is really nice, I don't think what
>>>>>>>we're doing here is extending the ioctl usage, we're completely
>>>>>>>changing how the base struct should be interpreted based on 
>>>>>>how the VM
>>>>>>>was created (which is an entirely different ioctl).
>>>>>>>
>>>>>>>From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>>>>>>>already at -6 without these changes. I think after vm_bind we'll need
>>>>>>>to create a -11 entry just to deal with this ioctl.
>>>>>>>
>>>>>>
>>>>>>The only change here is removing the execlist support for VM_BIND
>>>>>>mode (other than natural extensions).
>>>>>>Adding a new execbuffer3 was considered, but I think we need 
>>>>>>to be careful
>>>>>>with that as that goes beyond the VM_BIND support, including 
>>>>>>any future
>>>>>>requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>>
>>>>>Why not? it's not like adding extensions here is really that different
>>>>>than adding new ioctls.
>>>>>
>>>>>I definitely think this deserves an execbuffer3 without even
>>>>>considering future requirements. Just  to burn down the old
>>>>>requirements and pointless fields.
>>>>>
>>>>>Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
>>>>>older sw on execbuf2 for ever.
>>>>
>>>>I guess another point in favour of execbuf3 would be that it's less
>>>>midlayer. If we share the entry point then there's quite a few vfuncs
>>>>needed to cleanly split out the vm_bind paths from the legacy
>>>>reloc/softping paths.
>>>>
>>>>If we invert this and do execbuf3, then there's the existing ioctl
>>>>vfunc, and then we share code (where it even makes sense, probably
>>>>request setup/submit need to be shared, anything else is probably
>>>>cleaner to just copypaste) with the usual helper approach.
>>>>
>>>>Also that would guarantee that really none of the old concepts like
>>>>i915_active on the vma or vma open counts and all that stuff leaks
>>>>into the new vm_bind execbuf.
>>>>
>>>>Finally I also think that copypasting would make backporting easier,
>>>>or at least more flexible, since it should make it easier to have the
>>>>upstream vm_bind co-exist with all the other things we have. Without
>>>>huge amounts of conflicts (or at least much less) that pushing a pile
>>>>of vfuncs into the existing code would cause.
>>>>
>>>>So maybe we should do this?
>>>
>>>Thanks Dave, Daniel.
>>>There are a few things that will be common between execbuf2 and
>>>execbuf3, like request setup/submit (as you said), fence handling 
>>>(timeline fences, fence array, composite fences), engine 
>>>selection,
>>>etc. Also, many of the 'flags' will be there in execbuf3 also (but
>>>bit position will differ).
>>>But I guess these should be fine as the suggestion here is to
>>>copy-paste the execbuff code and having a shared code where possible.
>>>Besides, we can stop supporting some older feature in execbuff3
>>>(like fence array in favor of newer timeline fences), which will
>>>further reduce common code.
>>>
>>>Ok, I will update this series by adding execbuf3 and send out soon.
>>>
>>
>>Does this sound reasonable?
>>
>>struct drm_i915_gem_execbuffer3 {
>>        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>>
>>        __u32 batch_count;
>>        __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu 
>>virtual addresses */
>
>Casual stumble upon..
>
>Alternatively you could embed N pointers to make life a bit easier for 
>both userspace and kernel side. Yes, but then "N batch buffers should 
>be enough for everyone" problem.. :)
>

Thanks Tvrtko,
Yes, hence the batch_addr_ptr.

>>
>>        __u64 flags;
>>#define I915_EXEC3_RING_MASK              (0x3f)
>>#define I915_EXEC3_DEFAULT                (0<<0)
>>#define I915_EXEC3_RENDER                 (1<<0)
>>#define I915_EXEC3_BSD                    (2<<0)
>>#define I915_EXEC3_BLT                    (3<<0)
>>#define I915_EXEC3_VEBOX                  (4<<0)
>>
>>#define I915_EXEC3_SECURE               (1<<6)
>>#define I915_EXEC3_IS_PINNED            (1<<7)
>>
>>#define I915_EXEC3_BSD_SHIFT     (8)
>>#define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
>>#define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
>>#define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
>>#define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>
>I'd suggest legacy engine selection is unwanted, especially not with 
>the convoluted BSD1/2 flags. Can we just require context with engine 
>map and index? Or if default context has to be supported then I'd 
>suggest ...class_instance for that mode.
>

Ok, I will be happy to remove it and only support contexts with
engine map, if UMDs agree on that.

>>#define I915_EXEC3_FENCE_IN             (1<<10)
>>#define I915_EXEC3_FENCE_OUT            (1<<11)
>>#define I915_EXEC3_FENCE_SUBMIT         (1<<12)
>
>People are likely to object to submit fence since generic mechanism to 
>align submissions was rejected.
>

Ok, again, I can remove it if UMDs are ok with it.

>>
>>        __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>
>New ioctl you can afford dedicated fields.
>

Yes, but as I asked below, I am not sure whether we need this or
whether the timeline fence array extension we have is good enough.

>In any case I suggest you involve UMD folks in designing it.
>

Yah.
Paulo, Lionel, Jason, Daniel, can you comment on these regarding
what will UMD need in execbuf3 and what can be removed?

Thanks,
Niranjana

>Regards,
>
>Tvrtko
>
>>
>>        __u64 extensions;        /* currently only for 
>>DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
>>};
>>
>>With this, user can pass in batch addresses and count directly,
>>instead of as an extension (as this rfc series was proposing).
>>
>>I have removed many of the flags which were either legacy or not
>>applicable to VM_BIND mode.
>>I have also removed fence array support (execbuffer2.cliprects_ptr)
>>as we have timeline fence array support. Is that fine?
>>Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>>
>>Any thing else needs to be added or removed?
>>
>>Niranjana
>>
>>>Niranjana
>>>
>>>>-Daniel
>>>>-- 
>>>>Daniel Vetter
>>>>Software Engineer, Intel Corporation
>>>>http://blog.ffwll.ch
Lionel Landwerlin June 8, 2022, 6:40 a.m. UTC | #11
On 03/06/2022 09:53, Niranjana Vishwanathapura wrote:
> On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura 
> wrote:
>> On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>> On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>
>>>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>> <niranjana.vishwanathapura@intel.com> wrote:
>>>>>
>>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>> >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>>>>> >> VM_BIND and related uapi definitions
>>>>> >>
>>>>> >> v2: Ensure proper kernel-doc formatting with cross references.
>>>>> >>     Also add new uapi and documentation as per review comments
>>>>> >>     from Daniel.
>>>>> >>
>>>>> >> Signed-off-by: Niranjana Vishwanathapura 
>>>>> <niranjana.vishwanathapura@intel.com>
>>>>> >> ---
>>>>> >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>> +++++++++++++++++++++++++++
>>>>> >>  1 file changed, 399 insertions(+)
>>>>> >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>> >>
>>>>> >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>> >> new file mode 100644
>>>>> >> index 000000000000..589c0a009107
>>>>> >> --- /dev/null
>>>>> >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>> >> @@ -0,0 +1,399 @@
>>>>> >> +/* SPDX-License-Identifier: MIT */
>>>>> >> +/*
>>>>> >> + * Copyright © 2022 Intel Corporation
>>>>> >> + */
>>>>> >> +
>>>>> >> +/**
>>>>> >> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>> >> + *
>>>>> >> + * VM_BIND feature availability.
>>>>> >> + * See typedef drm_i915_getparam_t param.
>>>>> >> + */
>>>>> >> +#define I915_PARAM_HAS_VM_BIND               57
>>>>> >> +
>>>>> >> +/**
>>>>> >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>> >> + *
>>>>> >> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>> >> + * See struct drm_i915_gem_vm_control flags.
>>>>> >> + *
>>>>> >> + * A VM in VM_BIND mode will not support the older execbuff 
>>>>> mode of binding.
>>>>> >> + * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>> execlist (ie., the
>>>>> >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>> >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>> >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>> >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must 
>>>>> be provided
>>>>> >> + * to pass in the batch buffer addresses.
>>>>> >> + *
>>>>> >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>> >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags 
>>>>> must be 0
>>>>> >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>> must always be
>>>>> >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>> >> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>> batch_len fields
>>>>> >> + * of struct drm_i915_gem_execbuffer2 are also not used and 
>>>>> must be 0.
>>>>> >> + */
>>>>> >
>>>>> >From that description, it seems we have:
>>>>> >
>>>>> >struct drm_i915_gem_execbuffer2 {
>>>>> >        __u64 buffers_ptr;              -> must be 0 (new)
>>>>> >        __u32 buffer_count;             -> must be 0 (new)
>>>>> >        __u32 batch_start_offset;       -> must be 0 (new)
>>>>> >        __u32 batch_len;                -> must be 0 (new)
>>>>> >        __u32 DR1;                      -> must be 0 (old)
>>>>> >        __u32 DR4;                      -> must be 0 (old)
>>>>> >        __u32 num_cliprects; (fences)   -> must be 0 since using 
>>>>> extensions
>>>>> >        __u64 cliprects_ptr; (fences, extensions) -> contains an 
>>>>> actual pointer!
>>>>> >        __u64 flags;                    -> some flags must be 0 
>>>>> (new)
>>>>> >        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>> >        __u64 rsvd2;                    -> unused
>>>>> >};
>>>>> >
>>>>> >Based on that, why can't we just get drm_i915_gem_execbuffer3 
>>>>> instead
>>>>> >of adding even more complexity to an already abused interface? While
>>>>> >the Vulkan-like extension thing is really nice, I don't think what
>>>>> >we're doing here is extending the ioctl usage, we're completely
>>>>> >changing how the base struct should be interpreted based on how 
>>>>> the VM
>>>>> >was created (which is an entirely different ioctl).
>>>>> >
>>>>> >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>>>>> >already at -6 without these changes. I think after vm_bind we'll 
>>>>> need
>>>>> >to create a -11 entry just to deal with this ioctl.
>>>>> >
>>>>>
>>>>> The only change here is removing the execlist support for VM_BIND
>>>>> mode (other than natural extensions).
>>>>> Adding a new execbuffer3 was considered, but I think we need to be 
>>>>> careful
>>>>> with that as that goes beyond the VM_BIND support, including any 
>>>>> future
>>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>
>>>> Why not? it's not like adding extensions here is really that different
>>>> than adding new ioctls.
>>>>
>>>> I definitely think this deserves an execbuffer3 without even
>>>> considering future requirements. Just  to burn down the old
>>>> requirements and pointless fields.
>>>>
>>>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
>>>> older sw on execbuf2 for ever.
>>>
>>> I guess another point in favour of execbuf3 would be that it's less
>>> midlayer. If we share the entry point then there's quite a few vfuncs
>>> needed to cleanly split out the vm_bind paths from the legacy
>>> reloc/softping paths.
>>>
>>> If we invert this and do execbuf3, then there's the existing ioctl
>>> vfunc, and then we share code (where it even makes sense, probably
>>> request setup/submit need to be shared, anything else is probably
>>> cleaner to just copypaste) with the usual helper approach.
>>>
>>> Also that would guarantee that really none of the old concepts like
>>> i915_active on the vma or vma open counts and all that stuff leaks
>>> into the new vm_bind execbuf.
>>>
>>> Finally I also think that copypasting would make backporting easier,
>>> or at least more flexible, since it should make it easier to have the
>>> upstream vm_bind co-exist with all the other things we have. Without
>>> huge amounts of conflicts (or at least much less) that pushing a pile
>>> of vfuncs into the existing code would cause.
>>>
>>> So maybe we should do this?
>>
>> Thanks Dave, Daniel.
>> There are a few things that will be common between execbuf2 and
>> execbuf3, like request setup/submit (as you said), fence handling 
>> (timeline fences, fence array, composite fences), engine selection,
>> etc. Also, many of the 'flags' will be there in execbuf3 also (but
>> bit position will differ).
>> But I guess these should be fine as the suggestion here is to
>> copy-paste the execbuff code and having a shared code where possible.
>> Besides, we can stop supporting some older feature in execbuff3
>> (like fence array in favor of newer timeline fences), which will
>> further reduce common code.
>>
>> Ok, I will update this series by adding execbuf3 and send out soon.
>>
>
> Does this sound reasonable?


Thanks for proposing this. Some comments below.


>
> struct drm_i915_gem_execbuffer3 {
>        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>
>        __u32 batch_count;
>        __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu 
> virtual addresses */
>
>        __u64 flags;
> #define I915_EXEC3_RING_MASK              (0x3f)
> #define I915_EXEC3_DEFAULT                (0<<0)
> #define I915_EXEC3_RENDER                 (1<<0)
> #define I915_EXEC3_BSD                    (2<<0)
> #define I915_EXEC3_BLT                    (3<<0)
> #define I915_EXEC3_VEBOX                  (4<<0)


Shouldn't we use the new engine selection uAPI instead?

We can already create an engine map with I915_CONTEXT_PARAM_ENGINES in 
drm_i915_gem_context_create_ext_setparam.

And you can also create virtual engines with the same extension.

It feels like this could be a single u32 with the engine index (in the 
context engine map).


>
> #define I915_EXEC3_SECURE               (1<<6)
> #define I915_EXEC3_IS_PINNED            (1<<7)


What's the meaning of PINNED?


>
> #define I915_EXEC3_BSD_SHIFT     (8)
> #define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
> #define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
> #define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
> #define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>
> #define I915_EXEC3_FENCE_IN             (1<<10)
> #define I915_EXEC3_FENCE_OUT            (1<<11)


For Mesa, as soon as we have DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 
support, we only use that.

So there isn't much point for FENCE_IN/OUT.

Maybe check with other UMDs?


> #define I915_EXEC3_FENCE_SUBMIT         (1<<12)


What's FENCE_SUBMIT?


>
>        __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>
>        __u64 extensions;        /* currently only for 
> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
> };
>
> With this, user can pass in batch addresses and count directly,
> instead of as an extension (as this rfc series was proposing).
>
> I have removed many of the flags which were either legacy or not
> applicable to VM_BIND mode.
> I have also removed fence array support (execbuffer2.cliprects_ptr)
> as we have timeline fence array support. Is that fine?
> Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>
> Any thing else needs to be added or removed?
>
> Niranjana
>
>> Niranjana
>>
>>> -Daniel
>>> -- 
>>> Daniel Vetter
>>> Software Engineer, Intel Corporation
>>> http://blog.ffwll.ch
Lionel Landwerlin June 8, 2022, 6:43 a.m. UTC | #12
On 08/06/2022 09:40, Lionel Landwerlin wrote:
> On 03/06/2022 09:53, Niranjana Vishwanathapura wrote:
>> On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura 
>> wrote:
>>> On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>>> On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>>
>>>>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>>> <niranjana.vishwanathapura@intel.com> wrote:
>>>>>>
>>>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>>> >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>>>>>> >> VM_BIND and related uapi definitions
>>>>>> >>
>>>>>> >> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>> >>     Also add new uapi and documentation as per review comments
>>>>>> >>     from Daniel.
>>>>>> >>
>>>>>> >> Signed-off-by: Niranjana Vishwanathapura 
>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>> >> ---
>>>>>> >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>> +++++++++++++++++++++++++++
>>>>>> >>  1 file changed, 399 insertions(+)
>>>>>> >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> >>
>>>>>> >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> >> new file mode 100644
>>>>>> >> index 000000000000..589c0a009107
>>>>>> >> --- /dev/null
>>>>>> >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> >> @@ -0,0 +1,399 @@
>>>>>> >> +/* SPDX-License-Identifier: MIT */
>>>>>> >> +/*
>>>>>> >> + * Copyright © 2022 Intel Corporation
>>>>>> >> + */
>>>>>> >> +
>>>>>> >> +/**
>>>>>> >> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>> >> + *
>>>>>> >> + * VM_BIND feature availability.
>>>>>> >> + * See typedef drm_i915_getparam_t param.
>>>>>> >> + */
>>>>>> >> +#define I915_PARAM_HAS_VM_BIND 57
>>>>>> >> +
>>>>>> >> +/**
>>>>>> >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>> >> + *
>>>>>> >> + * Flag to opt-in for VM_BIND mode of binding during VM 
>>>>>> creation.
>>>>>> >> + * See struct drm_i915_gem_vm_control flags.
>>>>>> >> + *
>>>>>> >> + * A VM in VM_BIND mode will not support the older execbuff 
>>>>>> mode of binding.
>>>>>> >> + * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>>> execlist (ie., the
>>>>>> >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>> >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>> >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>> >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must 
>>>>>> be provided
>>>>>> >> + * to pass in the batch buffer addresses.
>>>>>> >> + *
>>>>>> >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>> >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags 
>>>>>> must be 0
>>>>>> >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>>> must always be
>>>>>> >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>> >> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>> batch_len fields
>>>>>> >> + * of struct drm_i915_gem_execbuffer2 are also not used and 
>>>>>> must be 0.
>>>>>> >> + */
>>>>>> >
>>>>>> >From that description, it seems we have:
>>>>>> >
>>>>>> >struct drm_i915_gem_execbuffer2 {
>>>>>> >        __u64 buffers_ptr;              -> must be 0 (new)
>>>>>> >        __u32 buffer_count;             -> must be 0 (new)
>>>>>> >        __u32 batch_start_offset;       -> must be 0 (new)
>>>>>> >        __u32 batch_len;                -> must be 0 (new)
>>>>>> >        __u32 DR1;                      -> must be 0 (old)
>>>>>> >        __u32 DR4;                      -> must be 0 (old)
>>>>>> >        __u32 num_cliprects; (fences)   -> must be 0 since using 
>>>>>> extensions
>>>>>> >        __u64 cliprects_ptr; (fences, extensions) -> contains an 
>>>>>> actual pointer!
>>>>>> >        __u64 flags;                    -> some flags must be 0 
>>>>>> (new)
>>>>>> >        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>>> >        __u64 rsvd2;                    -> unused
>>>>>> >};
>>>>>> >
>>>>>> >Based on that, why can't we just get drm_i915_gem_execbuffer3 
>>>>>> instead
>>>>>> >of adding even more complexity to an already abused interface? 
>>>>>> While
>>>>>> >the Vulkan-like extension thing is really nice, I don't think what
>>>>>> >we're doing here is extending the ioctl usage, we're completely
>>>>>> >changing how the base struct should be interpreted based on how 
>>>>>> the VM
>>>>>> >was created (which is an entirely different ioctl).
>>>>>> >
>>>>>> >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>>>>>> >already at -6 without these changes. I think after vm_bind we'll 
>>>>>> need
>>>>>> >to create a -11 entry just to deal with this ioctl.
>>>>>> >
>>>>>>
>>>>>> The only change here is removing the execlist support for VM_BIND
>>>>>> mode (other than natural extensions).
>>>>>> Adding a new execbuffer3 was considered, but I think we need to 
>>>>>> be careful
>>>>>> with that as that goes beyond the VM_BIND support, including any 
>>>>>> future
>>>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>>
>>>>> Why not? it's not like adding extensions here is really that 
>>>>> different
>>>>> than adding new ioctls.
>>>>>
>>>>> I definitely think this deserves an execbuffer3 without even
>>>>> considering future requirements. Just to burn down the old
>>>>> requirements and pointless fields.
>>>>>
>>>>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave 
>>>>> the
>>>>> older sw on execbuf2 for ever.
>>>>
>>>> I guess another point in favour of execbuf3 would be that it's less
>>>> midlayer. If we share the entry point then there's quite a few vfuncs
>>>> needed to cleanly split out the vm_bind paths from the legacy
>>>> reloc/softping paths.
>>>>
>>>> If we invert this and do execbuf3, then there's the existing ioctl
>>>> vfunc, and then we share code (where it even makes sense, probably
>>>> request setup/submit need to be shared, anything else is probably
>>>> cleaner to just copypaste) with the usual helper approach.
>>>>
>>>> Also that would guarantee that really none of the old concepts like
>>>> i915_active on the vma or vma open counts and all that stuff leaks
>>>> into the new vm_bind execbuf.
>>>>
>>>> Finally I also think that copypasting would make backporting easier,
>>>> or at least more flexible, since it should make it easier to have the
>>>> upstream vm_bind co-exist with all the other things we have. Without
>>>> huge amounts of conflicts (or at least much less) that pushing a pile
>>>> of vfuncs into the existing code would cause.
>>>>
>>>> So maybe we should do this?
>>>
>>> Thanks Dave, Daniel.
>>> There are a few things that will be common between execbuf2 and
>>> execbuf3, like request setup/submit (as you said), fence handling 
>>> (timeline fences, fence array, composite fences), engine selection,
>>> etc. Also, many of the 'flags' will be there in execbuf3 also (but
>>> bit position will differ).
>>> But I guess these should be fine as the suggestion here is to
>>> copy-paste the execbuff code and having a shared code where possible.
>>> Besides, we can stop supporting some older feature in execbuff3
>>> (like fence array in favor of newer timeline fences), which will
>>> further reduce common code.
>>>
>>> Ok, I will update this series by adding execbuf3 and send out soon.
>>>
>>
>> Does this sound reasonable?
>
>
> Thanks for proposing this. Some comments below.
>
>
>>
>> struct drm_i915_gem_execbuffer3 {
>>        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>>
>>        __u32 batch_count;
>>        __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu 
>> virtual addresses */
>>
>>        __u64 flags;
>> #define I915_EXEC3_RING_MASK              (0x3f)
>> #define I915_EXEC3_DEFAULT                (0<<0)
>> #define I915_EXEC3_RENDER                 (1<<0)
>> #define I915_EXEC3_BSD                    (2<<0)
>> #define I915_EXEC3_BLT                    (3<<0)
>> #define I915_EXEC3_VEBOX                  (4<<0)
>
>
> Shouldn't we use the new engine selection uAPI instead?
>
> We can already create an engine map with I915_CONTEXT_PARAM_ENGINES in 
> drm_i915_gem_context_create_ext_setparam.
>
> And you can also create virtual engines with the same extension.
>
> It feels like this could be a single u32 with the engine index (in the 
> context engine map).
>
>
>>
>> #define I915_EXEC3_SECURE               (1<<6)
>> #define I915_EXEC3_IS_PINNED            (1<<7)
>
>
> What's the meaning of PINNED?
>
>
>>
>> #define I915_EXEC3_BSD_SHIFT     (8)
>> #define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
>> #define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
>> #define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
>> #define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>>
>> #define I915_EXEC3_FENCE_IN             (1<<10)
>> #define I915_EXEC3_FENCE_OUT            (1<<11)
>
>
> For Mesa, as soon as we have 
> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES support, we only use that.
>
> So there isn't much point for FENCE_IN/OUT.
>
> Maybe check with other UMDs?


Correcting myself a bit here :

     - iris uses I915_EXEC_FENCE_ARRAY

     - anv uses I915_EXEC_FENCE_ARRAY or 
DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES


In either case we could easily switch to 
DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES all the time.


>
>
>> #define I915_EXEC3_FENCE_SUBMIT (1<<12)
>
>
> What's FENCE_SUBMIT?
>
>
>>
>>        __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>>
>>        __u64 extensions;        /* currently only for 
>> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
>> };
>>
>> With this, user can pass in batch addresses and count directly,
>> instead of as an extension (as this rfc series was proposing).
>>
>> I have removed many of the flags which were either legacy or not
>> applicable to VM_BIND mode.
>> I have also removed fence array support (execbuffer2.cliprects_ptr)
>> as we have timeline fence array support. Is that fine?
>> Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>>
>> Any thing else needs to be added or removed?
>>
>> Niranjana
>>
>>> Niranjana
>>>
>>>> -Daniel
>>>> -- 
>>>> Daniel Vetter
>>>> Software Engineer, Intel Corporation
>>>> http://blog.ffwll.ch
>
>
Lionel Landwerlin June 8, 2022, 7:12 a.m. UTC | #13
On 03/06/2022 09:53, Niranjana Vishwanathapura wrote:
> On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura 
> wrote:
>> On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>> On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>
>>>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>> <niranjana.vishwanathapura@intel.com> wrote:
>>>>>
>>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>> >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>>>>> >> VM_BIND and related uapi definitions
>>>>> >>
>>>>> >> v2: Ensure proper kernel-doc formatting with cross references.
>>>>> >>     Also add new uapi and documentation as per review comments
>>>>> >>     from Daniel.
>>>>> >>
>>>>> >> Signed-off-by: Niranjana Vishwanathapura 
>>>>> <niranjana.vishwanathapura@intel.com>
>>>>> >> ---
>>>>> >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>> +++++++++++++++++++++++++++
>>>>> >>  1 file changed, 399 insertions(+)
>>>>> >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>> >>
>>>>> >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>> >> new file mode 100644
>>>>> >> index 000000000000..589c0a009107
>>>>> >> --- /dev/null
>>>>> >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>> >> @@ -0,0 +1,399 @@
>>>>> >> +/* SPDX-License-Identifier: MIT */
>>>>> >> +/*
>>>>> >> + * Copyright © 2022 Intel Corporation
>>>>> >> + */
>>>>> >> +
>>>>> >> +/**
>>>>> >> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>> >> + *
>>>>> >> + * VM_BIND feature availability.
>>>>> >> + * See typedef drm_i915_getparam_t param.
>>>>> >> + */
>>>>> >> +#define I915_PARAM_HAS_VM_BIND               57
>>>>> >> +
>>>>> >> +/**
>>>>> >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>> >> + *
>>>>> >> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>> >> + * See struct drm_i915_gem_vm_control flags.
>>>>> >> + *
>>>>> >> + * A VM in VM_BIND mode will not support the older execbuff 
>>>>> mode of binding.
>>>>> >> + * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>> execlist (ie., the
>>>>> >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>> >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>> >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>> >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must 
>>>>> be provided
>>>>> >> + * to pass in the batch buffer addresses.
>>>>> >> + *
>>>>> >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>> >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags 
>>>>> must be 0
>>>>> >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>> must always be
>>>>> >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>> >> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>> batch_len fields
>>>>> >> + * of struct drm_i915_gem_execbuffer2 are also not used and 
>>>>> must be 0.
>>>>> >> + */
>>>>> >
>>>>> >From that description, it seems we have:
>>>>> >
>>>>> >struct drm_i915_gem_execbuffer2 {
>>>>> >        __u64 buffers_ptr;              -> must be 0 (new)
>>>>> >        __u32 buffer_count;             -> must be 0 (new)
>>>>> >        __u32 batch_start_offset;       -> must be 0 (new)
>>>>> >        __u32 batch_len;                -> must be 0 (new)
>>>>> >        __u32 DR1;                      -> must be 0 (old)
>>>>> >        __u32 DR4;                      -> must be 0 (old)
>>>>> >        __u32 num_cliprects; (fences)   -> must be 0 since using 
>>>>> extensions
>>>>> >        __u64 cliprects_ptr; (fences, extensions) -> contains an 
>>>>> actual pointer!
>>>>> >        __u64 flags;                    -> some flags must be 0 
>>>>> (new)
>>>>> >        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>> >        __u64 rsvd2;                    -> unused
>>>>> >};
>>>>> >
>>>>> >Based on that, why can't we just get drm_i915_gem_execbuffer3 
>>>>> instead
>>>>> >of adding even more complexity to an already abused interface? While
>>>>> >the Vulkan-like extension thing is really nice, I don't think what
>>>>> >we're doing here is extending the ioctl usage, we're completely
>>>>> >changing how the base struct should be interpreted based on how 
>>>>> the VM
>>>>> >was created (which is an entirely different ioctl).
>>>>> >
>>>>> >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>>>>> >already at -6 without these changes. I think after vm_bind we'll 
>>>>> need
>>>>> >to create a -11 entry just to deal with this ioctl.
>>>>> >
>>>>>
>>>>> The only change here is removing the execlist support for VM_BIND
>>>>> mode (other than natural extensions).
>>>>> Adding a new execbuffer3 was considered, but I think we need to be 
>>>>> careful
>>>>> with that as that goes beyond the VM_BIND support, including any 
>>>>> future
>>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>
>>>> Why not? it's not like adding extensions here is really that different
>>>> than adding new ioctls.
>>>>
>>>> I definitely think this deserves an execbuffer3 without even
>>>> considering future requirements. Just to burn down the old
>>>> requirements and pointless fields.
>>>>
>>>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
>>>> older sw on execbuf2 for ever.
>>>
>>> I guess another point in favour of execbuf3 would be that it's less
>>> midlayer. If we share the entry point then there's quite a few vfuncs
>>> needed to cleanly split out the vm_bind paths from the legacy
>>> reloc/softping paths.
>>>
>>> If we invert this and do execbuf3, then there's the existing ioctl
>>> vfunc, and then we share code (where it even makes sense, probably
>>> request setup/submit need to be shared, anything else is probably
>>> cleaner to just copypaste) with the usual helper approach.
>>>
>>> Also that would guarantee that really none of the old concepts like
>>> i915_active on the vma or vma open counts and all that stuff leaks
>>> into the new vm_bind execbuf.
>>>
>>> Finally I also think that copypasting would make backporting easier,
>>> or at least more flexible, since it should make it easier to have the
>>> upstream vm_bind co-exist with all the other things we have. Without
>>> huge amounts of conflicts (or at least much less) that pushing a pile
>>> of vfuncs into the existing code would cause.
>>>
>>> So maybe we should do this?
>>
>> Thanks Dave, Daniel.
>> There are a few things that will be common between execbuf2 and
>> execbuf3, like request setup/submit (as you said), fence handling 
>> (timeline fences, fence array, composite fences), engine selection,
>> etc. Also, many of the 'flags' will be there in execbuf3 also (but
>> bit position will differ).
>> But I guess these should be fine as the suggestion here is to
>> copy-paste the execbuff code and having a shared code where possible.
>> Besides, we can stop supporting some older feature in execbuff3
>> (like fence array in favor of newer timeline fences), which will
>> further reduce common code.
>>
>> Ok, I will update this series by adding execbuf3 and send out soon.
>>
>
> Does this sound reasonable?
>
> struct drm_i915_gem_execbuffer3 {
>        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>
>        __u32 batch_count;
>        __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu 
> virtual addresses */


Quick question raised on IRC about the batches : Are multiple batches 
limited to virtual engines?


Thanks,


-Lionel


>
>        __u64 flags;
> #define I915_EXEC3_RING_MASK              (0x3f)
> #define I915_EXEC3_DEFAULT                (0<<0)
> #define I915_EXEC3_RENDER                 (1<<0)
> #define I915_EXEC3_BSD                    (2<<0)
> #define I915_EXEC3_BLT                    (3<<0)
> #define I915_EXEC3_VEBOX                  (4<<0)
>
> #define I915_EXEC3_SECURE               (1<<6)
> #define I915_EXEC3_IS_PINNED            (1<<7)
>
> #define I915_EXEC3_BSD_SHIFT     (8)
> #define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
> #define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
> #define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
> #define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>
> #define I915_EXEC3_FENCE_IN             (1<<10)
> #define I915_EXEC3_FENCE_OUT            (1<<11)
> #define I915_EXEC3_FENCE_SUBMIT         (1<<12)
>
>        __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>
>        __u64 extensions;        /* currently only for 
> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
> };
>
> With this, user can pass in batch addresses and count directly,
> instead of as an extension (as this rfc series was proposing).
>
> I have removed many of the flags which were either legacy or not
> applicable to VM_BIND mode.
> I have also removed fence array support (execbuffer2.cliprects_ptr)
> as we have timeline fence array support. Is that fine?
> Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>
> Any thing else needs to be added or removed?
>
> Niranjana
>
>> Niranjana
>>
>>> -Daniel
>>> -- 
>>> Daniel Vetter
>>> Software Engineer, Intel Corporation
>>> http://blog.ffwll.ch
Tvrtko Ursulin June 8, 2022, 7:17 a.m. UTC | #14
On 07/06/2022 20:37, Niranjana Vishwanathapura wrote:
> On Tue, Jun 07, 2022 at 11:27:14AM +0100, Tvrtko Ursulin wrote:
>>
>> On 17/05/2022 19:32, Niranjana Vishwanathapura wrote:
>>> VM_BIND and related uapi definitions
>>>
>>> v2: Ensure proper kernel-doc formatting with cross references.
>>>     Also add new uapi and documentation as per review comments
>>>     from Daniel.
>>>
>>> Signed-off-by: Niranjana Vishwanathapura 
>>> <niranjana.vishwanathapura@intel.com>
>>> ---
>>>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>>>  1 file changed, 399 insertions(+)
>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>
>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>> new file mode 100644
>>> index 000000000000..589c0a009107
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>> @@ -0,0 +1,399 @@
>>> +/* SPDX-License-Identifier: MIT */
>>> +/*
>>> + * Copyright © 2022 Intel Corporation
>>> + */
>>> +
>>> +/**
>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>> + *
>>> + * VM_BIND feature availability.
>>> + * See typedef drm_i915_getparam_t param.
>>> + */
>>> +#define I915_PARAM_HAS_VM_BIND        57
>>> +
>>> +/**
>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>> + *
>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>> + * See struct drm_i915_gem_vm_control flags.
>>> + *
>>> + * A VM in VM_BIND mode will not support the older execbuff mode of 
>>> binding.
>>> + * In VM_BIND mode, execbuff ioctl will not accept any execlist 
>>> (ie., the
>>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be 
>>> provided
>>> + * to pass in the batch buffer addresses.
>>> + *
>>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
>>> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must 
>>> always be
>>> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len 
>>> fields
>>> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
>>> + */
>>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>> +
>>> +/**
>>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>> + *
>>> + * Flag to declare context as long running.
>>> + * See struct drm_i915_gem_context_create_ext flags.
>>> + *
>>> + * Usage of dma-fence expects that they complete in reasonable 
>>> amount of time.
>>> + * Compute on the other hand can be long running. Hence it is not 
>>> appropriate
>>> + * for compute contexts to export request completion dma-fence to user.
>>> + * The dma-fence usage will be limited to in-kernel consumption only.
>>> + * Compute contexts need to use user/memory fence.
>>> + *
>>> + * So, long running contexts do not support output fences. Hence,
>>> + * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
>>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) are 
>>> expected
>>> + * to be not used.
>>> + *
>>> + * DRM_I915_GEM_WAIT ioctl call is also not supported for objects 
>>> mapped
>>> + * to long running contexts.
>>> + */
>>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>> +
>>> +/* VM_BIND related ioctls */
>>> +#define DRM_I915_GEM_VM_BIND        0x3d
>>> +#define DRM_I915_GEM_VM_UNBIND        0x3e
>>> +#define DRM_I915_GEM_WAIT_USER_FENCE    0x3f
>>> +
>>> +#define DRM_IOCTL_I915_GEM_VM_BIND        DRM_IOWR(DRM_COMMAND_BASE 
>>> + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND        
>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct 
>>> drm_i915_gem_vm_bind)
>>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE    
>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct 
>>> drm_i915_gem_wait_user_fence)
>>> +
>>> +/**
>>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>> + *
>>> + * This structure is passed to VM_BIND ioctl and specifies the 
>>> mapping of GPU
>>> + * virtual address (VA) range to the section of an object that 
>>> should be bound
>>> + * in the device page table of the specified address space (VM).
>>> + * The VA range specified must be unique (ie., not currently bound) 
>>> and can
>>> + * be mapped to whole object or a section of the object (partial 
>>> binding).
>>> + * Multiple VA mappings can be created to the same section of the 
>>> object
>>> + * (aliasing).
>>> + */
>>> +struct drm_i915_gem_vm_bind {
>>> +    /** @vm_id: VM (address space) id to bind */
>>> +    __u32 vm_id;
>>> +
>>> +    /** @handle: Object handle */
>>> +    __u32 handle;
>>> +
>>> +    /** @start: Virtual Address start to bind */
>>> +    __u64 start;
>>> +
>>> +    /** @offset: Offset in object to bind */
>>> +    __u64 offset;
>>> +
>>> +    /** @length: Length of mapping to bind */
>>> +    __u64 length;
>>
>> Does it support, or should it, equivalent of EXEC_OBJECT_PAD_TO_SIZE? 
>> Or if not userspace is expected to map the remainder of the space to a 
>> dummy object? In which case would there be any alignment/padding 
>> issues preventing the two bind to be placed next to each other?
>>
>> I ask because someone from the compute side asked me about a problem 
>> with their strategy of dealing with overfetch and I suggested pad to 
>> size.
>>
> 
> Thanks Tvrtko,
> I think we shouldn't be needing it. As with VM_BIND VA assignment
> is completely pushed to userspace, no padding should be necessary
> once the 'start' and 'size' alignment conditions are met.
> 
> I will add some documentation on alignment requirement here.
> Generally, 'start' and 'size' should be 4K aligned. But, I think
> when we have 64K lmem page sizes (dg2 and xehpsdv), they need to
> be 64K aligned.

+ Matt

Align to 64k is enough for all overfetch issues?

Apparently compute has a situation where a buffer is received by one 
component and another has to apply more alignment to it, to deal with 
overfetch. Since they cannot grow the actual BO if they wanted to 
VM_BIND a scratch area on top? Or perhaps none of this is a problem on 
discrete and original BO should be correctly allocated to start with.

Side question - what about the align to 2MiB mentioned in 
i915_vma_insert to avoid mixing 4k and 64k PTEs? That does not apply to 
discrete?

Regards,

Tvrtko

> 
> Niranjana
> 
>> Regards,
>>
>> Tvrtko
>>
>>> +
>>> +    /**
>>> +     * @flags: Supported flags are,
>>> +     *
>>> +     * I915_GEM_VM_BIND_READONLY:
>>> +     * Mapping is read-only.
>>> +     *
>>> +     * I915_GEM_VM_BIND_CAPTURE:
>>> +     * Capture this mapping in the dump upon GPU error.
>>> +     */
>>> +    __u64 flags;
>>> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>> +
>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>> mapping. */
>>> +    __u64 extensions;
>>> +};
>>> +
>>> +/**
>>> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>> + *
>>> + * This structure is passed to VM_UNBIND ioctl and specifies the GPU 
>>> virtual
>>> + * address (VA) range that should be unbound from the device page 
>>> table of the
>>> + * specified address space (VM). The specified VA range must match 
>>> one of the
>>> + * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>>> + * completion.
>>> + */
>>> +struct drm_i915_gem_vm_unbind {
>>> +    /** @vm_id: VM (address space) id to bind */
>>> +    __u32 vm_id;
>>> +
>>> +    /** @rsvd: Reserved for future use; must be zero. */
>>> +    __u32 rsvd;
>>> +
>>> +    /** @start: Virtual Address start to unbind */
>>> +    __u64 start;
>>> +
>>> +    /** @length: Length of mapping to unbind */
>>> +    __u64 length;
>>> +
>>> +    /** @flags: reserved for future usage, currently MBZ */
>>> +    __u64 flags;
>>> +
>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>> mapping. */
>>> +    __u64 extensions;
>>> +};
>>> +
>>> +/**
>>> + * struct drm_i915_vm_bind_fence - An input or output fence for the 
>>> vm_bind
>>> + * or the vm_unbind work.
>>> + *
>>> + * The vm_bind or vm_unbind async worker will wait for input fence to 
>>> signal
>>> + * before starting the binding or unbinding.
>>> + *
>>> + * The vm_bind or vm_unbind async worker will signal the returned 
>>> output fence
>>> + * after the completion of binding or unbinding.
>>> + */
>>> +struct drm_i915_vm_bind_fence {
>>> +    /** @handle: User's handle for a drm_syncobj to wait on or 
>>> signal. */
>>> +    __u32 handle;
>>> +
>>> +    /**
>>> +     * @flags: Supported flags are,
>>> +     *
>>> +     * I915_VM_BIND_FENCE_WAIT:
>>> +     * Wait for the input fence before binding/unbinding
>>> +     *
>>> +     * I915_VM_BIND_FENCE_SIGNAL:
>>> +     * Return bind/unbind completion fence as output
>>> +     */
>>> +    __u32 flags;
>>> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>> (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>> +};
>>> +
>>> +/**
>>> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for 
>>> vm_bind
>>> + * and vm_unbind.
>>> + *
>>> + * This structure describes an array of timeline drm_syncobj and 
>>> associated
>>> + * points for timeline variants of drm_syncobj. These timeline 
>>> 'drm_syncobj's
>>> + * can be input or output fences (See struct drm_i915_vm_bind_fence).
>>> + */
>>> +struct drm_i915_vm_bind_ext_timeline_fences {
>>> +#define I915_VM_BIND_EXT_timeline_FENCES    0
>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>> +    struct i915_user_extension base;
>>> +
>>> +    /**
>>> +     * @fence_count: Number of elements in the @handles_ptr & 
>>> @value_ptr
>>> +     * arrays.
>>> +     */
>>> +    __u64 fence_count;
>>> +
>>> +    /**
>>> +     * @handles_ptr: Pointer to an array of struct 
>>> drm_i915_vm_bind_fence
>>> +     * of length @fence_count.
>>> +     */
>>> +    __u64 handles_ptr;
>>> +
>>> +    /**
>>> +     * @values_ptr: Pointer to an array of u64 values of length
>>> +     * @fence_count.
>>> +     * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>>> +     * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
>>> +     * binary one.
>>> +     */
>>> +    __u64 values_ptr;
>>> +};
>>> +
>>> +/**
>>> + * struct drm_i915_vm_bind_user_fence - An input or output user 
>>> fence for the
>>> + * vm_bind or the vm_unbind work.
>>> + *
>>> + * The vm_bind or vm_unbind async worker will wait for the input 
>>> fence (value at
>>> + * @addr to become equal to @val) before starting the binding or 
>>> unbinding.
>>> + *
>>> + * The vm_bind or vm_unbind async worker will signal the output 
>>> fence after
>>> + * the completion of binding or unbinding by writing @val to memory 
>>> location at
>>> + * @addr
>>> + */
>>> +struct drm_i915_vm_bind_user_fence {
>>> +    /** @addr: User/Memory fence qword aligned process virtual 
>>> address */
>>> +    __u64 addr;
>>> +
>>> +    /** @val: User/Memory fence value to be written after bind 
>>> completion */
>>> +    __u64 val;
>>> +
>>> +    /**
>>> +     * @flags: Supported flags are,
>>> +     *
>>> +     * I915_VM_BIND_USER_FENCE_WAIT:
>>> +     * Wait for the input fence before binding/unbinding
>>> +     *
>>> +     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>> +     * Return bind/unbind completion fence as output
>>> +     */
>>> +    __u32 flags;
>>> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>> +    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>> +};
>>> +
>>> +/**
>>> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for 
>>> vm_bind
>>> + * and vm_unbind.
>>> + *
>>> + * These user fences can be input or output fences
>>> + * (See struct drm_i915_vm_bind_user_fence).
>>> + */
>>> +struct drm_i915_vm_bind_ext_user_fence {
>>> +#define I915_VM_BIND_EXT_USER_FENCES    1
>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>> +    struct i915_user_extension base;
>>> +
>>> +    /** @fence_count: Number of elements in the @user_fence_ptr 
>>> array. */
>>> +    __u64 fence_count;
>>> +
>>> +    /**
>>> +     * @user_fence_ptr: Pointer to an array of
>>> +     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>> +     */
>>> +    __u64 user_fence_ptr;
>>> +};
>>> +
>>> +/**
>>> + * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array of 
>>> batch buffer
>>> + * gpu virtual addresses.
>>> + *
>>> + * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), this 
>>> extension
>>> + * must always be appended in the VM_BIND mode and it will be an 
>>> error to
>>> + * append this extension in older non-VM_BIND mode.
>>> + */
>>> +struct drm_i915_gem_execbuffer_ext_batch_addresses {
>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES    1
>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>> +    struct i915_user_extension base;
>>> +
>>> +    /** @count: Number of addresses in the addr array. */
>>> +    __u32 count;
>>> +
>>> +    /** @addr: An array of batch gpu virtual addresses. */
>>> +    __u64 addr[0];
>>> +};
>>> +
>>> +/**
>>> + * struct drm_i915_gem_execbuffer_ext_user_fence - First level batch 
>>> completion
>>> + * signaling extension.
>>> + *
>>> + * This extension allows user to attach a user fence (@addr, @value 
>>> pair) to an
>>> + * execbuf to be signaled by the command streamer after the 
>>> completion of first
>>> + * level batch, by writing the @value at specified @addr and 
>>> triggering an
>>> + * interrupt.
>>> + * User can either poll for this user fence to signal or can also 
>>> wait on it
>>> + * with i915_gem_wait_user_fence ioctl.
>>> + * This is very much useful for long running contexts where waiting 
>>> on dma-fence
>>> + * by user (like i915_gem_wait ioctl) is not supported.
>>> + */
>>> +struct drm_i915_gem_execbuffer_ext_user_fence {
>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE        2
>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>> +    struct i915_user_extension base;
>>> +
>>> +    /**
>>> +     * @addr: User/Memory fence qword aligned GPU virtual address.
>>> +     *
>>> +     * Address has to be a valid GPU virtual address at the time of
>>> +     * first level batch completion.
>>> +     */
>>> +    __u64 addr;
>>> +
>>> +    /**
>>> +     * @value: User/Memory fence Value to be written to above address
>>> +     * after first level batch completes.
>>> +     */
>>> +    __u64 value;
>>> +
>>> +    /** @rsvd: Reserved for future extensions, MBZ */
>>> +    __u64 rsvd;
>>> +};
>>> +
>>> +/**
>>> + * struct drm_i915_gem_create_ext_vm_private - Extension to make the 
>>> object
>>> + * private to the specified VM.
>>> + *
>>> + * See struct drm_i915_gem_create_ext.
>>> + */
>>> +struct drm_i915_gem_create_ext_vm_private {
>>> +#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>> +    struct i915_user_extension base;
>>> +
>>> +    /** @vm_id: Id of the VM to which the object is private */
>>> +    __u32 vm_id;
>>> +};
>>> +
>>> +/**
>>> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>> + *
>>> + * User/Memory fence can be woken up either by:
>>> + *
>>> + * 1. GPU context indicated by @ctx_id, or,
>>> + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>> + *    @ctx_id is ignored when this flag is set.
>>> + *
>>> + * Wakeup condition is,
>>> + * ``((*addr & mask) op (value & mask))``
>>> + *
>>> + * See :ref:`Documentation/driver-api/dma-buf.rst 
>>> <indefinite_dma_fences>`
>>> + */
>>> +struct drm_i915_gem_wait_user_fence {
>>> +    /** @extensions: Zero-terminated chain of extensions. */
>>> +    __u64 extensions;
>>> +
>>> +    /** @addr: User/Memory fence address */
>>> +    __u64 addr;
>>> +
>>> +    /** @ctx_id: Id of the Context which will signal the fence. */
>>> +    __u32 ctx_id;
>>> +
>>> +    /** @op: Wakeup condition operator */
>>> +    __u16 op;
>>> +#define I915_UFENCE_WAIT_EQ      0
>>> +#define I915_UFENCE_WAIT_NEQ     1
>>> +#define I915_UFENCE_WAIT_GT      2
>>> +#define I915_UFENCE_WAIT_GTE     3
>>> +#define I915_UFENCE_WAIT_LT      4
>>> +#define I915_UFENCE_WAIT_LTE     5
>>> +#define I915_UFENCE_WAIT_BEFORE  6
>>> +#define I915_UFENCE_WAIT_AFTER   7
>>> +
>>> +    /**
>>> +     * @flags: Supported flags are,
>>> +     *
>>> +     * I915_UFENCE_WAIT_SOFT:
>>> +     *
>>> +     * To be woken up by i915 driver async worker (not by GPU).
>>> +     *
>>> +     * I915_UFENCE_WAIT_ABSTIME:
>>> +     *
>>> +     * Wait timeout specified as absolute time.
>>> +     */
>>> +    __u16 flags;
>>> +#define I915_UFENCE_WAIT_SOFT    0x1
>>> +#define I915_UFENCE_WAIT_ABSTIME 0x2
>>> +
>>> +    /** @value: Wakeup value */
>>> +    __u64 value;
>>> +
>>> +    /** @mask: Wakeup mask */
>>> +    __u64 mask;
>>> +#define I915_UFENCE_WAIT_U8     0xffu
>>> +#define I915_UFENCE_WAIT_U16    0xffffu
>>> +#define I915_UFENCE_WAIT_U32    0xfffffffful
>>> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>> +
>>> +    /**
>>> +     * @timeout: Wait timeout in nanoseconds.
>>> +     *
>>> +     * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is 
>>> the
>>> +     * absolute time in nsec.
>>> +     */
>>> +    __s64 timeout;
>>> +};
Tvrtko Ursulin June 8, 2022, 7:34 a.m. UTC | #15
On 07/06/2022 22:25, Niranjana Vishwanathapura wrote:
> On Tue, Jun 07, 2022 at 11:42:08AM +0100, Tvrtko Ursulin wrote:
>>
>> On 03/06/2022 07:53, Niranjana Vishwanathapura wrote:
>>> On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura 
>>> wrote:
>>>> On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>>>> On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>>>
>>>>>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>>>> <niranjana.vishwanathapura@intel.com> wrote:
>>>>>>>
>>>>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>>>>> On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>>
>>>>>>>>> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>>>>      Also add new uapi and documentation as per review comments
>>>>>>>>>      from Daniel.
>>>>>>>>>
>>>>>>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>>>>> ---
>>>>>>>>>   Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>>> +++++++++++++++++++++++++++
>>>>>>>>>   1 file changed, 399 insertions(+)
>>>>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>
>>>>>>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>> new file mode 100644
>>>>>>>>> index 000000000000..589c0a009107
>>>>>>>>> --- /dev/null
>>>>>>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>> @@ -0,0 +1,399 @@
>>>>>>>>> +/* SPDX-License-Identifier: MIT */
>>>>>>>>> +/*
>>>>>>>>> + * Copyright © 2022 Intel Corporation
>>>>>>>>> + */
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>>> + *
>>>>>>>>> + * VM_BIND feature availability.
>>>>>>>>> + * See typedef drm_i915_getparam_t param.
>>>>>>>>> + */
>>>>>>>>> +#define I915_PARAM_HAS_VM_BIND               57
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>>> + *
>>>>>>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>>>>> + * See struct drm_i915_gem_vm_control flags.
>>>>>>>>> + *
>>>>>>>>> + * A VM in VM_BIND mode will not support the older 
>>>>>>> execbuff mode of binding.
>>>>>>>>> + * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>>>> execlist (ie., the
>>>>>>>>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>>>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>>>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>>>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension 
>>>>>>> must be provided
>>>>>>>>> + * to pass in the batch buffer addresses.
>>>>>>>>> + *
>>>>>>>>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>>>> + * I915_EXEC_BATCH_FIRST of 
>>>>>>> &drm_i915_gem_execbuffer2.flags must be 0
>>>>>>>>> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS 
>>>>>>> flag must always be
>>>>>>>>> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>>>> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>>> batch_len fields
>>>>>>>>> + * of struct drm_i915_gem_execbuffer2 are also not used 
>>>>>>> and must be 0.
>>>>>>>>> + */
>>>>>>>>
>>>>>>>> From that description, it seems we have:
>>>>>>>>
>>>>>>>> struct drm_i915_gem_execbuffer2 {
>>>>>>>>         __u64 buffers_ptr;              -> must be 0 (new)
>>>>>>>>         __u32 buffer_count;             -> must be 0 (new)
>>>>>>>>         __u32 batch_start_offset;       -> must be 0 (new)
>>>>>>>>         __u32 batch_len;                -> must be 0 (new)
>>>>>>>>         __u32 DR1;                      -> must be 0 (old)
>>>>>>>>         __u32 DR4;                      -> must be 0 (old)
>>>>>>>>         __u32 num_cliprects; (fences)   -> must be 0 since 
>>>>>>> using extensions
>>>>>>>>         __u64 cliprects_ptr; (fences, extensions) -> 
>>>>>>> contains an actual pointer!
>>>>>>>>         __u64 flags;                    -> some flags must be 0 
>>>>>>>> (new)
>>>>>>>>         __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>>>>>         __u64 rsvd2;                    -> unused
>>>>>>>> };
>>>>>>>>
>>>>>>>> Based on that, why can't we just get drm_i915_gem_execbuffer3 
>>>>>>>> instead
>>>>>>>> of adding even more complexity to an already abused interface? 
>>>>>>>> While
>>>>>>>> the Vulkan-like extension thing is really nice, I don't think what
>>>>>>>> we're doing here is extending the ioctl usage, we're completely
>>>>>>>> changing how the base struct should be interpreted based on 
>>>>>>> how the VM
>>>>>>>> was created (which is an entirely different ioctl).
>>>>>>>>
>>>>>>>> From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>>>>>>>> already at -6 without these changes. I think after vm_bind we'll 
>>>>>>>> need
>>>>>>>> to create a -11 entry just to deal with this ioctl.
>>>>>>>>
>>>>>>>
>>>>>>> The only change here is removing the execlist support for VM_BIND
>>>>>>> mode (other than natural extensions).
>>>>>>> Adding a new execbuffer3 was considered, but I think we need to 
>>>>>>> be careful
>>>>>>> with that as that goes beyond the VM_BIND support, including any 
>>>>>>> future
>>>>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>>>
>>>>>> Why not? it's not like adding extensions here is really that 
>>>>>> different
>>>>>> than adding new ioctls.
>>>>>>
>>>>>> I definitely think this deserves an execbuffer3 without even
>>>>>> considering future requirements. Just  to burn down the old
>>>>>> requirements and pointless fields.
>>>>>>
>>>>>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave 
>>>>>> the
>>>>>> older sw on execbuf2 for ever.
>>>>>
>>>>> I guess another point in favour of execbuf3 would be that it's less
>>>>> midlayer. If we share the entry point then there's quite a few vfuncs
>>>>> needed to cleanly split out the vm_bind paths from the legacy
>>>>> reloc/softping paths.
>>>>>
>>>>> If we invert this and do execbuf3, then there's the existing ioctl
>>>>> vfunc, and then we share code (where it even makes sense, probably
>>>>> request setup/submit need to be shared, anything else is probably
>>>>> cleaner to just copypaste) with the usual helper approach.
>>>>>
>>>>> Also that would guarantee that really none of the old concepts like
>>>>> i915_active on the vma or vma open counts and all that stuff leaks
>>>>> into the new vm_bind execbuf.
>>>>>
>>>>> Finally I also think that copypasting would make backporting easier,
>>>>> or at least more flexible, since it should make it easier to have the
>>>>> upstream vm_bind co-exist with all the other things we have. Without
>>>>> huge amounts of conflicts (or at least much less) that pushing a pile
>>>>> of vfuncs into the existing code would cause.
>>>>>
>>>>> So maybe we should do this?
>>>>
>>>> Thanks Dave, Daniel.
>>>> There are a few things that will be common between execbuf2 and
>>>> execbuf3, like request setup/submit (as you said), fence handling 
>>>> (timeline fences, fence array, composite fences), engine selection,
>>>> etc. Also, many of the 'flags' will be there in execbuf3 also (but
>>>> bit position will differ).
>>>> But I guess these should be fine as the suggestion here is to
>>>> copy-paste the execbuff code and having a shared code where possible.
>>>> Besides, we can stop supporting some older feature in execbuff3
>>>> (like fence array in favor of newer timeline fences), which will
>>>> further reduce common code.
>>>>
>>>> Ok, I will update this series by adding execbuf3 and send out soon.
>>>>
>>>
>>> Does this sound reasonable?
>>>
>>> struct drm_i915_gem_execbuffer3 {
>>>        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>>>
>>>        __u32 batch_count;
>>>        __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu 
>>> virtual addresses */
>>
>> Casual stumble upon..
>>
>> Alternatively you could embed N pointers to make life a bit easier for 
>> both userspace and kernel side. Yes, but then "N batch buffers should 
>> be enough for everyone" problem.. :)
>>
> 
> Thanks Tvrtko,
> Yes, hence the batch_addr_ptr.

Right, but then userspace has to allocate a separate buffer and kernel 
has to access it separately from a single copy_from_user. Pros and cons 
of "this many batches should be enough for everyone" versus the extra 
operations.

Hmm.. for the common case of one batch - you could define the uapi to 
say if batch_count is one then pointer is GPU VA to the batch itself, 
not a pointer to userspace array of GPU VA?

Regards,

Tvrtko

>>>        __u64 flags;
>>> #define I915_EXEC3_RING_MASK              (0x3f)
>>> #define I915_EXEC3_DEFAULT                (0<<0)
>>> #define I915_EXEC3_RENDER                 (1<<0)
>>> #define I915_EXEC3_BSD                    (2<<0)
>>> #define I915_EXEC3_BLT                    (3<<0)
>>> #define I915_EXEC3_VEBOX                  (4<<0)
>>>
>>> #define I915_EXEC3_SECURE               (1<<6)
>>> #define I915_EXEC3_IS_PINNED            (1<<7)
>>>
>>> #define I915_EXEC3_BSD_SHIFT     (8)
>>> #define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
>>> #define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
>>> #define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
>>> #define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>>
>> I'd suggest legacy engine selection is unwanted, especially not with 
>> the convoluted BSD1/2 flags. Can we just require context with engine 
>> map and index? Or if default context has to be supported then I'd 
>> suggest ...class_instance for that mode.
>>
> 
> Ok, I will be happy to remove it and only support contexts with
> engine map, if UMDs agree on that.
> 
>>> #define I915_EXEC3_FENCE_IN             (1<<10)
>>> #define I915_EXEC3_FENCE_OUT            (1<<11)
>>> #define I915_EXEC3_FENCE_SUBMIT         (1<<12)
>>
>> People are likely to object to submit fence since generic mechanism to 
>> align submissions was rejected.
>>
> 
> Ok, again, I can remove it if UMDs are ok with it.
> 
>>>
>>>        __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>>
>> New ioctl you can afford dedicated fields.
>>
> 
> Yes, but as I asked below, I am not sure if we need this or the
> timeline fence arry extension we have is good enough.
> 
>> In any case I suggest you involve UMD folks in designing it.
>>
> 
> Yah.
> Paulo, Lionel, Jason, Daniel, can you comment on these regarding
> what will UMD need in execbuf3 and what can be removed?
> 
> Thanks,
> Niranjana
> 
>> Regards,
>>
>> Tvrtko
>>
>>>
>>>        __u64 extensions;        /* currently only for 
>>> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
>>> };
>>>
>>> With this, user can pass in batch addresses and count directly,
>>> instead of as an extension (as this rfc series was proposing).
>>>
>>> I have removed many of the flags which were either legacy or not
>>> applicable to VM_BIND mode.
>>> I have also removed fence array support (execbuffer2.cliprects_ptr)
>>> as we have timeline fence array support. Is that fine?
>>> Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>>>
>>> Any thing else needs to be added or removed?
>>>
>>> Niranjana
>>>
>>>> Niranjana
>>>>
>>>>> -Daniel
>>>>> -- 
>>>>> Daniel Vetter
>>>>> Software Engineer, Intel Corporation
>>>>> http://blog.ffwll.ch
Tvrtko Ursulin June 8, 2022, 8:36 a.m. UTC | #16
On 08/06/2022 07:40, Lionel Landwerlin wrote:
> On 03/06/2022 09:53, Niranjana Vishwanathapura wrote:
>> On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura 
>> wrote:
>>> On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>>> On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>>
>>>>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>>> <niranjana.vishwanathapura@intel.com> wrote:
>>>>>>
>>>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>>> >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>>>>>> >> VM_BIND and related uapi definitions
>>>>>> >>
>>>>>> >> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>> >>     Also add new uapi and documentation as per review comments
>>>>>> >>     from Daniel.
>>>>>> >>
>>>>>> >> Signed-off-by: Niranjana Vishwanathapura 
>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>> >> ---
>>>>>> >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>> +++++++++++++++++++++++++++
>>>>>> >>  1 file changed, 399 insertions(+)
>>>>>> >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> >>
>>>>>> >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> >> new file mode 100644
>>>>>> >> index 000000000000..589c0a009107
>>>>>> >> --- /dev/null
>>>>>> >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> >> @@ -0,0 +1,399 @@
>>>>>> >> +/* SPDX-License-Identifier: MIT */
>>>>>> >> +/*
>>>>>> >> + * Copyright © 2022 Intel Corporation
>>>>>> >> + */
>>>>>> >> +
>>>>>> >> +/**
>>>>>> >> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>> >> + *
>>>>>> >> + * VM_BIND feature availability.
>>>>>> >> + * See typedef drm_i915_getparam_t param.
>>>>>> >> + */
>>>>>> >> +#define I915_PARAM_HAS_VM_BIND               57
>>>>>> >> +
>>>>>> >> +/**
>>>>>> >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>> >> + *
>>>>>> >> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>> >> + * See struct drm_i915_gem_vm_control flags.
>>>>>> >> + *
>>>>>> >> + * A VM in VM_BIND mode will not support the older execbuff 
>>>>>> mode of binding.
>>>>>> >> + * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>>> execlist (ie., the
>>>>>> >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>> >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>> >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>> >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must 
>>>>>> be provided
>>>>>> >> + * to pass in the batch buffer addresses.
>>>>>> >> + *
>>>>>> >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>> >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags 
>>>>>> must be 0
>>>>>> >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>>> must always be
>>>>>> >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>> >> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>> batch_len fields
>>>>>> >> + * of struct drm_i915_gem_execbuffer2 are also not used and 
>>>>>> must be 0.
>>>>>> >> + */
>>>>>> >
>>>>>> >From that description, it seems we have:
>>>>>> >
>>>>>> >struct drm_i915_gem_execbuffer2 {
>>>>>> >        __u64 buffers_ptr;              -> must be 0 (new)
>>>>>> >        __u32 buffer_count;             -> must be 0 (new)
>>>>>> >        __u32 batch_start_offset;       -> must be 0 (new)
>>>>>> >        __u32 batch_len;                -> must be 0 (new)
>>>>>> >        __u32 DR1;                      -> must be 0 (old)
>>>>>> >        __u32 DR4;                      -> must be 0 (old)
>>>>>> >        __u32 num_cliprects; (fences)   -> must be 0 since using 
>>>>>> extensions
>>>>>> >        __u64 cliprects_ptr; (fences, extensions) -> contains an 
>>>>>> actual pointer!
>>>>>> >        __u64 flags;                    -> some flags must be 0 
>>>>>> (new)
>>>>>> >        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>>> >        __u64 rsvd2;                    -> unused
>>>>>> >};
>>>>>> >
>>>>>> >Based on that, why can't we just get drm_i915_gem_execbuffer3 
>>>>>> instead
>>>>>> >of adding even more complexity to an already abused interface? While
>>>>>> >the Vulkan-like extension thing is really nice, I don't think what
>>>>>> >we're doing here is extending the ioctl usage, we're completely
>>>>>> >changing how the base struct should be interpreted based on how 
>>>>>> the VM
>>>>>> >was created (which is an entirely different ioctl).
>>>>>> >
>>>>>> >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>>>>>> >already at -6 without these changes. I think after vm_bind we'll 
>>>>>> need
>>>>>> >to create a -11 entry just to deal with this ioctl.
>>>>>> >
>>>>>>
>>>>>> The only change here is removing the execlist support for VM_BIND
>>>>>> mode (other than natural extensions).
>>>>>> Adding a new execbuffer3 was considered, but I think we need to be 
>>>>>> careful
>>>>>> with that as that goes beyond the VM_BIND support, including any 
>>>>>> future
>>>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>>
>>>>> Why not? it's not like adding extensions here is really that different
>>>>> than adding new ioctls.
>>>>>
>>>>> I definitely think this deserves an execbuffer3 without even
>>>>> considering future requirements. Just  to burn down the old
>>>>> requirements and pointless fields.
>>>>>
>>>>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
>>>>> older sw on execbuf2 for ever.
>>>>
>>>> I guess another point in favour of execbuf3 would be that it's less
>>>> midlayer. If we share the entry point then there's quite a few vfuncs
>>>> needed to cleanly split out the vm_bind paths from the legacy
>>>> reloc/softping paths.
>>>>
>>>> If we invert this and do execbuf3, then there's the existing ioctl
>>>> vfunc, and then we share code (where it even makes sense, probably
>>>> request setup/submit need to be shared, anything else is probably
>>>> cleaner to just copypaste) with the usual helper approach.
>>>>
>>>> Also that would guarantee that really none of the old concepts like
>>>> i915_active on the vma or vma open counts and all that stuff leaks
>>>> into the new vm_bind execbuf.
>>>>
>>>> Finally I also think that copypasting would make backporting easier,
>>>> or at least more flexible, since it should make it easier to have the
>>>> upstream vm_bind co-exist with all the other things we have. Without
>>>> huge amounts of conflicts (or at least much less) that pushing a pile
>>>> of vfuncs into the existing code would cause.
>>>>
>>>> So maybe we should do this?
>>>
>>> Thanks Dave, Daniel.
>>> There are a few things that will be common between execbuf2 and
>>> execbuf3, like request setup/submit (as you said), fence handling 
>>> (timeline fences, fence array, composite fences), engine selection,
>>> etc. Also, many of the 'flags' will be there in execbuf3 also (but
>>> bit position will differ).
>>> But I guess these should be fine as the suggestion here is to
>>> copy-paste the execbuff code and having a shared code where possible.
>>> Besides, we can stop supporting some older feature in execbuff3
>>> (like fence array in favor of newer timeline fences), which will
>>> further reduce common code.
>>>
>>> Ok, I will update this series by adding execbuf3 and send out soon.
>>>
>>
>> Does this sound reasonable?
> 
> 
> Thanks for proposing this. Some comments below.
> 
> 
>>
>> struct drm_i915_gem_execbuffer3 {
>>        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>>
>>        __u32 batch_count;
>>        __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu 
>> virtual addresses */
>>
>>        __u64 flags;
>> #define I915_EXEC3_RING_MASK              (0x3f)
>> #define I915_EXEC3_DEFAULT                (0<<0)
>> #define I915_EXEC3_RENDER                 (1<<0)
>> #define I915_EXEC3_BSD                    (2<<0)
>> #define I915_EXEC3_BLT                    (3<<0)
>> #define I915_EXEC3_VEBOX                  (4<<0)
> 
> 
> Shouldn't we use the new engine selection uAPI instead?
> 
> We can already create an engine map with I915_CONTEXT_PARAM_ENGINES in 
> drm_i915_gem_context_create_ext_setparam.
> 
> And you can also create virtual engines with the same extension.
> 
> It feels like this could be a single u32 with the engine index (in the 
> context engine map).

Yes I said the same yesterday.

Also note that as you can't any longer set engines on a default context, 
question is whether userspace cares to use execbuf3 with it (default 
context).

If it does, it will need an alternative engine selection for that case. 
I was proposing class:instance rather than legacy cumbersome flags.

If it does not, I  mean if the decision is to only allow execbuf3 with 
engine maps, then it leaves the default context a waste of kernel memory 
in the execbuf3 future. :( Don't know what to do there..

Regards,

Tvrtko

> 
> 
>>
>> #define I915_EXEC3_SECURE               (1<<6)
>> #define I915_EXEC3_IS_PINNED            (1<<7)
> 
> 
> What's the meaning of PINNED?
> 
> 
>>
>> #define I915_EXEC3_BSD_SHIFT     (8)
>> #define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
>> #define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
>> #define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
>> #define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>>
>> #define I915_EXEC3_FENCE_IN             (1<<10)
>> #define I915_EXEC3_FENCE_OUT            (1<<11)
> 
> 
> For Mesa, as soon as we have DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 
> support, we only use that.
> 
> So there isn't much point for FENCE_IN/OUT.
> 
> Maybe check with other UMDs?
> 
> 
>> #define I915_EXEC3_FENCE_SUBMIT         (1<<12)
> 
> 
> What's FENCE_SUBMIT?
> 
> 
>>
>>        __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>>
>>        __u64 extensions;        /* currently only for 
>> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
>> };
>>
>> With this, user can pass in batch addresses and count directly,
>> instead of as an extension (as this rfc series was proposing).
>>
>> I have removed many of the flags which were either legacy or not
>> applicable to VM_BIND mode.
>> I have also removed fence array support (execbuffer2.cliprects_ptr)
>> as we have timeline fence array support. Is that fine?
>> Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>>
>> Any thing else needs to be added or removed?
>>
>> Niranjana
>>
>>> Niranjana
>>>
>>>> -Daniel
>>>> -- 
>>>> Daniel Vetter
>>>> Software Engineer, Intel Corporation
>>>> http://blog.ffwll.ch
> 
>
Lionel Landwerlin June 8, 2022, 8:45 a.m. UTC | #17
On 08/06/2022 11:36, Tvrtko Ursulin wrote:
>
> On 08/06/2022 07:40, Lionel Landwerlin wrote:
>> On 03/06/2022 09:53, Niranjana Vishwanathapura wrote:
>>> On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura 
>>> wrote:
>>>> On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>>>> On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>>>
>>>>>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>>>> <niranjana.vishwanathapura@intel.com> wrote:
>>>>>>>
>>>>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>>>> >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura 
>>>>>>> wrote:
>>>>>>> >> VM_BIND and related uapi definitions
>>>>>>> >>
>>>>>>> >> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>> >>     Also add new uapi and documentation as per review comments
>>>>>>> >>     from Daniel.
>>>>>>> >>
>>>>>>> >> Signed-off-by: Niranjana Vishwanathapura 
>>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>>> >> ---
>>>>>>> >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>>> +++++++++++++++++++++++++++
>>>>>>> >>  1 file changed, 399 insertions(+)
>>>>>>> >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>> >>
>>>>>>> >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>> >> new file mode 100644
>>>>>>> >> index 000000000000..589c0a009107
>>>>>>> >> --- /dev/null
>>>>>>> >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>> >> @@ -0,0 +1,399 @@
>>>>>>> >> +/* SPDX-License-Identifier: MIT */
>>>>>>> >> +/*
>>>>>>> >> + * Copyright © 2022 Intel Corporation
>>>>>>> >> + */
>>>>>>> >> +
>>>>>>> >> +/**
>>>>>>> >> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>> >> + *
>>>>>>> >> + * VM_BIND feature availability.
>>>>>>> >> + * See typedef drm_i915_getparam_t param.
>>>>>>> >> + */
>>>>>>> >> +#define I915_PARAM_HAS_VM_BIND 57
>>>>>>> >> +
>>>>>>> >> +/**
>>>>>>> >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>> >> + *
>>>>>>> >> + * Flag to opt-in for VM_BIND mode of binding during VM 
>>>>>>> creation.
>>>>>>> >> + * See struct drm_i915_gem_vm_control flags.
>>>>>>> >> + *
>>>>>>> >> + * A VM in VM_BIND mode will not support the older execbuff 
>>>>>>> mode of binding.
>>>>>>> >> + * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>>>> execlist (ie., the
>>>>>>> >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>> >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>> >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>> >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension 
>>>>>>> must be provided
>>>>>>> >> + * to pass in the batch buffer addresses.
>>>>>>> >> + *
>>>>>>> >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>> >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags 
>>>>>>> must be 0
>>>>>>> >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>>>> must always be
>>>>>>> >> + * set (See struct 
>>>>>>> drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>> >> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>>> batch_len fields
>>>>>>> >> + * of struct drm_i915_gem_execbuffer2 are also not used and 
>>>>>>> must be 0.
>>>>>>> >> + */
>>>>>>> >
>>>>>>> >From that description, it seems we have:
>>>>>>> >
>>>>>>> >struct drm_i915_gem_execbuffer2 {
>>>>>>> >        __u64 buffers_ptr;              -> must be 0 (new)
>>>>>>> >        __u32 buffer_count;             -> must be 0 (new)
>>>>>>> >        __u32 batch_start_offset;       -> must be 0 (new)
>>>>>>> >        __u32 batch_len;                -> must be 0 (new)
>>>>>>> >        __u32 DR1;                      -> must be 0 (old)
>>>>>>> >        __u32 DR4;                      -> must be 0 (old)
>>>>>>> >        __u32 num_cliprects; (fences)   -> must be 0 since 
>>>>>>> using extensions
>>>>>>> >        __u64 cliprects_ptr; (fences, extensions) -> contains 
>>>>>>> an actual pointer!
>>>>>>> >        __u64 flags;                    -> some flags must be 0 
>>>>>>> (new)
>>>>>>> >        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>>>> >        __u64 rsvd2;                    -> unused
>>>>>>> >};
>>>>>>> >
>>>>>>> >Based on that, why can't we just get drm_i915_gem_execbuffer3 
>>>>>>> instead
>>>>>>> >of adding even more complexity to an already abused interface? 
>>>>>>> While
>>>>>>> >the Vulkan-like extension thing is really nice, I don't think what
>>>>>>> >we're doing here is extending the ioctl usage, we're completely
>>>>>>> >changing how the base struct should be interpreted based on how 
>>>>>>> the VM
>>>>>>> >was created (which is an entirely different ioctl).
>>>>>>> >
>>>>>>> >From Rusty Russel's API Design grading, 
>>>>>>> drm_i915_gem_execbuffer2 is
>>>>>>> >already at -6 without these changes. I think after vm_bind 
>>>>>>> we'll need
>>>>>>> >to create a -11 entry just to deal with this ioctl.
>>>>>>> >
>>>>>>>
>>>>>>> The only change here is removing the execlist support for VM_BIND
>>>>>>> mode (other than natural extensions).
>>>>>>> Adding a new execbuffer3 was considered, but I think we need to 
>>>>>>> be careful
>>>>>>> with that as that goes beyond the VM_BIND support, including any 
>>>>>>> future
>>>>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>>>
>>>>>> Why not? it's not like adding extensions here is really that 
>>>>>> different
>>>>>> than adding new ioctls.
>>>>>>
>>>>>> I definitely think this deserves an execbuffer3 without even
>>>>>> considering future requirements. Just  to burn down the old
>>>>>> requirements and pointless fields.
>>>>>>
>>>>>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, 
>>>>>> leave the
>>>>>> older sw on execbuf2 for ever.
>>>>>
>>>>> I guess another point in favour of execbuf3 would be that it's less
>>>>> midlayer. If we share the entry point then there's quite a few vfuncs
>>>>> needed to cleanly split out the vm_bind paths from the legacy
>>>>> reloc/softping paths.
>>>>>
>>>>> If we invert this and do execbuf3, then there's the existing ioctl
>>>>> vfunc, and then we share code (where it even makes sense, probably
>>>>> request setup/submit need to be shared, anything else is probably
>>>>> cleaner to just copypaste) with the usual helper approach.
>>>>>
>>>>> Also that would guarantee that really none of the old concepts like
>>>>> i915_active on the vma or vma open counts and all that stuff leaks
>>>>> into the new vm_bind execbuf.
>>>>>
>>>>> Finally I also think that copypasting would make backporting easier,
>>>>> or at least more flexible, since it should make it easier to have the
>>>>> upstream vm_bind co-exist with all the other things we have. Without
>>>>> huge amounts of conflicts (or at least much less) that pushing a pile
>>>>> of vfuncs into the existing code would cause.
>>>>>
>>>>> So maybe we should do this?
>>>>
>>>> Thanks Dave, Daniel.
>>>> There are a few things that will be common between execbuf2 and
>>>> execbuf3, like request setup/submit (as you said), fence handling 
>>>> (timeline fences, fence array, composite fences), engine selection,
>>>> etc. Also, many of the 'flags' will be there in execbuf3 also (but
>>>> bit position will differ).
>>>> But I guess these should be fine as the suggestion here is to
>>>> copy-paste the execbuff code and having a shared code where possible.
>>>> Besides, we can stop supporting some older feature in execbuff3
>>>> (like fence array in favor of newer timeline fences), which will
>>>> further reduce common code.
>>>>
>>>> Ok, I will update this series by adding execbuf3 and send out soon.
>>>>
>>>
>>> Does this sound reasonable?
>>
>>
>> Thanks for proposing this. Some comments below.
>>
>>
>>>
>>> struct drm_i915_gem_execbuffer3 {
>>>        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>>>
>>>        __u32 batch_count;
>>>        __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu 
>>> virtual addresses */
>>>
>>>        __u64 flags;
>>> #define I915_EXEC3_RING_MASK              (0x3f)
>>> #define I915_EXEC3_DEFAULT                (0<<0)
>>> #define I915_EXEC3_RENDER                 (1<<0)
>>> #define I915_EXEC3_BSD                    (2<<0)
>>> #define I915_EXEC3_BLT                    (3<<0)
>>> #define I915_EXEC3_VEBOX                  (4<<0)
>>
>>
>> Shouldn't we use the new engine selection uAPI instead?
>>
>> We can already create an engine map with I915_CONTEXT_PARAM_ENGINES 
>> in drm_i915_gem_context_create_ext_setparam.
>>
>> And you can also create virtual engines with the same extension.
>>
>> It feels like this could be a single u32 with the engine index (in 
>> the context engine map).
>
> Yes I said the same yesterday.
>
> Also note that as you can't any longer set engines on a default 
> context, question is whether userspace cares to use execbuf3 with it 
> (default context).
>
> If it does, it will need an alternative engine selection for that 
> case. I was proposing class:instance rather than legacy cumbersome flags.
>
> If it does not, I  mean if the decision is to only allow execbuf3 with 
> engine maps, then it leaves the default context a waste of kernel 
> memory in the execbuf3 future. :( Don't know what to do there..
>
> Regards,
>
> Tvrtko


Thanks Tvrtko, I only saw your reply after responding.


Both Iris & Anv create a context with engines (if kernel supports it) : 
https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/src/intel/common/intel_gem.c#L73

I think we should be fine with just a single engine id and we don't care 
about the default context.


-Lionel


>
>>
>>
>>>
>>> #define I915_EXEC3_SECURE               (1<<6)
>>> #define I915_EXEC3_IS_PINNED            (1<<7)
>>
>>
>> What's the meaning of PINNED?
>>
>>
>>>
>>> #define I915_EXEC3_BSD_SHIFT     (8)
>>> #define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
>>> #define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
>>> #define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
>>> #define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>>>
>>> #define I915_EXEC3_FENCE_IN             (1<<10)
>>> #define I915_EXEC3_FENCE_OUT            (1<<11)
>>
>>
>> For Mesa, as soon as we have 
>> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES support, we only use that.
>>
>> So there isn't much point for FENCE_IN/OUT.
>>
>> Maybe check with other UMDs?
>>
>>
>>> #define I915_EXEC3_FENCE_SUBMIT (1<<12)
>>
>>
>> What's FENCE_SUBMIT?
>>
>>
>>>
>>>        __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>>>
>>>        __u64 extensions;        /* currently only for 
>>> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
>>> };
>>>
>>> With this, user can pass in batch addresses and count directly,
>>> instead of as an extension (as this rfc series was proposing).
>>>
>>> I have removed many of the flags which were either legacy or not
>>> applicable to VM_BIND mode.
>>> I have also removed fence array support (execbuffer2.cliprects_ptr)
>>> as we have timeline fence array support. Is that fine?
>>> Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>>>
>>> Any thing else needs to be added or removed?
>>>
>>> Niranjana
>>>
>>>> Niranjana
>>>>
>>>>> -Daniel
>>>>> -- 
>>>>> Daniel Vetter
>>>>> Software Engineer, Intel Corporation
>>>>> http://blog.ffwll.ch
>>
>>
Tvrtko Ursulin June 8, 2022, 8:54 a.m. UTC | #18
On 08/06/2022 09:45, Lionel Landwerlin wrote:
> On 08/06/2022 11:36, Tvrtko Ursulin wrote:
>>
>> On 08/06/2022 07:40, Lionel Landwerlin wrote:
>>> On 03/06/2022 09:53, Niranjana Vishwanathapura wrote:
>>>> On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura 
>>>> wrote:
>>>>> On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>>>>> On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>>>>
>>>>>>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>>>>> <niranjana.vishwanathapura@intel.com> wrote:
>>>>>>>>
>>>>>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>>>>> >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura 
>>>>>>>> wrote:
>>>>>>>> >> VM_BIND and related uapi definitions
>>>>>>>> >>
>>>>>>>> >> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>>> >>     Also add new uapi and documentation as per review comments
>>>>>>>> >>     from Daniel.
>>>>>>>> >>
>>>>>>>> >> Signed-off-by: Niranjana Vishwanathapura 
>>>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>>>> >> ---
>>>>>>>> >>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>>>> +++++++++++++++++++++++++++
>>>>>>>> >>  1 file changed, 399 insertions(+)
>>>>>>>> >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>> >>
>>>>>>>> >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>> >> new file mode 100644
>>>>>>>> >> index 000000000000..589c0a009107
>>>>>>>> >> --- /dev/null
>>>>>>>> >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>> >> @@ -0,0 +1,399 @@
>>>>>>>> >> +/* SPDX-License-Identifier: MIT */
>>>>>>>> >> +/*
>>>>>>>> >> + * Copyright © 2022 Intel Corporation
>>>>>>>> >> + */
>>>>>>>> >> +
>>>>>>>> >> +/**
>>>>>>>> >> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>> >> + *
>>>>>>>> >> + * VM_BIND feature availability.
>>>>>>>> >> + * See typedef drm_i915_getparam_t param.
>>>>>>>> >> + */
>>>>>>>> >> +#define I915_PARAM_HAS_VM_BIND 57
>>>>>>>> >> +
>>>>>>>> >> +/**
>>>>>>>> >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>> >> + *
>>>>>>>> >> + * Flag to opt-in for VM_BIND mode of binding during VM 
>>>>>>>> creation.
>>>>>>>> >> + * See struct drm_i915_gem_vm_control flags.
>>>>>>>> >> + *
>>>>>>>> >> + * A VM in VM_BIND mode will not support the older execbuff 
>>>>>>>> mode of binding.
>>>>>>>> >> + * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>>>>> execlist (ie., the
>>>>>>>> >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>>> >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>>> >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>>> >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension 
>>>>>>>> must be provided
>>>>>>>> >> + * to pass in the batch buffer addresses.
>>>>>>>> >> + *
>>>>>>>> >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>>> >> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags 
>>>>>>>> must be 0
>>>>>>>> >> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>>>>> must always be
>>>>>>>> >> + * set (See struct 
>>>>>>>> drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>>> >> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>>>> batch_len fields
>>>>>>>> >> + * of struct drm_i915_gem_execbuffer2 are also not used and 
>>>>>>>> must be 0.
>>>>>>>> >> + */
>>>>>>>> >
>>>>>>>> >From that description, it seems we have:
>>>>>>>> >
>>>>>>>> >struct drm_i915_gem_execbuffer2 {
>>>>>>>> >        __u64 buffers_ptr;              -> must be 0 (new)
>>>>>>>> >        __u32 buffer_count;             -> must be 0 (new)
>>>>>>>> >        __u32 batch_start_offset;       -> must be 0 (new)
>>>>>>>> >        __u32 batch_len;                -> must be 0 (new)
>>>>>>>> >        __u32 DR1;                      -> must be 0 (old)
>>>>>>>> >        __u32 DR4;                      -> must be 0 (old)
>>>>>>>> >        __u32 num_cliprects; (fences)   -> must be 0 since 
>>>>>>>> using extensions
>>>>>>>> >        __u64 cliprects_ptr; (fences, extensions) -> contains 
>>>>>>>> an actual pointer!
>>>>>>>> >        __u64 flags;                    -> some flags must be 0 
>>>>>>>> (new)
>>>>>>>> >        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>>>>> >        __u64 rsvd2;                    -> unused
>>>>>>>> >};
>>>>>>>> >
>>>>>>>> >Based on that, why can't we just get drm_i915_gem_execbuffer3 
>>>>>>>> instead
>>>>>>>> >of adding even more complexity to an already abused interface? 
>>>>>>>> While
>>>>>>>> >the Vulkan-like extension thing is really nice, I don't think what
>>>>>>>> >we're doing here is extending the ioctl usage, we're completely
>>>>>>>> >changing how the base struct should be interpreted based on how 
>>>>>>>> the VM
>>>>>>>> >was created (which is an entirely different ioctl).
>>>>>>>> >
>>>>>>>> >From Rusty Russel's API Design grading, 
>>>>>>>> drm_i915_gem_execbuffer2 is
>>>>>>>> >already at -6 without these changes. I think after vm_bind 
>>>>>>>> we'll need
>>>>>>>> >to create a -11 entry just to deal with this ioctl.
>>>>>>>> >
>>>>>>>>
>>>>>>>> The only change here is removing the execlist support for VM_BIND
>>>>>>>> mode (other than natural extensions).
>>>>>>>> Adding a new execbuffer3 was considered, but I think we need to 
>>>>>>>> be careful
>>>>>>>> with that as that goes beyond the VM_BIND support, including any 
>>>>>>>> future
>>>>>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>>>>
>>>>>>> Why not? it's not like adding extensions here is really that 
>>>>>>> different
>>>>>>> than adding new ioctls.
>>>>>>>
>>>>>>> I definitely think this deserves an execbuffer3 without even
>>>>>>> considering future requirements. Just  to burn down the old
>>>>>>> requirements and pointless fields.
>>>>>>>
>>>>>>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, 
>>>>>>> leave the
>>>>>>> older sw on execbuf2 for ever.
>>>>>>
>>>>>> I guess another point in favour of execbuf3 would be that it's less
>>>>>> midlayer. If we share the entry point then there's quite a few vfuncs
>>>>>> needed to cleanly split out the vm_bind paths from the legacy
>>>>>> reloc/softping paths.
>>>>>>
>>>>>> If we invert this and do execbuf3, then there's the existing ioctl
>>>>>> vfunc, and then we share code (where it even makes sense, probably
>>>>>> request setup/submit need to be shared, anything else is probably
>>>>>> cleaner to just copypaste) with the usual helper approach.
>>>>>>
>>>>>> Also that would guarantee that really none of the old concepts like
>>>>>> i915_active on the vma or vma open counts and all that stuff leaks
>>>>>> into the new vm_bind execbuf.
>>>>>>
>>>>>> Finally I also think that copypasting would make backporting easier,
>>>>>> or at least more flexible, since it should make it easier to have the
>>>>>> upstream vm_bind co-exist with all the other things we have. Without
>>>>>> huge amounts of conflicts (or at least much less) that pushing a pile
>>>>>> of vfuncs into the existing code would cause.
>>>>>>
>>>>>> So maybe we should do this?
>>>>>
>>>>> Thanks Dave, Daniel.
>>>>> There are a few things that will be common between execbuf2 and
>>>>> execbuf3, like request setup/submit (as you said), fence handling 
>>>>> (timeline fences, fence array, composite fences), engine selection,
>>>>> etc. Also, many of the 'flags' will be there in execbuf3 also (but
>>>>> bit position will differ).
>>>>> But I guess these should be fine as the suggestion here is to
>>>>> copy-paste the execbuff code and having a shared code where possible.
>>>>> Besides, we can stop supporting some older feature in execbuff3
>>>>> (like fence array in favor of newer timeline fences), which will
>>>>> further reduce common code.
>>>>>
>>>>> Ok, I will update this series by adding execbuf3 and send out soon.
>>>>>
>>>>
>>>> Does this sound reasonable?
>>>
>>>
>>> Thanks for proposing this. Some comments below.
>>>
>>>
>>>>
>>>> struct drm_i915_gem_execbuffer3 {
>>>>        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>>>>
>>>>        __u32 batch_count;
>>>>        __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu 
>>>> virtual addresses */
>>>>
>>>>        __u64 flags;
>>>> #define I915_EXEC3_RING_MASK              (0x3f)
>>>> #define I915_EXEC3_DEFAULT                (0<<0)
>>>> #define I915_EXEC3_RENDER                 (1<<0)
>>>> #define I915_EXEC3_BSD                    (2<<0)
>>>> #define I915_EXEC3_BLT                    (3<<0)
>>>> #define I915_EXEC3_VEBOX                  (4<<0)
>>>
>>>
>>> Shouldn't we use the new engine selection uAPI instead?
>>>
>>> We can already create an engine map with I915_CONTEXT_PARAM_ENGINES 
>>> in drm_i915_gem_context_create_ext_setparam.
>>>
>>> And you can also create virtual engines with the same extension.
>>>
>>> It feels like this could be a single u32 with the engine index (in 
>>> the context engine map).
>>
>> Yes I said the same yesterday.
>>
>> Also note that as you can't any longer set engines on a default 
>> context, question is whether userspace cares to use execbuf3 with it 
>> (default context).
>>
>> If it does, it will need an alternative engine selection for that 
>> case. I was proposing class:instance rather than legacy cumbersome flags.
>>
>> If it does not, I  mean if the decision is to only allow execbuf3 with 
>> engine maps, then it leaves the default context a waste of kernel 
>> memory in the execbuf3 future. :( Don't know what to do there..
>>
>> Regards,
>>
>> Tvrtko
> 
> 
> Thanks Tvrtko, I only saw your reply after responding.
> 
> 
> Both Iris & Anv create a context with engines (if kernel supports it) : 
> https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/src/intel/common/intel_gem.c#L73 
> 
> 
> I think we should be fine with just a single engine id and we don't care 
> about the default context.

I wonder if in this case we could stop creating the default context 
starting from a future "gen"? Otherwise, with engine map only execbuf3 
and execbuf3 only userspace, it would serve no purpose apart from 
wasting kernel memory.

Regards,

Tvrtko

> 
> 
> -Lionel
> 
> 
>>
>>>
>>>
>>>>
>>>> #define I915_EXEC3_SECURE               (1<<6)
>>>> #define I915_EXEC3_IS_PINNED            (1<<7)
>>>
>>>
>>> What's the meaning of PINNED?
>>>
>>>
>>>>
>>>> #define I915_EXEC3_BSD_SHIFT     (8)
>>>> #define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
>>>> #define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
>>>> #define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
>>>> #define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>>>>
>>>> #define I915_EXEC3_FENCE_IN             (1<<10)
>>>> #define I915_EXEC3_FENCE_OUT            (1<<11)
>>>
>>>
>>> For Mesa, as soon as we have 
>>> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES support, we only use that.
>>>
>>> So there isn't much point for FENCE_IN/OUT.
>>>
>>> Maybe check with other UMDs?
>>>
>>>
>>>> #define I915_EXEC3_FENCE_SUBMIT (1<<12)
>>>
>>>
>>> What's FENCE_SUBMIT?
>>>
>>>
>>>>
>>>>        __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>>>>
>>>>        __u64 extensions;        /* currently only for 
>>>> DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
>>>> };
>>>>
>>>> With this, user can pass in batch addresses and count directly,
>>>> instead of as an extension (as this rfc series was proposing).
>>>>
>>>> I have removed many of the flags which were either legacy or not
>>>> applicable to VM_BIND mode.
>>>> I have also removed fence array support (execbuffer2.cliprects_ptr)
>>>> as we have timeline fence array support. Is that fine?
>>>> Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>>>>
>>>> Any thing else needs to be added or removed?
>>>>
>>>> Niranjana
>>>>
>>>>> Niranjana
>>>>>
>>>>>> -Daniel
>>>>>> -- 
>>>>>> Daniel Vetter
>>>>>> Software Engineer, Intel Corporation
>>>>>> http://blog.ffwll.ch
>>>
>>>
>
Matthew Auld June 8, 2022, 9:12 a.m. UTC | #19
On 08/06/2022 08:17, Tvrtko Ursulin wrote:
> 
> On 07/06/2022 20:37, Niranjana Vishwanathapura wrote:
>> On Tue, Jun 07, 2022 at 11:27:14AM +0100, Tvrtko Ursulin wrote:
>>>
>>> On 17/05/2022 19:32, Niranjana Vishwanathapura wrote:
>>>> VM_BIND and related uapi definitions
>>>>
>>>> v2: Ensure proper kernel-doc formatting with cross references.
>>>>     Also add new uapi and documentation as per review comments
>>>>     from Daniel.
>>>>
>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>> <niranjana.vishwanathapura@intel.com>
>>>> ---
>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>>>>  1 file changed, 399 insertions(+)
>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>> new file mode 100644
>>>> index 000000000000..589c0a009107
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>> @@ -0,0 +1,399 @@
>>>> +/* SPDX-License-Identifier: MIT */
>>>> +/*
>>>> + * Copyright © 2022 Intel Corporation
>>>> + */
>>>> +
>>>> +/**
>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>> + *
>>>> + * VM_BIND feature availability.
>>>> + * See typedef drm_i915_getparam_t param.
>>>> + */
>>>> +#define I915_PARAM_HAS_VM_BIND        57
>>>> +
>>>> +/**
>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>> + *
>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>> + * See struct drm_i915_gem_vm_control flags.
>>>> + *
>>>> + * A VM in VM_BIND mode will not support the older execbuff mode of 
>>>> binding.
>>>> + * In VM_BIND mode, execbuff ioctl will not accept any execlist 
>>>> (ie., the
>>>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be 
>>>> provided
>>>> + * to pass in the batch buffer addresses.
>>>> + *
>>>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
>>>> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must 
>>>> always be
>>>> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len 
>>>> fields
>>>> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
>>>> + */
>>>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>> +
>>>> +/**
>>>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>> + *
>>>> + * Flag to declare context as long running.
>>>> + * See struct drm_i915_gem_context_create_ext flags.
>>>> + *
>>>> + * Usage of dma-fence expects that they complete in reasonable 
>>>> amount of time.
>>>> + * Compute on the other hand can be long running. Hence it is not 
>>>> appropriate
>>>> + * for compute contexts to export request completion dma-fence to 
>>>> user.
>>>> + * The dma-fence usage will be limited to in-kernel consumption only.
>>>> + * Compute contexts need to use user/memory fence.
>>>> + *
>>>> + * So, long running contexts do not support output fences. Hence,
>>>> + * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
>>>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) are 
>>>> expected
>>>> + * to be not used.
>>>> + *
>>>> + * DRM_I915_GEM_WAIT ioctl call is also not supported for objects 
>>>> mapped
>>>> + * to long running contexts.
>>>> + */
>>>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>> +
>>>> +/* VM_BIND related ioctls */
>>>> +#define DRM_I915_GEM_VM_BIND        0x3d
>>>> +#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>> +#define DRM_I915_GEM_WAIT_USER_FENCE    0x3f
>>>> +
>>>> +#define DRM_IOCTL_I915_GEM_VM_BIND        DRM_IOWR(DRM_COMMAND_BASE 
>>>> + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>> DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct 
>>>> drm_i915_gem_wait_user_fence)
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>> + *
>>>> + * This structure is passed to VM_BIND ioctl and specifies the 
>>>> mapping of GPU
>>>> + * virtual address (VA) range to the section of an object that 
>>>> should be bound
>>>> + * in the device page table of the specified address space (VM).
>>>> + * The VA range specified must be unique (ie., not currently bound) 
>>>> and can
>>>> + * be mapped to whole object or a section of the object (partial 
>>>> binding).
>>>> + * Multiple VA mappings can be created to the same section of the 
>>>> object
>>>> + * (aliasing).
>>>> + */
>>>> +struct drm_i915_gem_vm_bind {
>>>> +    /** @vm_id: VM (address space) id to bind */
>>>> +    __u32 vm_id;
>>>> +
>>>> +    /** @handle: Object handle */
>>>> +    __u32 handle;
>>>> +
>>>> +    /** @start: Virtual Address start to bind */
>>>> +    __u64 start;
>>>> +
>>>> +    /** @offset: Offset in object to bind */
>>>> +    __u64 offset;
>>>> +
>>>> +    /** @length: Length of mapping to bind */
>>>> +    __u64 length;
>>>
>>> Does it support, or should it, equivalent of EXEC_OBJECT_PAD_TO_SIZE? 
>>> Or if not userspace is expected to map the remainder of the space to 
>>> a dummy object? In which case would there be any alignment/padding 
>>> issues preventing the two bind to be placed next to each other?
>>>
>>> I ask because someone from the compute side asked me about a problem 
>>> with their strategy of dealing with overfetch and I suggested pad to 
>>> size.
>>>
>>
>> Thanks Tvrtko,
>> I think we shouldn't be needing it. As with VM_BIND VA assignment
>> is completely pushed to userspace, no padding should be necessary
>> once the 'start' and 'size' alignment conditions are met.
>>
>> I will add some documentation on alignment requirement here.
>> Generally, 'start' and 'size' should be 4K aligned. But, I think
>> when we have 64K lmem page sizes (dg2 and xehpsdv), they need to
>> be 64K aligned.
> 
> + Matt
> 
> Align to 64k is enough for all overfetch issues?
> 
> Apparently compute has a situation where a buffer is received by one 
> component and another has to apply more alignment to it, to deal with 
> overfetch. Since they cannot grow the actual BO if they wanted to 
> VM_BIND a scratch area on top? Or perhaps none of this is a problem on 
> discrete and original BO should be correctly allocated to start with.
> 
> Side question - what about the align to 2MiB mentioned in 
> i915_vma_insert to avoid mixing 4k and 64k PTEs? That does not apply to 
> discrete?

Not sure about the overfetch thing, but yeah dg2 & xehpsdv both require 
a minimum of 64K pages underneath for local memory, and the BO size will 
also be rounded up accordingly. And yeah the complication arises due to 
not being able to mix 4K + 64K GTT pages within the same page-table 
(existed since even gen8). Note that 4K here is what we typically get 
for system memory.

Originally we had a memory coloring scheme to track the "color" of each 
page-table, which basically ensures that userspace can't do something 
nasty like mixing page sizes. The advantage of that scheme is that we 
would only require 64K GTT alignment and no extra padding, but is 
perhaps a little complex.

The merged solution is just to align and pad (i.e vma->node.size and not 
vma->size) out of the vma to 2M, which is dead simple implementation 
wise, but does potentially waste some GTT space and some of the local 
memory used for the actual page-table. For the alignment the kernel just 
validates that the GTT address is aligned to 2M in vma_insert(), and 
then for the padding it just inflates it to 2M, if userspace hasn't already.

See the kernel-doc for @size: 
https://dri.freedesktop.org/docs/drm/gpu/driver-uapi.html?#c.drm_i915_gem_create_ext

> 
> Regards,
> 
> Tvrtko
> 
>>
>> Niranjana
>>
>>> Regards,
>>>
>>> Tvrtko
>>>
>>>> +
>>>> +    /**
>>>> +     * @flags: Supported flags are,
>>>> +     *
>>>> +     * I915_GEM_VM_BIND_READONLY:
>>>> +     * Mapping is read-only.
>>>> +     *
>>>> +     * I915_GEM_VM_BIND_CAPTURE:
>>>> +     * Capture this mapping in the dump upon GPU error.
>>>> +     */
>>>> +    __u64 flags;
>>>> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>> +
>>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>>> mapping. */
>>>> +    __u64 extensions;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>> + *
>>>> + * This structure is passed to VM_UNBIND ioctl and specifies the 
>>>> GPU virtual
>>>> + * address (VA) range that should be unbound from the device page 
>>>> table of the
>>>> + * specified address space (VM). The specified VA range must match 
>>>> one of the
>>>> + * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>>>> + * completion.
>>>> + */
>>>> +struct drm_i915_gem_vm_unbind {
>>>> +    /** @vm_id: VM (address space) id to bind */
>>>> +    __u32 vm_id;
>>>> +
>>>> +    /** @rsvd: Reserved for future use; must be zero. */
>>>> +    __u32 rsvd;
>>>> +
>>>> +    /** @start: Virtual Address start to unbind */
>>>> +    __u64 start;
>>>> +
>>>> +    /** @length: Length of mapping to unbind */
>>>> +    __u64 length;
>>>> +
>>>> +    /** @flags: reserved for future usage, currently MBZ */
>>>> +    __u64 flags;
>>>> +
>>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>>> mapping. */
>>>> +    __u64 extensions;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_fence - An input or output fence for the 
>>>> vm_bind
>>>> + * or the vm_unbind work.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will wait for input fence 
>>>> to signal
>>>> + * before starting the binding or unbinding.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will signal the returned 
>>>> output fence
>>>> + * after the completion of binding or unbinding.
>>>> + */
>>>> +struct drm_i915_vm_bind_fence {
>>>> +    /** @handle: User's handle for a drm_syncobj to wait on or 
>>>> signal. */
>>>> +    __u32 handle;
>>>> +
>>>> +    /**
>>>> +     * @flags: Supported flags are,
>>>> +     *
>>>> +     * I915_VM_BIND_FENCE_WAIT:
>>>> +     * Wait for the input fence before binding/unbinding
>>>> +     *
>>>> +     * I915_VM_BIND_FENCE_SIGNAL:
>>>> +     * Return bind/unbind completion fence as output
>>>> +     */
>>>> +    __u32 flags;
>>>> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>>> (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences 
>>>> for vm_bind
>>>> + * and vm_unbind.
>>>> + *
>>>> + * This structure describes an array of timeline drm_syncobj and 
>>>> associated
>>>> + * points for timeline variants of drm_syncobj. These timeline 
>>>> 'drm_syncobj's
>>>> + * can be input or output fences (See struct drm_i915_vm_bind_fence).
>>>> + */
>>>> +struct drm_i915_vm_bind_ext_timeline_fences {
>>>> +#define I915_VM_BIND_EXT_timeline_FENCES    0
>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>> +    struct i915_user_extension base;
>>>> +
>>>> +    /**
>>>> +     * @fence_count: Number of elements in the @handles_ptr & 
>>>> @value_ptr
>>>> +     * arrays.
>>>> +     */
>>>> +    __u64 fence_count;
>>>> +
>>>> +    /**
>>>> +     * @handles_ptr: Pointer to an array of struct 
>>>> drm_i915_vm_bind_fence
>>>> +     * of length @fence_count.
>>>> +     */
>>>> +    __u64 handles_ptr;
>>>> +
>>>> +    /**
>>>> +     * @values_ptr: Pointer to an array of u64 values of length
>>>> +     * @fence_count.
>>>> +     * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>>>> +     * timeline drm_syncobj is invalid as it turns a drm_syncobj 
>>>> into a
>>>> +     * binary one.
>>>> +     */
>>>> +    __u64 values_ptr;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_user_fence - An input or output user 
>>>> fence for the
>>>> + * vm_bind or the vm_unbind work.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will wait for the input 
>>>> fence (value at
>>>> + * @addr to become equal to @val) before starting the binding or 
>>>> unbinding.
>>>> + *
>>>> + * The vm_bind or vm_unbind async worker will signal the output 
>>>> fence after
>>>> + * the completion of binding or unbinding by writing @val to memory 
>>>> location at
>>>> + * @addr
>>>> + */
>>>> +struct drm_i915_vm_bind_user_fence {
>>>> +    /** @addr: User/Memory fence qword aligned process virtual 
>>>> address */
>>>> +    __u64 addr;
>>>> +
>>>> +    /** @val: User/Memory fence value to be written after bind 
>>>> completion */
>>>> +    __u64 val;
>>>> +
>>>> +    /**
>>>> +     * @flags: Supported flags are,
>>>> +     *
>>>> +     * I915_VM_BIND_USER_FENCE_WAIT:
>>>> +     * Wait for the input fence before binding/unbinding
>>>> +     *
>>>> +     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>> +     * Return bind/unbind completion fence as output
>>>> +     */
>>>> +    __u32 flags;
>>>> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>> +    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for 
>>>> vm_bind
>>>> + * and vm_unbind.
>>>> + *
>>>> + * These user fences can be input or output fences
>>>> + * (See struct drm_i915_vm_bind_user_fence).
>>>> + */
>>>> +struct drm_i915_vm_bind_ext_user_fence {
>>>> +#define I915_VM_BIND_EXT_USER_FENCES    1
>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>> +    struct i915_user_extension base;
>>>> +
>>>> +    /** @fence_count: Number of elements in the @user_fence_ptr 
>>>> array. */
>>>> +    __u64 fence_count;
>>>> +
>>>> +    /**
>>>> +     * @user_fence_ptr: Pointer to an array of
>>>> +     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>> +     */
>>>> +    __u64 user_fence_ptr;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array of 
>>>> batch buffer
>>>> + * gpu virtual addresses.
>>>> + *
>>>> + * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), 
>>>> this extension
>>>> + * must always be appended in the VM_BIND mode and it will be an 
>>>> error to
>>>> + * append this extension in older non-VM_BIND mode.
>>>> + */
>>>> +struct drm_i915_gem_execbuffer_ext_batch_addresses {
>>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES    1
>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>> +    struct i915_user_extension base;
>>>> +
>>>> +    /** @count: Number of addresses in the addr array. */
>>>> +    __u32 count;
>>>> +
>>>> +    /** @addr: An array of batch gpu virtual addresses. */
>>>> +    __u64 addr[0];
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_execbuffer_ext_user_fence - First level 
>>>> batch completion
>>>> + * signaling extension.
>>>> + *
>>>> + * This extension allows user to attach a user fence (@addr, @value 
>>>> pair) to an
>>>> + * execbuf to be signaled by the command streamer after the 
>>>> completion of first
>>>> + * level batch, by writing the @value at specified @addr and 
>>>> triggering an
>>>> + * interrupt.
>>>> + * User can either poll for this user fence to signal or can also 
>>>> wait on it
>>>> + * with i915_gem_wait_user_fence ioctl.
>>>> + * This is very much useful for long running contexts where 
>>>> waiting on dma-fence
>>>> + * by user (like i915_gem_wait ioctl) is not supported.
>>>> + */
>>>> +struct drm_i915_gem_execbuffer_ext_user_fence {
>>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE        2
>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>> +    struct i915_user_extension base;
>>>> +
>>>> +    /**
>>>> +     * @addr: User/Memory fence qword aligned GPU virtual address.
>>>> +     *
>>>> +     * Address has to be a valid GPU virtual address at the time of
>>>> +     * first level batch completion.
>>>> +     */
>>>> +    __u64 addr;
>>>> +
>>>> +    /**
>>>> +     * @value: User/Memory fence Value to be written to above address
>>>> +     * after first level batch completes.
>>>> +     */
>>>> +    __u64 value;
>>>> +
>>>> +    /** @rsvd: Reserved for future extensions, MBZ */
>>>> +    __u64 rsvd;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_create_ext_vm_private - Extension to make 
>>>> the object
>>>> + * private to the specified VM.
>>>> + *
>>>> + * See struct drm_i915_gem_create_ext.
>>>> + */
>>>> +struct drm_i915_gem_create_ext_vm_private {
>>>> +#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>> +    struct i915_user_extension base;
>>>> +
>>>> +    /** @vm_id: Id of the VM to which the object is private */
>>>> +    __u32 vm_id;
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>>> + *
>>>> + * User/Memory fence can be woken up either by:
>>>> + *
>>>> + * 1. GPU context indicated by @ctx_id, or,
>>>> + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>> + *    @ctx_id is ignored when this flag is set.
>>>> + *
>>>> + * Wakeup condition is,
>>>> + * ``((*addr & mask) op (value & mask))``
>>>> + *
>>>> + * See :ref:`Documentation/driver-api/dma-buf.rst 
>>>> <indefinite_dma_fences>`
>>>> + */
>>>> +struct drm_i915_gem_wait_user_fence {
>>>> +    /** @extensions: Zero-terminated chain of extensions. */
>>>> +    __u64 extensions;
>>>> +
>>>> +    /** @addr: User/Memory fence address */
>>>> +    __u64 addr;
>>>> +
>>>> +    /** @ctx_id: Id of the Context which will signal the fence. */
>>>> +    __u32 ctx_id;
>>>> +
>>>> +    /** @op: Wakeup condition operator */
>>>> +    __u16 op;
>>>> +#define I915_UFENCE_WAIT_EQ      0
>>>> +#define I915_UFENCE_WAIT_NEQ     1
>>>> +#define I915_UFENCE_WAIT_GT      2
>>>> +#define I915_UFENCE_WAIT_GTE     3
>>>> +#define I915_UFENCE_WAIT_LT      4
>>>> +#define I915_UFENCE_WAIT_LTE     5
>>>> +#define I915_UFENCE_WAIT_BEFORE  6
>>>> +#define I915_UFENCE_WAIT_AFTER   7
>>>> +
>>>> +    /**
>>>> +     * @flags: Supported flags are,
>>>> +     *
>>>> +     * I915_UFENCE_WAIT_SOFT:
>>>> +     *
>>>> +     * To be woken up by i915 driver async worker (not by GPU).
>>>> +     *
>>>> +     * I915_UFENCE_WAIT_ABSTIME:
>>>> +     *
>>>> +     * Wait timeout specified as absolute time.
>>>> +     */
>>>> +    __u16 flags;
>>>> +#define I915_UFENCE_WAIT_SOFT    0x1
>>>> +#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>> +
>>>> +    /** @value: Wakeup value */
>>>> +    __u64 value;
>>>> +
>>>> +    /** @mask: Wakeup mask */
>>>> +    __u64 mask;
>>>> +#define I915_UFENCE_WAIT_U8     0xffu
>>>> +#define I915_UFENCE_WAIT_U16    0xffffu
>>>> +#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>> +
>>>> +    /**
>>>> +     * @timeout: Wait timeout in nanoseconds.
>>>> +     *
>>>> +     * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout 
>>>> is the
>>>> +     * absolute time in nsec.
>>>> +     */
>>>> +    __s64 timeout;
>>>> +};
Niranjana Vishwanathapura June 8, 2022, 7:52 p.m. UTC | #20
On Wed, Jun 08, 2022 at 08:34:36AM +0100, Tvrtko Ursulin wrote:
>
>On 07/06/2022 22:25, Niranjana Vishwanathapura wrote:
>>On Tue, Jun 07, 2022 at 11:42:08AM +0100, Tvrtko Ursulin wrote:
>>>
>>>On 03/06/2022 07:53, Niranjana Vishwanathapura wrote:
>>>>On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana 
>>>>Vishwanathapura wrote:
>>>>>On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>>>>>On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>>>>
>>>>>>>On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>>>>><niranjana.vishwanathapura@intel.com> wrote:
>>>>>>>>
>>>>>>>>On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>>>>>>On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
>>>>>>>>>>VM_BIND and related uapi definitions
>>>>>>>>>>
>>>>>>>>>>v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>>>>>     Also add new uapi and documentation as per review comments
>>>>>>>>>>     from Daniel.
>>>>>>>>>>
>>>>>>>>>>Signed-off-by: Niranjana Vishwanathapura
>>>>>>>><niranjana.vishwanathapura@intel.com>
>>>>>>>>>>---
>>>>>>>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 399
>>>>>>>>+++++++++++++++++++++++++++
>>>>>>>>>>  1 file changed, 399 insertions(+)
>>>>>>>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>>
>>>>>>>>>>diff --git a/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>>new file mode 100644
>>>>>>>>>>index 000000000000..589c0a009107
>>>>>>>>>>--- /dev/null
>>>>>>>>>>+++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>>@@ -0,0 +1,399 @@
>>>>>>>>>>+/* SPDX-License-Identifier: MIT */
>>>>>>>>>>+/*
>>>>>>>>>>+ * Copyright © 2022 Intel Corporation
>>>>>>>>>>+ */
>>>>>>>>>>+
>>>>>>>>>>+/**
>>>>>>>>>>+ * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>>>>+ *
>>>>>>>>>>+ * VM_BIND feature availability.
>>>>>>>>>>+ * See typedef drm_i915_getparam_t param.
>>>>>>>>>>+ */
>>>>>>>>>>+#define I915_PARAM_HAS_VM_BIND               57
>>>>>>>>>>+
>>>>>>>>>>+/**
>>>>>>>>>>+ * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>>>>+ *
>>>>>>>>>>+ * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>>>>>>+ * See struct drm_i915_gem_vm_control flags.
>>>>>>>>>>+ *
>>>>>>>>>>+ * A VM in VM_BIND mode will not support the older
>>>>>>>>execbuff mode of binding.
>>>>>>>>>>+ * In VM_BIND mode, execbuff ioctl will not accept 
>>>>>>>>>>any
>>>>>>>>execlist (ie., the
>>>>>>>>>>+ * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>>>>>+ * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>>>>>+ * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>>>>>+ * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES 
>>>>>>>>>>extension
>>>>>>>>must be provided
>>>>>>>>>>+ * to pass in the batch buffer addresses.
>>>>>>>>>>+ *
>>>>>>>>>>+ * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>>>>>+ * I915_EXEC_BATCH_FIRST of
>>>>>>>>&drm_i915_gem_execbuffer2.flags must be 0
>>>>>>>>>>+ * (not used) in VM_BIND mode. 
>>>>>>>>>>I915_EXEC_USE_EXTENSIONS
>>>>>>>>flag must always be
>>>>>>>>>>+ * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>>>>>+ * The buffers_ptr, buffer_count, 
>>>>>>>>>>batch_start_offset and
>>>>>>>>batch_len fields
>>>>>>>>>>+ * of struct drm_i915_gem_execbuffer2 are also not 
>>>>>>>>>>used
>>>>>>>>and must be 0.
>>>>>>>>>>+ */
>>>>>>>>>
>>>>>>>>>From that description, it seems we have:
>>>>>>>>>
>>>>>>>>>struct drm_i915_gem_execbuffer2 {
>>>>>>>>>        __u64 buffers_ptr;              -> must be 0 (new)
>>>>>>>>>        __u32 buffer_count;             -> must be 0 (new)
>>>>>>>>>        __u32 batch_start_offset;       -> must be 0 (new)
>>>>>>>>>        __u32 batch_len;                -> must be 0 (new)
>>>>>>>>>        __u32 DR1;                      -> must be 0 (old)
>>>>>>>>>        __u32 DR4;                      -> must be 0 (old)
>>>>>>>>>        __u32 num_cliprects; (fences)   -> must be 0 
>>>>>>>>>since
>>>>>>>>using extensions
>>>>>>>>>        __u64 cliprects_ptr; (fences, extensions) ->
>>>>>>>>contains an actual pointer!
>>>>>>>>>        __u64 flags;                    -> some flags 
>>>>>>>>>must be 0 (new)
>>>>>>>>>        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>>>>>>        __u64 rsvd2;                    -> unused
>>>>>>>>>};
>>>>>>>>>
>>>>>>>>>Based on that, why can't we just get 
>>>>>>>>>drm_i915_gem_execbuffer3 instead
>>>>>>>>>of adding even more complexity to an already abused 
>>>>>>>>>interface? While
>>>>>>>>>the Vulkan-like extension thing is really nice, I don't think what
>>>>>>>>>we're doing here is extending the ioctl usage, we're completely
>>>>>>>>>changing how the base struct should be interpreted 
>>>>>>>>>based on
>>>>>>>>how the VM
>>>>>>>>>was created (which is an entirely different ioctl).
>>>>>>>>>
>>>>>>>>>From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
>>>>>>>>>already at -6 without these changes. I think after 
>>>>>>>>>vm_bind we'll need
>>>>>>>>>to create a -11 entry just to deal with this ioctl.
>>>>>>>>>
>>>>>>>>
>>>>>>>>The only change here is removing the execlist support for VM_BIND
>>>>>>>>mode (other than natural extensions).
>>>>>>>>Adding a new execbuffer3 was considered, but I think we 
>>>>>>>>need to be careful
>>>>>>>>with that as that goes beyond the VM_BIND support, 
>>>>>>>>including any future
>>>>>>>>requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>>>>
>>>>>>>Why not? it's not like adding extensions here is really 
>>>>>>>that different
>>>>>>>than adding new ioctls.
>>>>>>>
>>>>>>>I definitely think this deserves an execbuffer3 without even
>>>>>>>considering future requirements. Just  to burn down the old
>>>>>>>requirements and pointless fields.
>>>>>>>
>>>>>>>Make execbuffer3 be vm bind only, no relocs, no legacy 
>>>>>>>bits, leave the
>>>>>>>older sw on execbuf2 for ever.
>>>>>>
>>>>>>I guess another point in favour of execbuf3 would be that it's less
>>>>>>midlayer. If we share the entry point then there's quite a few vfuncs
>>>>>>needed to cleanly split out the vm_bind paths from the legacy
>>>>>>reloc/softping paths.
>>>>>>
>>>>>>If we invert this and do execbuf3, then there's the existing ioctl
>>>>>>vfunc, and then we share code (where it even makes sense, probably
>>>>>>request setup/submit need to be shared, anything else is probably
>>>>>>cleaner to just copypaste) with the usual helper approach.
>>>>>>
>>>>>>Also that would guarantee that really none of the old concepts like
>>>>>>i915_active on the vma or vma open counts and all that stuff leaks
>>>>>>into the new vm_bind execbuf.
>>>>>>
>>>>>>Finally I also think that copypasting would make backporting easier,
>>>>>>or at least more flexible, since it should make it easier to have the
>>>>>>upstream vm_bind co-exist with all the other things we have. Without
>>>>>>huge amounts of conflicts (or at least much less) that pushing a pile
>>>>>>of vfuncs into the existing code would cause.
>>>>>>
>>>>>>So maybe we should do this?
>>>>>
>>>>>Thanks Dave, Daniel.
>>>>>There are a few things that will be common between execbuf2 and
>>>>>execbuf3, like request setup/submit (as you said), fence 
>>>>>handling (timeline fences, fence array, composite fences), 
>>>>>engine selection,
>>>>>etc. Also, many of the 'flags' will be there in execbuf3 also (but
>>>>>bit position will differ).
>>>>>But I guess these should be fine as the suggestion here is to
>>>>>copy-paste the execbuff code and having a shared code where possible.
>>>>>Besides, we can stop supporting some older feature in execbuff3
>>>>>(like fence array in favor of newer timeline fences), which will
>>>>>further reduce common code.
>>>>>
>>>>>Ok, I will update this series by adding execbuf3 and send out soon.
>>>>>
>>>>
>>>>Does this sound reasonable?
>>>>
>>>>struct drm_i915_gem_execbuffer3 {
>>>>       __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>>>>
>>>>       __u32 batch_count;
>>>>       __u64 batch_addr_ptr;    /* Pointer to an array of batch 
>>>>gpu virtual addresses */
>>>
>>>Casual stumble upon..
>>>
>>>Alternatively you could embed N pointers to make life a bit easier 
>>>for both userspace and kernel side. Yes, but then "N batch buffers 
>>>should be enough for everyone" problem.. :)
>>>
>>
>>Thanks Tvrtko,
>>Yes, hence the batch_addr_ptr.
>
>Right, but then userspace has to allocate a separate buffer and kernel 
>has to access it separately from a single copy_from_user. Pros and 
>cons of "this many batches should be enough for everyone" versus the 
>extra operations.
>
>Hmm.. for the common case of one batch - you could define the uapi to 
>say if batch_count is one then pointer is GPU VA to the batch itself, 
>not a pointer to userspace array of GPU VA?
>

Yah, we can do that. ie., batch_addr_ptr is the batch VA when batch_count
is 1. Otherwise, it is pointer to an array of batch VAs.

Other option is to move multi-batch support to an extension and here
we will only have batch_addr (ie., support for 1 batch only).

I like the former one better (the one you suggested).

Niranjana

>Regards,
>
>Tvrtko
>
>>>>       __u64 flags;
>>>>#define I915_EXEC3_RING_MASK              (0x3f)
>>>>#define I915_EXEC3_DEFAULT                (0<<0)
>>>>#define I915_EXEC3_RENDER                 (1<<0)
>>>>#define I915_EXEC3_BSD                    (2<<0)
>>>>#define I915_EXEC3_BLT                    (3<<0)
>>>>#define I915_EXEC3_VEBOX                  (4<<0)
>>>>
>>>>#define I915_EXEC3_SECURE               (1<<6)
>>>>#define I915_EXEC3_IS_PINNED            (1<<7)
>>>>
>>>>#define I915_EXEC3_BSD_SHIFT     (8)
>>>>#define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
>>>>#define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
>>>>#define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
>>>>#define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>>>
>>>I'd suggest legacy engine selection is unwanted, especially not 
>>>with the convoluted BSD1/2 flags. Can we just require context with 
>>>engine map and index? Or if default context has to be supported 
>>>then I'd suggest ...class_instance for that mode.
>>>
>>
>>Ok, I will be happy to remove it and only support contexts with
>>engine map, if UMDs agree on that.
>>
>>>>#define I915_EXEC3_FENCE_IN             (1<<10)
>>>>#define I915_EXEC3_FENCE_OUT            (1<<11)
>>>>#define I915_EXEC3_FENCE_SUBMIT         (1<<12)
>>>
>>>People are likely to object to submit fence since generic 
>>>mechanism to align submissions was rejected.
>>>
>>
>>Ok, again, I can remove it if UMDs are ok with it.
>>
>>>>
>>>>       __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>>>
>>>New ioctl you can afford dedicated fields.
>>>
>>
>>Yes, but as I asked below, I am not sure if we need this or the
>>timeline fence arry extension we have is good enough.
>>
>>>In any case I suggest you involve UMD folks in designing it.
>>>
>>
>>Yah.
>>Paulo, Lionel, Jason, Daniel, can you comment on these regarding
>>what will UMD need in execbuf3 and what can be removed?
>>
>>Thanks,
>>Niranjana
>>
>>>Regards,
>>>
>>>Tvrtko
>>>
>>>>
>>>>       __u64 extensions;        /* currently only for 
>>>>DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
>>>>};
>>>>
>>>>With this, user can pass in batch addresses and count directly,
>>>>instead of as an extension (as this rfc series was proposing).
>>>>
>>>>I have removed many of the flags which were either legacy or not
>>>>applicable to VM_BIND mode.
>>>>I have also removed fence array support (execbuffer2.cliprects_ptr)
>>>>as we have timeline fence array support. Is that fine?
>>>>Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>>>>
>>>>Any thing else needs to be added or removed?
>>>>
>>>>Niranjana
>>>>
>>>>>Niranjana
>>>>>
>>>>>>-Daniel
>>>>>>-- 
>>>>>>Daniel Vetter
>>>>>>Software Engineer, Intel Corporation
>>>>>>http://blog.ffwll.ch
Niranjana Vishwanathapura June 8, 2022, 8:45 p.m. UTC | #21
On Wed, Jun 08, 2022 at 09:54:24AM +0100, Tvrtko Ursulin wrote:
>
>On 08/06/2022 09:45, Lionel Landwerlin wrote:
>>On 08/06/2022 11:36, Tvrtko Ursulin wrote:
>>>
>>>On 08/06/2022 07:40, Lionel Landwerlin wrote:
>>>>On 03/06/2022 09:53, Niranjana Vishwanathapura wrote:
>>>>>On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana 
>>>>>Vishwanathapura wrote:
>>>>>>On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>>>>>>On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>>>>>
>>>>>>>>On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>>>>>><niranjana.vishwanathapura@intel.com> wrote:
>>>>>>>>>
>>>>>>>>>On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>>>>>>>On Tue, 2022-05-17 at 11:32 -0700, Niranjana 
>>>>>>>>>Vishwanathapura wrote:
>>>>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>>>>
>>>>>>>>>>> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>>>>>>     Also add new uapi and documentation as per review comments
>>>>>>>>>>>     from Daniel.
>>>>>>>>>>>
>>>>>>>>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>>>>>>><niranjana.vishwanathapura@intel.com>
>>>>>>>>>>> ---
>>>>>>>>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>>>>>+++++++++++++++++++++++++++
>>>>>>>>>>>  1 file changed, 399 insertions(+)
>>>>>>>>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>>>
>>>>>>>>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>>>>b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>>> new file mode 100644
>>>>>>>>>>> index 000000000000..589c0a009107
>>>>>>>>>>> --- /dev/null
>>>>>>>>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>>> @@ -0,0 +1,399 @@
>>>>>>>>>>> +/* SPDX-License-Identifier: MIT */
>>>>>>>>>>> +/*
>>>>>>>>>>> + * Copyright © 2022 Intel Corporation
>>>>>>>>>>> + */
>>>>>>>>>>> +
>>>>>>>>>>> +/**
>>>>>>>>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>>>>> + *
>>>>>>>>>>> + * VM_BIND feature availability.
>>>>>>>>>>> + * See typedef drm_i915_getparam_t param.
>>>>>>>>>>> + */
>>>>>>>>>>> +#define I915_PARAM_HAS_VM_BIND 57
>>>>>>>>>>> +
>>>>>>>>>>> +/**
>>>>>>>>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>>>>> + *
>>>>>>>>>>> + * Flag to opt-in for VM_BIND mode of binding 
>>>>>>>>>during VM creation.
>>>>>>>>>>> + * See struct drm_i915_gem_vm_control flags.
>>>>>>>>>>> + *
>>>>>>>>>>> + * A VM in VM_BIND mode will not support the older 
>>>>>>>>>execbuff mode of binding.
>>>>>>>>>>> + * In VM_BIND mode, execbuff ioctl will not accept 
>>>>>>>>>any execlist (ie., the
>>>>>>>>>>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>>>>>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>>>>>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>>>>>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES 
>>>>>>>>>extension must be provided
>>>>>>>>>>> + * to pass in the batch buffer addresses.
>>>>>>>>>>> + *
>>>>>>>>>>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>>>>>> + * I915_EXEC_BATCH_FIRST of 
>>>>>>>>>&drm_i915_gem_execbuffer2.flags must be 0
>>>>>>>>>>> + * (not used) in VM_BIND mode. 
>>>>>>>>>I915_EXEC_USE_EXTENSIONS flag must always be
>>>>>>>>>>> + * set (See struct 
>>>>>>>>>drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>>>>>> + * The buffers_ptr, buffer_count, 
>>>>>>>>>batch_start_offset and batch_len fields
>>>>>>>>>>> + * of struct drm_i915_gem_execbuffer2 are also not 
>>>>>>>>>used and must be 0.
>>>>>>>>>>> + */
>>>>>>>>>>
>>>>>>>>>>From that description, it seems we have:
>>>>>>>>>>
>>>>>>>>>>struct drm_i915_gem_execbuffer2 {
>>>>>>>>>>        __u64 buffers_ptr;              -> must be 0 (new)
>>>>>>>>>>        __u32 buffer_count;             -> must be 0 (new)
>>>>>>>>>>        __u32 batch_start_offset;       -> must be 0 (new)
>>>>>>>>>>        __u32 batch_len;                -> must be 0 (new)
>>>>>>>>>>        __u32 DR1;                      -> must be 0 (old)
>>>>>>>>>>        __u32 DR4;                      -> must be 0 (old)
>>>>>>>>>>        __u32 num_cliprects; (fences)   -> must be 0 
>>>>>>>>>since using extensions
>>>>>>>>>>        __u64 cliprects_ptr; (fences, extensions) -> 
>>>>>>>>>contains an actual pointer!
>>>>>>>>>>        __u64 flags;                    -> some flags 
>>>>>>>>>must be 0 (new)
>>>>>>>>>>        __u64 rsvd1; (context info)     -> repurposed field (old)
>>>>>>>>>>        __u64 rsvd2;                    -> unused
>>>>>>>>>>};
>>>>>>>>>>
>>>>>>>>>>Based on that, why can't we just get 
>>>>>>>>>drm_i915_gem_execbuffer3 instead
>>>>>>>>>>of adding even more complexity to an already abused 
>>>>>>>>>interface? While
>>>>>>>>>>the Vulkan-like extension thing is really nice, I don't think what
>>>>>>>>>>we're doing here is extending the ioctl usage, we're completely
>>>>>>>>>>changing how the base struct should be interpreted 
>>>>>>>>>based on how the VM
>>>>>>>>>>was created (which is an entirely different ioctl).
>>>>>>>>>>
>>>>>>>>>>From Rusty Russel's API Design grading, 
>>>>>>>>>drm_i915_gem_execbuffer2 is
>>>>>>>>>>already at -6 without these changes. I think after 
>>>>>>>>>vm_bind we'll need
>>>>>>>>>>to create a -11 entry just to deal with this ioctl.
>>>>>>>>>>
>>>>>>>>>
>>>>>>>>>The only change here is removing the execlist support for VM_BIND
>>>>>>>>>mode (other than natural extensions).
>>>>>>>>>Adding a new execbuffer3 was considered, but I think 
>>>>>>>>>we need to be careful
>>>>>>>>>with that as that goes beyond the VM_BIND support, 
>>>>>>>>>including any future
>>>>>>>>>requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>>>>>
>>>>>>>>Why not? it's not like adding extensions here is really 
>>>>>>>>that different
>>>>>>>>than adding new ioctls.
>>>>>>>>
>>>>>>>>I definitely think this deserves an execbuffer3 without even
>>>>>>>>considering future requirements. Just  to burn down the old
>>>>>>>>requirements and pointless fields.
>>>>>>>>
>>>>>>>>Make execbuffer3 be vm bind only, no relocs, no legacy 
>>>>>>>>bits, leave the
>>>>>>>>older sw on execbuf2 for ever.
>>>>>>>
>>>>>>>I guess another point in favour of execbuf3 would be that it's less
>>>>>>>midlayer. If we share the entry point then there's quite a few vfuncs
>>>>>>>needed to cleanly split out the vm_bind paths from the legacy
>>>>>>>reloc/softping paths.
>>>>>>>
>>>>>>>If we invert this and do execbuf3, then there's the existing ioctl
>>>>>>>vfunc, and then we share code (where it even makes sense, probably
>>>>>>>request setup/submit need to be shared, anything else is probably
>>>>>>>cleaner to just copypaste) with the usual helper approach.
>>>>>>>
>>>>>>>Also that would guarantee that really none of the old concepts like
>>>>>>>i915_active on the vma or vma open counts and all that stuff leaks
>>>>>>>into the new vm_bind execbuf.
>>>>>>>
>>>>>>>Finally I also think that copypasting would make backporting easier,
>>>>>>>or at least more flexible, since it should make it easier to have the
>>>>>>>upstream vm_bind co-exist with all the other things we have. Without
>>>>>>>huge amounts of conflicts (or at least much less) that pushing a pile
>>>>>>>of vfuncs into the existing code would cause.
>>>>>>>
>>>>>>>So maybe we should do this?
>>>>>>
>>>>>>Thanks Dave, Daniel.
>>>>>>There are a few things that will be common between execbuf2 and
>>>>>>execbuf3, like request setup/submit (as you said), fence 
>>>>>>handling (timeline fences, fence array, composite fences), 
>>>>>>engine selection,
>>>>>>etc. Also, many of the 'flags' will be there in execbuf3 also (but
>>>>>>bit position will differ).
>>>>>>But I guess these should be fine as the suggestion here is to
>>>>>>copy-paste the execbuff code and having a shared code where possible.
>>>>>>Besides, we can stop supporting some older feature in execbuff3
>>>>>>(like fence array in favor of newer timeline fences), which will
>>>>>>further reduce common code.
>>>>>>
>>>>>>Ok, I will update this series by adding execbuf3 and send out soon.
>>>>>>
>>>>>
>>>>>Does this sound reasonable?
>>>>
>>>>
>>>>Thanks for proposing this. Some comments below.
>>>>
>>>>
>>>>>
>>>>>struct drm_i915_gem_execbuffer3 {
>>>>>       __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>>>>>
>>>>>       __u32 batch_count;
>>>>>       __u64 batch_addr_ptr;    /* Pointer to an array of 
>>>>>batch gpu virtual addresses */
>>>>>
>>>>>       __u64 flags;
>>>>>#define I915_EXEC3_RING_MASK              (0x3f)
>>>>>#define I915_EXEC3_DEFAULT                (0<<0)
>>>>>#define I915_EXEC3_RENDER                 (1<<0)
>>>>>#define I915_EXEC3_BSD                    (2<<0)
>>>>>#define I915_EXEC3_BLT                    (3<<0)
>>>>>#define I915_EXEC3_VEBOX                  (4<<0)
>>>>
>>>>
>>>>Shouldn't we use the new engine selection uAPI instead?
>>>>
>>>>We can already create an engine map with 
>>>>I915_CONTEXT_PARAM_ENGINES in 
>>>>drm_i915_gem_context_create_ext_setparam.
>>>>
>>>>And you can also create virtual engines with the same extension.
>>>>
>>>>It feels like this could be a single u32 with the engine index 
>>>>(in the context engine map).
>>>
>>>Yes I said the same yesterday.
>>>
>>>Also note that as you can't any longer set engines on a default 
>>>context, question is whether userspace cares to use execbuf3 with 
>>>it (default context).
>>>
>>>If it does, it will need an alternative engine selection for that 
>>>case. I was proposing class:instance rather than legacy cumbersome 
>>>flags.
>>>
>>>If it does not, I  mean if the decision is to only allow execbuf3 
>>>with engine maps, then it leaves the default context a waste of 
>>>kernel memory in the execbuf3 future. :( Don't know what to do 
>>>there..
>>>
>>>Regards,
>>>
>>>Tvrtko
>>
>>
>>Thanks Tvrtko, I only saw your reply after responding.
>>
>>
>>Both Iris & Anv create a context with engines (if kernel supports 
>>it) : https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/src/intel/common/intel_gem.c#L73
>>
>>
>>I think we should be fine with just a single engine id and we don't 
>>care about the default context.
>
>I wonder if in this case we could stop creating the default context 
>starting from a future "gen"? Otherwise, with engine map only execbuf3 
>and execbuf3 only userspace, it would serve no purpose apart from 
>wasting kernel memory.
>

Thanks Tvrtko, Lionel.

I will be glad to remove these flags, just define a uint32 engine_id and
mandate a context with user engines map.

Regarding removing the default context, yah, it depends on from which gen
onwards we will only be supporting execbuf3 and execbuf2 is fully
deprecated. Till then, we will have to keep it I guess :(.

>Regards,
>
>Tvrtko
>
>>
>>
>>-Lionel
>>
>>
>>>
>>>>
>>>>
>>>>>
>>>>>#define I915_EXEC3_SECURE               (1<<6)
>>>>>#define I915_EXEC3_IS_PINNED            (1<<7)
>>>>
>>>>
>>>>What's the meaning of PINNED?
>>>>

This turned out to be a legacy use case. Will remove it.
execbuf3 will anyway only be supported when HAS_VM_BIND is true.

>>>>
>>>>>
>>>>>#define I915_EXEC3_BSD_SHIFT     (8)
>>>>>#define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
>>>>>#define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
>>>>>#define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
>>>>>#define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
>>>>>
>>>>>#define I915_EXEC3_FENCE_IN             (1<<10)
>>>>>#define I915_EXEC3_FENCE_OUT            (1<<11)
>>>>
>>>>
>>>>For Mesa, as soon as we have 
>>>>DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES support, we only use 
>>>>that.
>>>>
>>>>So there isn't much point for FENCE_IN/OUT.
>>>>
>>>>Maybe check with other UMDs?
>>>>

Thanks, will remove it if other UMDs do not ask for it.

>>>>
>>>>>#define I915_EXEC3_FENCE_SUBMIT (1<<12)
>>>>
>>>>
>>>>What's FENCE_SUBMIT?
>>>>

This seems to be a mechanism to align request submissions together.
As per Tvrtko, generic mechanism to align submissions was rejected.
So, if UMDs don't need it, we can remove it.

So, execbuf3 would look like (if all UMDS agree),

struct drm_i915_gem_execbuffer3 {
       __u32 ctx_id;       /* previously execbuffer2.rsvd1 */
       __u32 engine_id;    /* previously 'execbuffer2.flags & I915_EXEC_RING_MASK' */

       __u32 rsvd1;        /* Reserved */
       __u32 batch_count;
       /* batch VA if batch_count=1, otherwise a pointer to an array of batch VAs */
       __u64 batch_address;

       __u64 flags;
#define I915_EXEC3_SECURE   (1<<0)

       __u64 rsvd2;        /* Reserved */
       __u64 extensions;   /* currently only for DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
};

Also, I wondered whether we need to put the timeline fences in an extension
or put them directly in the drm_i915_gem_execbuffer3 struct.
I prefer putting them in an extension if they are not specified for all execbuf3 calls.
Any thoughts?

Niranjana

>>>>
>>>>>
>>>>>       __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
>>>>>
>>>>>       __u64 extensions;        /* currently only for 
>>>>>DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
>>>>>};
>>>>>
>>>>>With this, user can pass in batch addresses and count directly,
>>>>>instead of as an extension (as this rfc series was proposing).
>>>>>
>>>>>I have removed many of the flags which were either legacy or not
>>>>>applicable to BM_BIND mode.
>>>>>I have also removed fence array support (execbuffer2.cliprects_ptr)
>>>>>as we have timeline fence array support. Is that fine?
>>>>>Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
>>>>>
>>>>>Any thing else needs to be added or removed?
>>>>>
>>>>>Niranjana
>>>>>
>>>>>>Niranjana
>>>>>>
>>>>>>>-Daniel
>>>>>>>-- 
>>>>>>>Daniel Vetter
>>>>>>>Software Engineer, Intel Corporation
>>>>>>>http://blog.ffwll.ch
>>>>
>>>>
>>
Matthew Brost June 8, 2022, 9:24 p.m. UTC | #22
On Wed, Jun 08, 2022 at 10:12:45AM +0300, Lionel Landwerlin wrote:
> On 03/06/2022 09:53, Niranjana Vishwanathapura wrote:
> > On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana Vishwanathapura
> > wrote:
> > > On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
> > > > On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
> > > > > 
> > > > > On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
> > > > > <niranjana.vishwanathapura@intel.com> wrote:
> > > > > > 
> > > > > > On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
> > > > > > >On Tue, 2022-05-17 at 11:32 -0700, Niranjana Vishwanathapura wrote:
> > > > > > >> VM_BIND and related uapi definitions
> > > > > > >>
> > > > > > >> v2: Ensure proper kernel-doc formatting with cross references.
> > > > > > >>     Also add new uapi and documentation as per review comments
> > > > > > >>     from Daniel.
> > > > > > >>
> > > > > > >> Signed-off-by: Niranjana Vishwanathapura
> > > > > > <niranjana.vishwanathapura@intel.com>
> > > > > > >> ---
> > > > > > >>  Documentation/gpu/rfc/i915_vm_bind.h | 399
> > > > > > +++++++++++++++++++++++++++
> > > > > > >>  1 file changed, 399 insertions(+)
> > > > > > >>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
> > > > > > >>
> > > > > > >> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h
> > > > > > b/Documentation/gpu/rfc/i915_vm_bind.h
> > > > > > >> new file mode 100644
> > > > > > >> index 000000000000..589c0a009107
> > > > > > >> --- /dev/null
> > > > > > >> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
> > > > > > >> @@ -0,0 +1,399 @@
> > > > > > >> +/* SPDX-License-Identifier: MIT */
> > > > > > >> +/*
> > > > > > >> + * Copyright © 2022 Intel Corporation
> > > > > > >> + */
> > > > > > >> +
> > > > > > >> +/**
> > > > > > >> + * DOC: I915_PARAM_HAS_VM_BIND
> > > > > > >> + *
> > > > > > >> + * VM_BIND feature availability.
> > > > > > >> + * See typedef drm_i915_getparam_t param.
> > > > > > >> + */
> > > > > > >> +#define I915_PARAM_HAS_VM_BIND               57
> > > > > > >> +
> > > > > > >> +/**
> > > > > > >> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
> > > > > > >> + *
> > > > > > >> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
> > > > > > >> + * See struct drm_i915_gem_vm_control flags.
> > > > > > >> + *
> > > > > > >> + * A VM in VM_BIND mode will not support the older
> > > > > > execbuff mode of binding.
> > > > > > >> + * In VM_BIND mode, execbuff ioctl will not accept
> > > > > > any execlist (ie., the
> > > > > > >> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
> > > > > > >> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
> > > > > > >> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
> > > > > > >> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES
> > > > > > extension must be provided
> > > > > > >> + * to pass in the batch buffer addresses.
> > > > > > >> + *
> > > > > > >> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
> > > > > > >> + * I915_EXEC_BATCH_FIRST of
> > > > > > &drm_i915_gem_execbuffer2.flags must be 0
> > > > > > >> + * (not used) in VM_BIND mode.
> > > > > > I915_EXEC_USE_EXTENSIONS flag must always be
> > > > > > >> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
> > > > > > >> + * The buffers_ptr, buffer_count, batch_start_offset
> > > > > > and batch_len fields
> > > > > > >> + * of struct drm_i915_gem_execbuffer2 are also not
> > > > > > used and must be 0.
> > > > > > >> + */
> > > > > > >
> > > > > > >From that description, it seems we have:
> > > > > > >
> > > > > > >struct drm_i915_gem_execbuffer2 {
> > > > > > >        __u64 buffers_ptr;              -> must be 0 (new)
> > > > > > >        __u32 buffer_count;             -> must be 0 (new)
> > > > > > >        __u32 batch_start_offset;       -> must be 0 (new)
> > > > > > >        __u32 batch_len;                -> must be 0 (new)
> > > > > > >        __u32 DR1;                      -> must be 0 (old)
> > > > > > >        __u32 DR4;                      -> must be 0 (old)
> > > > > > >        __u32 num_cliprects; (fences)   -> must be 0
> > > > > > since using extensions
> > > > > > >        __u64 cliprects_ptr; (fences, extensions) ->
> > > > > > contains an actual pointer!
> > > > > > >        __u64 flags;                    -> some flags
> > > > > > must be 0 (new)
> > > > > > >        __u64 rsvd1; (context info)     -> repurposed field (old)
> > > > > > >        __u64 rsvd2;                    -> unused
> > > > > > >};
> > > > > > >
> > > > > > >Based on that, why can't we just get
> > > > > > drm_i915_gem_execbuffer3 instead
> > > > > > >of adding even more complexity to an already abused interface? While
> > > > > > >the Vulkan-like extension thing is really nice, I don't think what
> > > > > > >we're doing here is extending the ioctl usage, we're completely
> > > > > > >changing how the base struct should be interpreted
> > > > > > based on how the VM
> > > > > > >was created (which is an entirely different ioctl).
> > > > > > >
> > > > > > >From Rusty Russel's API Design grading, drm_i915_gem_execbuffer2 is
> > > > > > >already at -6 without these changes. I think after
> > > > > > vm_bind we'll need
> > > > > > >to create a -11 entry just to deal with this ioctl.
> > > > > > >
> > > > > > 
> > > > > > The only change here is removing the execlist support for VM_BIND
> > > > > > mode (other than natual extensions).
> > > > > > Adding a new execbuffer3 was considered, but I think we
> > > > > > need to be careful
> > > > > > with that as that goes beyond the VM_BIND support,
> > > > > > including any future
> > > > > > requirements (as we don't want an execbuffer4 after VM_BIND).
> > > > > 
> > > > > Why not? it's not like adding extensions here is really that different
> > > > > than adding new ioctls.
> > > > > 
> > > > > I definitely think this deserves an execbuffer3 without even
> > > > > considering future requirements. Just  to burn down the old
> > > > > requirements and pointless fields.
> > > > > 
> > > > > Make execbuffer3 be vm bind only, no relocs, no legacy bits, leave the
> > > > > older sw on execbuf2 for ever.
> > > > 
> > > > I guess another point in favour of execbuf3 would be that it's less
> > > > midlayer. If we share the entry point then there's quite a few vfuncs
> > > > needed to cleanly split out the vm_bind paths from the legacy
> > > > reloc/softping paths.
> > > > 
> > > > If we invert this and do execbuf3, then there's the existing ioctl
> > > > vfunc, and then we share code (where it even makes sense, probably
> > > > request setup/submit need to be shared, anything else is probably
> > > > cleaner to just copypaste) with the usual helper approach.
> > > > 
> > > > Also that would guarantee that really none of the old concepts like
> > > > i915_active on the vma or vma open counts and all that stuff leaks
> > > > into the new vm_bind execbuf.
> > > > 
> > > > Finally I also think that copypasting would make backporting easier,
> > > > or at least more flexible, since it should make it easier to have the
> > > > upstream vm_bind co-exist with all the other things we have. Without
> > > > huge amounts of conflicts (or at least much less) that pushing a pile
> > > > of vfuncs into the existing code would cause.
> > > > 
> > > > So maybe we should do this?
> > > 
> > > Thanks Dave, Daniel.
> > > There are a few things that will be common between execbuf2 and
> > > execbuf3, like request setup/submit (as you said), fence handling
> > > (timeline fences, fence array, composite fences), engine selection,
> > > etc. Also, many of the 'flags' will be there in execbuf3 also (but
> > > bit position will differ).
> > > But I guess these should be fine as the suggestion here is to
> > > copy-paste the execbuff code and having a shared code where possible.
> > > Besides, we can stop supporting some older feature in execbuff3
> > > (like fence array in favor of newer timeline fences), which will
> > > further reduce common code.
> > > 
> > > Ok, I will update this series by adding execbuf3 and send out soon.
> > > 
> > 
> > Does this sound reasonable?
> > 
> > struct drm_i915_gem_execbuffer3 {
> >        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
> > 
> >        __u32 batch_count;
> >        __u64 batch_addr_ptr;    /* Pointer to an array of batch gpu
> > virtual addresses */
> 
> 
> Quick question raised on IRC about the batches : Are multiple batches
> limited to virtual engines?
> 

Parallel engines, see i915_context_engines_parallel_submit in i915_drm.h.

Currently the media UMD uses this uAPI to do split frame (e.g. running
multiple batches in parallel on the video engines to decode an 8k frame).

Of course there could be future users of this uAPI too.

Matt

> 
> Thanks,
> 
> 
> -Lionel
> 
> 
> > 
> >        __u64 flags;
> > #define I915_EXEC3_RING_MASK              (0x3f)
> > #define I915_EXEC3_DEFAULT                (0<<0)
> > #define I915_EXEC3_RENDER                 (1<<0)
> > #define I915_EXEC3_BSD                    (2<<0)
> > #define I915_EXEC3_BLT                    (3<<0)
> > #define I915_EXEC3_VEBOX                  (4<<0)
> > 
> > #define I915_EXEC3_SECURE               (1<<6)
> > #define I915_EXEC3_IS_PINNED            (1<<7)
> > 
> > #define I915_EXEC3_BSD_SHIFT     (8)
> > #define I915_EXEC3_BSD_MASK      (3 << I915_EXEC3_BSD_SHIFT)
> > #define I915_EXEC3_BSD_DEFAULT   (0 << I915_EXEC3_BSD_SHIFT)
> > #define I915_EXEC3_BSD_RING1     (1 << I915_EXEC3_BSD_SHIFT)
> > #define I915_EXEC3_BSD_RING2     (2 << I915_EXEC3_BSD_SHIFT)
> > 
> > #define I915_EXEC3_FENCE_IN             (1<<10)
> > #define I915_EXEC3_FENCE_OUT            (1<<11)
> > #define I915_EXEC3_FENCE_SUBMIT         (1<<12)
> > 
> >        __u64 in_out_fence;        /* previously execbuffer2.rsvd2 */
> > 
> >        __u64 extensions;        /* currently only for
> > DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES */
> > };
> > 
> > With this, user can pass in batch addresses and count directly,
> > instead of as an extension (as this rfc series was proposing).
> > 
> > I have removed many of the flags which were either legacy or not
> > applicable to BM_BIND mode.
> > I have also removed fence array support (execbuffer2.cliprects_ptr)
> > as we have timeline fence array support. Is that fine?
> > Do we still need FENCE_IN/FENCE_OUT/FENCE_SUBMIT support?
> > 
> > Any thing else needs to be added or removed?
> > 
> > Niranjana
> > 
> > > Niranjana
> > > 
> > > > -Daniel
> > > > -- 
> > > > Daniel Vetter
> > > > Software Engineer, Intel Corporation
> > > > http://blog.ffwll.ch
> 
>
Niranjana Vishwanathapura June 8, 2022, 9:32 p.m. UTC | #23
On Wed, Jun 08, 2022 at 10:12:05AM +0100, Matthew Auld wrote:
>On 08/06/2022 08:17, Tvrtko Ursulin wrote:
>>
>>On 07/06/2022 20:37, Niranjana Vishwanathapura wrote:
>>>On Tue, Jun 07, 2022 at 11:27:14AM +0100, Tvrtko Ursulin wrote:
>>>>
>>>>On 17/05/2022 19:32, Niranjana Vishwanathapura wrote:
>>>>>VM_BIND and related uapi definitions
>>>>>
>>>>>v2: Ensure proper kernel-doc formatting with cross references.
>>>>>    Also add new uapi and documentation as per review comments
>>>>>    from Daniel.
>>>>>
>>>>>Signed-off-by: Niranjana Vishwanathapura 
>>>>><niranjana.vishwanathapura@intel.com>
>>>>>---
>>>>> Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>>>>> 1 file changed, 399 insertions(+)
>>>>> create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>
>>>>>diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>new file mode 100644
>>>>>index 000000000000..589c0a009107
>>>>>--- /dev/null
>>>>>+++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>@@ -0,0 +1,399 @@
>>>>>+/* SPDX-License-Identifier: MIT */
>>>>>+/*
>>>>>+ * Copyright © 2022 Intel Corporation
>>>>>+ */
>>>>>+
>>>>>+/**
>>>>>+ * DOC: I915_PARAM_HAS_VM_BIND
>>>>>+ *
>>>>>+ * VM_BIND feature availability.
>>>>>+ * See typedef drm_i915_getparam_t param.
>>>>>+ */
>>>>>+#define I915_PARAM_HAS_VM_BIND        57
>>>>>+
>>>>>+/**
>>>>>+ * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>+ *
>>>>>+ * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>+ * See struct drm_i915_gem_vm_control flags.
>>>>>+ *
>>>>>+ * A VM in VM_BIND mode will not support the older execbuff 
>>>>>mode of binding.
>>>>>+ * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>>execlist (ie., the
>>>>>+ * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>+ * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>+ * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>+ * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must 
>>>>>be provided
>>>>>+ * to pass in the batch buffer addresses.
>>>>>+ *
>>>>>+ * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>+ * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
>>>>>+ * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>>must always be
>>>>>+ * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>+ * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>batch_len fields
>>>>>+ * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
>>>>>+ */
>>>>>+#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>>>+
>>>>>+/**
>>>>>+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>>>+ *
>>>>>+ * Flag to declare context as long running.
>>>>>+ * See struct drm_i915_gem_context_create_ext flags.
>>>>>+ *
>>>>>+ * Usage of dma-fence expects that they complete in 
>>>>>reasonable amount of time.
>>>>>+ * Compute on the other hand can be long running. Hence it is 
>>>>>not appropriate
>>>>>+ * for compute contexts to export request completion 
>>>>>dma-fence to user.
>>>>>+ * The dma-fence usage will be limited to in-kernel consumption only.
>>>>>+ * Compute contexts need to use user/memory fence.
>>>>>+ *
>>>>>+ * So, long running contexts do not support output fences. Hence,
>>>>>+ * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
>>>>>+ * I915_EXEC_FENCE_SIGNAL (See 
>>>>>&drm_i915_gem_exec_fence.flags) are expected
>>>>>+ * to be not used.
>>>>>+ *
>>>>>+ * DRM_I915_GEM_WAIT ioctl call is also not supported for 
>>>>>objects mapped
>>>>>+ * to long running contexts.
>>>>>+ */
>>>>>+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>>>+
>>>>>+/* VM_BIND related ioctls */
>>>>>+#define DRM_I915_GEM_VM_BIND        0x3d
>>>>>+#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>>>+#define DRM_I915_GEM_WAIT_USER_FENCE    0x3f
>>>>>+
>>>>>+#define DRM_IOCTL_I915_GEM_VM_BIND        
>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct 
>>>>>drm_i915_gem_vm_bind)
>>>>>+#define DRM_IOCTL_I915_GEM_VM_UNBIND 
>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct 
>>>>>drm_i915_gem_vm_bind)
>>>>>+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, 
>>>>>struct drm_i915_gem_wait_user_fence)
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>>>+ *
>>>>>+ * This structure is passed to VM_BIND ioctl and specifies 
>>>>>the mapping of GPU
>>>>>+ * virtual address (VA) range to the section of an object 
>>>>>that should be bound
>>>>>+ * in the device page table of the specified address space (VM).
>>>>>+ * The VA range specified must be unique (ie., not currently 
>>>>>bound) and can
>>>>>+ * be mapped to whole object or a section of the object 
>>>>>(partial binding).
>>>>>+ * Multiple VA mappings can be created to the same section of 
>>>>>the object
>>>>>+ * (aliasing).
>>>>>+ */
>>>>>+struct drm_i915_gem_vm_bind {
>>>>>+    /** @vm_id: VM (address space) id to bind */
>>>>>+    __u32 vm_id;
>>>>>+
>>>>>+    /** @handle: Object handle */
>>>>>+    __u32 handle;
>>>>>+
>>>>>+    /** @start: Virtual Address start to bind */
>>>>>+    __u64 start;
>>>>>+
>>>>>+    /** @offset: Offset in object to bind */
>>>>>+    __u64 offset;
>>>>>+
>>>>>+    /** @length: Length of mapping to bind */
>>>>>+    __u64 length;
>>>>
>>>>Does it support, or should it, equivalent of 
>>>>EXEC_OBJECT_PAD_TO_SIZE? Or if not userspace is expected to map 
>>>>the remainder of the space to a dummy object? In which case 
>>>>would there be any alignment/padding issues preventing the two 
>>>>bind to be placed next to each other?
>>>>
>>>>I ask because someone from the compute side asked me about a 
>>>>problem with their strategy of dealing with overfetch and I 
>>>>suggested pad to size.
>>>>
>>>
>>>Thanks Tvrtko,
>>>I think we shouldn't be needing it. As with VM_BIND VA assignment
>>>is completely pushed to userspace, no padding should be necessary
>>>once the 'start' and 'size' alignment conditions are met.
>>>
>>>I will add some documentation on alignment requirement here.
>>>Generally, 'start' and 'size' should be 4K aligned. But, I think
>>>when we have 64K lmem page sizes (dg2 and xehpsdv), they need to
>>>be 64K aligned.
>>
>>+ Matt
>>
>>Align to 64k is enough for all overfetch issues?
>>
>>Apparently compute has a situation where a buffer is received by one 
>>component and another has to apply more alignment to it, to deal 
>>with overfetch. Since they cannot grow the actual BO if they wanted 
>>to VM_BIND a scratch area on top? Or perhaps none of this is a 
>>problem on discrete and original BO should be correctly allocated to 
>>start with.
>>
>>Side question - what about the align to 2MiB mentioned in 
>>i915_vma_insert to avoid mixing 4k and 64k PTEs? That does not apply 
>>to discrete?
>
>Not sure about the overfetch thing, but yeah dg2 & xehpsdv both 
>require a minimum of 64K pages underneath for local memory, and the BO 
>size will also be rounded up accordingly. And yeah the complication 
>arises due to not being able to mix 4K + 64K GTT pages within the same 
>page-table (existed since even gen8). Note that 4K here is what we 
>typically get for system memory.
>
>Originally we had a memory coloring scheme to track the "color" of 
>each page-table, which basically ensures that userspace can't do 
>something nasty like mixing page sizes. The advantage of that scheme 
>is that we would only require 64K GTT alignment and no extra padding, 
>but is perhaps a little complex.
>
>The merged solution is just to align and pad (i.e vma->node.size and 
>not vma->size) out of the vma to 2M, which is dead simple 
>implementation wise, but does potentially waste some GTT space and 
>some of the local memory used for the actual page-table. For the 
>alignment the kernel just validates that the GTT address is aligned to 
>2M in vma_insert(), and then for the padding it just inflates it to 
>2M, if userspace hasn't already.
>
>See the kernel-doc for @size: https://dri.freedesktop.org/docs/drm/gpu/driver-uapi.html?#c.drm_i915_gem_create_ext
>

Ok, those requirements (2M VA alignment) will apply to VM_BIND also.
This is unfortunate, but it is not something new enforced by VM_BIND.
The other option is to go with 64K alignment, and in the VM_BIND case the
user must ensure there is no mixing of 64K (lmem) and 4K (smem)
mappings in the same 2M range. But this is not VM_BIND specific
(it will apply to soft-pinning in execbuf2 also).

I don't think we need any VA padding here as with VM_BIND the VA is
managed fully by the user. If we enforce the VA to be 2M aligned, it
will leave holes (if BOs are smaller than 2M), but nobody is going
to allocate anything from there.

Niranjana

>>
>>Regards,
>>
>>Tvrtko
>>
>>>
>>>Niranjana
>>>
>>>>Regards,
>>>>
>>>>Tvrtko
>>>>
>>>>>+
>>>>>+    /**
>>>>>+     * @flags: Supported flags are,
>>>>>+     *
>>>>>+     * I915_GEM_VM_BIND_READONLY:
>>>>>+     * Mapping is read-only.
>>>>>+     *
>>>>>+     * I915_GEM_VM_BIND_CAPTURE:
>>>>>+     * Capture this mapping in the dump upon GPU error.
>>>>>+     */
>>>>>+    __u64 flags;
>>>>>+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>>>+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>>>+
>>>>>+    /** @extensions: 0-terminated chain of extensions for 
>>>>>this mapping. */
>>>>>+    __u64 extensions;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>>>+ *
>>>>>+ * This structure is passed to VM_UNBIND ioctl and specifies 
>>>>>the GPU virtual
>>>>>+ * address (VA) range that should be unbound from the device 
>>>>>page table of the
>>>>>+ * specified address space (VM). The specified VA range must 
>>>>>match one of the
>>>>>+ * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
>>>>>+ * completion.
>>>>>+ */
>>>>>+struct drm_i915_gem_vm_unbind {
>>>>>+    /** @vm_id: VM (address space) id to bind */
>>>>>+    __u32 vm_id;
>>>>>+
>>>>>+    /** @rsvd: Reserved for future use; must be zero. */
>>>>>+    __u32 rsvd;
>>>>>+
>>>>>+    /** @start: Virtual Address start to unbind */
>>>>>+    __u64 start;
>>>>>+
>>>>>+    /** @length: Length of mapping to unbind */
>>>>>+    __u64 length;
>>>>>+
>>>>>+    /** @flags: reserved for future usage, currently MBZ */
>>>>>+    __u64 flags;
>>>>>+
>>>>>+    /** @extensions: 0-terminated chain of extensions for 
>>>>>this mapping. */
>>>>>+    __u64 extensions;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_vm_bind_fence - An input or output fence 
>>>>>for the vm_bind
>>>>>+ * or the vm_unbind work.
>>>>>+ *
>>>>>+ * The vm_bind or vm_unbind aync worker will wait for input 
>>>>>fence to signal
>>>>>+ * before starting the binding or unbinding.
>>>>>+ *
>>>>>+ * The vm_bind or vm_unbind async worker will signal the 
>>>>>returned output fence
>>>>>+ * after the completion of binding or unbinding.
>>>>>+ */
>>>>>+struct drm_i915_vm_bind_fence {
>>>>>+    /** @handle: User's handle for a drm_syncobj to wait on 
>>>>>or signal. */
>>>>>+    __u32 handle;
>>>>>+
>>>>>+    /**
>>>>>+     * @flags: Supported flags are,
>>>>>+     *
>>>>>+     * I915_VM_BIND_FENCE_WAIT:
>>>>>+     * Wait for the input fence before binding/unbinding
>>>>>+     *
>>>>>+     * I915_VM_BIND_FENCE_SIGNAL:
>>>>>+     * Return bind/unbind completion fence as output
>>>>>+     */
>>>>>+    __u32 flags;
>>>>>+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>>>+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>>>+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>>>>(-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline 
>>>>>fences for vm_bind
>>>>>+ * and vm_unbind.
>>>>>+ *
>>>>>+ * This structure describes an array of timeline drm_syncobj 
>>>>>and associated
>>>>>+ * points for timeline variants of drm_syncobj. These 
>>>>>timeline 'drm_syncobj's
>>>>>+ * can be input or output fences (See struct drm_i915_vm_bind_fence).
>>>>>+ */
>>>>>+struct drm_i915_vm_bind_ext_timeline_fences {
>>>>>+#define I915_VM_BIND_EXT_timeline_FENCES    0
>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>+    struct i915_user_extension base;
>>>>>+
>>>>>+    /**
>>>>>+     * @fence_count: Number of elements in the @handles_ptr & 
>>>>>@value_ptr
>>>>>+     * arrays.
>>>>>+     */
>>>>>+    __u64 fence_count;
>>>>>+
>>>>>+    /**
>>>>>+     * @handles_ptr: Pointer to an array of struct 
>>>>>drm_i915_vm_bind_fence
>>>>>+     * of length @fence_count.
>>>>>+     */
>>>>>+    __u64 handles_ptr;
>>>>>+
>>>>>+    /**
>>>>>+     * @values_ptr: Pointer to an array of u64 values of length
>>>>>+     * @fence_count.
>>>>>+     * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>>>>>+     * timeline drm_syncobj is invalid as it turns a 
>>>>>drm_syncobj into a
>>>>>+     * binary one.
>>>>>+     */
>>>>>+    __u64 values_ptr;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_vm_bind_user_fence - An input or output 
>>>>>user fence for the
>>>>>+ * vm_bind or the vm_unbind work.
>>>>>+ *
>>>>>+ * The vm_bind or vm_unbind aync worker will wait for the 
>>>>>input fence (value at
>>>>>+ * @addr to become equal to @val) before starting the binding 
>>>>>or unbinding.
>>>>>+ *
>>>>>+ * The vm_bind or vm_unbind async worker will signal the 
>>>>>output fence after
>>>>>+ * the completion of binding or unbinding by writing @val to 
>>>>>memory location at
>>>>>+ * @addr
>>>>>+ */
>>>>>+struct drm_i915_vm_bind_user_fence {
>>>>>+    /** @addr: User/Memory fence qword aligned process 
>>>>>virtual address */
>>>>>+    __u64 addr;
>>>>>+
>>>>>+    /** @val: User/Memory fence value to be written after 
>>>>>bind completion */
>>>>>+    __u64 val;
>>>>>+
>>>>>+    /**
>>>>>+     * @flags: Supported flags are,
>>>>>+     *
>>>>>+     * I915_VM_BIND_USER_FENCE_WAIT:
>>>>>+     * Wait for the input fence before binding/unbinding
>>>>>+     *
>>>>>+     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>>>+     * Return bind/unbind completion fence as output
>>>>>+     */
>>>>>+    __u32 flags;
>>>>>+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>>>+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>>>+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>>>+    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_vm_bind_ext_user_fence - User/memory 
>>>>>fences for vm_bind
>>>>>+ * and vm_unbind.
>>>>>+ *
>>>>>+ * These user fences can be input or output fences
>>>>>+ * (See struct drm_i915_vm_bind_user_fence).
>>>>>+ */
>>>>>+struct drm_i915_vm_bind_ext_user_fence {
>>>>>+#define I915_VM_BIND_EXT_USER_FENCES    1
>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>+    struct i915_user_extension base;
>>>>>+
>>>>>+    /** @fence_count: Number of elements in the 
>>>>>@user_fence_ptr array. */
>>>>>+    __u64 fence_count;
>>>>>+
>>>>>+    /**
>>>>>+     * @user_fence_ptr: Pointer to an array of
>>>>>+     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>>>+     */
>>>>>+    __u64 user_fence_ptr;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array 
>>>>>of batch buffer
>>>>>+ * gpu virtual addresses.
>>>>>+ *
>>>>>+ * In the execbuff ioctl (See struct 
>>>>>drm_i915_gem_execbuffer2), this extension
>>>>>+ * must always be appended in the VM_BIND mode and it will be 
>>>>>an error to
>>>>>+ * append this extension in older non-VM_BIND mode.
>>>>>+ */
>>>>>+struct drm_i915_gem_execbuffer_ext_batch_addresses {
>>>>>+#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES    1
>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>+    struct i915_user_extension base;
>>>>>+
>>>>>+    /** @count: Number of addresses in the addr array. */
>>>>>+    __u32 count;
>>>>>+
>>>>>+    /** @addr: An array of batch gpu virtual addresses. */
>>>>>+    __u64 addr[0];
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_execbuffer_ext_user_fence - First 
>>>>>level batch completion
>>>>>+ * signaling extension.
>>>>>+ *
>>>>>+ * This extension allows user to attach a user fence (@addr, 
>>>>>@value pair) to an
>>>>>+ * execbuf to be signaled by the command streamer after the 
>>>>>completion of first
>>>>>+ * level batch, by writing the @value at specified @addr and 
>>>>>triggering an
>>>>>+ * interrupt.
>>>>>+ * User can either poll for this user fence to signal or can 
>>>>>also wait on it
>>>>>+ * with i915_gem_wait_user_fence ioctl.
>>>>>+ * This is very much usefaul for long running contexts where 
>>>>>waiting on dma-fence
>>>>>+ * by user (like i915_gem_wait ioctl) is not supported.
>>>>>+ */
>>>>>+struct drm_i915_gem_execbuffer_ext_user_fence {
>>>>>+#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE        2
>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>+    struct i915_user_extension base;
>>>>>+
>>>>>+    /**
>>>>>+     * @addr: User/Memory fence qword aligned GPU virtual address.
>>>>>+     *
>>>>>+     * Address has to be a valid GPU virtual address at the time of
>>>>>+     * first level batch completion.
>>>>>+     */
>>>>>+    __u64 addr;
>>>>>+
>>>>>+    /**
>>>>>+     * @value: User/Memory fence Value to be written to above address
>>>>>+     * after first level batch completes.
>>>>>+     */
>>>>>+    __u64 value;
>>>>>+
>>>>>+    /** @rsvd: Reserved for future extensions, MBZ */
>>>>>+    __u64 rsvd;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_create_ext_vm_private - Extension to 
>>>>>make the object
>>>>>+ * private to the specified VM.
>>>>>+ *
>>>>>+ * See struct drm_i915_gem_create_ext.
>>>>>+ */
>>>>>+struct drm_i915_gem_create_ext_vm_private {
>>>>>+#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>+    struct i915_user_extension base;
>>>>>+
>>>>>+    /** @vm_id: Id of the VM to which the object is private */
>>>>>+    __u32 vm_id;
>>>>>+};
>>>>>+
>>>>>+/**
>>>>>+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>>>>+ *
>>>>>+ * User/Memory fence can be woken up either by:
>>>>>+ *
>>>>>+ * 1. GPU context indicated by @ctx_id, or,
>>>>>+ * 2. Kerrnel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>>>+ *    @ctx_id is ignored when this flag is set.
>>>>>+ *
>>>>>+ * Wakeup condition is,
>>>>>+ * ``((*addr & mask) op (value & mask))``
>>>>>+ *
>>>>>+ * See :ref:`Documentation/driver-api/dma-buf.rst 
>>>>><indefinite_dma_fences>`
>>>>>+ */
>>>>>+struct drm_i915_gem_wait_user_fence {
>>>>>+    /** @extensions: Zero-terminated chain of extensions. */
>>>>>+    __u64 extensions;
>>>>>+
>>>>>+    /** @addr: User/Memory fence address */
>>>>>+    __u64 addr;
>>>>>+
>>>>>+    /** @ctx_id: Id of the Context which will signal the fence. */
>>>>>+    __u32 ctx_id;
>>>>>+
>>>>>+    /** @op: Wakeup condition operator */
>>>>>+    __u16 op;
>>>>>+#define I915_UFENCE_WAIT_EQ      0
>>>>>+#define I915_UFENCE_WAIT_NEQ     1
>>>>>+#define I915_UFENCE_WAIT_GT      2
>>>>>+#define I915_UFENCE_WAIT_GTE     3
>>>>>+#define I915_UFENCE_WAIT_LT      4
>>>>>+#define I915_UFENCE_WAIT_LTE     5
>>>>>+#define I915_UFENCE_WAIT_BEFORE  6
>>>>>+#define I915_UFENCE_WAIT_AFTER   7
>>>>>+
>>>>>+    /**
>>>>>+     * @flags: Supported flags are,
>>>>>+     *
>>>>>+     * I915_UFENCE_WAIT_SOFT:
>>>>>+     *
>>>>>+     * To be woken up by i915 driver async worker (not by GPU).
>>>>>+     *
>>>>>+     * I915_UFENCE_WAIT_ABSTIME:
>>>>>+     *
>>>>>+     * Wait timeout specified as absolute time.
>>>>>+     */
>>>>>+    __u16 flags;
>>>>>+#define I915_UFENCE_WAIT_SOFT    0x1
>>>>>+#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>>>+
>>>>>+    /** @value: Wakeup value */
>>>>>+    __u64 value;
>>>>>+
>>>>>+    /** @mask: Wakeup mask */
>>>>>+    __u64 mask;
>>>>>+#define I915_UFENCE_WAIT_U8     0xffu
>>>>>+#define I915_UFENCE_WAIT_U16    0xffffu
>>>>>+#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>>>+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>>>+
>>>>>+    /**
>>>>>+     * @timeout: Wait timeout in nanoseconds.
>>>>>+     *
>>>>>+     * If I915_UFENCE_WAIT_ABSTIME flag is set, then time 
>>>>>timeout is the
>>>>>+     * absolute time in nsec.
>>>>>+     */
>>>>>+    __s64 timeout;
>>>>>+};
Matthew Auld June 9, 2022, 8:36 a.m. UTC | #24
On 08/06/2022 22:32, Niranjana Vishwanathapura wrote:
> On Wed, Jun 08, 2022 at 10:12:05AM +0100, Matthew Auld wrote:
>> On 08/06/2022 08:17, Tvrtko Ursulin wrote:
>>>
>>> On 07/06/2022 20:37, Niranjana Vishwanathapura wrote:
>>>> On Tue, Jun 07, 2022 at 11:27:14AM +0100, Tvrtko Ursulin wrote:
>>>>>
>>>>> On 17/05/2022 19:32, Niranjana Vishwanathapura wrote:
>>>>>> VM_BIND and related uapi definitions
>>>>>>
>>>>>> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>     Also add new uapi and documentation as per review comments
>>>>>>     from Daniel.
>>>>>>
>>>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>> ---
>>>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>> +++++++++++++++++++++++++++
>>>>>>  1 file changed, 399 insertions(+)
>>>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>
>>>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> new file mode 100644
>>>>>> index 000000000000..589c0a009107
>>>>>> --- /dev/null
>>>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>> @@ -0,0 +1,399 @@
>>>>>> +/* SPDX-License-Identifier: MIT */
>>>>>> +/*
>>>>>> + * Copyright © 2022 Intel Corporation
>>>>>> + */
>>>>>> +
>>>>>> +/**
>>>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>> + *
>>>>>> + * VM_BIND feature availability.
>>>>>> + * See typedef drm_i915_getparam_t param.
>>>>>> + */
>>>>>> +#define I915_PARAM_HAS_VM_BIND        57
>>>>>> +
>>>>>> +/**
>>>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>> + *
>>>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>> + * See struct drm_i915_gem_vm_control flags.
>>>>>> + *
>>>>>> + * A VM in VM_BIND mode will not support the older execbuff mode 
>>>>>> of binding.
>>>>>> + * In VM_BIND mode, execbuff ioctl will not accept any execlist 
>>>>>> (ie., the
>>>>>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be 
>>>>>> provided
>>>>>> + * to pass in the batch buffer addresses.
>>>>>> + *
>>>>>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must 
>>>>>> be 0
>>>>>> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must 
>>>>>> always be
>>>>>> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>> batch_len fields
>>>>>> + * of struct drm_i915_gem_execbuffer2 are also not used and must 
>>>>>> be 0.
>>>>>> + */
>>>>>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>>>> +
>>>>>> +/**
>>>>>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>>>> + *
>>>>>> + * Flag to declare context as long running.
>>>>>> + * See struct drm_i915_gem_context_create_ext flags.
>>>>>> + *
>>>>>> + * Usage of dma-fence expects that they complete in reasonable 
>>>>>> amount of time.
>>>>>> + * Compute on the other hand can be long running. Hence it is not 
>>>>>> appropriate
>>>>>> + * for compute contexts to export request completion dma-fence to 
>>>>>> user.
>>>>>> + * The dma-fence usage will be limited to in-kernel consumption 
>>>>>> only.
>>>>>> + * Compute contexts need to use user/memory fence.
>>>>>> + *
>>>>>> + * So, long running contexts do not support output fences. Hence,
>>>>>> + * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
>>>>>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) 
>>>>>> are expected
>>>>>> + * to be not used.
>>>>>> + *
>>>>>> + * DRM_I915_GEM_WAIT ioctl call is also not supported for objects 
>>>>>> mapped
>>>>>> + * to long running contexts.
>>>>>> + */
>>>>>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>>>> +
>>>>>> +/* VM_BIND related ioctls */
>>>>>> +#define DRM_I915_GEM_VM_BIND        0x3d
>>>>>> +#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>>>> +#define DRM_I915_GEM_WAIT_USER_FENCE    0x3f
>>>>>> +
>>>>>> +#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>> DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>>>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>> DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>>>>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct 
>>>>>> drm_i915_gem_wait_user_fence)
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>>>> + *
>>>>>> + * This structure is passed to VM_BIND ioctl and specifies the 
>>>>>> mapping of GPU
>>>>>> + * virtual address (VA) range to the section of an object that 
>>>>>> should be bound
>>>>>> + * in the device page table of the specified address space (VM).
>>>>>> + * The VA range specified must be unique (ie., not currently 
>>>>>> bound) and can
>>>>>> + * be mapped to whole object or a section of the object (partial 
>>>>>> binding).
>>>>>> + * Multiple VA mappings can be created to the same section of the 
>>>>>> object
>>>>>> + * (aliasing).
>>>>>> + */
>>>>>> +struct drm_i915_gem_vm_bind {
>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>> +    __u32 vm_id;
>>>>>> +
>>>>>> +    /** @handle: Object handle */
>>>>>> +    __u32 handle;
>>>>>> +
>>>>>> +    /** @start: Virtual Address start to bind */
>>>>>> +    __u64 start;
>>>>>> +
>>>>>> +    /** @offset: Offset in object to bind */
>>>>>> +    __u64 offset;
>>>>>> +
>>>>>> +    /** @length: Length of mapping to bind */
>>>>>> +    __u64 length;
>>>>>
>>>>> Does it support, or should it, equivalent of 
>>>>> EXEC_OBJECT_PAD_TO_SIZE? Or if not userspace is expected to map the 
>>>>> remainder of the space to a dummy object? In which case would there 
>>>>> be any alignment/padding issues preventing the two bind to be 
>>>>> placed next to each other?
>>>>>
>>>>> I ask because someone from the compute side asked me about a 
>>>>> problem with their strategy of dealing with overfetch and I 
>>>>> suggested pad to size.
>>>>>
>>>>
>>>> Thanks Tvrtko,
>>>> I think we shouldn't be needing it. As with VM_BIND VA assignment
>>>> is completely pushed to userspace, no padding should be necessary
>>>> once the 'start' and 'size' alignment conditions are met.
>>>>
>>>> I will add some documentation on alignment requirement here.
>>>> Generally, 'start' and 'size' should be 4K aligned. But, I think
>>>> when we have 64K lmem page sizes (dg2 and xehpsdv), they need to
>>>> be 64K aligned.
>>>
>>> + Matt
>>>
>>> Align to 64k is enough for all overfetch issues?
>>>
>>> Apparently compute has a situation where a buffer is received by one 
>>> component and another has to apply more alignment to it, to deal with 
>>> overfetch. Since they cannot grow the actual BO if they wanted to 
>>> VM_BIND a scratch area on top? Or perhaps none of this is a problem 
>>> on discrete and original BO should be correctly allocated to start with.
>>>
>>> Side question - what about the align to 2MiB mentioned in 
>>> i915_vma_insert to avoid mixing 4k and 64k PTEs? That does not apply 
>>> to discrete?
>>
>> Not sure about the overfetch thing, but yeah dg2 & xehpsdv both 
>> require a minimum of 64K pages underneath for local memory, and the BO 
>> size will also be rounded up accordingly. And yeah the complication 
>> arises due to not being able to mix 4K + 64K GTT pages within the same 
>> page-table (existed since even gen8). Note that 4K here is what we 
>> typically get for system memory.
>>
>> Originally we had a memory coloring scheme to track the "color" of 
>> each page-table, which basically ensures that userspace can't do 
>> something nasty like mixing page sizes. The advantage of that scheme 
>> is that we would only require 64K GTT alignment and no extra padding, 
>> but is perhaps a little complex.
>>
>> The merged solution is just to align and pad (i.e vma->node.size and 
>> not vma->size) out of the vma to 2M, which is dead simple 
>> implementation wise, but does potentially waste some GTT space and 
>> some of the local memory used for the actual page-table. For the 
>> alignment the kernel just validates that the GTT address is aligned to 
>> 2M in vma_insert(), and then for the padding it just inflates it to 
>> 2M, if userspace hasn't already.
>>
>> See the kernel-doc for @size: 
>> https://dri.freedesktop.org/docs/drm/gpu/driver-uapi.html?#c.drm_i915_gem_create_ext 
>>
>>
> 
> Ok, those requirements (2M VA alignment) will apply to VM_BIND also.
> This is unfortunate, but it is not something new enforced by VM_BIND.
> Other option is to go with 64K alignment and in VM_BIND case, user
> must ensure there is no mix-matching of 64K (lmem) and 4k (smem)
> mappings in the same 2M range. But this is not VM_BIND specific
> (will apply to soft-pinning in execbuf2 also).
> 
> I don't think we need any VA padding here as with VM_BIND VA is
> managed fully by the user. If we enforce VA to be 2M aligned, it
> will leave holes (if BOs are smaller then 2M), but nobody is going
> to allocate anything form there.

Note that we only apply the 2M alignment + padding for local memory 
pages, for system memory we don't have/need such restrictions. The VA 
padding then importantly prevents userspace from incorrectly (or 
maliciously) inserting 4K system memory object in some page-table 
operating in 64K GTT mode.

> 
> Niranjana
> 
>>>
>>> Regards,
>>>
>>> Tvrtko
>>>
>>>>
>>>> Niranjana
>>>>
>>>>> Regards,
>>>>>
>>>>> Tvrtko
>>>>>
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @flags: Supported flags are,
>>>>>> +     *
>>>>>> +     * I915_GEM_VM_BIND_READONLY:
>>>>>> +     * Mapping is read-only.
>>>>>> +     *
>>>>>> +     * I915_GEM_VM_BIND_CAPTURE:
>>>>>> +     * Capture this mapping in the dump upon GPU error.
>>>>>> +     */
>>>>>> +    __u64 flags;
>>>>>> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>>>> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>>>> +
>>>>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>>>>> mapping. */
>>>>>> +    __u64 extensions;
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>>>> + *
>>>>>> + * This structure is passed to VM_UNBIND ioctl and specifies the 
>>>>>> GPU virtual
>>>>>> + * address (VA) range that should be unbound from the device page 
>>>>>> table of the
>>>>>> + * specified address space (VM). The specified VA range must 
>>>>>> match one of the
>>>>>> + * mappings created with the VM_BIND ioctl. TLB is flushed upon 
>>>>>> unbind
>>>>>> + * completion.
>>>>>> + */
>>>>>> +struct drm_i915_gem_vm_unbind {
>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>> +    __u32 vm_id;
>>>>>> +
>>>>>> +    /** @rsvd: Reserved for future use; must be zero. */
>>>>>> +    __u32 rsvd;
>>>>>> +
>>>>>> +    /** @start: Virtual Address start to unbind */
>>>>>> +    __u64 start;
>>>>>> +
>>>>>> +    /** @length: Length of mapping to unbind */
>>>>>> +    __u64 length;
>>>>>> +
>>>>>> +    /** @flags: reserved for future usage, currently MBZ */
>>>>>> +    __u64 flags;
>>>>>> +
>>>>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>>>>> mapping. */
>>>>>> +    __u64 extensions;
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_vm_bind_fence - An input or output fence for 
>>>>>> the vm_bind
>>>>>> + * or the vm_unbind work.
>>>>>> + *
>>>>>> + * The vm_bind or vm_unbind aync worker will wait for input fence 
>>>>>> to signal
>>>>>> + * before starting the binding or unbinding.
>>>>>> + *
>>>>>> + * The vm_bind or vm_unbind async worker will signal the returned 
>>>>>> output fence
>>>>>> + * after the completion of binding or unbinding.
>>>>>> + */
>>>>>> +struct drm_i915_vm_bind_fence {
>>>>>> +    /** @handle: User's handle for a drm_syncobj to wait on or 
>>>>>> signal. */
>>>>>> +    __u32 handle;
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @flags: Supported flags are,
>>>>>> +     *
>>>>>> +     * I915_VM_BIND_FENCE_WAIT:
>>>>>> +     * Wait for the input fence before binding/unbinding
>>>>>> +     *
>>>>>> +     * I915_VM_BIND_FENCE_SIGNAL:
>>>>>> +     * Return bind/unbind completion fence as output
>>>>>> +     */
>>>>>> +    __u32 flags;
>>>>>> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>>>> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>>>> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>>>>> (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences 
>>>>>> for vm_bind
>>>>>> + * and vm_unbind.
>>>>>> + *
>>>>>> + * This structure describes an array of timeline drm_syncobj and 
>>>>>> associated
>>>>>> + * points for timeline variants of drm_syncobj. These timeline 
>>>>>> 'drm_syncobj's
>>>>>> + * can be input or output fences (See struct 
>>>>>> drm_i915_vm_bind_fence).
>>>>>> + */
>>>>>> +struct drm_i915_vm_bind_ext_timeline_fences {
>>>>>> +#define I915_VM_BIND_EXT_timeline_FENCES    0
>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>> +    struct i915_user_extension base;
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @fence_count: Number of elements in the @handles_ptr & 
>>>>>> @value_ptr
>>>>>> +     * arrays.
>>>>>> +     */
>>>>>> +    __u64 fence_count;
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @handles_ptr: Pointer to an array of struct 
>>>>>> drm_i915_vm_bind_fence
>>>>>> +     * of length @fence_count.
>>>>>> +     */
>>>>>> +    __u64 handles_ptr;
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @values_ptr: Pointer to an array of u64 values of length
>>>>>> +     * @fence_count.
>>>>>> +     * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>>>>>> +     * timeline drm_syncobj is invalid as it turns a drm_syncobj 
>>>>>> into a
>>>>>> +     * binary one.
>>>>>> +     */
>>>>>> +    __u64 values_ptr;
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_vm_bind_user_fence - An input or output user 
>>>>>> fence for the
>>>>>> + * vm_bind or the vm_unbind work.
>>>>>> + *
>>>>>> + * The vm_bind or vm_unbind aync worker will wait for the input 
>>>>>> fence (value at
>>>>>> + * @addr to become equal to @val) before starting the binding or 
>>>>>> unbinding.
>>>>>> + *
>>>>>> + * The vm_bind or vm_unbind async worker will signal the output 
>>>>>> fence after
>>>>>> + * the completion of binding or unbinding by writing @val to 
>>>>>> memory location at
>>>>>> + * @addr
>>>>>> + */
>>>>>> +struct drm_i915_vm_bind_user_fence {
>>>>>> +    /** @addr: User/Memory fence qword aligned process virtual 
>>>>>> address */
>>>>>> +    __u64 addr;
>>>>>> +
>>>>>> +    /** @val: User/Memory fence value to be written after bind 
>>>>>> completion */
>>>>>> +    __u64 val;
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @flags: Supported flags are,
>>>>>> +     *
>>>>>> +     * I915_VM_BIND_USER_FENCE_WAIT:
>>>>>> +     * Wait for the input fence before binding/unbinding
>>>>>> +     *
>>>>>> +     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>>>> +     * Return bind/unbind completion fence as output
>>>>>> +     */
>>>>>> +    __u32 flags;
>>>>>> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>>>> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>>>> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>>>> +    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences 
>>>>>> for vm_bind
>>>>>> + * and vm_unbind.
>>>>>> + *
>>>>>> + * These user fences can be input or output fences
>>>>>> + * (See struct drm_i915_vm_bind_user_fence).
>>>>>> + */
>>>>>> +struct drm_i915_vm_bind_ext_user_fence {
>>>>>> +#define I915_VM_BIND_EXT_USER_FENCES    1
>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>> +    struct i915_user_extension base;
>>>>>> +
>>>>>> +    /** @fence_count: Number of elements in the @user_fence_ptr 
>>>>>> array. */
>>>>>> +    __u64 fence_count;
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @user_fence_ptr: Pointer to an array of
>>>>>> +     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>>>> +     */
>>>>>> +    __u64 user_fence_ptr;
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array of 
>>>>>> batch buffer
>>>>>> + * gpu virtual addresses.
>>>>>> + *
>>>>>> + * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), 
>>>>>> this extension
>>>>>> + * must always be appended in the VM_BIND mode and it will be an 
>>>>>> error to
>>>>>> + * append this extension in older non-VM_BIND mode.
>>>>>> + */
>>>>>> +struct drm_i915_gem_execbuffer_ext_batch_addresses {
>>>>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES    1
>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>> +    struct i915_user_extension base;
>>>>>> +
>>>>>> +    /** @count: Number of addresses in the addr array. */
>>>>>> +    __u32 count;
>>>>>> +
>>>>>> +    /** @addr: An array of batch gpu virtual addresses. */
>>>>>> +    __u64 addr[0];
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_gem_execbuffer_ext_user_fence - First level 
>>>>>> batch completion
>>>>>> + * signaling extension.
>>>>>> + *
>>>>>> + * This extension allows user to attach a user fence (@addr, 
>>>>>> @value pair) to an
>>>>>> + * execbuf to be signaled by the command streamer after the 
>>>>>> completion of first
>>>>>> + * level batch, by writing the @value at specified @addr and 
>>>>>> triggering an
>>>>>> + * interrupt.
>>>>>> + * User can either poll for this user fence to signal or can also 
>>>>>> wait on it
>>>>>> + * with i915_gem_wait_user_fence ioctl.
>>>>>> + * This is very much usefaul for long running contexts where 
>>>>>> waiting on dma-fence
>>>>>> + * by user (like i915_gem_wait ioctl) is not supported.
>>>>>> + */
>>>>>> +struct drm_i915_gem_execbuffer_ext_user_fence {
>>>>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE        2
>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>> +    struct i915_user_extension base;
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @addr: User/Memory fence qword aligned GPU virtual address.
>>>>>> +     *
>>>>>> +     * Address has to be a valid GPU virtual address at the time of
>>>>>> +     * first level batch completion.
>>>>>> +     */
>>>>>> +    __u64 addr;
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @value: User/Memory fence Value to be written to above 
>>>>>> address
>>>>>> +     * after first level batch completes.
>>>>>> +     */
>>>>>> +    __u64 value;
>>>>>> +
>>>>>> +    /** @rsvd: Reserved for future extensions, MBZ */
>>>>>> +    __u64 rsvd;
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_gem_create_ext_vm_private - Extension to make 
>>>>>> the object
>>>>>> + * private to the specified VM.
>>>>>> + *
>>>>>> + * See struct drm_i915_gem_create_ext.
>>>>>> + */
>>>>>> +struct drm_i915_gem_create_ext_vm_private {
>>>>>> +#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>> +    struct i915_user_extension base;
>>>>>> +
>>>>>> +    /** @vm_id: Id of the VM to which the object is private */
>>>>>> +    __u32 vm_id;
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>>>>> + *
>>>>>> + * User/Memory fence can be woken up either by:
>>>>>> + *
>>>>>> + * 1. GPU context indicated by @ctx_id, or,
>>>>>> + * 2. Kerrnel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>>>> + *    @ctx_id is ignored when this flag is set.
>>>>>> + *
>>>>>> + * Wakeup condition is,
>>>>>> + * ``((*addr & mask) op (value & mask))``
>>>>>> + *
>>>>>> + * See :ref:`Documentation/driver-api/dma-buf.rst 
>>>>>> <indefinite_dma_fences>`
>>>>>> + */
>>>>>> +struct drm_i915_gem_wait_user_fence {
>>>>>> +    /** @extensions: Zero-terminated chain of extensions. */
>>>>>> +    __u64 extensions;
>>>>>> +
>>>>>> +    /** @addr: User/Memory fence address */
>>>>>> +    __u64 addr;
>>>>>> +
>>>>>> +    /** @ctx_id: Id of the Context which will signal the fence. */
>>>>>> +    __u32 ctx_id;
>>>>>> +
>>>>>> +    /** @op: Wakeup condition operator */
>>>>>> +    __u16 op;
>>>>>> +#define I915_UFENCE_WAIT_EQ      0
>>>>>> +#define I915_UFENCE_WAIT_NEQ     1
>>>>>> +#define I915_UFENCE_WAIT_GT      2
>>>>>> +#define I915_UFENCE_WAIT_GTE     3
>>>>>> +#define I915_UFENCE_WAIT_LT      4
>>>>>> +#define I915_UFENCE_WAIT_LTE     5
>>>>>> +#define I915_UFENCE_WAIT_BEFORE  6
>>>>>> +#define I915_UFENCE_WAIT_AFTER   7
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @flags: Supported flags are,
>>>>>> +     *
>>>>>> +     * I915_UFENCE_WAIT_SOFT:
>>>>>> +     *
>>>>>> +     * To be woken up by i915 driver async worker (not by GPU).
>>>>>> +     *
>>>>>> +     * I915_UFENCE_WAIT_ABSTIME:
>>>>>> +     *
>>>>>> +     * Wait timeout specified as absolute time.
>>>>>> +     */
>>>>>> +    __u16 flags;
>>>>>> +#define I915_UFENCE_WAIT_SOFT    0x1
>>>>>> +#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>>>> +
>>>>>> +    /** @value: Wakeup value */
>>>>>> +    __u64 value;
>>>>>> +
>>>>>> +    /** @mask: Wakeup mask */
>>>>>> +    __u64 mask;
>>>>>> +#define I915_UFENCE_WAIT_U8     0xffu
>>>>>> +#define I915_UFENCE_WAIT_U16    0xffffu
>>>>>> +#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>>>> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>>>> +
>>>>>> +    /**
>>>>>> +     * @timeout: Wait timeout in nanoseconds.
>>>>>> +     *
>>>>>> +     * If I915_UFENCE_WAIT_ABSTIME flag is set, then time timeout 
>>>>>> is the
>>>>>> +     * absolute time in nsec.
>>>>>> +     */
>>>>>> +    __s64 timeout;
>>>>>> +};
Niranjana Vishwanathapura June 9, 2022, 6:53 p.m. UTC | #25
On Thu, Jun 09, 2022 at 09:36:48AM +0100, Matthew Auld wrote:
>On 08/06/2022 22:32, Niranjana Vishwanathapura wrote:
>>On Wed, Jun 08, 2022 at 10:12:05AM +0100, Matthew Auld wrote:
>>>On 08/06/2022 08:17, Tvrtko Ursulin wrote:
>>>>
>>>>On 07/06/2022 20:37, Niranjana Vishwanathapura wrote:
>>>>>On Tue, Jun 07, 2022 at 11:27:14AM +0100, Tvrtko Ursulin wrote:
>>>>>>
>>>>>>On 17/05/2022 19:32, Niranjana Vishwanathapura wrote:
>>>>>>>VM_BIND and related uapi definitions
>>>>>>>
>>>>>>>v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>>    Also add new uapi and documentation as per review comments
>>>>>>>    from Daniel.
>>>>>>>
>>>>>>>Signed-off-by: Niranjana Vishwanathapura 
>>>>>>><niranjana.vishwanathapura@intel.com>
>>>>>>>---
>>>>>>> Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>>>+++++++++++++++++++++++++++
>>>>>>> 1 file changed, 399 insertions(+)
>>>>>>> create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>
>>>>>>>diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>>b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>new file mode 100644
>>>>>>>index 000000000000..589c0a009107
>>>>>>>--- /dev/null
>>>>>>>+++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>@@ -0,0 +1,399 @@
>>>>>>>+/* SPDX-License-Identifier: MIT */
>>>>>>>+/*
>>>>>>>+ * Copyright © 2022 Intel Corporation
>>>>>>>+ */
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>+ *
>>>>>>>+ * VM_BIND feature availability.
>>>>>>>+ * See typedef drm_i915_getparam_t param.
>>>>>>>+ */
>>>>>>>+#define I915_PARAM_HAS_VM_BIND        57
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>+ *
>>>>>>>+ * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>>>+ * See struct drm_i915_gem_vm_control flags.
>>>>>>>+ *
>>>>>>>+ * A VM in VM_BIND mode will not support the older 
>>>>>>>execbuff mode of binding.
>>>>>>>+ * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>>>>execlist (ie., the
>>>>>>>+ * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>>+ * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>>+ * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>>+ * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension 
>>>>>>>must be provided
>>>>>>>+ * to pass in the batch buffer addresses.
>>>>>>>+ *
>>>>>>>+ * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>>+ * I915_EXEC_BATCH_FIRST of 
>>>>>>>&drm_i915_gem_execbuffer2.flags must be 0
>>>>>>>+ * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS 
>>>>>>>flag must always be
>>>>>>>+ * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>>+ * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>>>batch_len fields
>>>>>>>+ * of struct drm_i915_gem_execbuffer2 are also not used 
>>>>>>>and must be 0.
>>>>>>>+ */
>>>>>>>+#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>>>>>+ *
>>>>>>>+ * Flag to declare context as long running.
>>>>>>>+ * See struct drm_i915_gem_context_create_ext flags.
>>>>>>>+ *
>>>>>>>+ * Usage of dma-fence expects that they complete in 
>>>>>>>reasonable amount of time.
>>>>>>>+ * Compute on the other hand can be long running. Hence 
>>>>>>>it is not appropriate
>>>>>>>+ * for compute contexts to export request completion 
>>>>>>>dma-fence to user.
>>>>>>>+ * The dma-fence usage will be limited to in-kernel 
>>>>>>>consumption only.
>>>>>>>+ * Compute contexts need to use user/memory fence.
>>>>>>>+ *
>>>>>>>+ * So, long running contexts do not support output fences. Hence,
>>>>>>>+ * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
>>>>>>>+ * I915_EXEC_FENCE_SIGNAL (See 
>>>>>>>&drm_i915_gem_exec_fence.flags) are expected
>>>>>>>+ * to be not used.
>>>>>>>+ *
>>>>>>>+ * DRM_I915_GEM_WAIT ioctl call is also not supported for 
>>>>>>>objects mapped
>>>>>>>+ * to long running contexts.
>>>>>>>+ */
>>>>>>>+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>>>>>+
>>>>>>>+/* VM_BIND related ioctls */
>>>>>>>+#define DRM_I915_GEM_VM_BIND        0x3d
>>>>>>>+#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>>>>>+#define DRM_I915_GEM_WAIT_USER_FENCE    0x3f
>>>>>>>+
>>>>>>>+#define DRM_IOCTL_I915_GEM_VM_BIND 
>>>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct 
>>>>>>>drm_i915_gem_vm_bind)
>>>>>>>+#define DRM_IOCTL_I915_GEM_VM_UNBIND 
>>>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct 
>>>>>>>drm_i915_gem_vm_bind)
>>>>>>>+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>>>>>DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, 
>>>>>>>struct drm_i915_gem_wait_user_fence)
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>>>>>+ *
>>>>>>>+ * This structure is passed to VM_BIND ioctl and 
>>>>>>>specifies the mapping of GPU
>>>>>>>+ * virtual address (VA) range to the section of an object 
>>>>>>>that should be bound
>>>>>>>+ * in the device page table of the specified address space (VM).
>>>>>>>+ * The VA range specified must be unique (ie., not 
>>>>>>>currently bound) and can
>>>>>>>+ * be mapped to whole object or a section of the object 
>>>>>>>(partial binding).
>>>>>>>+ * Multiple VA mappings can be created to the same 
>>>>>>>section of the object
>>>>>>>+ * (aliasing).
>>>>>>>+ */
>>>>>>>+struct drm_i915_gem_vm_bind {
>>>>>>>+    /** @vm_id: VM (address space) id to bind */
>>>>>>>+    __u32 vm_id;
>>>>>>>+
>>>>>>>+    /** @handle: Object handle */
>>>>>>>+    __u32 handle;
>>>>>>>+
>>>>>>>+    /** @start: Virtual Address start to bind */
>>>>>>>+    __u64 start;
>>>>>>>+
>>>>>>>+    /** @offset: Offset in object to bind */
>>>>>>>+    __u64 offset;
>>>>>>>+
>>>>>>>+    /** @length: Length of mapping to bind */
>>>>>>>+    __u64 length;
>>>>>>
>>>>>>Does it support, or should it, equivalent of 
>>>>>>EXEC_OBJECT_PAD_TO_SIZE? Or if not userspace is expected to 
>>>>>>map the remainder of the space to a dummy object? In which 
>>>>>>case would there be any alignment/padding issues preventing 
>>>>>>the two bind to be placed next to each other?
>>>>>>
>>>>>>I ask because someone from the compute side asked me about a 
>>>>>>problem with their strategy of dealing with overfetch and I 
>>>>>>suggested pad to size.
>>>>>>
>>>>>
>>>>>Thanks Tvrtko,
>>>>>I think we shouldn't be needing it. As with VM_BIND VA assignment
>>>>>is completely pushed to userspace, no padding should be necessary
>>>>>once the 'start' and 'size' alignment conditions are met.
>>>>>
>>>>>I will add some documentation on alignment requirement here.
>>>>>Generally, 'start' and 'size' should be 4K aligned. But, I think
>>>>>when we have 64K lmem page sizes (dg2 and xehpsdv), they need to
>>>>>be 64K aligned.
>>>>
>>>>+ Matt
>>>>
>>>>Align to 64k is enough for all overfetch issues?
>>>>
>>>>Apparently compute has a situation where a buffer is received by 
>>>>one component and another has to apply more alignment to it, to 
>>>>deal with overfetch. Since they cannot grow the actual BO if 
>>>>they wanted to VM_BIND a scratch area on top? Or perhaps none of 
>>>>this is a problem on discrete and original BO should be 
>>>>correctly allocated to start with.
>>>>
>>>>Side question - what about the align to 2MiB mentioned in 
>>>>i915_vma_insert to avoid mixing 4k and 64k PTEs? That does not 
>>>>apply to discrete?
>>>
>>>Not sure about the overfetch thing, but yeah dg2 & xehpsdv both 
>>>require a minimum of 64K pages underneath for local memory, and 
>>>the BO size will also be rounded up accordingly. And yeah the 
>>>complication arises due to not being able to mix 4K + 64K GTT 
>>>pages within the same page-table (existed since even gen8). Note 
>>>that 4K here is what we typically get for system memory.
>>>
>>>Originally we had a memory coloring scheme to track the "color" of 
>>>each page-table, which basically ensures that userspace can't do 
>>>something nasty like mixing page sizes. The advantage of that 
>>>scheme is that we would only require 64K GTT alignment and no 
>>>extra padding, but is perhaps a little complex.
>>>
>>>The merged solution is just to align and pad (i.e vma->node.size 
>>>and not vma->size) out of the vma to 2M, which is dead simple 
>>>implementation wise, but does potentially waste some GTT space and 
>>>some of the local memory used for the actual page-table. For the 
>>>alignment the kernel just validates that the GTT address is 
>>>aligned to 2M in vma_insert(), and then for the padding it just 
>>>inflates it to 2M, if userspace hasn't already.
>>>
>>>See the kernel-doc for @size: https://dri.freedesktop.org/docs/drm/gpu/driver-uapi.html?#c.drm_i915_gem_create_ext
>>>
>>>
>>
>>Ok, those requirements (2M VA alignment) will apply to VM_BIND also.
>>This is unfortunate, but it is not something new enforced by VM_BIND.
>>Other option is to go with 64K alignment and in VM_BIND case, user
>>must ensure there is no mix-matching of 64K (lmem) and 4k (smem)
>>mappings in the same 2M range. But this is not VM_BIND specific
>>(will apply to soft-pinning in execbuf2 also).
>>
>>I don't think we need any VA padding here as with VM_BIND VA is
>>managed fully by the user. If we enforce VA to be 2M aligned, it
>>will leave holes (if BOs are smaller then 2M), but nobody is going
>>to allocate anything form there.
>
>Note that we only apply the 2M alignment + padding for local memory 
>pages, for system memory we don't have/need such restrictions. The VA 
>padding then importantly prevents userspace from incorrectly (or 
>maliciously) inserting 4K system memory object in some page-table 
>operating in 64K GTT mode.
>

Thanks Matt.
I also synced offline with Matt a bit on this.
We don't need an explicit 'pad_to_size'. The i915 driver implicitly
pads the size to a 2M boundary for LMEM BOs, which will apply to
VM_BIND also.
The remaining question is whether we enforce 2M VA alignment for
lmem BOs (just like the legacy execbuff path) on dg2 & xehpsdv, or go with
just 64K alignment but ensure there is no mixing of 4K and 64K
mappings in the same 2M range. I think we can go with the 2M alignment
requirement for VM_BIND also. So, no new requirements here for VM_BIND.

I will update the documentation.

Niranjana

>>
>>Niranjana
>>
>>>>
>>>>Regards,
>>>>
>>>>Tvrtko
>>>>
>>>>>
>>>>>Niranjana
>>>>>
>>>>>>Regards,
>>>>>>
>>>>>>Tvrtko
>>>>>>
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @flags: Supported flags are,
>>>>>>>+     *
>>>>>>>+     * I915_GEM_VM_BIND_READONLY:
>>>>>>>+     * Mapping is read-only.
>>>>>>>+     *
>>>>>>>+     * I915_GEM_VM_BIND_CAPTURE:
>>>>>>>+     * Capture this mapping in the dump upon GPU error.
>>>>>>>+     */
>>>>>>>+    __u64 flags;
>>>>>>>+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>>>>>+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>>>>>+
>>>>>>>+    /** @extensions: 0-terminated chain of extensions for 
>>>>>>>this mapping. */
>>>>>>>+    __u64 extensions;
>>>>>>>+};
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>>>>>+ *
>>>>>>>+ * This structure is passed to VM_UNBIND ioctl and 
>>>>>>>specifies the GPU virtual
>>>>>>>+ * address (VA) range that should be unbound from the 
>>>>>>>device page table of the
>>>>>>>+ * specified address space (VM). The specified VA range 
>>>>>>>must match one of the
>>>>>>>+ * mappings created with the VM_BIND ioctl. TLB is 
>>>>>>>flushed upon unbind
>>>>>>>+ * completion.
>>>>>>>+ */
>>>>>>>+struct drm_i915_gem_vm_unbind {
>>>>>>>+    /** @vm_id: VM (address space) id to bind */
>>>>>>>+    __u32 vm_id;
>>>>>>>+
>>>>>>>+    /** @rsvd: Reserved for future use; must be zero. */
>>>>>>>+    __u32 rsvd;
>>>>>>>+
>>>>>>>+    /** @start: Virtual Address start to unbind */
>>>>>>>+    __u64 start;
>>>>>>>+
>>>>>>>+    /** @length: Length of mapping to unbind */
>>>>>>>+    __u64 length;
>>>>>>>+
>>>>>>>+    /** @flags: reserved for future usage, currently MBZ */
>>>>>>>+    __u64 flags;
>>>>>>>+
>>>>>>>+    /** @extensions: 0-terminated chain of extensions for 
>>>>>>>this mapping. */
>>>>>>>+    __u64 extensions;
>>>>>>>+};
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_vm_bind_fence - An input or output 
>>>>>>>fence for the vm_bind
>>>>>>>+ * or the vm_unbind work.
>>>>>>>+ *
>>>>>>>+ * The vm_bind or vm_unbind aync worker will wait for 
>>>>>>>input fence to signal
>>>>>>>+ * before starting the binding or unbinding.
>>>>>>>+ *
>>>>>>>+ * The vm_bind or vm_unbind async worker will signal the 
>>>>>>>returned output fence
>>>>>>>+ * after the completion of binding or unbinding.
>>>>>>>+ */
>>>>>>>+struct drm_i915_vm_bind_fence {
>>>>>>>+    /** @handle: User's handle for a drm_syncobj to wait 
>>>>>>>on or signal. */
>>>>>>>+    __u32 handle;
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @flags: Supported flags are,
>>>>>>>+     *
>>>>>>>+     * I915_VM_BIND_FENCE_WAIT:
>>>>>>>+     * Wait for the input fence before binding/unbinding
>>>>>>>+     *
>>>>>>>+     * I915_VM_BIND_FENCE_SIGNAL:
>>>>>>>+     * Return bind/unbind completion fence as output
>>>>>>>+     */
>>>>>>>+    __u32 flags;
>>>>>>>+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>>>>>+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>>>>>+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>>>>>>(-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>>>>>+};
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline 
>>>>>>>fences for vm_bind
>>>>>>>+ * and vm_unbind.
>>>>>>>+ *
>>>>>>>+ * This structure describes an array of timeline 
>>>>>>>drm_syncobj and associated
>>>>>>>+ * points for timeline variants of drm_syncobj. These 
>>>>>>>timeline 'drm_syncobj's
>>>>>>>+ * can be input or output fences (See struct 
>>>>>>>drm_i915_vm_bind_fence).
>>>>>>>+ */
>>>>>>>+struct drm_i915_vm_bind_ext_timeline_fences {
>>>>>>>+#define I915_VM_BIND_EXT_timeline_FENCES    0
>>>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>+    struct i915_user_extension base;
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @fence_count: Number of elements in the 
>>>>>>>@handles_ptr & @value_ptr
>>>>>>>+     * arrays.
>>>>>>>+     */
>>>>>>>+    __u64 fence_count;
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @handles_ptr: Pointer to an array of struct 
>>>>>>>drm_i915_vm_bind_fence
>>>>>>>+     * of length @fence_count.
>>>>>>>+     */
>>>>>>>+    __u64 handles_ptr;
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @values_ptr: Pointer to an array of u64 values of length
>>>>>>>+     * @fence_count.
>>>>>>>+     * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
>>>>>>>+     * timeline drm_syncobj is invalid as it turns a 
>>>>>>>drm_syncobj into a
>>>>>>>+     * binary one.
>>>>>>>+     */
>>>>>>>+    __u64 values_ptr;
>>>>>>>+};
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_vm_bind_user_fence - An input or 
>>>>>>>output user fence for the
>>>>>>>+ * vm_bind or the vm_unbind work.
>>>>>>>+ *
>>>>>>>+ * The vm_bind or vm_unbind aync worker will wait for the 
>>>>>>>input fence (value at
>>>>>>>+ * @addr to become equal to @val) before starting the 
>>>>>>>binding or unbinding.
>>>>>>>+ *
>>>>>>>+ * The vm_bind or vm_unbind async worker will signal the 
>>>>>>>output fence after
>>>>>>>+ * the completion of binding or unbinding by writing @val 
>>>>>>>to memory location at
>>>>>>>+ * @addr
>>>>>>>+ */
>>>>>>>+struct drm_i915_vm_bind_user_fence {
>>>>>>>+    /** @addr: User/Memory fence qword aligned process 
>>>>>>>virtual address */
>>>>>>>+    __u64 addr;
>>>>>>>+
>>>>>>>+    /** @val: User/Memory fence value to be written after 
>>>>>>>bind completion */
>>>>>>>+    __u64 val;
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @flags: Supported flags are,
>>>>>>>+     *
>>>>>>>+     * I915_VM_BIND_USER_FENCE_WAIT:
>>>>>>>+     * Wait for the input fence before binding/unbinding
>>>>>>>+     *
>>>>>>>+     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>>>>>+     * Return bind/unbind completion fence as output
>>>>>>>+     */
>>>>>>>+    __u32 flags;
>>>>>>>+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>>>>>+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>>>>>+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>>>>>+    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>>>>>+};
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_vm_bind_ext_user_fence - User/memory 
>>>>>>>fences for vm_bind
>>>>>>>+ * and vm_unbind.
>>>>>>>+ *
>>>>>>>+ * These user fences can be input or output fences
>>>>>>>+ * (See struct drm_i915_vm_bind_user_fence).
>>>>>>>+ */
>>>>>>>+struct drm_i915_vm_bind_ext_user_fence {
>>>>>>>+#define I915_VM_BIND_EXT_USER_FENCES    1
>>>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>+    struct i915_user_extension base;
>>>>>>>+
>>>>>>>+    /** @fence_count: Number of elements in the 
>>>>>>>@user_fence_ptr array. */
>>>>>>>+    __u64 fence_count;
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @user_fence_ptr: Pointer to an array of
>>>>>>>+     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>>>>>+     */
>>>>>>>+    __u64 user_fence_ptr;
>>>>>>>+};
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_gem_execbuffer_ext_batch_addresses - 
>>>>>>>Array of batch buffer
>>>>>>>+ * gpu virtual addresses.
>>>>>>>+ *
>>>>>>>+ * In the execbuff ioctl (See struct 
>>>>>>>drm_i915_gem_execbuffer2), this extension
>>>>>>>+ * must always be appended in the VM_BIND mode and it 
>>>>>>>will be an error to
>>>>>>>+ * append this extension in older non-VM_BIND mode.
>>>>>>>+ */
>>>>>>>+struct drm_i915_gem_execbuffer_ext_batch_addresses {
>>>>>>>+#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES    1
>>>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>+    struct i915_user_extension base;
>>>>>>>+
>>>>>>>+    /** @count: Number of addresses in the addr array. */
>>>>>>>+    __u32 count;
>>>>>>>+
>>>>>>>+    /** @addr: An array of batch gpu virtual addresses. */
>>>>>>>+    __u64 addr[0];
>>>>>>>+};
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_gem_execbuffer_ext_user_fence - First 
>>>>>>>level batch completion
>>>>>>>+ * signaling extension.
>>>>>>>+ *
>>>>>>>+ * This extension allows user to attach a user fence 
>>>>>>>(@addr, @value pair) to an
>>>>>>>+ * execbuf to be signaled by the command streamer after 
>>>>>>>the completion of first
>>>>>>>+ * level batch, by writing the @value at specified @addr 
>>>>>>>and triggering an
>>>>>>>+ * interrupt.
>>>>>>>+ * User can either poll for this user fence to signal or 
>>>>>>>can also wait on it
>>>>>>>+ * with i915_gem_wait_user_fence ioctl.
>>>>>>>+ * This is very much usefaul for long running contexts 
>>>>>>>where waiting on dma-fence
>>>>>>>+ * by user (like i915_gem_wait ioctl) is not supported.
>>>>>>>+ */
>>>>>>>+struct drm_i915_gem_execbuffer_ext_user_fence {
>>>>>>>+#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE        2
>>>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>+    struct i915_user_extension base;
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @addr: User/Memory fence qword aligned GPU virtual address.
>>>>>>>+     *
>>>>>>>+     * Address has to be a valid GPU virtual address at the time of
>>>>>>>+     * first level batch completion.
>>>>>>>+     */
>>>>>>>+    __u64 addr;
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @value: User/Memory fence Value to be written to 
>>>>>>>above address
>>>>>>>+     * after first level batch completes.
>>>>>>>+     */
>>>>>>>+    __u64 value;
>>>>>>>+
>>>>>>>+    /** @rsvd: Reserved for future extensions, MBZ */
>>>>>>>+    __u64 rsvd;
>>>>>>>+};
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_gem_create_ext_vm_private - Extension 
>>>>>>>to make the object
>>>>>>>+ * private to the specified VM.
>>>>>>>+ *
>>>>>>>+ * See struct drm_i915_gem_create_ext.
>>>>>>>+ */
>>>>>>>+struct drm_i915_gem_create_ext_vm_private {
>>>>>>>+#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>>>>>>+    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>+    struct i915_user_extension base;
>>>>>>>+
>>>>>>>+    /** @vm_id: Id of the VM to which the object is private */
>>>>>>>+    __u32 vm_id;
>>>>>>>+};
>>>>>>>+
>>>>>>>+/**
>>>>>>>+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
>>>>>>>+ *
>>>>>>>+ * User/Memory fence can be woken up either by:
>>>>>>>+ *
>>>>>>>+ * 1. GPU context indicated by @ctx_id, or,
>>>>>>>+ * 2. Kerrnel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>>>>>+ *    @ctx_id is ignored when this flag is set.
>>>>>>>+ *
>>>>>>>+ * Wakeup condition is,
>>>>>>>+ * ``((*addr & mask) op (value & mask))``
>>>>>>>+ *
>>>>>>>+ * See :ref:`Documentation/driver-api/dma-buf.rst 
>>>>>>><indefinite_dma_fences>`
>>>>>>>+ */
>>>>>>>+struct drm_i915_gem_wait_user_fence {
>>>>>>>+    /** @extensions: Zero-terminated chain of extensions. */
>>>>>>>+    __u64 extensions;
>>>>>>>+
>>>>>>>+    /** @addr: User/Memory fence address */
>>>>>>>+    __u64 addr;
>>>>>>>+
>>>>>>>+    /** @ctx_id: Id of the Context which will signal the fence. */
>>>>>>>+    __u32 ctx_id;
>>>>>>>+
>>>>>>>+    /** @op: Wakeup condition operator */
>>>>>>>+    __u16 op;
>>>>>>>+#define I915_UFENCE_WAIT_EQ      0
>>>>>>>+#define I915_UFENCE_WAIT_NEQ     1
>>>>>>>+#define I915_UFENCE_WAIT_GT      2
>>>>>>>+#define I915_UFENCE_WAIT_GTE     3
>>>>>>>+#define I915_UFENCE_WAIT_LT      4
>>>>>>>+#define I915_UFENCE_WAIT_LTE     5
>>>>>>>+#define I915_UFENCE_WAIT_BEFORE  6
>>>>>>>+#define I915_UFENCE_WAIT_AFTER   7
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @flags: Supported flags are,
>>>>>>>+     *
>>>>>>>+     * I915_UFENCE_WAIT_SOFT:
>>>>>>>+     *
>>>>>>>+     * To be woken up by i915 driver async worker (not by GPU).
>>>>>>>+     *
>>>>>>>+     * I915_UFENCE_WAIT_ABSTIME:
>>>>>>>+     *
>>>>>>>+     * Wait timeout specified as absolute time.
>>>>>>>+     */
>>>>>>>+    __u16 flags;
>>>>>>>+#define I915_UFENCE_WAIT_SOFT    0x1
>>>>>>>+#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>>>>>+
>>>>>>>+    /** @value: Wakeup value */
>>>>>>>+    __u64 value;
>>>>>>>+
>>>>>>>+    /** @mask: Wakeup mask */
>>>>>>>+    __u64 mask;
>>>>>>>+#define I915_UFENCE_WAIT_U8     0xffu
>>>>>>>+#define I915_UFENCE_WAIT_U16    0xffffu
>>>>>>>+#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>>>>>+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>>>>>+
>>>>>>>+    /**
>>>>>>>+     * @timeout: Wait timeout in nanoseconds.
>>>>>>>+     *
>>>>>>>+     * If I915_UFENCE_WAIT_ABSTIME flag is set, then time 
>>>>>>>timeout is the
>>>>>>>+     * absolute time in nsec.
>>>>>>>+     */
>>>>>>>+    __s64 timeout;
>>>>>>>+};
Matthew Brost June 10, 2022, 8:34 a.m. UTC | #26
On Tue, May 17, 2022 at 11:32:12AM -0700, Niranjana Vishwanathapura wrote:
> VM_BIND and related uapi definitions
> 
> v2: Ensure proper kernel-doc formatting with cross references.
>     Also add new uapi and documentation as per review comments
>     from Daniel.
> 
> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
> ---
>  Documentation/gpu/rfc/i915_vm_bind.h | 399 +++++++++++++++++++++++++++
>  1 file changed, 399 insertions(+)
>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
> 
> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
> new file mode 100644
> index 000000000000..589c0a009107
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
> @@ -0,0 +1,399 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2022 Intel Corporation
> + */
> +
> +/**
> + * DOC: I915_PARAM_HAS_VM_BIND
> + *
> + * VM_BIND feature availability.
> + * See typedef drm_i915_getparam_t param.
> + */
> +#define I915_PARAM_HAS_VM_BIND		57
> +
> +/**
> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
> + *
> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
> + * See struct drm_i915_gem_vm_control flags.
> + *
> + * A VM in VM_BIND mode will not support the older execbuff mode of binding.
> + * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
> + * to pass in the batch buffer addresses.
> + *
> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
> + * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
> + * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
> + */
> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
> +
> +/**
> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
> + *
> + * Flag to declare context as long running.
> + * See struct drm_i915_gem_context_create_ext flags.
> + *
> + * Usage of dma-fence expects that they complete in reasonable amount of time.
> + * Compute on the other hand can be long running. Hence it is not appropriate
> + * for compute contexts to export request completion dma-fence to user.
> + * The dma-fence usage will be limited to in-kernel consumption only.
> + * Compute contexts need to use user/memory fence.
> + *
> + * So, long running contexts do not support output fences. Hence,
> + * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) are expected
> + * to be not used.
> + *
> + * DRM_I915_GEM_WAIT ioctl call is also not supported for objects mapped
> + * to long running contexts.
> + */
> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
> +
> +/* VM_BIND related ioctls */
> +#define DRM_I915_GEM_VM_BIND		0x3d
> +#define DRM_I915_GEM_VM_UNBIND		0x3e
> +#define DRM_I915_GEM_WAIT_USER_FENCE	0x3f
> +
> +#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
> +#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
> +
> +/**
> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
> + *
> + * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
> + * virtual address (VA) range to the section of an object that should be bound
> + * in the device page table of the specified address space (VM).
> + * The VA range specified must be unique (ie., not currently bound) and can
> + * be mapped to whole object or a section of the object (partial binding).
> + * Multiple VA mappings can be created to the same section of the object
> + * (aliasing).
> + */
> +struct drm_i915_gem_vm_bind {
> +	/** @vm_id: VM (address space) id to bind */
> +	__u32 vm_id;
> +
> +	/** @handle: Object handle */
> +	__u32 handle;
> +
> +	/** @start: Virtual Address start to bind */
> +	__u64 start;
> +
> +	/** @offset: Offset in object to bind */
> +	__u64 offset;
> +
> +	/** @length: Length of mapping to bind */
> +	__u64 length;
> +
> +	/**
> +	 * @flags: Supported flags are,
> +	 *
> +	 * I915_GEM_VM_BIND_READONLY:
> +	 * Mapping is read-only.
> +	 *
> +	 * I915_GEM_VM_BIND_CAPTURE:
> +	 * Capture this mapping in the dump upon GPU error.
> +	 */
> +	__u64 flags;
> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
> +
> +	/** @extensions: 0-terminated chain of extensions for this mapping. */
> +	__u64 extensions;
> +};
> +
> +/**
> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
> + *
> + * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
> + * address (VA) range that should be unbound from the device page table of the
> + * specified address space (VM). The specified VA range must match one of the
> + * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
> + * completion.
> + */
> +struct drm_i915_gem_vm_unbind {
> +	/** @vm_id: VM (address space) id to bind */
> +	__u32 vm_id;
> +
> +	/** @rsvd: Reserved for future use; must be zero. */
> +	__u32 rsvd;
> +
> +	/** @start: Virtual Address start to unbind */
> +	__u64 start;
> +
> +	/** @length: Length of mapping to unbind */
> +	__u64 length;

This probably isn't needed. We are never going to unbind a subset of a
VMA are we? That being said it can't hurt as a sanity check (e.g.
internal vma->length == user unbind length).

> +
> +	/** @flags: reserved for future usage, currently MBZ */
> +	__u64 flags;
> +
> +	/** @extensions: 0-terminated chain of extensions for this mapping. */
> +	__u64 extensions;
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
> + * or the vm_unbind work.
> + *
> + * The vm_bind or vm_unbind aync worker will wait for input fence to signal
> + * before starting the binding or unbinding.
> + *
> + * The vm_bind or vm_unbind async worker will signal the returned output fence
> + * after the completion of binding or unbinding.
> + */
> +struct drm_i915_vm_bind_fence {
> +	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
> +	__u32 handle;
> +
> +	/**
> +	 * @flags: Supported flags are,
> +	 *
> +	 * I915_VM_BIND_FENCE_WAIT:
> +	 * Wait for the input fence before binding/unbinding
> +	 *
> +	 * I915_VM_BIND_FENCE_SIGNAL:
> +	 * Return bind/unbind completion fence as output
> +	 */
> +	__u32 flags;
> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
> + * and vm_unbind.
> + *
> + * This structure describes an array of timeline drm_syncobj and associated
> + * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
> + * can be input or output fences (See struct drm_i915_vm_bind_fence).
> + */
> +struct drm_i915_vm_bind_ext_timeline_fences {
> +#define I915_VM_BIND_EXT_timeline_FENCES	0
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/**
> +	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
> +	 * arrays.
> +	 */
> +	__u64 fence_count;
> +
> +	/**
> +	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
> +	 * of length @fence_count.
> +	 */
> +	__u64 handles_ptr;
> +
> +	/**
> +	 * @values_ptr: Pointer to an array of u64 values of length
> +	 * @fence_count.
> +	 * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
> +	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
> +	 * binary one.
> +	 */
> +	__u64 values_ptr;
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
> + * vm_bind or the vm_unbind work.
> + *
> + * The vm_bind or vm_unbind aync worker will wait for the input fence (value at
> + * @addr to become equal to @val) before starting the binding or unbinding.
> + *
> + * The vm_bind or vm_unbind async worker will signal the output fence after
> + * the completion of binding or unbinding by writing @val to memory location at
> + * @addr
> + */
> +struct drm_i915_vm_bind_user_fence {
> +	/** @addr: User/Memory fence qword aligned process virtual address */
> +	__u64 addr;
> +
> +	/** @val: User/Memory fence value to be written after bind completion */
> +	__u64 val;
> +
> +	/**
> +	 * @flags: Supported flags are,
> +	 *
> +	 * I915_VM_BIND_USER_FENCE_WAIT:
> +	 * Wait for the input fence before binding/unbinding
> +	 *
> +	 * I915_VM_BIND_USER_FENCE_SIGNAL:
> +	 * Return bind/unbind completion fence as output
> +	 */
> +	__u32 flags;
> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
> +	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
> +};
> +
> +/**
> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
> + * and vm_unbind.
> + *
> + * These user fences can be input or output fences
> + * (See struct drm_i915_vm_bind_user_fence).
> + */
> +struct drm_i915_vm_bind_ext_user_fence {
> +#define I915_VM_BIND_EXT_USER_FENCES	1
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/** @fence_count: Number of elements in the @user_fence_ptr array. */
> +	__u64 fence_count;
> +
> +	/**
> +	 * @user_fence_ptr: Pointer to an array of
> +	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
> +	 */
> +	__u64 user_fence_ptr;
> +};
> +

IMO all of these fence structs should be a generic sync interface shared
between both vm bind and exec3 rather than unique extensions.

Both vm bind and exec3 should have something like this:

__64 syncs;	/* userptr to an array of generic syncs */
__u64 n_syncs;

Having an array of syncs lets the kernel do one user copy for all the
syncs rather than reading them in a chain.

A generic sync object encapsulates all possible syncs (in / out -
syncobj, syncobj timeline, ufence, future sync concepts).

e.g.

struct {
	__u32 user_ext;
	__u32 flag;	/* in / out, type, whatever else info we need */
	union {
		__u32 handle; 	/* to syncobj */
		__u64 addr; 	/* ufence address */
	};
	__u64 seqno;	/* syncobj timeline, ufence write value */
	...reserve enough bits for future...
}

This unifies binds and execs by using the same sync interface,
instilling the concept that binds and execs are the same op (queued
operation w/ in/out fences).

Matt

> +/**
> + * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array of batch buffer
> + * gpu virtual addresses.
> + *
> + * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), this extension
> + * must always be appended in the VM_BIND mode and it will be an error to
> + * append this extension in older non-VM_BIND mode.
> + */
> +struct drm_i915_gem_execbuffer_ext_batch_addresses {
> +#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES	1
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/** @count: Number of addresses in the addr array. */
> +	__u32 count;
> +
> +	/** @addr: An array of batch gpu virtual addresses. */
> +	__u64 addr[0];
> +};
> +
> +/**
> + * struct drm_i915_gem_execbuffer_ext_user_fence - First level batch completion
> + * signaling extension.
> + *
> + * This extension allows user to attach a user fence (@addr, @value pair) to an
> + * execbuf to be signaled by the command streamer after the completion of first
> + * level batch, by writing the @value at specified @addr and triggering an
> + * interrupt.
> + * User can either poll for this user fence to signal or can also wait on it
> + * with i915_gem_wait_user_fence ioctl.
> + * This is very much usefaul for long running contexts where waiting on dma-fence
> + * by user (like i915_gem_wait ioctl) is not supported.
> + */
> +struct drm_i915_gem_execbuffer_ext_user_fence {
> +#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE		2
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/**
> +	 * @addr: User/Memory fence qword aligned GPU virtual address.
> +	 *
> +	 * Address has to be a valid GPU virtual address at the time of
> +	 * first level batch completion.
> +	 */
> +	__u64 addr;
> +
> +	/**
> +	 * @value: User/Memory fence Value to be written to above address
> +	 * after first level batch completes.
> +	 */
> +	__u64 value;
> +
> +	/** @rsvd: Reserved for future extensions, MBZ */
> +	__u64 rsvd;
> +};
> +
> +/**
> + * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
> + * private to the specified VM.
> + *
> + * See struct drm_i915_gem_create_ext.
> + */
> +struct drm_i915_gem_create_ext_vm_private {
> +#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
> +	/** @base: Extension link. See struct i915_user_extension. */
> +	struct i915_user_extension base;
> +
> +	/** @vm_id: Id of the VM to which the object is private */
> +	__u32 vm_id;
> +};
> +
> +/**
> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
> + *
> + * User/Memory fence can be woken up either by:
> + *
> + * 1. GPU context indicated by @ctx_id, or,
> + * 2. Kerrnel driver async worker upon I915_UFENCE_WAIT_SOFT.
> + *    @ctx_id is ignored when this flag is set.
> + *
> + * Wakeup condition is,
> + * ``((*addr & mask) op (value & mask))``
> + *
> + * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
> + */
> +struct drm_i915_gem_wait_user_fence {
> +	/** @extensions: Zero-terminated chain of extensions. */
> +	__u64 extensions;
> +
> +	/** @addr: User/Memory fence address */
> +	__u64 addr;
> +
> +	/** @ctx_id: Id of the Context which will signal the fence. */
> +	__u32 ctx_id;
> +
> +	/** @op: Wakeup condition operator */
> +	__u16 op;
> +#define I915_UFENCE_WAIT_EQ      0
> +#define I915_UFENCE_WAIT_NEQ     1
> +#define I915_UFENCE_WAIT_GT      2
> +#define I915_UFENCE_WAIT_GTE     3
> +#define I915_UFENCE_WAIT_LT      4
> +#define I915_UFENCE_WAIT_LTE     5
> +#define I915_UFENCE_WAIT_BEFORE  6
> +#define I915_UFENCE_WAIT_AFTER   7
> +
> +	/**
> +	 * @flags: Supported flags are,
> +	 *
> +	 * I915_UFENCE_WAIT_SOFT:
> +	 *
> +	 * To be woken up by i915 driver async worker (not by GPU).
> +	 *
> +	 * I915_UFENCE_WAIT_ABSTIME:
> +	 *
> +	 * Wait timeout specified as absolute time.
> +	 */
> +	__u16 flags;
> +#define I915_UFENCE_WAIT_SOFT    0x1
> +#define I915_UFENCE_WAIT_ABSTIME 0x2
> +
> +	/** @value: Wakeup value */
> +	__u64 value;
> +
> +	/** @mask: Wakeup mask */
> +	__u64 mask;
> +#define I915_UFENCE_WAIT_U8     0xffu
> +#define I915_UFENCE_WAIT_U16    0xffffu
> +#define I915_UFENCE_WAIT_U32    0xfffffffful
> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
> +
> +	/**
> +	 * @timeout: Wait timeout in nanoseconds.
> +	 *
> +	 * If I915_UFENCE_WAIT_ABSTIME flag is set, then time timeout is the
> +	 * absolute time in nsec.
> +	 */
> +	__s64 timeout;
> +};
> -- 
> 2.21.0.rc0.32.g243a4c7e27
>
Tvrtko Ursulin June 10, 2022, 10:16 a.m. UTC | #27
On 09/06/2022 19:53, Niranjana Vishwanathapura wrote:
> On Thu, Jun 09, 2022 at 09:36:48AM +0100, Matthew Auld wrote:
>> On 08/06/2022 22:32, Niranjana Vishwanathapura wrote:
>>> On Wed, Jun 08, 2022 at 10:12:05AM +0100, Matthew Auld wrote:
>>>> On 08/06/2022 08:17, Tvrtko Ursulin wrote:
>>>>>
>>>>> On 07/06/2022 20:37, Niranjana Vishwanathapura wrote:
>>>>>> On Tue, Jun 07, 2022 at 11:27:14AM +0100, Tvrtko Ursulin wrote:
>>>>>>>
>>>>>>> On 17/05/2022 19:32, Niranjana Vishwanathapura wrote:
>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>
>>>>>>>> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>>>     Also add new uapi and documentation as per review comments
>>>>>>>>     from Daniel.
>>>>>>>>
>>>>>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>>>> ---
>>>>>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>>>> +++++++++++++++++++++++++++
>>>>>>>>  1 file changed, 399 insertions(+)
>>>>>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>
>>>>>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>> new file mode 100644
>>>>>>>> index 000000000000..589c0a009107
>>>>>>>> --- /dev/null
>>>>>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>> @@ -0,0 +1,399 @@
>>>>>>>> +/* SPDX-License-Identifier: MIT */
>>>>>>>> +/*
>>>>>>>> + * Copyright © 2022 Intel Corporation
>>>>>>>> + */
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>> + *
>>>>>>>> + * VM_BIND feature availability.
>>>>>>>> + * See typedef drm_i915_getparam_t param.
>>>>>>>> + */
>>>>>>>> +#define I915_PARAM_HAS_VM_BIND        57
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>> + *
>>>>>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>>>> + * See struct drm_i915_gem_vm_control flags.
>>>>>>>> + *
>>>>>>>> + * A VM in VM_BIND mode will not support the older execbuff 
>>>>>>>> mode of binding.
>>>>>>>> + * In VM_BIND mode, execbuff ioctl will not accept any execlist 
>>>>>>>> (ie., the
>>>>>>>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must 
>>>>>>>> be provided
>>>>>>>> + * to pass in the batch buffer addresses.
>>>>>>>> + *
>>>>>>>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>>> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags 
>>>>>>>> must be 0
>>>>>>>> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>>>>> must always be
>>>>>>>> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>>> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>>>> batch_len fields
>>>>>>>> + * of struct drm_i915_gem_execbuffer2 are also not used and 
>>>>>>>> must be 0.
>>>>>>>> + */
>>>>>>>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>>>>>> + *
>>>>>>>> + * Flag to declare context as long running.
>>>>>>>> + * See struct drm_i915_gem_context_create_ext flags.
>>>>>>>> + *
>>>>>>>> + * Usage of dma-fence expects that they complete in reasonable 
>>>>>>>> amount of time.
>>>>>>>> + * Compute on the other hand can be long running. Hence it is 
>>>>>>>> not appropriate
>>>>>>>> + * for compute contexts to export request completion dma-fence 
>>>>>>>> to user.
>>>>>>>> + * The dma-fence usage will be limited to in-kernel consumption 
>>>>>>>> only.
>>>>>>>> + * Compute contexts need to use user/memory fence.
>>>>>>>> + *
>>>>>>>> + * So, long running contexts do not support output fences. Hence,
>>>>>>>> + * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
>>>>>>>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) 
>>>>>>>> are expected
>>>>>>>> + * to be not used.
>>>>>>>> + *
>>>>>>>> + * DRM_I915_GEM_WAIT ioctl call is also not supported for 
>>>>>>>> objects mapped
>>>>>>>> + * to long running contexts.
>>>>>>>> + */
>>>>>>>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>>>>>> +
>>>>>>>> +/* VM_BIND related ioctls */
>>>>>>>> +#define DRM_I915_GEM_VM_BIND        0x3d
>>>>>>>> +#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>>>>>> +#define DRM_I915_GEM_WAIT_USER_FENCE    0x3f
>>>>>>>> +
>>>>>>>> +#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>>>> DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>>>>>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE 
>>>>>>>> + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>>>>>>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct 
>>>>>>>> drm_i915_gem_wait_user_fence)
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>>>>>> + *
>>>>>>>> + * This structure is passed to VM_BIND ioctl and specifies the 
>>>>>>>> mapping of GPU
>>>>>>>> + * virtual address (VA) range to the section of an object that 
>>>>>>>> should be bound
>>>>>>>> + * in the device page table of the specified address space (VM).
>>>>>>>> + * The VA range specified must be unique (ie., not currently 
>>>>>>>> bound) and can
>>>>>>>> + * be mapped to whole object or a section of the object 
>>>>>>>> (partial binding).
>>>>>>>> + * Multiple VA mappings can be created to the same section of 
>>>>>>>> the object
>>>>>>>> + * (aliasing).
>>>>>>>> + */
>>>>>>>> +struct drm_i915_gem_vm_bind {
>>>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>>>> +    __u32 vm_id;
>>>>>>>> +
>>>>>>>> +    /** @handle: Object handle */
>>>>>>>> +    __u32 handle;
>>>>>>>> +
>>>>>>>> +    /** @start: Virtual Address start to bind */
>>>>>>>> +    __u64 start;
>>>>>>>> +
>>>>>>>> +    /** @offset: Offset in object to bind */
>>>>>>>> +    __u64 offset;
>>>>>>>> +
>>>>>>>> +    /** @length: Length of mapping to bind */
>>>>>>>> +    __u64 length;
>>>>>>>
>>>>>>> Does it support, or should it, equivalent of 
>>>>>>> EXEC_OBJECT_PAD_TO_SIZE? Or if not userspace is expected to map 
>>>>>>> the remainder of the space to a dummy object? In which case would 
>>>>>>> there be any alignment/padding issues preventing the two bind to 
>>>>>>> be placed next to each other?
>>>>>>>
>>>>>>> I ask because someone from the compute side asked me about a 
>>>>>>> problem with their strategy of dealing with overfetch and I 
>>>>>>> suggested pad to size.
>>>>>>>
>>>>>>
>>>>>> Thanks Tvrtko,
>>>>>> I think we shouldn't be needing it. As with VM_BIND VA assignment
>>>>>> is completely pushed to userspace, no padding should be necessary
>>>>>> once the 'start' and 'size' alignment conditions are met.
>>>>>>
>>>>>> I will add some documentation on alignment requirement here.
>>>>>> Generally, 'start' and 'size' should be 4K aligned. But, I think
>>>>>> when we have 64K lmem page sizes (dg2 and xehpsdv), they need to
>>>>>> be 64K aligned.
>>>>>
>>>>> + Matt
>>>>>
>>>>> Align to 64k is enough for all overfetch issues?
>>>>>
>>>>> Apparently compute has a situation where a buffer is received by 
>>>>> one component and another has to apply more alignment to it, to 
>>>>> deal with overfetch. Since they cannot grow the actual BO if they 
>>>>> wanted to VM_BIND a scratch area on top? Or perhaps none of this is 
>>>>> a problem on discrete and original BO should be correctly allocated 
>>>>> to start with.
>>>>>
>>>>> Side question - what about the align to 2MiB mentioned in 
>>>>> i915_vma_insert to avoid mixing 4k and 64k PTEs? That does not 
>>>>> apply to discrete?
>>>>
>>>> Not sure about the overfetch thing, but yeah dg2 & xehpsdv both 
>>>> require a minimum of 64K pages underneath for local memory, and the 
>>>> BO size will also be rounded up accordingly. And yeah the 
>>>> complication arises due to not being able to mix 4K + 64K GTT pages 
>>>> within the same page-table (existed since even gen8). Note that 4K 
>>>> here is what we typically get for system memory.
>>>>
>>>> Originally we had a memory coloring scheme to track the "color" of 
>>>> each page-table, which basically ensures that userspace can't do 
>>>> something nasty like mixing page sizes. The advantage of that scheme 
>>>> is that we would only require 64K GTT alignment and no extra 
>>>> padding, but is perhaps a little complex.
>>>>
>>>> The merged solution is just to align and pad (i.e vma->node.size and 
>>>> not vma->size) out of the vma to 2M, which is dead simple 
>>>> implementation wise, but does potentially waste some GTT space and 
>>>> some of the local memory used for the actual page-table. For the 
>>>> alignment the kernel just validates that the GTT address is aligned 
>>>> to 2M in vma_insert(), and then for the padding it just inflates it 
>>>> to 2M, if userspace hasn't already.
>>>>
>>>> See the kernel-doc for @size: 
>>>> https://dri.freedesktop.org/docs/drm/gpu/driver-uapi.html?#c.drm_i915_gem_create_ext 
>>>>
>>>>
>>>>
>>>
>>> Ok, those requirements (2M VA alignment) will apply to VM_BIND also.
>>> This is unfortunate, but it is not something new enforced by VM_BIND.
>>> Other option is to go with 64K alignment and in VM_BIND case, user
>>> must ensure there is no mix-matching of 64K (lmem) and 4k (smem)
>>> mappings in the same 2M range. But this is not VM_BIND specific
>>> (will apply to soft-pinning in execbuf2 also).
>>>
>>> I don't think we need any VA padding here as with VM_BIND VA is
>>> managed fully by the user. If we enforce VA to be 2M aligned, it
>>> will leave holes (if BOs are smaller than 2M), but nobody is going
>>> to allocate anything from there.
>>
>> Note that we only apply the 2M alignment + padding for local memory 
>> pages, for system memory we don't have/need such restrictions. The VA 
>> padding then importantly prevents userspace from incorrectly (or 
>> maliciously) inserting 4K system memory object in some page-table 
>> operating in 64K GTT mode.
>>
> 
> Thanks Matt.
> I also, synced offline with Matt a bit on this.
> We don't need explicit 'pad_to_size' size. i915 driver is implicitly
> padding the size to 2M boundary for LMEM BOs which will apply for
> VM_BIND also.
> The remaining question is whether we enforce 2M VA alignment for
> lmem BOs (just like legacy execbuff path) on dg2 & xehpsdv, or go with
> just 64K alignment but ensure there is no mixing of 4K and 64K

"Driver is implicitly padding the size to 2MB boundary" - this is the 
backing store?

> mappings in same 2M range. I think we can go with 2M alignment
> requirement for VM_BIND also. So, no new requirements here for VM_BIND.

Are there any considerations here of letting the userspace know? 
Presumably userspace allocator has to know or it would try to ask for 
impossible addresses.

Regards,

Tvrtko

> 
> I will update the documentation.
> 
> Niranjana
> 
>>>
>>> Niranjana
>>>
>>>>>
>>>>> Regards,
>>>>>
>>>>> Tvrtko
>>>>>
>>>>>>
>>>>>> Niranjana
>>>>>>
>>>>>>> Regards,
>>>>>>>
>>>>>>> Tvrtko
>>>>>>>
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @flags: Supported flags are,
>>>>>>>> +     *
>>>>>>>> +     * I915_GEM_VM_BIND_READONLY:
>>>>>>>> +     * Mapping is read-only.
>>>>>>>> +     *
>>>>>>>> +     * I915_GEM_VM_BIND_CAPTURE:
>>>>>>>> +     * Capture this mapping in the dump upon GPU error.
>>>>>>>> +     */
>>>>>>>> +    __u64 flags;
>>>>>>>> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>>>>>> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>>>>>> +
>>>>>>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>>>>>>> mapping. */
>>>>>>>> +    __u64 extensions;
>>>>>>>> +};
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
>>>>>>>> + *
>>>>>>>> + * This structure is passed to VM_UNBIND ioctl and specifies 
>>>>>>>> the GPU virtual
>>>>>>>> + * address (VA) range that should be unbound from the device 
>>>>>>>> page table of the
>>>>>>>> + * specified address space (VM). The specified VA range must 
>>>>>>>> match one of the
>>>>>>>> + * mappings created with the VM_BIND ioctl. TLB is flushed upon 
>>>>>>>> unbind
>>>>>>>> + * completion.
>>>>>>>> + */
>>>>>>>> +struct drm_i915_gem_vm_unbind {
>>>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>>>> +    __u32 vm_id;
>>>>>>>> +
>>>>>>>> +    /** @rsvd: Reserved for future use; must be zero. */
>>>>>>>> +    __u32 rsvd;
>>>>>>>> +
>>>>>>>> +    /** @start: Virtual Address start to unbind */
>>>>>>>> +    __u64 start;
>>>>>>>> +
>>>>>>>> +    /** @length: Length of mapping to unbind */
>>>>>>>> +    __u64 length;
>>>>>>>> +
>>>>>>>> +    /** @flags: reserved for future usage, currently MBZ */
>>>>>>>> +    __u64 flags;
>>>>>>>> +
>>>>>>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>>>>>>> mapping. */
>>>>>>>> +    __u64 extensions;
>>>>>>>> +};
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_vm_bind_fence - An input or output fence for 
>>>>>>>> the vm_bind
>>>>>>>> + * or the vm_unbind work.
>>>>>>>> + *
>>>>>>>> + * The vm_bind or vm_unbind async worker will wait for input 
>>>>>>>> fence to signal
>>>>>>>> + * before starting the binding or unbinding.
>>>>>>>> + *
>>>>>>>> + * The vm_bind or vm_unbind async worker will signal the 
>>>>>>>> returned output fence
>>>>>>>> + * after the completion of binding or unbinding.
>>>>>>>> + */
>>>>>>>> +struct drm_i915_vm_bind_fence {
>>>>>>>> +    /** @handle: User's handle for a drm_syncobj to wait on or 
>>>>>>>> signal. */
>>>>>>>> +    __u32 handle;
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @flags: Supported flags are,
>>>>>>>> +     *
>>>>>>>> +     * I915_VM_BIND_FENCE_WAIT:
>>>>>>>> +     * Wait for the input fence before binding/unbinding
>>>>>>>> +     *
>>>>>>>> +     * I915_VM_BIND_FENCE_SIGNAL:
>>>>>>>> +     * Return bind/unbind completion fence as output
>>>>>>>> +     */
>>>>>>>> +    __u32 flags;
>>>>>>>> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>>>>>> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>>>>>> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>>>>>>> (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>>>>>> +};
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline 
>>>>>>>> fences for vm_bind
>>>>>>>> + * and vm_unbind.
>>>>>>>> + *
>>>>>>>> + * This structure describes an array of timeline drm_syncobj 
>>>>>>>> and associated
>>>>>>>> + * points for timeline variants of drm_syncobj. These timeline 
>>>>>>>> 'drm_syncobj's
>>>>>>>> + * can be input or output fences (See struct 
>>>>>>>> drm_i915_vm_bind_fence).
>>>>>>>> + */
>>>>>>>> +struct drm_i915_vm_bind_ext_timeline_fences {
>>>>>>>> +#define I915_VM_BIND_EXT_timeline_FENCES    0
>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>> +    struct i915_user_extension base;
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @fence_count: Number of elements in the @handles_ptr & 
>>>>>>>> @value_ptr
>>>>>>>> +     * arrays.
>>>>>>>> +     */
>>>>>>>> +    __u64 fence_count;
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @handles_ptr: Pointer to an array of struct 
>>>>>>>> drm_i915_vm_bind_fence
>>>>>>>> +     * of length @fence_count.
>>>>>>>> +     */
>>>>>>>> +    __u64 handles_ptr;
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @values_ptr: Pointer to an array of u64 values of length
>>>>>>>> +     * @fence_count.
>>>>>>>> +     * Values must be 0 for a binary drm_syncobj. A Value of 0 
>>>>>>>> for a
>>>>>>>> +     * timeline drm_syncobj is invalid as it turns a 
>>>>>>>> drm_syncobj into a
>>>>>>>> +     * binary one.
>>>>>>>> +     */
>>>>>>>> +    __u64 values_ptr;
>>>>>>>> +};
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_vm_bind_user_fence - An input or output user 
>>>>>>>> fence for the
>>>>>>>> + * vm_bind or the vm_unbind work.
>>>>>>>> + *
>>>>>>>> + * The vm_bind or vm_unbind async worker will wait for the input 
>>>>>>>> fence (value at
>>>>>>>> + * @addr to become equal to @val) before starting the binding 
>>>>>>>> or unbinding.
>>>>>>>> + *
>>>>>>>> + * The vm_bind or vm_unbind async worker will signal the output 
>>>>>>>> fence after
>>>>>>>> + * the completion of binding or unbinding by writing @val to 
>>>>>>>> memory location at
>>>>>>>> + * @addr
>>>>>>>> + */
>>>>>>>> +struct drm_i915_vm_bind_user_fence {
>>>>>>>> +    /** @addr: User/Memory fence qword aligned process virtual 
>>>>>>>> address */
>>>>>>>> +    __u64 addr;
>>>>>>>> +
>>>>>>>> +    /** @val: User/Memory fence value to be written after bind 
>>>>>>>> completion */
>>>>>>>> +    __u64 val;
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @flags: Supported flags are,
>>>>>>>> +     *
>>>>>>>> +     * I915_VM_BIND_USER_FENCE_WAIT:
>>>>>>>> +     * Wait for the input fence before binding/unbinding
>>>>>>>> +     *
>>>>>>>> +     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>>>>>> +     * Return bind/unbind completion fence as output
>>>>>>>> +     */
>>>>>>>> +    __u32 flags;
>>>>>>>> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>>>>>> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>>>>>> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>>>>>> +    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>>>>>> +};
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences 
>>>>>>>> for vm_bind
>>>>>>>> + * and vm_unbind.
>>>>>>>> + *
>>>>>>>> + * These user fences can be input or output fences
>>>>>>>> + * (See struct drm_i915_vm_bind_user_fence).
>>>>>>>> + */
>>>>>>>> +struct drm_i915_vm_bind_ext_user_fence {
>>>>>>>> +#define I915_VM_BIND_EXT_USER_FENCES    1
>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>> +    struct i915_user_extension base;
>>>>>>>> +
>>>>>>>> +    /** @fence_count: Number of elements in the @user_fence_ptr 
>>>>>>>> array. */
>>>>>>>> +    __u64 fence_count;
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @user_fence_ptr: Pointer to an array of
>>>>>>>> +     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>>>>>> +     */
>>>>>>>> +    __u64 user_fence_ptr;
>>>>>>>> +};
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array 
>>>>>>>> of batch buffer
>>>>>>>> + * gpu virtual addresses.
>>>>>>>> + *
>>>>>>>> + * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), 
>>>>>>>> this extension
>>>>>>>> + * must always be appended in the VM_BIND mode and it will be 
>>>>>>>> an error to
>>>>>>>> + * append this extension in older non-VM_BIND mode.
>>>>>>>> + */
>>>>>>>> +struct drm_i915_gem_execbuffer_ext_batch_addresses {
>>>>>>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES    1
>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>> +    struct i915_user_extension base;
>>>>>>>> +
>>>>>>>> +    /** @count: Number of addresses in the addr array. */
>>>>>>>> +    __u32 count;
>>>>>>>> +
>>>>>>>> +    /** @addr: An array of batch gpu virtual addresses. */
>>>>>>>> +    __u64 addr[0];
>>>>>>>> +};
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_gem_execbuffer_ext_user_fence - First level 
>>>>>>>> batch completion
>>>>>>>> + * signaling extension.
>>>>>>>> + *
>>>>>>>> + * This extension allows user to attach a user fence (@addr, 
>>>>>>>> @value pair) to an
>>>>>>>> + * execbuf to be signaled by the command streamer after the 
>>>>>>>> completion of first
>>>>>>>> + * level batch, by writing the @value at specified @addr and 
>>>>>>>> triggering an
>>>>>>>> + * interrupt.
>>>>>>>> + * User can either poll for this user fence to signal or can 
>>>>>>>> also wait on it
>>>>>>>> + * with i915_gem_wait_user_fence ioctl.
>>>>>>>> + * This is very much useful for long running contexts where 
>>>>>>>> waiting on dma-fence
>>>>>>>> + * by user (like i915_gem_wait ioctl) is not supported.
>>>>>>>> + */
>>>>>>>> +struct drm_i915_gem_execbuffer_ext_user_fence {
>>>>>>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE        2
>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>> +    struct i915_user_extension base;
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @addr: User/Memory fence qword aligned GPU virtual address.
>>>>>>>> +     *
>>>>>>>> +     * Address has to be a valid GPU virtual address at the 
>>>>>>>> time of
>>>>>>>> +     * first level batch completion.
>>>>>>>> +     */
>>>>>>>> +    __u64 addr;
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @value: User/Memory fence Value to be written to above 
>>>>>>>> address
>>>>>>>> +     * after first level batch completes.
>>>>>>>> +     */
>>>>>>>> +    __u64 value;
>>>>>>>> +
>>>>>>>> +    /** @rsvd: Reserved for future extensions, MBZ */
>>>>>>>> +    __u64 rsvd;
>>>>>>>> +};
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_gem_create_ext_vm_private - Extension to 
>>>>>>>> make the object
>>>>>>>> + * private to the specified VM.
>>>>>>>> + *
>>>>>>>> + * See struct drm_i915_gem_create_ext.
>>>>>>>> + */
>>>>>>>> +struct drm_i915_gem_create_ext_vm_private {
>>>>>>>> +#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>> +    struct i915_user_extension base;
>>>>>>>> +
>>>>>>>> +    /** @vm_id: Id of the VM to which the object is private */
>>>>>>>> +    __u32 vm_id;
>>>>>>>> +};
>>>>>>>> +
>>>>>>>> +/**
>>>>>>>> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory 
>>>>>>>> fence.
>>>>>>>> + *
>>>>>>>> + * User/Memory fence can be woken up either by:
>>>>>>>> + *
>>>>>>>> + * 1. GPU context indicated by @ctx_id, or,
>>>>>>>> + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>>>>>> + *    @ctx_id is ignored when this flag is set.
>>>>>>>> + *
>>>>>>>> + * Wakeup condition is,
>>>>>>>> + * ``((*addr & mask) op (value & mask))``
>>>>>>>> + *
>>>>>>>> + * See :ref:`Documentation/driver-api/dma-buf.rst 
>>>>>>>> <indefinite_dma_fences>`
>>>>>>>> + */
>>>>>>>> +struct drm_i915_gem_wait_user_fence {
>>>>>>>> +    /** @extensions: Zero-terminated chain of extensions. */
>>>>>>>> +    __u64 extensions;
>>>>>>>> +
>>>>>>>> +    /** @addr: User/Memory fence address */
>>>>>>>> +    __u64 addr;
>>>>>>>> +
>>>>>>>> +    /** @ctx_id: Id of the Context which will signal the fence. */
>>>>>>>> +    __u32 ctx_id;
>>>>>>>> +
>>>>>>>> +    /** @op: Wakeup condition operator */
>>>>>>>> +    __u16 op;
>>>>>>>> +#define I915_UFENCE_WAIT_EQ      0
>>>>>>>> +#define I915_UFENCE_WAIT_NEQ     1
>>>>>>>> +#define I915_UFENCE_WAIT_GT      2
>>>>>>>> +#define I915_UFENCE_WAIT_GTE     3
>>>>>>>> +#define I915_UFENCE_WAIT_LT      4
>>>>>>>> +#define I915_UFENCE_WAIT_LTE     5
>>>>>>>> +#define I915_UFENCE_WAIT_BEFORE  6
>>>>>>>> +#define I915_UFENCE_WAIT_AFTER   7
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @flags: Supported flags are,
>>>>>>>> +     *
>>>>>>>> +     * I915_UFENCE_WAIT_SOFT:
>>>>>>>> +     *
>>>>>>>> +     * To be woken up by i915 driver async worker (not by GPU).
>>>>>>>> +     *
>>>>>>>> +     * I915_UFENCE_WAIT_ABSTIME:
>>>>>>>> +     *
>>>>>>>> +     * Wait timeout specified as absolute time.
>>>>>>>> +     */
>>>>>>>> +    __u16 flags;
>>>>>>>> +#define I915_UFENCE_WAIT_SOFT    0x1
>>>>>>>> +#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>>>>>> +
>>>>>>>> +    /** @value: Wakeup value */
>>>>>>>> +    __u64 value;
>>>>>>>> +
>>>>>>>> +    /** @mask: Wakeup mask */
>>>>>>>> +    __u64 mask;
>>>>>>>> +#define I915_UFENCE_WAIT_U8     0xffu
>>>>>>>> +#define I915_UFENCE_WAIT_U16    0xffffu
>>>>>>>> +#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>>>>>> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>>>>>> +
>>>>>>>> +    /**
>>>>>>>> +     * @timeout: Wait timeout in nanoseconds.
>>>>>>>> +     *
>>>>>>>> +     * If I915_UFENCE_WAIT_ABSTIME flag is set, then time 
>>>>>>>> timeout is the
>>>>>>>> +     * absolute time in nsec.
>>>>>>>> +     */
>>>>>>>> +    __s64 timeout;
>>>>>>>> +};
Matthew Auld June 10, 2022, 10:32 a.m. UTC | #28
On 10/06/2022 11:16, Tvrtko Ursulin wrote:
> 
> On 09/06/2022 19:53, Niranjana Vishwanathapura wrote:
>> On Thu, Jun 09, 2022 at 09:36:48AM +0100, Matthew Auld wrote:
>>> On 08/06/2022 22:32, Niranjana Vishwanathapura wrote:
>>>> On Wed, Jun 08, 2022 at 10:12:05AM +0100, Matthew Auld wrote:
>>>>> On 08/06/2022 08:17, Tvrtko Ursulin wrote:
>>>>>>
>>>>>> On 07/06/2022 20:37, Niranjana Vishwanathapura wrote:
>>>>>>> On Tue, Jun 07, 2022 at 11:27:14AM +0100, Tvrtko Ursulin wrote:
>>>>>>>>
>>>>>>>> On 17/05/2022 19:32, Niranjana Vishwanathapura wrote:
>>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>>
>>>>>>>>> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>>>>     Also add new uapi and documentation as per review comments
>>>>>>>>>     from Daniel.
>>>>>>>>>
>>>>>>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>>>>> ---
>>>>>>>>>  Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>>>>> +++++++++++++++++++++++++++
>>>>>>>>>  1 file changed, 399 insertions(+)
>>>>>>>>>  create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>
>>>>>>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>> new file mode 100644
>>>>>>>>> index 000000000000..589c0a009107
>>>>>>>>> --- /dev/null
>>>>>>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>> @@ -0,0 +1,399 @@
>>>>>>>>> +/* SPDX-License-Identifier: MIT */
>>>>>>>>> +/*
>>>>>>>>> + * Copyright © 2022 Intel Corporation
>>>>>>>>> + */
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>>> + *
>>>>>>>>> + * VM_BIND feature availability.
>>>>>>>>> + * See typedef drm_i915_getparam_t param.
>>>>>>>>> + */
>>>>>>>>> +#define I915_PARAM_HAS_VM_BIND        57
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>>> + *
>>>>>>>>> + * Flag to opt-in for VM_BIND mode of binding during VM creation.
>>>>>>>>> + * See struct drm_i915_gem_vm_control flags.
>>>>>>>>> + *
>>>>>>>>> + * A VM in VM_BIND mode will not support the older execbuff 
>>>>>>>>> mode of binding.
>>>>>>>>> + * In VM_BIND mode, execbuff ioctl will not accept any 
>>>>>>>>> execlist (ie., the
>>>>>>>>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>>>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>>>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>>>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must 
>>>>>>>>> be provided
>>>>>>>>> + * to pass in the batch buffer addresses.
>>>>>>>>> + *
>>>>>>>>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>>>> + * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags 
>>>>>>>>> must be 0
>>>>>>>>> + * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag 
>>>>>>>>> must always be
>>>>>>>>> + * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>>>> + * The buffers_ptr, buffer_count, batch_start_offset and 
>>>>>>>>> batch_len fields
>>>>>>>>> + * of struct drm_i915_gem_execbuffer2 are also not used and 
>>>>>>>>> must be 0.
>>>>>>>>> + */
>>>>>>>>> +#define I915_VM_CREATE_FLAGS_USE_VM_BIND    (1 << 0)
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
>>>>>>>>> + *
>>>>>>>>> + * Flag to declare context as long running.
>>>>>>>>> + * See struct drm_i915_gem_context_create_ext flags.
>>>>>>>>> + *
>>>>>>>>> + * Usage of dma-fence expects that they complete in reasonable 
>>>>>>>>> amount of time.
>>>>>>>>> + * Compute on the other hand can be long running. Hence it is 
>>>>>>>>> not appropriate
>>>>>>>>> + * for compute contexts to export request completion dma-fence 
>>>>>>>>> to user.
>>>>>>>>> + * The dma-fence usage will be limited to in-kernel 
>>>>>>>>> consumption only.
>>>>>>>>> + * Compute contexts need to use user/memory fence.
>>>>>>>>> + *
>>>>>>>>> + * So, long running contexts do not support output fences. Hence,
>>>>>>>>> + * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
>>>>>>>>> + * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) 
>>>>>>>>> are expected
>>>>>>>>> + * to be not used.
>>>>>>>>> + *
>>>>>>>>> + * DRM_I915_GEM_WAIT ioctl call is also not supported for 
>>>>>>>>> objects mapped
>>>>>>>>> + * to long running contexts.
>>>>>>>>> + */
>>>>>>>>> +#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
>>>>>>>>> +
>>>>>>>>> +/* VM_BIND related ioctls */
>>>>>>>>> +#define DRM_I915_GEM_VM_BIND        0x3d
>>>>>>>>> +#define DRM_I915_GEM_VM_UNBIND        0x3e
>>>>>>>>> +#define DRM_I915_GEM_WAIT_USER_FENCE    0x3f
>>>>>>>>> +
>>>>>>>>> +#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + 
>>>>>>>>> DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
>>>>>>>>> +#define DRM_IOCTL_I915_GEM_VM_UNBIND DRM_IOWR(DRM_COMMAND_BASE 
>>>>>>>>> + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
>>>>>>>>> +#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE 
>>>>>>>>> DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, 
>>>>>>>>> struct drm_i915_gem_wait_user_fence)
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
>>>>>>>>> + *
>>>>>>>>> + * This structure is passed to VM_BIND ioctl and specifies the 
>>>>>>>>> mapping of GPU
>>>>>>>>> + * virtual address (VA) range to the section of an object that 
>>>>>>>>> should be bound
>>>>>>>>> + * in the device page table of the specified address space (VM).
>>>>>>>>> + * The VA range specified must be unique (ie., not currently 
>>>>>>>>> bound) and can
>>>>>>>>> + * be mapped to whole object or a section of the object 
>>>>>>>>> (partial binding).
>>>>>>>>> + * Multiple VA mappings can be created to the same section of 
>>>>>>>>> the object
>>>>>>>>> + * (aliasing).
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_gem_vm_bind {
>>>>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>>>>> +    __u32 vm_id;
>>>>>>>>> +
>>>>>>>>> +    /** @handle: Object handle */
>>>>>>>>> +    __u32 handle;
>>>>>>>>> +
>>>>>>>>> +    /** @start: Virtual Address start to bind */
>>>>>>>>> +    __u64 start;
>>>>>>>>> +
>>>>>>>>> +    /** @offset: Offset in object to bind */
>>>>>>>>> +    __u64 offset;
>>>>>>>>> +
>>>>>>>>> +    /** @length: Length of mapping to bind */
>>>>>>>>> +    __u64 length;
>>>>>>>>
>>>>>>>> Does it support, or should it, equivalent of 
>>>>>>>> EXEC_OBJECT_PAD_TO_SIZE? Or if not userspace is expected to map 
>>>>>>>> the remainder of the space to a dummy object? In which case 
>>>>>>>> would there be any alignment/padding issues preventing the two 
>>>>>>>> bind to be placed next to each other?
>>>>>>>>
>>>>>>>> I ask because someone from the compute side asked me about a 
>>>>>>>> problem with their strategy of dealing with overfetch and I 
>>>>>>>> suggested pad to size.
>>>>>>>>
>>>>>>>
>>>>>>> Thanks Tvrtko,
>>>>>>> I think we shouldn't be needing it. As with VM_BIND VA assignment
>>>>>>> is completely pushed to userspace, no padding should be necessary
>>>>>>> once the 'start' and 'size' alignment conditions are met.
>>>>>>>
>>>>>>> I will add some documentation on alignment requirement here.
>>>>>>> Generally, 'start' and 'size' should be 4K aligned. But, I think
>>>>>>> when we have 64K lmem page sizes (dg2 and xehpsdv), they need to
>>>>>>> be 64K aligned.
>>>>>>
>>>>>> + Matt
>>>>>>
>>>>>> Align to 64k is enough for all overfetch issues?
>>>>>>
>>>>>> Apparently compute has a situation where a buffer is received by 
>>>>>> one component and another has to apply more alignment to it, to 
>>>>>> deal with overfetch. Since they cannot grow the actual BO if they 
>>>>>> wanted to VM_BIND a scratch area on top? Or perhaps none of this 
>>>>>> is a problem on discrete and original BO should be correctly 
>>>>>> allocated to start with.
>>>>>>
>>>>>> Side question - what about the align to 2MiB mentioned in 
>>>>>> i915_vma_insert to avoid mixing 4k and 64k PTEs? That does not 
>>>>>> apply to discrete?
>>>>>
>>>>> Not sure about the overfetch thing, but yeah dg2 & xehpsdv both 
>>>>> require a minimum of 64K pages underneath for local memory, and the 
>>>>> BO size will also be rounded up accordingly. And yeah the 
>>>>> complication arises due to not being able to mix 4K + 64K GTT pages 
>>>>> within the same page-table (existed since even gen8). Note that 4K 
>>>>> here is what we typically get for system memory.
>>>>>
>>>>> Originally we had a memory coloring scheme to track the "color" of 
>>>>> each page-table, which basically ensures that userspace can't do 
>>>>> something nasty like mixing page sizes. The advantage of that 
>>>>> scheme is that we would only require 64K GTT alignment and no extra 
>>>>> padding, but is perhaps a little complex.
>>>>>
>>>>> The merged solution is just to align and pad (i.e vma->node.size 
>>>>> and not vma->size) out of the vma to 2M, which is dead simple 
>>>>> implementation wise, but does potentially waste some GTT space and 
>>>>> some of the local memory used for the actual page-table. For the 
>>>>> alignment the kernel just validates that the GTT address is aligned 
>>>>> to 2M in vma_insert(), and then for the padding it just inflates it 
>>>>> to 2M, if userspace hasn't already.
>>>>>
>>>>> See the kernel-doc for @size: 
>>>>> https://dri.freedesktop.org/docs/drm/gpu/driver-uapi.html?#c.drm_i915_gem_create_ext 
>>>>>
>>>>>
>>>>>
>>>>
>>>> Ok, those requirements (2M VA alignment) will apply to VM_BIND also.
>>>> This is unfortunate, but it is not something new enforced by VM_BIND.
>>>> Other option is to go with 64K alignment and in VM_BIND case, user
>>>> must ensure there is no mix-matching of 64K (lmem) and 4k (smem)
>>>> mappings in the same 2M range. But this is not VM_BIND specific
>>>> (will apply to soft-pinning in execbuf2 also).
>>>>
>>>> I don't think we need any VA padding here as with VM_BIND VA is
>>>> managed fully by the user. If we enforce VA to be 2M aligned, it
>>>> will leave holes (if BOs are smaller than 2M), but nobody is going
>>>> to allocate anything from there.
>>>
>>> Note that we only apply the 2M alignment + padding for local memory 
>>> pages, for system memory we don't have/need such restrictions. The VA 
>>> padding then importantly prevents userspace from incorrectly (or 
>>> maliciously) inserting 4K system memory object in some page-table 
>>> operating in 64K GTT mode.
>>>
>>
>> Thanks Matt.
>> I also, synced offline with Matt a bit on this.
>> We don't need explicit 'pad_to_size' size. i915 driver is implicitly
>> padding the size to 2M boundary for LMEM BOs which will apply for
>> VM_BIND also.
>> The remaining question is whether we enforce 2M VA alignment for
>> lmem BOs (just like legacy execbuff path) on dg2 & xehpsdv, or go with
>> just 64K alignment but ensure there is no mixing of 4K and 64K
> 
> "Driver is implicitly padding the size to 2MB boundary" - this is the 
> backing store?

Just the GTT space, i.e vma->node.size. Backing store just needs to use 
64K pages.

> 
>> mappings in same 2M range. I think we can go with 2M alignment
>> requirement for VM_BIND also. So, no new requirements here for VM_BIND.
> 
> Are there any considerations here of letting the userspace know? 
> Presumably userspace allocator has to know or it would try to ask for 
> impossible addresses.

It's the existing behaviour with execbuf, so I assume userspace must 
already get this right, on platforms like dg2.

> 
> Regards,
> 
> Tvrtko
> 
>>
>> I will update the documentation.
>>
>> Niranjana
>>
>>>>
>>>> Niranjana
>>>>
>>>>>>
>>>>>> Regards,
>>>>>>
>>>>>> Tvrtko
>>>>>>
>>>>>>>
>>>>>>> Niranjana
>>>>>>>
>>>>>>>> Regards,
>>>>>>>>
>>>>>>>> Tvrtko
>>>>>>>>
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @flags: Supported flags are,
>>>>>>>>> +     *
>>>>>>>>> +     * I915_GEM_VM_BIND_READONLY:
>>>>>>>>> +     * Mapping is read-only.
>>>>>>>>> +     *
>>>>>>>>> +     * I915_GEM_VM_BIND_CAPTURE:
>>>>>>>>> +     * Capture this mapping in the dump upon GPU error.
>>>>>>>>> +     */
>>>>>>>>> +    __u64 flags;
>>>>>>>>> +#define I915_GEM_VM_BIND_READONLY    (1 << 0)
>>>>>>>>> +#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
>>>>>>>>> +
>>>>>>>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>>>>>>>> mapping. */
>>>>>>>>> +    __u64 extensions;
>>>>>>>>> +};
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_gem_vm_unbind - VA to object mapping to 
>>>>>>>>> unbind.
>>>>>>>>> + *
>>>>>>>>> + * This structure is passed to VM_UNBIND ioctl and specifies 
>>>>>>>>> the GPU virtual
>>>>>>>>> + * address (VA) range that should be unbound from the device 
>>>>>>>>> page table of the
>>>>>>>>> + * specified address space (VM). The specified VA range must 
>>>>>>>>> match one of the
>>>>>>>>> + * mappings created with the VM_BIND ioctl. TLB is flushed 
>>>>>>>>> upon unbind
>>>>>>>>> + * completion.
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_gem_vm_unbind {
>>>>>>>>> +    /** @vm_id: VM (address space) id to bind */
>>>>>>>>> +    __u32 vm_id;
>>>>>>>>> +
>>>>>>>>> +    /** @rsvd: Reserved for future use; must be zero. */
>>>>>>>>> +    __u32 rsvd;
>>>>>>>>> +
>>>>>>>>> +    /** @start: Virtual Address start to unbind */
>>>>>>>>> +    __u64 start;
>>>>>>>>> +
>>>>>>>>> +    /** @length: Length of mapping to unbind */
>>>>>>>>> +    __u64 length;
>>>>>>>>> +
>>>>>>>>> +    /** @flags: reserved for future usage, currently MBZ */
>>>>>>>>> +    __u64 flags;
>>>>>>>>> +
>>>>>>>>> +    /** @extensions: 0-terminated chain of extensions for this 
>>>>>>>>> mapping. */
>>>>>>>>> +    __u64 extensions;
>>>>>>>>> +};
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_vm_bind_fence - An input or output fence 
>>>>>>>>> for the vm_bind
>>>>>>>>> + * or the vm_unbind work.
>>>>>>>>> + *
>>>>>>>>> + * The vm_bind or vm_unbind async worker will wait for input 
>>>>>>>>> fence to signal
>>>>>>>>> + * before starting the binding or unbinding.
>>>>>>>>> + *
>>>>>>>>> + * The vm_bind or vm_unbind async worker will signal the 
>>>>>>>>> returned output fence
>>>>>>>>> + * after the completion of binding or unbinding.
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_vm_bind_fence {
>>>>>>>>> +    /** @handle: User's handle for a drm_syncobj to wait on or 
>>>>>>>>> signal. */
>>>>>>>>> +    __u32 handle;
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @flags: Supported flags are,
>>>>>>>>> +     *
>>>>>>>>> +     * I915_VM_BIND_FENCE_WAIT:
>>>>>>>>> +     * Wait for the input fence before binding/unbinding
>>>>>>>>> +     *
>>>>>>>>> +     * I915_VM_BIND_FENCE_SIGNAL:
>>>>>>>>> +     * Return bind/unbind completion fence as output
>>>>>>>>> +     */
>>>>>>>>> +    __u32 flags;
>>>>>>>>> +#define I915_VM_BIND_FENCE_WAIT            (1<<0)
>>>>>>>>> +#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
>>>>>>>>> +#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS 
>>>>>>>>> (-(I915_VM_BIND_FENCE_SIGNAL << 1))
>>>>>>>>> +};
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_vm_bind_ext_timeline_fences - Timeline 
>>>>>>>>> fences for vm_bind
>>>>>>>>> + * and vm_unbind.
>>>>>>>>> + *
>>>>>>>>> + * This structure describes an array of timeline drm_syncobj 
>>>>>>>>> and associated
>>>>>>>>> + * points for timeline variants of drm_syncobj. These timeline 
>>>>>>>>> 'drm_syncobj's
>>>>>>>>> + * can be input or output fences (See struct 
>>>>>>>>> drm_i915_vm_bind_fence).
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_vm_bind_ext_timeline_fences {
>>>>>>>>> +#define I915_VM_BIND_EXT_timeline_FENCES    0
>>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>>> +    struct i915_user_extension base;
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @fence_count: Number of elements in the @handles_ptr & 
>>>>>>>>> @value_ptr
>>>>>>>>> +     * arrays.
>>>>>>>>> +     */
>>>>>>>>> +    __u64 fence_count;
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @handles_ptr: Pointer to an array of struct 
>>>>>>>>> drm_i915_vm_bind_fence
>>>>>>>>> +     * of length @fence_count.
>>>>>>>>> +     */
>>>>>>>>> +    __u64 handles_ptr;
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @values_ptr: Pointer to an array of u64 values of length
>>>>>>>>> +     * @fence_count.
>>>>>>>>> +     * Values must be 0 for a binary drm_syncobj. A Value of 0 
>>>>>>>>> for a
>>>>>>>>> +     * timeline drm_syncobj is invalid as it turns a 
>>>>>>>>> drm_syncobj into a
>>>>>>>>> +     * binary one.
>>>>>>>>> +     */
>>>>>>>>> +    __u64 values_ptr;
>>>>>>>>> +};
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_vm_bind_user_fence - An input or output 
>>>>>>>>> user fence for the
>>>>>>>>> + * vm_bind or the vm_unbind work.
>>>>>>>>> + *
>>>>>>>>> + * The vm_bind or vm_unbind async worker will wait for the 
>>>>>>>>> input fence (value at
>>>>>>>>> + * @addr to become equal to @val) before starting the binding 
>>>>>>>>> or unbinding.
>>>>>>>>> + *
>>>>>>>>> + * The vm_bind or vm_unbind async worker will signal the 
>>>>>>>>> output fence after
>>>>>>>>> + * the completion of binding or unbinding by writing @val to 
>>>>>>>>> memory location at
>>>>>>>>> + * @addr
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_vm_bind_user_fence {
>>>>>>>>> +    /** @addr: User/Memory fence qword aligned process virtual 
>>>>>>>>> address */
>>>>>>>>> +    __u64 addr;
>>>>>>>>> +
>>>>>>>>> +    /** @val: User/Memory fence value to be written after bind 
>>>>>>>>> completion */
>>>>>>>>> +    __u64 val;
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @flags: Supported flags are,
>>>>>>>>> +     *
>>>>>>>>> +     * I915_VM_BIND_USER_FENCE_WAIT:
>>>>>>>>> +     * Wait for the input fence before binding/unbinding
>>>>>>>>> +     *
>>>>>>>>> +     * I915_VM_BIND_USER_FENCE_SIGNAL:
>>>>>>>>> +     * Return bind/unbind completion fence as output
>>>>>>>>> +     */
>>>>>>>>> +    __u32 flags;
>>>>>>>>> +#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
>>>>>>>>> +#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
>>>>>>>>> +#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
>>>>>>>>> +    (-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
>>>>>>>>> +};
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_vm_bind_ext_user_fence - User/memory fences 
>>>>>>>>> for vm_bind
>>>>>>>>> + * and vm_unbind.
>>>>>>>>> + *
>>>>>>>>> + * These user fences can be input or output fences
>>>>>>>>> + * (See struct drm_i915_vm_bind_user_fence).
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_vm_bind_ext_user_fence {
>>>>>>>>> +#define I915_VM_BIND_EXT_USER_FENCES    1
>>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>>> +    struct i915_user_extension base;
>>>>>>>>> +
>>>>>>>>> +    /** @fence_count: Number of elements in the 
>>>>>>>>> @user_fence_ptr array. */
>>>>>>>>> +    __u64 fence_count;
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @user_fence_ptr: Pointer to an array of
>>>>>>>>> +     * struct drm_i915_vm_bind_user_fence of length @fence_count.
>>>>>>>>> +     */
>>>>>>>>> +    __u64 user_fence_ptr;
>>>>>>>>> +};
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array 
>>>>>>>>> of batch buffer
>>>>>>>>> + * gpu virtual addresses.
>>>>>>>>> + *
>>>>>>>>> + * In the execbuff ioctl (See struct 
>>>>>>>>> drm_i915_gem_execbuffer2), this extension
>>>>>>>>> + * must always be appended in the VM_BIND mode and it will be 
>>>>>>>>> an error to
>>>>>>>>> + * append this extension in older non-VM_BIND mode.
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_gem_execbuffer_ext_batch_addresses {
>>>>>>>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES    1
>>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>>> +    struct i915_user_extension base;
>>>>>>>>> +
>>>>>>>>> +    /** @count: Number of addresses in the addr array. */
>>>>>>>>> +    __u32 count;
>>>>>>>>> +
>>>>>>>>> +    /** @addr: An array of batch gpu virtual addresses. */
>>>>>>>>> +    __u64 addr[0];
>>>>>>>>> +};
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_gem_execbuffer_ext_user_fence - First level 
>>>>>>>>> batch completion
>>>>>>>>> + * signaling extension.
>>>>>>>>> + *
>>>>>>>>> + * This extension allows user to attach a user fence (@addr, 
>>>>>>>>> @value pair) to an
>>>>>>>>> + * execbuf to be signaled by the command streamer after the 
>>>>>>>>> completion of first
>>>>>>>>> + * level batch, by writing the @value at specified @addr and 
>>>>>>>>> triggering an
>>>>>>>>> + * interrupt.
>>>>>>>>> + * User can either poll for this user fence to signal or can 
>>>>>>>>> also wait on it
>>>>>>>>> + * with i915_gem_wait_user_fence ioctl.
>>>>>>>>> + * This is very useful for long running contexts where 
>>>>>>>>> waiting on dma-fence
>>>>>>>>> + * by user (like i915_gem_wait ioctl) is not supported.
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_gem_execbuffer_ext_user_fence {
>>>>>>>>> +#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE        2
>>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>>> +    struct i915_user_extension base;
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @addr: User/Memory fence qword aligned GPU virtual 
>>>>>>>>> address.
>>>>>>>>> +     *
>>>>>>>>> +     * Address has to be a valid GPU virtual address at the 
>>>>>>>>> time of
>>>>>>>>> +     * first level batch completion.
>>>>>>>>> +     */
>>>>>>>>> +    __u64 addr;
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @value: User/Memory fence Value to be written to above 
>>>>>>>>> address
>>>>>>>>> +     * after first level batch completes.
>>>>>>>>> +     */
>>>>>>>>> +    __u64 value;
>>>>>>>>> +
>>>>>>>>> +    /** @rsvd: Reserved for future extensions, MBZ */
>>>>>>>>> +    __u64 rsvd;
>>>>>>>>> +};
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_gem_create_ext_vm_private - Extension to 
>>>>>>>>> make the object
>>>>>>>>> + * private to the specified VM.
>>>>>>>>> + *
>>>>>>>>> + * See struct drm_i915_gem_create_ext.
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_gem_create_ext_vm_private {
>>>>>>>>> +#define I915_GEM_CREATE_EXT_VM_PRIVATE        2
>>>>>>>>> +    /** @base: Extension link. See struct i915_user_extension. */
>>>>>>>>> +    struct i915_user_extension base;
>>>>>>>>> +
>>>>>>>>> +    /** @vm_id: Id of the VM to which the object is private */
>>>>>>>>> +    __u32 vm_id;
>>>>>>>>> +};
>>>>>>>>> +
>>>>>>>>> +/**
>>>>>>>>> + * struct drm_i915_gem_wait_user_fence - Wait on user/memory 
>>>>>>>>> fence.
>>>>>>>>> + *
>>>>>>>>> + * User/Memory fence can be woken up either by:
>>>>>>>>> + *
>>>>>>>>> + * 1. GPU context indicated by @ctx_id, or,
>>>>>>>>> + * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
>>>>>>>>> + *    @ctx_id is ignored when this flag is set.
>>>>>>>>> + *
>>>>>>>>> + * Wakeup condition is,
>>>>>>>>> + * ``((*addr & mask) op (value & mask))``
>>>>>>>>> + *
>>>>>>>>> + * See :ref:`Documentation/driver-api/dma-buf.rst 
>>>>>>>>> <indefinite_dma_fences>`
>>>>>>>>> + */
>>>>>>>>> +struct drm_i915_gem_wait_user_fence {
>>>>>>>>> +    /** @extensions: Zero-terminated chain of extensions. */
>>>>>>>>> +    __u64 extensions;
>>>>>>>>> +
>>>>>>>>> +    /** @addr: User/Memory fence address */
>>>>>>>>> +    __u64 addr;
>>>>>>>>> +
>>>>>>>>> +    /** @ctx_id: Id of the Context which will signal the 
>>>>>>>>> fence. */
>>>>>>>>> +    __u32 ctx_id;
>>>>>>>>> +
>>>>>>>>> +    /** @op: Wakeup condition operator */
>>>>>>>>> +    __u16 op;
>>>>>>>>> +#define I915_UFENCE_WAIT_EQ      0
>>>>>>>>> +#define I915_UFENCE_WAIT_NEQ     1
>>>>>>>>> +#define I915_UFENCE_WAIT_GT      2
>>>>>>>>> +#define I915_UFENCE_WAIT_GTE     3
>>>>>>>>> +#define I915_UFENCE_WAIT_LT      4
>>>>>>>>> +#define I915_UFENCE_WAIT_LTE     5
>>>>>>>>> +#define I915_UFENCE_WAIT_BEFORE  6
>>>>>>>>> +#define I915_UFENCE_WAIT_AFTER   7
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @flags: Supported flags are,
>>>>>>>>> +     *
>>>>>>>>> +     * I915_UFENCE_WAIT_SOFT:
>>>>>>>>> +     *
>>>>>>>>> +     * To be woken up by i915 driver async worker (not by GPU).
>>>>>>>>> +     *
>>>>>>>>> +     * I915_UFENCE_WAIT_ABSTIME:
>>>>>>>>> +     *
>>>>>>>>> +     * Wait timeout specified as absolute time.
>>>>>>>>> +     */
>>>>>>>>> +    __u16 flags;
>>>>>>>>> +#define I915_UFENCE_WAIT_SOFT    0x1
>>>>>>>>> +#define I915_UFENCE_WAIT_ABSTIME 0x2
>>>>>>>>> +
>>>>>>>>> +    /** @value: Wakeup value */
>>>>>>>>> +    __u64 value;
>>>>>>>>> +
>>>>>>>>> +    /** @mask: Wakeup mask */
>>>>>>>>> +    __u64 mask;
>>>>>>>>> +#define I915_UFENCE_WAIT_U8     0xffu
>>>>>>>>> +#define I915_UFENCE_WAIT_U16    0xffffu
>>>>>>>>> +#define I915_UFENCE_WAIT_U32    0xfffffffful
>>>>>>>>> +#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
>>>>>>>>> +
>>>>>>>>> +    /**
>>>>>>>>> +     * @timeout: Wait timeout in nanoseconds.
>>>>>>>>> +     *
>>>>>>>>> +     * If I915_UFENCE_WAIT_ABSTIME flag is set, then the 
>>>>>>>>> timeout is the
>>>>>>>>> +     * absolute time in nsec.
>>>>>>>>> +     */
>>>>>>>>> +    __s64 timeout;
>>>>>>>>> +};
Tvrtko Ursulin June 15, 2022, 9:49 a.m. UTC | #29
On 08/06/2022 21:45, Niranjana Vishwanathapura wrote:
> On Wed, Jun 08, 2022 at 09:54:24AM +0100, Tvrtko Ursulin wrote:
>>
>> On 08/06/2022 09:45, Lionel Landwerlin wrote:
>>> On 08/06/2022 11:36, Tvrtko Ursulin wrote:
>>>>
>>>> On 08/06/2022 07:40, Lionel Landwerlin wrote:
>>>>> On 03/06/2022 09:53, Niranjana Vishwanathapura wrote:
>>>>>> On Wed, Jun 01, 2022 at 10:08:35PM -0700, Niranjana 
>>>>>> Vishwanathapura wrote:
>>>>>>> On Wed, Jun 01, 2022 at 11:27:17AM +0200, Daniel Vetter wrote:
>>>>>>>> On Wed, 1 Jun 2022 at 11:03, Dave Airlie <airlied@gmail.com> wrote:
>>>>>>>>>
>>>>>>>>> On Tue, 24 May 2022 at 05:20, Niranjana Vishwanathapura
>>>>>>>>> <niranjana.vishwanathapura@intel.com> wrote:
>>>>>>>>>>
>>>>>>>>>> On Thu, May 19, 2022 at 04:07:30PM -0700, Zanoni, Paulo R wrote:
>>>>>>>>>>> On Tue, 2022-05-17 at 11:32 -0700, Niranjana 
>>>>>>>>>> Vishwanathapura wrote:
>>>>>>>>>>>> VM_BIND and related uapi definitions
>>>>>>>>>>>>
>>>>>>>>>>>> v2: Ensure proper kernel-doc formatting with cross references.
>>>>>>>>>>>>      Also add new uapi and documentation as per review comments
>>>>>>>>>>>>      from Daniel.
>>>>>>>>>>>>
>>>>>>>>>>>> Signed-off-by: Niranjana Vishwanathapura 
>>>>>>>>>> <niranjana.vishwanathapura@intel.com>
>>>>>>>>>>>> ---
>>>>>>>>>>>>   Documentation/gpu/rfc/i915_vm_bind.h | 399 
>>>>>>>>>> +++++++++++++++++++++++++++
>>>>>>>>>>>>   1 file changed, 399 insertions(+)
>>>>>>>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>>>>
>>>>>>>>>>>> diff --git a/Documentation/gpu/rfc/i915_vm_bind.h 
>>>>>>>>>> b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>>>> new file mode 100644
>>>>>>>>>>>> index 000000000000..589c0a009107
>>>>>>>>>>>> --- /dev/null
>>>>>>>>>>>> +++ b/Documentation/gpu/rfc/i915_vm_bind.h
>>>>>>>>>>>> @@ -0,0 +1,399 @@
>>>>>>>>>>>> +/* SPDX-License-Identifier: MIT */
>>>>>>>>>>>> +/*
>>>>>>>>>>>> + * Copyright © 2022 Intel Corporation
>>>>>>>>>>>> + */
>>>>>>>>>>>> +
>>>>>>>>>>>> +/**
>>>>>>>>>>>> + * DOC: I915_PARAM_HAS_VM_BIND
>>>>>>>>>>>> + *
>>>>>>>>>>>> + * VM_BIND feature availability.
>>>>>>>>>>>> + * See typedef drm_i915_getparam_t param.
>>>>>>>>>>>> + */
>>>>>>>>>>>> +#define I915_PARAM_HAS_VM_BIND 57
>>>>>>>>>>>> +
>>>>>>>>>>>> +/**
>>>>>>>>>>>> + * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
>>>>>>>>>>>> + *
>>>>>>>>>>>> + * Flag to opt-in for VM_BIND mode of binding 
>>>>>>>>>> during VM creation.
>>>>>>>>>>>> + * See struct drm_i915_gem_vm_control flags.
>>>>>>>>>>>> + *
>>>>>>>>>>>> + * A VM in VM_BIND mode will not support the older 
>>>>>>>>>> execbuff mode of binding.
>>>>>>>>>>>> + * In VM_BIND mode, execbuff ioctl will not accept 
>>>>>>>>>> any execlist (ie., the
>>>>>>>>>>>> + * &drm_i915_gem_execbuffer2.buffer_count must be 0).
>>>>>>>>>>>> + * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
>>>>>>>>>>>> + * &drm_i915_gem_execbuffer2.batch_len must be 0.
>>>>>>>>>>>> + * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES 
>>>>>>>>>> extension must be provided
>>>>>>>>>>>> + * to pass in the batch buffer addresses.
>>>>>>>>>>>> + *
>>>>>>>>>>>> + * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
>>>>>>>>>>>> + * I915_EXEC_BATCH_FIRST of 
>>>>>>>>>> &drm_i915_gem_execbuffer2.flags must be 0
>>>>>>>>>>>> + * (not used) in VM_BIND mode. 
>>>>>>>>>> I915_EXEC_USE_EXTENSIONS flag must always be
>>>>>>>>>>>> + * set (See struct 
>>>>>>>>>> drm_i915_gem_execbuffer_ext_batch_addresses).
>>>>>>>>>>>> + * The buffers_ptr, buffer_count, 
>>>>>>>>>> batch_start_offset and batch_len fields
>>>>>>>>>>>> + * of struct drm_i915_gem_execbuffer2 are also not 
>>>>>>>>>> used and must be 0.
>>>>>>>>>>>> + */
>>>>>>>>>>>
>>>>>>>>>>> From that description, it seems we have:
>>>>>>>>>>>
>>>>>>>>>>> struct drm_i915_gem_execbuffer2 {
>>>>>>>>>>>         __u64 buffers_ptr;              -> must be 0 (new)
>>>>>>>>>>>         __u32 buffer_count;             -> must be 0 (new)
>>>>>>>>>>>         __u32 batch_start_offset;       -> must be 0 (new)
>>>>>>>>>>>         __u32 batch_len;                -> must be 0 (new)
>>>>>>>>>>>         __u32 DR1;                      -> must be 0 (old)
>>>>>>>>>>>         __u32 DR4;                      -> must be 0 (old)
>>>>>>>>>>>         __u32 num_cliprects; (fences)   -> must be 0 
>>>>>>>>>> since using extensions
>>>>>>>>>>>         __u64 cliprects_ptr; (fences, extensions) -> 
>>>>>>>>>> contains an actual pointer!
>>>>>>>>>>>         __u64 flags;                    -> some flags 
>>>>>>>>>> must be 0 (new)
>>>>>>>>>>>         __u64 rsvd1; (context info)     -> repurposed field 
>>>>>>>>>>> (old)
>>>>>>>>>>>         __u64 rsvd2;                    -> unused
>>>>>>>>>>> };
>>>>>>>>>>>
>>>>>>>>>>> Based on that, why can't we just get 
>>>>>>>>>> drm_i915_gem_execbuffer3 instead
>>>>>>>>>>> of adding even more complexity to an already abused 
>>>>>>>>>> interface? While
>>>>>>>>>>> the Vulkan-like extension thing is really nice, I don't think 
>>>>>>>>>>> what
>>>>>>>>>>> we're doing here is extending the ioctl usage, we're completely
>>>>>>>>>>> changing how the base struct should be interpreted 
>>>>>>>>>> based on how the VM
>>>>>>>>>>> was created (which is an entirely different ioctl).
>>>>>>>>>>>
>>>>>>>>>>> From Rusty Russel's API Design grading, 
>>>>>>>>>> drm_i915_gem_execbuffer2 is
>>>>>>>>>>> already at -6 without these changes. I think after 
>>>>>>>>>> vm_bind we'll need
>>>>>>>>>>> to create a -11 entry just to deal with this ioctl.
>>>>>>>>>>>
>>>>>>>>>>
>>>>>>>>>> The only change here is removing the execlist support for VM_BIND
>>>>>>>>>> mode (other than natual extensions).
>>>>>>>>>> Adding a new execbuffer3 was considered, but I think we need 
>>>>>>>>>> to be careful
>>>>>>>>>> with that as that goes beyond the VM_BIND support, including 
>>>>>>>>>> any future
>>>>>>>>>> requirements (as we don't want an execbuffer4 after VM_BIND).
>>>>>>>>>
>>>>>>>>> Why not? it's not like adding extensions here is really that 
>>>>>>>>> different
>>>>>>>>> than adding new ioctls.
>>>>>>>>>
>>>>>>>>> I definitely think this deserves an execbuffer3 without even
>>>>>>>>> considering future requirements. Just  to burn down the old
>>>>>>>>> requirements and pointless fields.
>>>>>>>>>
>>>>>>>>> Make execbuffer3 be vm bind only, no relocs, no legacy bits, 
>>>>>>>>> leave the
>>>>>>>>> older sw on execbuf2 for ever.
>>>>>>>>
>>>>>>>> I guess another point in favour of execbuf3 would be that it's less
>>>>>>>> midlayer. If we share the entry point then there's quite a few 
>>>>>>>> vfuncs
>>>>>>>> needed to cleanly split out the vm_bind paths from the legacy
>>>>>>>> reloc/softping paths.
>>>>>>>>
>>>>>>>> If we invert this and do execbuf3, then there's the existing ioctl
>>>>>>>> vfunc, and then we share code (where it even makes sense, probably
>>>>>>>> request setup/submit need to be shared, anything else is probably
>>>>>>>> cleaner to just copypaste) with the usual helper approach.
>>>>>>>>
>>>>>>>> Also that would guarantee that really none of the old concepts like
>>>>>>>> i915_active on the vma or vma open counts and all that stuff leaks
>>>>>>>> into the new vm_bind execbuf.
>>>>>>>>
>>>>>>>> Finally I also think that copypasting would make backporting 
>>>>>>>> easier,
>>>>>>>> or at least more flexible, since it should make it easier to 
>>>>>>>> have the
>>>>>>>> upstream vm_bind co-exist with all the other things we have. 
>>>>>>>> Without
>>>>>>>> huge amounts of conflicts (or at least much less) that pushing a 
>>>>>>>> pile
>>>>>>>> of vfuncs into the existing code would cause.
>>>>>>>>
>>>>>>>> So maybe we should do this?
>>>>>>>
>>>>>>> Thanks Dave, Daniel.
>>>>>>> There are a few things that will be common between execbuf2 and
>>>>>>> execbuf3, like request setup/submit (as you said), fence handling 
>>>>>>> (timeline fences, fence array, composite fences), engine selection,
>>>>>>> etc. Also, many of the 'flags' will be there in execbuf3 also (but
>>>>>>> bit position will differ).
>>>>>>> But I guess these should be fine as the suggestion here is to
>>>>>>> copy-paste the execbuff code and having a shared code where 
>>>>>>> possible.
>>>>>>> Besides, we can stop supporting some older feature in execbuff3
>>>>>>> (like fence array in favor of newer timeline fences), which will
>>>>>>> further reduce common code.
>>>>>>>
>>>>>>> Ok, I will update this series by adding execbuf3 and send out soon.
>>>>>>>
>>>>>>
>>>>>> Does this sound reasonable?
>>>>>
>>>>>
>>>>> Thanks for proposing this. Some comments below.
>>>>>
>>>>>
>>>>>>
>>>>>> struct drm_i915_gem_execbuffer3 {
>>>>>>        __u32 ctx_id;        /* previously execbuffer2.rsvd1 */
>>>>>>
>>>>>>        __u32 batch_count;
>>>>>>        __u64 batch_addr_ptr;    /* Pointer to an array of batch 
>>>>>> gpu virtual addresses */
>>>>>>
>>>>>>        __u64 flags;
>>>>>> #define I915_EXEC3_RING_MASK              (0x3f)
>>>>>> #define I915_EXEC3_DEFAULT                (0<<0)
>>>>>> #define I915_EXEC3_RENDER                 (1<<0)
>>>>>> #define I915_EXEC3_BSD                    (2<<0)
>>>>>> #define I915_EXEC3_BLT                    (3<<0)
>>>>>> #define I915_EXEC3_VEBOX                  (4<<0)
>>>>>
>>>>>
>>>>> Shouldn't we use the new engine selection uAPI instead?
>>>>>
>>>>> We can already create an engine map with I915_CONTEXT_PARAM_ENGINES 
>>>>> in drm_i915_gem_context_create_ext_setparam.
>>>>>
>>>>> And you can also create virtual engines with the same extension.
>>>>>
>>>>> It feels like this could be a single u32 with the engine index (in 
>>>>> the context engine map).
>>>>
>>>> Yes I said the same yesterday.
>>>>
>>>> Also note that as you can't any longer set engines on a default 
>>>> context, question is whether userspace cares to use execbuf3 with it 
>>>> (default context).
>>>>
>>>> If it does, it will need an alternative engine selection for that 
>>>> case. I was proposing class:instance rather than legacy cumbersome 
>>>> flags.
>>>>
>>>> If it does not, I  mean if the decision is to only allow execbuf3 
>>>> with engine maps, then it leaves the default context a waste of 
>>>> kernel memory in the execbuf3 future. :( Don't know what to do there..
>>>>
>>>> Regards,
>>>>
>>>> Tvrtko
>>>
>>>
>>> Thanks Tvrtko, I only saw your reply after responding.
>>>
>>>
>>> Both Iris & Anv create a context with engines (if kernel supports it) 
>>> : 
>>> https://gitlab.freedesktop.org/mesa/mesa/-/blob/main/src/intel/common/intel_gem.c#L73 
>>>
>>>
>>>
>>> I think we should be fine with just a single engine id and we don't 
>>> care about the default context.
>>
>> I wonder if in this case we could stop creating the default context 
>> starting from a future "gen"? Otherwise, with engine map only execbuf3 
>> and execbuf3 only userspace, it would serve no purpose apart from 
>> wasting kernel memory.
>>
> 
> Thanks Tvrtko, Lionel.
> 
> I will be glad to remove these flags, just define a uint32 engine_id and
> mandate a context with user engines map.
> 
> Regarding removing the default context, yah, it depends on from which gen
> onwards we will only be supporting execbuf3 and execbuf2 is fully
> deprecated. Till then, we will have to keep it I guess :(.

Forgot about this sub-thread.. I think it could be removed before 
execbuf2 is fully deprecated. We can make that decision with any new 
platform which needs UMD stack updates to be supported. But it is work 
for us to adjust IGT so I am not hopeful anyone will tackle it. We will 
just end up wasting memory.

Regards,

Tvrtko
diff mbox series

Patch

diff --git a/Documentation/gpu/rfc/i915_vm_bind.h b/Documentation/gpu/rfc/i915_vm_bind.h
new file mode 100644
index 000000000000..589c0a009107
--- /dev/null
+++ b/Documentation/gpu/rfc/i915_vm_bind.h
@@ -0,0 +1,399 @@ 
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+/**
+ * DOC: I915_PARAM_HAS_VM_BIND
+ *
+ * VM_BIND feature availability.
+ * See typedef drm_i915_getparam_t param.
+ */
+#define I915_PARAM_HAS_VM_BIND		57
+
+/**
+ * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
+ *
+ * Flag to opt-in for VM_BIND mode of binding during VM creation.
+ * See struct drm_i915_gem_vm_control flags.
+ *
+ * A VM in VM_BIND mode will not support the older execbuff mode of binding.
+ * In VM_BIND mode, execbuff ioctl will not accept any execlist (ie., the
+ * &drm_i915_gem_execbuffer2.buffer_count must be 0).
+ * Also, &drm_i915_gem_execbuffer2.batch_start_offset and
+ * &drm_i915_gem_execbuffer2.batch_len must be 0.
+ * DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES extension must be provided
+ * to pass in the batch buffer addresses.
+ *
+ * Additionally, I915_EXEC_NO_RELOC, I915_EXEC_HANDLE_LUT and
+ * I915_EXEC_BATCH_FIRST of &drm_i915_gem_execbuffer2.flags must be 0
+ * (not used) in VM_BIND mode. I915_EXEC_USE_EXTENSIONS flag must always be
+ * set (See struct drm_i915_gem_execbuffer_ext_batch_addresses).
+ * The buffers_ptr, buffer_count, batch_start_offset and batch_len fields
+ * of struct drm_i915_gem_execbuffer2 are also not used and must be 0.
+ */
+#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)
+
+/**
+ * DOC: I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING
+ *
+ * Flag to declare context as long running.
+ * See struct drm_i915_gem_context_create_ext flags.
+ *
+ * Usage of dma-fence expects that they complete in reasonable amount of time.
+ * Compute on the other hand can be long running. Hence it is not appropriate
+ * for compute contexts to export request completion dma-fence to user.
+ * The dma-fence usage will be limited to in-kernel consumption only.
+ * Compute contexts need to use user/memory fence.
+ *
+ * So, long running contexts do not support output fences. Hence,
+ * I915_EXEC_FENCE_OUT (See &drm_i915_gem_execbuffer2.flags and
+ * I915_EXEC_FENCE_SIGNAL (See &drm_i915_gem_exec_fence.flags) are expected
+ * to be not used.
+ *
+ * DRM_I915_GEM_WAIT ioctl call is also not supported for objects mapped
+ * to long running contexts.
+ */
+#define I915_CONTEXT_CREATE_FLAGS_LONG_RUNNING   (1u << 2)
+
+/* VM_BIND related ioctls */
+#define DRM_I915_GEM_VM_BIND		0x3d
+#define DRM_I915_GEM_VM_UNBIND		0x3e
+#define DRM_I915_GEM_WAIT_USER_FENCE	0x3f
+
+#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
+#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_bind)
+#define DRM_IOCTL_I915_GEM_WAIT_USER_FENCE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT_USER_FENCE, struct drm_i915_gem_wait_user_fence)
+
+/**
+ * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
+ *
+ * This structure is passed to VM_BIND ioctl and specifies the mapping of GPU
+ * virtual address (VA) range to the section of an object that should be bound
+ * in the device page table of the specified address space (VM).
+ * The VA range specified must be unique (ie., not currently bound) and can
+ * be mapped to whole object or a section of the object (partial binding).
+ * Multiple VA mappings can be created to the same section of the object
+ * (aliasing).
+ */
+struct drm_i915_gem_vm_bind {
+	/** @vm_id: VM (address space) id to bind */
+	__u32 vm_id;
+
+	/** @handle: Object handle */
+	__u32 handle;
+
+	/** @start: Virtual Address start to bind */
+	__u64 start;
+
+	/** @offset: Offset in object to bind */
+	__u64 offset;
+
+	/** @length: Length of mapping to bind */
+	__u64 length;
+
+	/**
+	 * @flags: Supported flags are,
+	 *
+	 * I915_GEM_VM_BIND_READONLY:
+	 * Mapping is read-only.
+	 *
+	 * I915_GEM_VM_BIND_CAPTURE:
+	 * Capture this mapping in the dump upon GPU error.
+	 */
+	__u64 flags;
+#define I915_GEM_VM_BIND_READONLY    (1 << 0)
+#define I915_GEM_VM_BIND_CAPTURE     (1 << 1)
+
+	/** @extensions: 0-terminated chain of extensions for this mapping. */
+	__u64 extensions;
+};
+
+/**
+ * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
+ *
+ * This structure is passed to VM_UNBIND ioctl and specifies the GPU virtual
+ * address (VA) range that should be unbound from the device page table of the
+ * specified address space (VM). The specified VA range must match one of the
+ * mappings created with the VM_BIND ioctl. TLB is flushed upon unbind
+ * completion.
+ */
+struct drm_i915_gem_vm_unbind {
+	/** @vm_id: VM (address space) id to bind */
+	__u32 vm_id;
+
+	/** @rsvd: Reserved for future use; must be zero. */
+	__u32 rsvd;
+
+	/** @start: Virtual Address start to unbind */
+	__u64 start;
+
+	/** @length: Length of mapping to unbind */
+	__u64 length;
+
+	/** @flags: reserved for future usage, currently MBZ */
+	__u64 flags;
+
+	/** @extensions: 0-terminated chain of extensions for this mapping. */
+	__u64 extensions;
+};
+
+/**
+ * struct drm_i915_vm_bind_fence - An input or output fence for the vm_bind
+ * or the vm_unbind work.
+ *
+ * The vm_bind or vm_unbind async worker will wait for the input fence to signal
+ * before starting the binding or unbinding.
+ *
+ * The vm_bind or vm_unbind async worker will signal the returned output fence
+ * after the completion of binding or unbinding.
+ */
+struct drm_i915_vm_bind_fence {
+	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
+	__u32 handle;
+
+	/**
+	 * @flags: Supported flags are,
+	 *
+	 * I915_VM_BIND_FENCE_WAIT:
+	 * Wait for the input fence before binding/unbinding
+	 *
+	 * I915_VM_BIND_FENCE_SIGNAL:
+	 * Return bind/unbind completion fence as output
+	 */
+	__u32 flags;
+#define I915_VM_BIND_FENCE_WAIT            (1<<0)
+#define I915_VM_BIND_FENCE_SIGNAL          (1<<1)
+#define __I915_VM_BIND_FENCE_UNKNOWN_FLAGS (-(I915_VM_BIND_FENCE_SIGNAL << 1))
+};
+
+/**
+ * struct drm_i915_vm_bind_ext_timeline_fences - Timeline fences for vm_bind
+ * and vm_unbind.
+ *
+ * This structure describes an array of timeline drm_syncobj and associated
+ * points for timeline variants of drm_syncobj. These timeline 'drm_syncobj's
+ * can be input or output fences (See struct drm_i915_vm_bind_fence).
+ */
+struct drm_i915_vm_bind_ext_timeline_fences {
+#define I915_VM_BIND_EXT_timeline_FENCES	0
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/**
+	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
+	 * arrays.
+	 */
+	__u64 fence_count;
+
+	/**
+	 * @handles_ptr: Pointer to an array of struct drm_i915_vm_bind_fence
+	 * of length @fence_count.
+	 */
+	__u64 handles_ptr;
+
+	/**
+	 * @values_ptr: Pointer to an array of u64 values of length
+	 * @fence_count.
+	 * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
+	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
+	 * binary one.
+	 */
+	__u64 values_ptr;
+};
+
+/**
+ * struct drm_i915_vm_bind_user_fence - An input or output user fence for the
+ * vm_bind or the vm_unbind work.
+ *
+ * The vm_bind or vm_unbind async worker will wait for the input fence (value at
+ * @addr to become equal to @val) before starting the binding or unbinding.
+ *
+ * The vm_bind or vm_unbind async worker will signal the output fence after
+ * the completion of binding or unbinding by writing @val to memory location at
+ * @addr
+ */
+struct drm_i915_vm_bind_user_fence {
+	/** @addr: User/Memory fence qword aligned process virtual address */
+	__u64 addr;
+
+	/** @val: User/Memory fence value to be written after bind completion */
+	__u64 val;
+
+	/**
+	 * @flags: Supported flags are,
+	 *
+	 * I915_VM_BIND_USER_FENCE_WAIT:
+	 * Wait for the input fence before binding/unbinding
+	 *
+	 * I915_VM_BIND_USER_FENCE_SIGNAL:
+	 * Return bind/unbind completion fence as output
+	 */
+	__u32 flags;
+#define I915_VM_BIND_USER_FENCE_WAIT            (1<<0)
+#define I915_VM_BIND_USER_FENCE_SIGNAL          (1<<1)
+#define __I915_VM_BIND_USER_FENCE_UNKNOWN_FLAGS \
+	(-(I915_VM_BIND_USER_FENCE_SIGNAL << 1))
+};
+
+/**
+ * struct drm_i915_vm_bind_ext_user_fence - User/memory fences for vm_bind
+ * and vm_unbind.
+ *
+ * These user fences can be input or output fences
+ * (See struct drm_i915_vm_bind_user_fence).
+ */
+struct drm_i915_vm_bind_ext_user_fence {
+#define I915_VM_BIND_EXT_USER_FENCES	1
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/** @fence_count: Number of elements in the @user_fence_ptr array. */
+	__u64 fence_count;
+
+	/**
+	 * @user_fence_ptr: Pointer to an array of
+	 * struct drm_i915_vm_bind_user_fence of length @fence_count.
+	 */
+	__u64 user_fence_ptr;
+};
+
+/**
+ * struct drm_i915_gem_execbuffer_ext_batch_addresses - Array of batch buffer
+ * gpu virtual addresses.
+ *
+ * In the execbuff ioctl (See struct drm_i915_gem_execbuffer2), this extension
+ * must always be appended in the VM_BIND mode and it will be an error to
+ * append this extension in older non-VM_BIND mode.
+ */
+struct drm_i915_gem_execbuffer_ext_batch_addresses {
+#define DRM_I915_GEM_EXECBUFFER_EXT_BATCH_ADDRESSES	1
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/** @count: Number of addresses in the @addr array. */
+	__u32 count;
+
+	/**
+	 * @addr: An array of @count batch gpu virtual addresses.
+	 *
+	 * C99 flexible array member; zero-length arrays ([0]) are a
+	 * deprecated GNU extension in the kernel.
+	 */
+	__u64 addr[];
+};
+
+/**
+ * struct drm_i915_gem_execbuffer_ext_user_fence - First level batch completion
+ * signaling extension.
+ *
+ * This extension allows user to attach a user fence (@addr, @value pair) to an
+ * execbuf to be signaled by the command streamer after the completion of first
+ * level batch, by writing the @value at specified @addr and triggering an
+ * interrupt.
+ * User can either poll for this user fence to signal or can also wait on it
+ * with i915_gem_wait_user_fence ioctl.
+ * This is especially useful for long running contexts where waiting on dma-fence
+ * by user (like i915_gem_wait ioctl) is not supported.
+ */
+struct drm_i915_gem_execbuffer_ext_user_fence {
+#define DRM_I915_GEM_EXECBUFFER_EXT_USER_FENCE		2
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/**
+	 * @addr: User/Memory fence qword aligned GPU virtual address.
+	 *
+	 * Address has to be a valid GPU virtual address at the time of
+	 * first level batch completion.
+	 */
+	__u64 addr;
+
+	/**
+	 * @value: User/Memory fence Value to be written to above address
+	 * after first level batch completes.
+	 */
+	__u64 value;
+
+	/** @rsvd: Reserved for future extensions, MBZ */
+	__u64 rsvd;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
+ * private to the specified VM.
+ *
+ * See struct drm_i915_gem_create_ext.
+ */
+struct drm_i915_gem_create_ext_vm_private {
+#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
+	/** @base: Extension link. See struct i915_user_extension. */
+	struct i915_user_extension base;
+
+	/** @vm_id: Id of the VM to which the object is private */
+	__u32 vm_id;
+};
+
+/**
+ * struct drm_i915_gem_wait_user_fence - Wait on user/memory fence.
+ *
+ * User/Memory fence can be woken up either by:
+ *
+ * 1. GPU context indicated by @ctx_id, or,
+ * 2. Kernel driver async worker upon I915_UFENCE_WAIT_SOFT.
+ *    @ctx_id is ignored when this flag is set.
+ *
+ * Wakeup condition is,
+ * ``((*addr & mask) op (value & mask))``
+ *
+ * See :ref:`Documentation/driver-api/dma-buf.rst <indefinite_dma_fences>`
+ */
+struct drm_i915_gem_wait_user_fence {
+	/** @extensions: Zero-terminated chain of extensions. */
+	__u64 extensions;
+
+	/** @addr: User/Memory fence address */
+	__u64 addr;
+
+	/** @ctx_id: Id of the Context which will signal the fence. */
+	__u32 ctx_id;
+
+	/** @op: Wakeup condition operator */
+	__u16 op;
+#define I915_UFENCE_WAIT_EQ      0	/* *addr == value */
+#define I915_UFENCE_WAIT_NEQ     1	/* *addr != value */
+#define I915_UFENCE_WAIT_GT      2	/* *addr >  value */
+#define I915_UFENCE_WAIT_GTE     3	/* *addr >= value */
+#define I915_UFENCE_WAIT_LT      4	/* *addr <  value */
+#define I915_UFENCE_WAIT_LTE     5	/* *addr <= value */
+#define I915_UFENCE_WAIT_BEFORE  6	/* NOTE(review): presumably wraparound-aware ordering — semantics not defined here, confirm */
+#define I915_UFENCE_WAIT_AFTER   7	/* NOTE(review): presumably wraparound-aware ordering — semantics not defined here, confirm */
+
+	/**
+	 * @flags: Supported flags are,
+	 *
+	 * I915_UFENCE_WAIT_SOFT:
+	 *
+	 * To be woken up by i915 driver async worker (not by GPU).
+	 *
+	 * I915_UFENCE_WAIT_ABSTIME:
+	 *
+	 * Wait timeout specified as absolute time.
+	 */
+	__u16 flags;
+#define I915_UFENCE_WAIT_SOFT    0x1
+#define I915_UFENCE_WAIT_ABSTIME 0x2
+
+	/** @value: Wakeup value */
+	__u64 value;
+
+	/** @mask: Wakeup mask */
+	__u64 mask;
+/* Convenience masks for comparing an 8/16/32/64-bit operand at @addr. */
+#define I915_UFENCE_WAIT_U8     0xffu
+#define I915_UFENCE_WAIT_U16    0xffffu
+#define I915_UFENCE_WAIT_U32    0xfffffffful
+#define I915_UFENCE_WAIT_U64    0xffffffffffffffffull
+
+	/**
+	 * @timeout: Wait timeout in nanoseconds.
+	 *
+	 * If I915_UFENCE_WAIT_ABSTIME flag is set, then the timeout is the
+	 * absolute time in nsec.
+	 */
+	__s64 timeout;
+};