[v2,1/3] userfaultfd: move userfaultfd_ctx struct to header file

Message ID 20240129193512.123145-2-lokeshgidra@google.com
State New, archived
Series per-vma locks in userfaultfd

Commit Message

Lokesh Gidra Jan. 29, 2024, 7:35 p.m. UTC
Move the userfaultfd_ctx struct to userfaultfd_k.h so that it is
accessible from mm/userfaultfd.c. There are no other changes to the
struct.

This is required to prepare for using per-vma locks in userfaultfd
operations.

Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>
---
 fs/userfaultfd.c              | 39 -----------------------------------
 include/linux/userfaultfd_k.h | 39 +++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+), 39 deletions(-)
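
For illustration, here is a minimal sketch of the kind of code a later
patch in this series could add to mm/userfaultfd.c once struct
userfaultfd_ctx is visible there. It assumes CONFIG_PER_VMA_LOCK and the
in-tree per-VMA lock API (lock_vma_under_rcu()/vma_end_read()); the
helper uffd_op_on_vma() and its fallback logic are hypothetical, not
part of this patch:

    /* Hypothetical sketch; needs <linux/mm.h> and <linux/userfaultfd_k.h>.
     * It dereferences ctx->mm, which mm/userfaultfd.c can only do once
     * the struct definition lives in the header. */
    static int uffd_op_on_vma(struct userfaultfd_ctx *ctx,
                              unsigned long dst_addr)
    {
            struct vm_area_struct *vma;

            /* Fast path: per-VMA read lock, no mmap_lock taken. */
            vma = lock_vma_under_rcu(ctx->mm, dst_addr);
            if (vma) {
                    /* ... perform the userfault fill on the locked VMA ... */
                    vma_end_read(vma);
                    return 0;
            }

            /* Slow path: fall back to the coarse mmap_lock. */
            mmap_read_lock(ctx->mm);
            vma = vma_lookup(ctx->mm, dst_addr);
            if (vma) {
                    /* ... same operation, serialized by mmap_lock ... */
            }
            mmap_read_unlock(ctx->mm);
            return vma ? 0 : -ENOENT;
    }

The motivation for per-VMA locks is that the fast path never touches
mmap_lock, so concurrent userfaultfd operations on different VMAs of the
same mm no longer serialize on a single lock.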

Comments

Mike Rapoport Jan. 30, 2024, 7:12 a.m. UTC | #1
On Mon, Jan 29, 2024 at 11:35:10AM -0800, Lokesh Gidra wrote:
> Move the userfaultfd_ctx struct to userfaultfd_k.h so that it is
> accessible from mm/userfaultfd.c. There are no other changes to the
> struct.

Just a thought, it may be worth moving all of fs/userfaultfd.c to
mm/userfaultfd.c ...
 
> This is required to prepare for using per-vma locks in userfaultfd
> operations.
> 
> Signed-off-by: Lokesh Gidra <lokeshgidra@google.com>

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

Patch

diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 05c8e8a05427..58331b83d648 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -50,45 +50,6 @@ static struct ctl_table vm_userfaultfd_table[] = {
 
 static struct kmem_cache *userfaultfd_ctx_cachep __ro_after_init;
 
-/*
- * Start with fault_pending_wqh and fault_wqh so they're more likely
- * to be in the same cacheline.
- *
- * Locking order:
- *	fd_wqh.lock
- *		fault_pending_wqh.lock
- *			fault_wqh.lock
- *		event_wqh.lock
- *
- * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
- * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
- * also taken in IRQ context.
- */
-struct userfaultfd_ctx {
-	/* waitqueue head for the pending (i.e. not read) userfaults */
-	wait_queue_head_t fault_pending_wqh;
-	/* waitqueue head for the userfaults */
-	wait_queue_head_t fault_wqh;
-	/* waitqueue head for the pseudo fd to wakeup poll/read */
-	wait_queue_head_t fd_wqh;
-	/* waitqueue head for events */
-	wait_queue_head_t event_wqh;
-	/* a refile sequence protected by fault_pending_wqh lock */
-	seqcount_spinlock_t refile_seq;
-	/* pseudo fd refcounting */
-	refcount_t refcount;
-	/* userfaultfd syscall flags */
-	unsigned int flags;
-	/* features requested from userspace */
-	unsigned int features;
-	/* released */
-	bool released;
-	/* memory mappings are changing because of non-cooperative event */
-	atomic_t mmap_changing;
-	/* mm with one or more vmas attached to this userfaultfd_ctx */
-	struct mm_struct *mm;
-};
-
 struct userfaultfd_fork_ctx {
 	struct userfaultfd_ctx *orig;
 	struct userfaultfd_ctx *new;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index e4056547fbe6..691d928ee864 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -36,6 +36,45 @@ 
 #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
 #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
 
+/*
+ * Start with fault_pending_wqh and fault_wqh so they're more likely
+ * to be in the same cacheline.
+ *
+ * Locking order:
+ *	fd_wqh.lock
+ *		fault_pending_wqh.lock
+ *			fault_wqh.lock
+ *		event_wqh.lock
+ *
+ * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
+ * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
+ * also taken in IRQ context.
+ */
+struct userfaultfd_ctx {
+	/* waitqueue head for the pending (i.e. not read) userfaults */
+	wait_queue_head_t fault_pending_wqh;
+	/* waitqueue head for the userfaults */
+	wait_queue_head_t fault_wqh;
+	/* waitqueue head for the pseudo fd to wakeup poll/read */
+	wait_queue_head_t fd_wqh;
+	/* waitqueue head for events */
+	wait_queue_head_t event_wqh;
+	/* a refile sequence protected by fault_pending_wqh lock */
+	seqcount_spinlock_t refile_seq;
+	/* pseudo fd refcounting */
+	refcount_t refcount;
+	/* userfaultfd syscall flags */
+	unsigned int flags;
+	/* features requested from userspace */
+	unsigned int features;
+	/* released */
+	bool released;
+	/* memory mappings are changing because of non-cooperative event */
+	atomic_t mmap_changing;
+	/* mm with one or more vmas attached to this userfaultfd_ctx */
+	struct mm_struct *mm;
+};
+
 extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
 
 /* A combined operation mode + behavior flags. */
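
The locking-order comment that this patch moves reads more easily next
to code. Below is a minimal sketch of a refile step, modeled on the
refile pattern in the existing userfaultfd_ctx_read() in
fs/userfaultfd.c; the helper name uffd_refile_one() is hypothetical and
the body is illustrative, not code from this series:

    /* Hypothetical sketch; needs <linux/wait.h> and <linux/userfaultfd_k.h>. */
    static void uffd_refile_one(struct userfaultfd_ctx *ctx,
                                wait_queue_entry_t *wq)
    {
            /* Outermost lock. The _irq variant matters: aio_poll() takes
             * fd_wqh.lock while holding a lock that is also taken in IRQ
             * context, so leaving IRQs enabled here could deadlock. */
            spin_lock_irq(&ctx->fd_wqh.lock);

            /* Nested per the documented order; IRQs are already off. */
            spin_lock(&ctx->fault_pending_wqh.lock);

            /* refile_seq is only written under fault_pending_wqh.lock,
             * which is what lets lockless readers detect a concurrent
             * refile and retry. */
            write_seqcount_begin(&ctx->refile_seq);
            list_del(&wq->entry);
            /* add_wait_queue() takes fault_wqh.lock internally, the
             * innermost level of the documented order. */
            add_wait_queue(&ctx->fault_wqh, wq);
            write_seqcount_end(&ctx->refile_seq);

            spin_unlock(&ctx->fault_pending_wqh.lock);
            spin_unlock_irq(&ctx->fd_wqh.lock);
    }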