Message ID | 20240116-flsplit-v1-12-c9d0f4370a5d@kernel.org (mailing list archive)
---|---
State | Superseded
Series | filelock: split struct file_lock into file_lock and file_lease structs
On Wed, 17 Jan 2024, Jeff Layton wrote:
> Convert __locks_delete_block and __locks_wake_up_blocks to take a struct
> file_lock_core pointer. Note that to accommodate this, we need to add a
> new file_lock() wrapper to go from file_lock_core to file_lock.

Actually we don't need it.... see below.

>
> Signed-off-by: Jeff Layton <jlayton@kernel.org>
> ---
>  fs/locks.c | 43 ++++++++++++++++++++++++++-----------------
>  1 file changed, 26 insertions(+), 17 deletions(-)
>
> diff --git a/fs/locks.c b/fs/locks.c
> index eddf4d767d5d..6b8e8820dec9 100644
> --- a/fs/locks.c
> +++ b/fs/locks.c
> @@ -92,6 +92,11 @@ static inline bool IS_LEASE(struct file_lock_core *flc)
>
>  #define IS_REMOTELCK(fl)	(fl->fl_core.fl_pid <= 0)
>
> +struct file_lock *file_lock(struct file_lock_core *flc)
> +{
> +	return container_of(flc, struct file_lock, fl_core);
> +}
> +
>  static bool lease_breaking(struct file_lock *fl)
>  {
>  	return fl->fl_core.fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
> @@ -677,31 +682,35 @@ static void locks_delete_global_blocked(struct file_lock_core *waiter)
>   *
>   * Must be called with blocked_lock_lock held.
>   */
> -static void __locks_delete_block(struct file_lock *waiter)
> +static void __locks_delete_block(struct file_lock_core *waiter)
>  {
> -	locks_delete_global_blocked(&waiter->fl_core);
> -	list_del_init(&waiter->fl_core.fl_blocked_member);
> +	locks_delete_global_blocked(waiter);
> +	list_del_init(&waiter->fl_blocked_member);
>  }
>
> -static void __locks_wake_up_blocks(struct file_lock *blocker)
> +static void __locks_wake_up_blocks(struct file_lock_core *blocker)
>  {
> -	while (!list_empty(&blocker->fl_core.fl_blocked_requests)) {
> -		struct file_lock *waiter;
> +	while (!list_empty(&blocker->fl_blocked_requests)) {
> +		struct file_lock_core *waiter;
> +		struct file_lock *fl;
> +
> +		waiter = list_first_entry(&blocker->fl_blocked_requests,
> +					  struct file_lock_core, fl_blocked_member);
>
> -		waiter = list_first_entry(&blocker->fl_core.fl_blocked_requests,
> -					  struct file_lock, fl_core.fl_blocked_member);
>
> +		fl = file_lock(waiter);

	fl = list_first_entry(&blocker->fl_blocked_requests,
			      struct file_lock, fl_core.fl_blocked_member);

	waiter = &fl->fl_core;

achieves the same result without needing file_lock().

If you really want to add file_lock() then do so, but you need a better
justification :-)

NeilBrown

>  		__locks_delete_block(waiter);
> -		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
> -			waiter->fl_lmops->lm_notify(waiter);
> +		if ((IS_POSIX(waiter) || IS_FLOCK(waiter)) &&
> +		    fl->fl_lmops && fl->fl_lmops->lm_notify)
> +			fl->fl_lmops->lm_notify(fl);
>  		else
> -			wake_up(&waiter->fl_core.fl_wait);
> +			wake_up(&waiter->fl_wait);
>
>  		/*
>  		 * The setting of fl_blocker to NULL marks the "done"
>  		 * point in deleting a block. Paired with acquire at the top
>  		 * of locks_delete_block().
>  		 */
> -		smp_store_release(&waiter->fl_core.fl_blocker, NULL);
> +		smp_store_release(&waiter->fl_blocker, NULL);
>  	}
>  }
>
> @@ -743,8 +752,8 @@ int locks_delete_block(struct file_lock *waiter)
>  	spin_lock(&blocked_lock_lock);
>  	if (waiter->fl_core.fl_blocker)
>  		status = 0;
> -	__locks_wake_up_blocks(waiter);
> -	__locks_delete_block(waiter);
> +	__locks_wake_up_blocks(&waiter->fl_core);
> +	__locks_delete_block(&waiter->fl_core);
>
>  	/*
>  	 * The setting of fl_blocker to NULL marks the "done" point in deleting
> @@ -799,7 +808,7 @@ static void __locks_insert_block(struct file_lock *blocker,
>  	 * waiter, but might not conflict with blocker, or the requests
>  	 * and lock which block it. So they all need to be woken.
>  	 */
> -	__locks_wake_up_blocks(waiter);
> +	__locks_wake_up_blocks(&waiter->fl_core);
>  }
>
>  /* Must be called with flc_lock held. */
> @@ -831,7 +840,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
>  		return;
>
>  	spin_lock(&blocked_lock_lock);
> -	__locks_wake_up_blocks(blocker);
> +	__locks_wake_up_blocks(&blocker->fl_core);
>  	spin_unlock(&blocked_lock_lock);
>  }
>
> @@ -1186,7 +1195,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
>  	 * Ensure that we don't find any locks blocked on this
>  	 * request during deadlock detection.
>  	 */
> -	__locks_wake_up_blocks(request);
> +	__locks_wake_up_blocks(&request->fl_core);
>  	if (likely(!posix_locks_deadlock(request, fl))) {
>  		error = FILE_LOCK_DEFERRED;
>  		__locks_insert_block(fl, request,
>
> --
> 2.43.0
>
>
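Neil's point works because list_first_entry() is itself just container_of() applied to the first list node, and offsetof() accepts a nested member designator like fl_core.fl_blocked_member. The following minimal, self-contained sketch (simplified stand-in structs and macros, not the kernel's definitions) demonstrates that the two derivations compute the same pointers:

```c
#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Simplified container_of: walk back from a member to its container. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct file_lock_core {
	struct list_head fl_blocked_requests;	/* waiters blocked on us */
	struct list_head fl_blocked_member;	/* our entry in a blocker's list */
};

struct file_lock {
	struct file_lock_core fl_core;	/* embedded core, as in the patch */
	int fl_type;			/* placeholder for lock-only fields */
};

int main(void)
{
	struct file_lock blocker = {0}, waiter = {0};

	/* Hand-link waiter onto blocker's blocked-requests list. */
	blocker.fl_core.fl_blocked_requests.next =
		&waiter.fl_core.fl_blocked_member;

	/* Approach 1 (the patch): find the core, then hop out via file_lock(). */
	struct file_lock_core *flc =
		list_first_entry(&blocker.fl_core.fl_blocked_requests,
				 struct file_lock_core, fl_blocked_member);
	struct file_lock *fl1 = container_of(flc, struct file_lock, fl_core);

	/* Approach 2 (Neil): name the nested member path and land directly
	 * on the outer struct; the core is then just &fl2->fl_core. */
	struct file_lock *fl2 =
		list_first_entry(&blocker.fl_core.fl_blocked_requests,
				 struct file_lock, fl_core.fl_blocked_member);

	assert(fl1 == fl2 && flc == &fl2->fl_core); /* identical arithmetic */
	return 0;
}
```

Both forms compile down to the same pointer subtraction, so the choice is purely about which name the code reads better with.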
On Wed, 2024-01-17 at 09:23 +1100, NeilBrown wrote:
> On Wed, 17 Jan 2024, Jeff Layton wrote:
> > Convert __locks_delete_block and __locks_wake_up_blocks to take a struct
> > file_lock_core pointer. Note that to accommodate this, we need to add a
> > new file_lock() wrapper to go from file_lock_core to file_lock.
>
> Actually we don't need it.... see below.
>
> >
> > Signed-off-by: Jeff Layton <jlayton@kernel.org>
> > ---
> >  fs/locks.c | 43 ++++++++++++++++++++++++++-----------------
> >  1 file changed, 26 insertions(+), 17 deletions(-)
> >
> > diff --git a/fs/locks.c b/fs/locks.c
> > index eddf4d767d5d..6b8e8820dec9 100644
> > --- a/fs/locks.c
> > +++ b/fs/locks.c
> > @@ -92,6 +92,11 @@ static inline bool IS_LEASE(struct file_lock_core *flc)
> >
> >  #define IS_REMOTELCK(fl)	(fl->fl_core.fl_pid <= 0)
> >
> > +struct file_lock *file_lock(struct file_lock_core *flc)
> > +{
> > +	return container_of(flc, struct file_lock, fl_core);
> > +}
> > +
> >  static bool lease_breaking(struct file_lock *fl)
> >  {
> >  	return fl->fl_core.fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
> > @@ -677,31 +682,35 @@ static void locks_delete_global_blocked(struct file_lock_core *waiter)
> >   *
> >   * Must be called with blocked_lock_lock held.
> >   */
> > -static void __locks_delete_block(struct file_lock *waiter)
> > +static void __locks_delete_block(struct file_lock_core *waiter)
> >  {
> > -	locks_delete_global_blocked(&waiter->fl_core);
> > -	list_del_init(&waiter->fl_core.fl_blocked_member);
> > +	locks_delete_global_blocked(waiter);
> > +	list_del_init(&waiter->fl_blocked_member);
> >  }
> >
> > -static void __locks_wake_up_blocks(struct file_lock *blocker)
> > +static void __locks_wake_up_blocks(struct file_lock_core *blocker)
> >  {
> > -	while (!list_empty(&blocker->fl_core.fl_blocked_requests)) {
> > -		struct file_lock *waiter;
> > +	while (!list_empty(&blocker->fl_blocked_requests)) {
> > +		struct file_lock_core *waiter;
> > +		struct file_lock *fl;
> > +
> > +		waiter = list_first_entry(&blocker->fl_blocked_requests,
> > +					  struct file_lock_core, fl_blocked_member);
> >
> > -		waiter = list_first_entry(&blocker->fl_core.fl_blocked_requests,
> > -					  struct file_lock, fl_core.fl_blocked_member);
> >
> > +		fl = file_lock(waiter);
>
> 	fl = list_first_entry(&blocker->fl_blocked_requests,
> 			      struct file_lock, fl_core.fl_blocked_member);
>
> 	waiter = &fl->fl_core;
>
> achieves the same result without needing file_lock().
>
> If you really want to add file_lock() then do so, but you need a better
> justification :-)
>

Except that we actually do need to pass the file_lock pointer to
lm_notify in the block below. There are also some other places that come
up later that will need file_lock() (and file_lease(), for that matter).
I probably could get away without the container_of wrappers, but then
we'd need separate functions for leases for some of the fs/locks.c code.

To be clear, this is not the "cleanest" split ever. If we were
reimplementing leases from scratch today, I'd probably not piggyback on
so much of the file locking code. My main goal here is to get the
initial struct split done. Once that's finished, we may be able to do a
more clean separation later.

> >
> >  		__locks_delete_block(waiter);
> > -		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
> > -			waiter->fl_lmops->lm_notify(waiter);
> > +		if ((IS_POSIX(waiter) || IS_FLOCK(waiter)) &&
> > +		    fl->fl_lmops && fl->fl_lmops->lm_notify)
> > +			fl->fl_lmops->lm_notify(fl);
> >  		else
> > -			wake_up(&waiter->fl_core.fl_wait);
> > +			wake_up(&waiter->fl_wait);
> >
> >  		/*
> >  		 * The setting of fl_blocker to NULL marks the "done"
> >  		 * point in deleting a block. Paired with acquire at the top
> >  		 * of locks_delete_block().
> >  		 */
> > -		smp_store_release(&waiter->fl_core.fl_blocker, NULL);
> > +		smp_store_release(&waiter->fl_blocker, NULL);
> >  	}
> >  }
> >
> > @@ -743,8 +752,8 @@ int locks_delete_block(struct file_lock *waiter)
> >  	spin_lock(&blocked_lock_lock);
> >  	if (waiter->fl_core.fl_blocker)
> >  		status = 0;
> > -	__locks_wake_up_blocks(waiter);
> > -	__locks_delete_block(waiter);
> > +	__locks_wake_up_blocks(&waiter->fl_core);
> > +	__locks_delete_block(&waiter->fl_core);
> >
> >  	/*
> >  	 * The setting of fl_blocker to NULL marks the "done" point in deleting
> > @@ -799,7 +808,7 @@ static void __locks_insert_block(struct file_lock *blocker,
> >  	 * waiter, but might not conflict with blocker, or the requests
> >  	 * and lock which block it. So they all need to be woken.
> >  	 */
> > -	__locks_wake_up_blocks(waiter);
> > +	__locks_wake_up_blocks(&waiter->fl_core);
> >  }
> >
> >  /* Must be called with flc_lock held. */
> > @@ -831,7 +840,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
> >  		return;
> >
> >  	spin_lock(&blocked_lock_lock);
> > -	__locks_wake_up_blocks(blocker);
> > +	__locks_wake_up_blocks(&blocker->fl_core);
> >  	spin_unlock(&blocked_lock_lock);
> >  }
> >
> > @@ -1186,7 +1195,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
> >  	 * Ensure that we don't find any locks blocked on this
> >  	 * request during deadlock detection.
> >  	 */
> > -	__locks_wake_up_blocks(request);
> > +	__locks_wake_up_blocks(&request->fl_core);
> >  	if (likely(!posix_locks_deadlock(request, fl))) {
> >  		error = FILE_LOCK_DEFERRED;
> >  		__locks_insert_block(fl, request,
> >
> > --
> > 2.43.0
> >
>
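The crux of Jeff's justification is that lm_notify() is typed against struct file_lock, so once the wakeup loop only holds a file_lock_core, something has to convert back out; and later in the series leases will embed the same core. A rough sketch of that direction, where struct file_lease and file_lease() are assumptions about later patches rather than code from this one:

```c
/* Hypothetical shape of the lease struct from later in the series. */
struct file_lease {
	struct file_lock_core fl_core;	/* assumed: same embedded core */
	/* lease-specific fields would follow */
};

/* Typed "hop out" wrappers around container_of(). */
static inline struct file_lock *file_lock(struct file_lock_core *flc)
{
	return container_of(flc, struct file_lock, fl_core);
}

static inline struct file_lease *file_lease(struct file_lock_core *flc)
{
	return container_of(flc, struct file_lease, fl_core);
}
```

Shared fs/locks.c code that traverses core lists could then dispatch on the core's flags (IS_LEASE() appears in the patch; the handler names here are placeholders): `if (IS_LEASE(flc)) handle_lease(file_lease(flc)); else handle_lock(file_lock(flc));`. Without the wrappers, each such site would need duplicated lock and lease variants, which is the cost Jeff is weighing.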
diff --git a/fs/locks.c b/fs/locks.c
index eddf4d767d5d..6b8e8820dec9 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -92,6 +92,11 @@ static inline bool IS_LEASE(struct file_lock_core *flc)
 
 #define IS_REMOTELCK(fl)	(fl->fl_core.fl_pid <= 0)
 
+struct file_lock *file_lock(struct file_lock_core *flc)
+{
+	return container_of(flc, struct file_lock, fl_core);
+}
+
 static bool lease_breaking(struct file_lock *fl)
 {
 	return fl->fl_core.fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
@@ -677,31 +682,35 @@ static void locks_delete_global_blocked(struct file_lock_core *waiter)
  *
  * Must be called with blocked_lock_lock held.
  */
-static void __locks_delete_block(struct file_lock *waiter)
+static void __locks_delete_block(struct file_lock_core *waiter)
 {
-	locks_delete_global_blocked(&waiter->fl_core);
-	list_del_init(&waiter->fl_core.fl_blocked_member);
+	locks_delete_global_blocked(waiter);
+	list_del_init(&waiter->fl_blocked_member);
 }
 
-static void __locks_wake_up_blocks(struct file_lock *blocker)
+static void __locks_wake_up_blocks(struct file_lock_core *blocker)
 {
-	while (!list_empty(&blocker->fl_core.fl_blocked_requests)) {
-		struct file_lock *waiter;
+	while (!list_empty(&blocker->fl_blocked_requests)) {
+		struct file_lock_core *waiter;
+		struct file_lock *fl;
+
+		waiter = list_first_entry(&blocker->fl_blocked_requests,
+					  struct file_lock_core, fl_blocked_member);
 
-		waiter = list_first_entry(&blocker->fl_core.fl_blocked_requests,
-					  struct file_lock, fl_core.fl_blocked_member);
+		fl = file_lock(waiter);
 		__locks_delete_block(waiter);
-		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
-			waiter->fl_lmops->lm_notify(waiter);
+		if ((IS_POSIX(waiter) || IS_FLOCK(waiter)) &&
+		    fl->fl_lmops && fl->fl_lmops->lm_notify)
+			fl->fl_lmops->lm_notify(fl);
 		else
-			wake_up(&waiter->fl_core.fl_wait);
+			wake_up(&waiter->fl_wait);
 
 		/*
 		 * The setting of fl_blocker to NULL marks the "done"
 		 * point in deleting a block. Paired with acquire at the top
 		 * of locks_delete_block().
 		 */
-		smp_store_release(&waiter->fl_core.fl_blocker, NULL);
+		smp_store_release(&waiter->fl_blocker, NULL);
 	}
 }
 
@@ -743,8 +752,8 @@ int locks_delete_block(struct file_lock *waiter)
 	spin_lock(&blocked_lock_lock);
 	if (waiter->fl_core.fl_blocker)
 		status = 0;
-	__locks_wake_up_blocks(waiter);
-	__locks_delete_block(waiter);
+	__locks_wake_up_blocks(&waiter->fl_core);
+	__locks_delete_block(&waiter->fl_core);
 
 	/*
 	 * The setting of fl_blocker to NULL marks the "done" point in deleting
@@ -799,7 +808,7 @@ static void __locks_insert_block(struct file_lock *blocker,
 	 * waiter, but might not conflict with blocker, or the requests
 	 * and lock which block it. So they all need to be woken.
 	 */
-	__locks_wake_up_blocks(waiter);
+	__locks_wake_up_blocks(&waiter->fl_core);
 }
 
 /* Must be called with flc_lock held. */
@@ -831,7 +840,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
 		return;
 
 	spin_lock(&blocked_lock_lock);
-	__locks_wake_up_blocks(blocker);
+	__locks_wake_up_blocks(&blocker->fl_core);
 	spin_unlock(&blocked_lock_lock);
 }
 
@@ -1186,7 +1195,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 	 * Ensure that we don't find any locks blocked on this
 	 * request during deadlock detection.
 	 */
-	__locks_wake_up_blocks(request);
+	__locks_wake_up_blocks(&request->fl_core);
 	if (likely(!posix_locks_deadlock(request, fl))) {
 		error = FILE_LOCK_DEFERRED;
 		__locks_insert_block(fl, request,
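The comments the diff carries over describe a release/acquire pairing: the waker publishes "block fully deleted" by storing NULL to fl_blocker with release semantics, and a waiter that observes NULL via an acquire load is guaranteed to also see the list deletion that preceded the store. An illustration of that pattern using C11 atomics rather than the kernel's smp_store_release()/smp_load_acquire(); the struct and function names here are demo-only stand-ins:

```c
#include <stdatomic.h>
#include <stddef.h>

struct waiter_demo {
	_Atomic(void *) fl_blocker;	/* stand-in for waiter->fl_blocker */
	int block_deleted;		/* stand-in for the list manipulation */
};

/* Waker side: finish all bookkeeping, then publish completion (release). */
void demo_wake_up(struct waiter_demo *w)
{
	w->block_deleted = 1;	/* the __locks_delete_block() work happens first */
	atomic_store_explicit(&w->fl_blocker, NULL, memory_order_release);
}

/*
 * Waiter side: the acquire load pairs with the release store above, so
 * once NULL is observed, all of the waker's prior writes are visible.
 */
int demo_block_done(struct waiter_demo *w)
{
	return atomic_load_explicit(&w->fl_blocker, memory_order_acquire) == NULL;
}
```

Because the patch only changes which struct the fields live in, not the order of the store relative to the list manipulation, the ordering guarantee is preserved.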
Convert __locks_delete_block and __locks_wake_up_blocks to take a struct
file_lock_core pointer. Note that to accommodate this, we need to add a
new file_lock() wrapper to go from file_lock_core to file_lock.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
---
 fs/locks.c | 43 ++++++++++++++++++++++++++-----------------
 1 file changed, 26 insertions(+), 17 deletions(-)