Message ID | 1564180381-9916-1-git-send-email-dwysocha@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Series | [RFC] SUNRPC: Track writers of the 'channel' file to improve cache_listeners_exist |
On Fri, Jul 26, 2019 at 06:33:01PM -0400, Dave Wysochanski wrote: > The sunrpc cache interface is susceptible to being fooled by a rogue > process just reading a 'channel' file. If this happens the kernel > may think a valid daemon exists to service the cache when it does not. > For example, the following may fool the kernel: > cat /proc/net/rpc/auth.unix.gid/channel > > Change the tracking of readers to writers when considering whether a > listener exists as all valid daemon processes either open a channel > file O_RDWR or O_WRONLY. While this does not prevent a rogue process > from "stealing" a message from the kernel, it does at least improve > the kernels perception of whether a valid process servicing the cache > exists. > > Signed-off-by: Dave Wysochanski <dwysocha@redhat.com> > --- > include/linux/sunrpc/cache.h | 6 +++--- > net/sunrpc/cache.c | 12 ++++++++---- > 2 files changed, 11 insertions(+), 7 deletions(-) > > diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h > index c7f38e8..f7d086b 100644 > --- a/include/linux/sunrpc/cache.h > +++ b/include/linux/sunrpc/cache.h > @@ -107,9 +107,9 @@ struct cache_detail { > /* fields for communication over channel */ > struct list_head queue; > > - atomic_t readers; /* how many time is /chennel open */ > - time_t last_close; /* if no readers, when did last close */ > - time_t last_warn; /* when we last warned about no readers */ > + atomic_t writers; /* how many time is /channel open */ > + time_t last_close; /* if no writers, when did last close */ > + time_t last_warn; /* when we last warned about no writers */ > > union { > struct proc_dir_entry *procfs; > diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c > index 6f1528f..a6a6190 100644 > --- a/net/sunrpc/cache.c > +++ b/net/sunrpc/cache.c > @@ -373,7 +373,7 @@ void sunrpc_init_cache_detail(struct cache_detail *cd) > spin_lock(&cache_list_lock); > cd->nextcheck = 0; > cd->entries = 0; > - atomic_set(&cd->readers, 0); > + atomic_set(&cd->writers, 0); > cd->last_close = 0; > cd->last_warn = -1; > list_add(&cd->others, &cache_list); > @@ -1029,11 +1029,13 @@ static int cache_open(struct inode *inode, struct file *filp, > } > rp->offset = 0; > rp->q.reader = 1; > - atomic_inc(&cd->readers); > + > spin_lock(&queue_lock); > list_add(&rp->q.list, &cd->queue); > spin_unlock(&queue_lock); > } > + if (filp->f_mode & FMODE_WRITE) > + atomic_inc(&cd->writers); This patch would be even simpler if we just modified the condition of the preceding if clause: - if (filp->f_mode & FMODE_READ) { + if (filp->f_mode & FMODE_WRITE) { and then we could drop the following chunk completely. Is there any reason not to do that? Or if the resulting behavior isn't right for write-only openers, we could make the condition ((filp->f_mode & FMODE_READ) && (filp->f_mode & FMODE_WRITE)). --b. > filp->private_data = rp; > return 0; > } > @@ -1062,8 +1064,10 @@ static int cache_release(struct inode *inode, struct file *filp, > filp->private_data = NULL; > kfree(rp); > > + } > + if (filp->f_mode & FMODE_WRITE) { > + atomic_dec(&cd->writers); > cd->last_close = seconds_since_boot(); > - atomic_dec(&cd->readers); > } > module_put(cd->owner); > return 0; > @@ -1171,7 +1175,7 @@ static void warn_no_listener(struct cache_detail *detail) > > static bool cache_listeners_exist(struct cache_detail *detail) > { > - if (atomic_read(&detail->readers)) > + if (atomic_read(&detail->writers)) > return true; > if (detail->last_close == 0) > /* This cache was never opened */ > -- > 1.8.3.1
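For concreteness, the simpler variant suggested above would look roughly like the following inside cache_open(). This is an untested sketch that only rearranges the lines visible in the quoted diff; the allocation of rp is elided.

	/*
	 * Sketch only: gate the whole reader-context setup on FMODE_WRITE
	 * and keep the single counter, rather than adding a writers count.
	 */
	if (filp->f_mode & FMODE_WRITE) {	/* was: FMODE_READ */
		/* ... allocate and initialise rp as before ... */
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);

		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;

The trade-off is that a read-only opener then gets no read context at all.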
On Mon, Jul 29 2019, J. Bruce Fields wrote: > On Fri, Jul 26, 2019 at 06:33:01PM -0400, Dave Wysochanski wrote: >> The sunrpc cache interface is susceptible to being fooled by a rogue >> process just reading a 'channel' file. If this happens the kernel >> may think a valid daemon exists to service the cache when it does not. >> For example, the following may fool the kernel: >> cat /proc/net/rpc/auth.unix.gid/channel >> >> Change the tracking of readers to writers when considering whether a >> listener exists as all valid daemon processes either open a channel >> file O_RDWR or O_WRONLY. While this does not prevent a rogue process >> from "stealing" a message from the kernel, it does at least improve >> the kernels perception of whether a valid process servicing the cache >> exists. >> >> Signed-off-by: Dave Wysochanski <dwysocha@redhat.com> >> --- >> include/linux/sunrpc/cache.h | 6 +++--- >> net/sunrpc/cache.c | 12 ++++++++---- >> 2 files changed, 11 insertions(+), 7 deletions(-) >> >> diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h >> index c7f38e8..f7d086b 100644 >> --- a/include/linux/sunrpc/cache.h >> +++ b/include/linux/sunrpc/cache.h >> @@ -107,9 +107,9 @@ struct cache_detail { >> /* fields for communication over channel */ >> struct list_head queue; >> >> - atomic_t readers; /* how many time is /chennel open */ >> - time_t last_close; /* if no readers, when did last close */ >> - time_t last_warn; /* when we last warned about no readers */ >> + atomic_t writers; /* how many time is /channel open */ >> + time_t last_close; /* if no writers, when did last close */ >> + time_t last_warn; /* when we last warned about no writers */ >> >> union { >> struct proc_dir_entry *procfs; >> diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c >> index 6f1528f..a6a6190 100644 >> --- a/net/sunrpc/cache.c >> +++ b/net/sunrpc/cache.c >> @@ -373,7 +373,7 @@ void sunrpc_init_cache_detail(struct cache_detail *cd) >> spin_lock(&cache_list_lock); >> cd->nextcheck = 0; >> cd->entries = 0; >> - atomic_set(&cd->readers, 0); >> + atomic_set(&cd->writers, 0); >> cd->last_close = 0; >> cd->last_warn = -1; >> list_add(&cd->others, &cache_list); >> @@ -1029,11 +1029,13 @@ static int cache_open(struct inode *inode, struct file *filp, >> } >> rp->offset = 0; >> rp->q.reader = 1; >> - atomic_inc(&cd->readers); >> + >> spin_lock(&queue_lock); >> list_add(&rp->q.list, &cd->queue); >> spin_unlock(&queue_lock); >> } >> + if (filp->f_mode & FMODE_WRITE) >> + atomic_inc(&cd->writers); > > This patch would be even simpler if we just modified the condition of > the preceding if clause: > > - if (filp->f_mode & FMODE_READ) { > + if (filp->f_mode & FMODE_WRITE) { > > and then we could drop the following chunk completely. > > Is there any reason not to do that? I can see how this would be tempting, but I think the reason not to do that is it is ... wrong. The bulk of the code is for setting up context to support reading, so it really should be conditional on FMODE_READ. We always want to set that up, because if a process opens for read, and not write, we want to respond properly to read requests. This is useful for debugging. I think this patch from Dave is good. A process opening for read might just be inquisitive. A program opening for write is making more of a commitment to being involved in managing the cache. 
Reviewed-by: NeilBrown <neilb@suse.com> Thanks, NeilBrown > > Or if the resulting behavior isn't right for write-only openers, we > could make the condition ((filp->f_mode & FMODE_READ) && (filp->f_mode & > FMODE_WRITE)). > > --b. > >> filp->private_data = rp; >> return 0; >> } >> @@ -1062,8 +1064,10 @@ static int cache_release(struct inode *inode, struct file *filp, >> filp->private_data = NULL; >> kfree(rp); >> >> + } >> + if (filp->f_mode & FMODE_WRITE) { >> + atomic_dec(&cd->writers); >> cd->last_close = seconds_since_boot(); >> - atomic_dec(&cd->readers); >> } >> module_put(cd->owner); >> return 0; >> @@ -1171,7 +1175,7 @@ static void warn_no_listener(struct cache_detail *detail) >> >> static bool cache_listeners_exist(struct cache_detail *detail) >> { >> - if (atomic_read(&detail->readers)) >> + if (atomic_read(&detail->writers)) >> return true; >> if (detail->last_close == 0) >> /* This cache was never opened */ >> -- >> 1.8.3.1
On Tue, Jul 30, 2019 at 10:02:37AM +1000, NeilBrown wrote: > On Mon, Jul 29 2019, J. Bruce Fields wrote: > > > On Fri, Jul 26, 2019 at 06:33:01PM -0400, Dave Wysochanski wrote: > >> The sunrpc cache interface is susceptible to being fooled by a rogue > >> process just reading a 'channel' file. If this happens the kernel > >> may think a valid daemon exists to service the cache when it does not. > >> For example, the following may fool the kernel: > >> cat /proc/net/rpc/auth.unix.gid/channel > >> > >> Change the tracking of readers to writers when considering whether a > >> listener exists as all valid daemon processes either open a channel > >> file O_RDWR or O_WRONLY. While this does not prevent a rogue process > >> from "stealing" a message from the kernel, it does at least improve > >> the kernels perception of whether a valid process servicing the cache > >> exists. > >> > >> Signed-off-by: Dave Wysochanski <dwysocha@redhat.com> > >> --- > >> include/linux/sunrpc/cache.h | 6 +++--- > >> net/sunrpc/cache.c | 12 ++++++++---- > >> 2 files changed, 11 insertions(+), 7 deletions(-) > >> > >> diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h > >> index c7f38e8..f7d086b 100644 > >> --- a/include/linux/sunrpc/cache.h > >> +++ b/include/linux/sunrpc/cache.h > >> @@ -107,9 +107,9 @@ struct cache_detail { > >> /* fields for communication over channel */ > >> struct list_head queue; > >> > >> - atomic_t readers; /* how many time is /chennel open */ > >> - time_t last_close; /* if no readers, when did last close */ > >> - time_t last_warn; /* when we last warned about no readers */ > >> + atomic_t writers; /* how many time is /channel open */ > >> + time_t last_close; /* if no writers, when did last close */ > >> + time_t last_warn; /* when we last warned about no writers */ > >> > >> union { > >> struct proc_dir_entry *procfs; > >> diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c > >> index 6f1528f..a6a6190 100644 > >> --- a/net/sunrpc/cache.c > >> +++ b/net/sunrpc/cache.c > >> @@ -373,7 +373,7 @@ void sunrpc_init_cache_detail(struct cache_detail *cd) > >> spin_lock(&cache_list_lock); > >> cd->nextcheck = 0; > >> cd->entries = 0; > >> - atomic_set(&cd->readers, 0); > >> + atomic_set(&cd->writers, 0); > >> cd->last_close = 0; > >> cd->last_warn = -1; > >> list_add(&cd->others, &cache_list); > >> @@ -1029,11 +1029,13 @@ static int cache_open(struct inode *inode, struct file *filp, > >> } > >> rp->offset = 0; > >> rp->q.reader = 1; > >> - atomic_inc(&cd->readers); > >> + > >> spin_lock(&queue_lock); > >> list_add(&rp->q.list, &cd->queue); > >> spin_unlock(&queue_lock); > >> } > >> + if (filp->f_mode & FMODE_WRITE) > >> + atomic_inc(&cd->writers); > > > > This patch would be even simpler if we just modified the condition of > > the preceding if clause: > > > > - if (filp->f_mode & FMODE_READ) { > > + if (filp->f_mode & FMODE_WRITE) { > > > > and then we could drop the following chunk completely. > > > > Is there any reason not to do that? > > I can see how this would be tempting, but I think the reason not to do > that is it is ... wrong. > > The bulk of the code is for setting up context to support reading, so it > really should be conditional on FMODE_READ. > We always want to set that up, because if a process opens for read, and > not write, we want to respond properly to read requests. This is useful > for debugging. How is it useful for debugging? --b. > I think this patch from Dave is good. A process opening for read might > just be inquisitive. 
A program opening for write is making more of a > commitment to being involved in managing the cache. > > Reviewed-by: NeilBrown <neilb@suse.com> > > Thanks, > NeilBrown > > > > > > Or if the resulting behavior isn't right for write-only openers, we > > could make the condition ((filp->f_mode & FMODE_READ) && (filp->f_mode & > > FMODE_WRITE)). > > > > --b. > > > >> filp->private_data = rp; > >> return 0; > >> } > >> @@ -1062,8 +1064,10 @@ static int cache_release(struct inode *inode, struct file *filp, > >> filp->private_data = NULL; > >> kfree(rp); > >> > >> + } > >> + if (filp->f_mode & FMODE_WRITE) { > >> + atomic_dec(&cd->writers); > >> cd->last_close = seconds_since_boot(); > >> - atomic_dec(&cd->readers); > >> } > >> module_put(cd->owner); > >> return 0; > >> @@ -1171,7 +1175,7 @@ static void warn_no_listener(struct cache_detail *detail) > >> > >> static bool cache_listeners_exist(struct cache_detail *detail) > >> { > >> - if (atomic_read(&detail->readers)) > >> + if (atomic_read(&detail->writers)) > >> return true; > >> if (detail->last_close == 0) > >> /* This cache was never opened */ > >> -- > >> 1.8.3.1
On Mon, Jul 29 2019, J. Bruce Fields wrote: > On Tue, Jul 30, 2019 at 10:02:37AM +1000, NeilBrown wrote: >> On Mon, Jul 29 2019, J. Bruce Fields wrote: >> >> > On Fri, Jul 26, 2019 at 06:33:01PM -0400, Dave Wysochanski wrote: >> >> The sunrpc cache interface is susceptible to being fooled by a rogue >> >> process just reading a 'channel' file. If this happens the kernel >> >> may think a valid daemon exists to service the cache when it does not. >> >> For example, the following may fool the kernel: >> >> cat /proc/net/rpc/auth.unix.gid/channel >> >> >> >> Change the tracking of readers to writers when considering whether a >> >> listener exists as all valid daemon processes either open a channel >> >> file O_RDWR or O_WRONLY. While this does not prevent a rogue process >> >> from "stealing" a message from the kernel, it does at least improve >> >> the kernels perception of whether a valid process servicing the cache >> >> exists. >> >> >> >> Signed-off-by: Dave Wysochanski <dwysocha@redhat.com> >> >> --- >> >> include/linux/sunrpc/cache.h | 6 +++--- >> >> net/sunrpc/cache.c | 12 ++++++++---- >> >> 2 files changed, 11 insertions(+), 7 deletions(-) >> >> >> >> diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h >> >> index c7f38e8..f7d086b 100644 >> >> --- a/include/linux/sunrpc/cache.h >> >> +++ b/include/linux/sunrpc/cache.h >> >> @@ -107,9 +107,9 @@ struct cache_detail { >> >> /* fields for communication over channel */ >> >> struct list_head queue; >> >> >> >> - atomic_t readers; /* how many time is /chennel open */ >> >> - time_t last_close; /* if no readers, when did last close */ >> >> - time_t last_warn; /* when we last warned about no readers */ >> >> + atomic_t writers; /* how many time is /channel open */ >> >> + time_t last_close; /* if no writers, when did last close */ >> >> + time_t last_warn; /* when we last warned about no writers */ >> >> >> >> union { >> >> struct proc_dir_entry *procfs; >> >> diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c >> >> index 6f1528f..a6a6190 100644 >> >> --- a/net/sunrpc/cache.c >> >> +++ b/net/sunrpc/cache.c >> >> @@ -373,7 +373,7 @@ void sunrpc_init_cache_detail(struct cache_detail *cd) >> >> spin_lock(&cache_list_lock); >> >> cd->nextcheck = 0; >> >> cd->entries = 0; >> >> - atomic_set(&cd->readers, 0); >> >> + atomic_set(&cd->writers, 0); >> >> cd->last_close = 0; >> >> cd->last_warn = -1; >> >> list_add(&cd->others, &cache_list); >> >> @@ -1029,11 +1029,13 @@ static int cache_open(struct inode *inode, struct file *filp, >> >> } >> >> rp->offset = 0; >> >> rp->q.reader = 1; >> >> - atomic_inc(&cd->readers); >> >> + >> >> spin_lock(&queue_lock); >> >> list_add(&rp->q.list, &cd->queue); >> >> spin_unlock(&queue_lock); >> >> } >> >> + if (filp->f_mode & FMODE_WRITE) >> >> + atomic_inc(&cd->writers); >> > >> > This patch would be even simpler if we just modified the condition of >> > the preceding if clause: >> > >> > - if (filp->f_mode & FMODE_READ) { >> > + if (filp->f_mode & FMODE_WRITE) { >> > >> > and then we could drop the following chunk completely. >> > >> > Is there any reason not to do that? >> >> I can see how this would be tempting, but I think the reason not to do >> that is it is ... wrong. >> >> The bulk of the code is for setting up context to support reading, so it >> really should be conditional on FMODE_READ. >> We always want to set that up, because if a process opens for read, and >> not write, we want to respond properly to read requests. This is useful >> for debugging. 
> > How is it useful for debugging? I often ask for grep . /proc/net/rpc/*/* If nothing is reported for "channel", then I know that the problem isn't that mountd is dead or stuck or similar. NeilBrown > > --b. > >> I think this patch from Dave is good. A process opening for read might >> just be inquisitive. A program opening for write is making more of a >> commitment to being involved in managing the cache. >> >> Reviewed-by: NeilBrown <neilb@suse.com> >> >> Thanks, >> NeilBrown >> >> >> > >> > Or if the resulting behavior isn't right for write-only openers, we >> > could make the condition ((filp->f_mode & FMODE_READ) && (filp->f_mode & >> > FMODE_WRITE)). >> > >> > --b. >> > >> >> filp->private_data = rp; >> >> return 0; >> >> } >> >> @@ -1062,8 +1064,10 @@ static int cache_release(struct inode *inode, struct file *filp, >> >> filp->private_data = NULL; >> >> kfree(rp); >> >> >> >> + } >> >> + if (filp->f_mode & FMODE_WRITE) { >> >> + atomic_dec(&cd->writers); >> >> cd->last_close = seconds_since_boot(); >> >> - atomic_dec(&cd->readers); >> >> } >> >> module_put(cd->owner); >> >> return 0; >> >> @@ -1171,7 +1175,7 @@ static void warn_no_listener(struct cache_detail *detail) >> >> >> >> static bool cache_listeners_exist(struct cache_detail *detail) >> >> { >> >> - if (atomic_read(&detail->readers)) >> >> + if (atomic_read(&detail->writers)) >> >> return true; >> >> if (detail->last_close == 0) >> >> /* This cache was never opened */ >> >> -- >> >> 1.8.3.1
On Tue, Jul 30, 2019 at 11:14:55AM +1000, NeilBrown wrote: > On Mon, Jul 29 2019, J. Bruce Fields wrote: > > > On Tue, Jul 30, 2019 at 10:02:37AM +1000, NeilBrown wrote: > >> On Mon, Jul 29 2019, J. Bruce Fields wrote: > >> > >> > On Fri, Jul 26, 2019 at 06:33:01PM -0400, Dave Wysochanski wrote: > >> >> The sunrpc cache interface is susceptible to being fooled by a rogue > >> >> process just reading a 'channel' file. If this happens the kernel > >> >> may think a valid daemon exists to service the cache when it does not. > >> >> For example, the following may fool the kernel: > >> >> cat /proc/net/rpc/auth.unix.gid/channel > >> >> > >> >> Change the tracking of readers to writers when considering whether a > >> >> listener exists as all valid daemon processes either open a channel > >> >> file O_RDWR or O_WRONLY. While this does not prevent a rogue process > >> >> from "stealing" a message from the kernel, it does at least improve > >> >> the kernels perception of whether a valid process servicing the cache > >> >> exists. > >> >> > >> >> Signed-off-by: Dave Wysochanski <dwysocha@redhat.com> > >> >> --- > >> >> include/linux/sunrpc/cache.h | 6 +++--- > >> >> net/sunrpc/cache.c | 12 ++++++++---- > >> >> 2 files changed, 11 insertions(+), 7 deletions(-) > >> >> > >> >> diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h > >> >> index c7f38e8..f7d086b 100644 > >> >> --- a/include/linux/sunrpc/cache.h > >> >> +++ b/include/linux/sunrpc/cache.h > >> >> @@ -107,9 +107,9 @@ struct cache_detail { > >> >> /* fields for communication over channel */ > >> >> struct list_head queue; > >> >> > >> >> - atomic_t readers; /* how many time is /chennel open */ > >> >> - time_t last_close; /* if no readers, when did last close */ > >> >> - time_t last_warn; /* when we last warned about no readers */ > >> >> + atomic_t writers; /* how many time is /channel open */ > >> >> + time_t last_close; /* if no writers, when did last close */ > >> >> + time_t last_warn; /* when we last warned about no writers */ > >> >> > >> >> union { > >> >> struct proc_dir_entry *procfs; > >> >> diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c > >> >> index 6f1528f..a6a6190 100644 > >> >> --- a/net/sunrpc/cache.c > >> >> +++ b/net/sunrpc/cache.c > >> >> @@ -373,7 +373,7 @@ void sunrpc_init_cache_detail(struct cache_detail *cd) > >> >> spin_lock(&cache_list_lock); > >> >> cd->nextcheck = 0; > >> >> cd->entries = 0; > >> >> - atomic_set(&cd->readers, 0); > >> >> + atomic_set(&cd->writers, 0); > >> >> cd->last_close = 0; > >> >> cd->last_warn = -1; > >> >> list_add(&cd->others, &cache_list); > >> >> @@ -1029,11 +1029,13 @@ static int cache_open(struct inode *inode, struct file *filp, > >> >> } > >> >> rp->offset = 0; > >> >> rp->q.reader = 1; > >> >> - atomic_inc(&cd->readers); > >> >> + > >> >> spin_lock(&queue_lock); > >> >> list_add(&rp->q.list, &cd->queue); > >> >> spin_unlock(&queue_lock); > >> >> } > >> >> + if (filp->f_mode & FMODE_WRITE) > >> >> + atomic_inc(&cd->writers); > >> > > >> > This patch would be even simpler if we just modified the condition of > >> > the preceding if clause: > >> > > >> > - if (filp->f_mode & FMODE_READ) { > >> > + if (filp->f_mode & FMODE_WRITE) { > >> > > >> > and then we could drop the following chunk completely. > >> > > >> > Is there any reason not to do that? > >> > >> I can see how this would be tempting, but I think the reason not to do > >> that is it is ... wrong. 
> >> > >> The bulk of the code is for setting up context to support reading, so it > >> really should be conditional on FMODE_READ. > >> We always want to set that up, because if a process opens for read, and > >> not write, we want to respond properly to read requests. This is useful > >> for debugging. > > > > How is it useful for debugging? > > I often ask for > > grep . /proc/net/rpc/*/* > > If nothing is reported for "channel", then I know that the problem isn't > that mountd is dead or stuck or similar. Eh, OK. Anyway I've got no actual serious complaint. Applying with the reviewed-by:. --b.
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index c7f38e8..f7d086b 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -107,9 +107,9 @@ struct cache_detail {
 	/* fields for communication over channel */
 	struct list_head	queue;
 
-	atomic_t		readers;	/* how many time is /chennel open */
-	time_t			last_close;	/* if no readers, when did last close */
-	time_t			last_warn;	/* when we last warned about no readers */
+	atomic_t		writers;	/* how many time is /channel open */
+	time_t			last_close;	/* if no writers, when did last close */
+	time_t			last_warn;	/* when we last warned about no writers */
 
 	union {
 		struct proc_dir_entry	*procfs;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 6f1528f..a6a6190 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -373,7 +373,7 @@ void sunrpc_init_cache_detail(struct cache_detail *cd)
 	spin_lock(&cache_list_lock);
 	cd->nextcheck = 0;
 	cd->entries = 0;
-	atomic_set(&cd->readers, 0);
+	atomic_set(&cd->writers, 0);
 	cd->last_close = 0;
 	cd->last_warn = -1;
 	list_add(&cd->others, &cache_list);
@@ -1029,11 +1029,13 @@ static int cache_open(struct inode *inode, struct file *filp,
 		}
 		rp->offset = 0;
 		rp->q.reader = 1;
-		atomic_inc(&cd->readers);
+
 		spin_lock(&queue_lock);
 		list_add(&rp->q.list, &cd->queue);
 		spin_unlock(&queue_lock);
 	}
+	if (filp->f_mode & FMODE_WRITE)
+		atomic_inc(&cd->writers);
 	filp->private_data = rp;
 	return 0;
 }
@@ -1062,8 +1064,10 @@ static int cache_release(struct inode *inode, struct file *filp,
 		filp->private_data = NULL;
 		kfree(rp);
 
+	}
+	if (filp->f_mode & FMODE_WRITE) {
+		atomic_dec(&cd->writers);
 		cd->last_close = seconds_since_boot();
-		atomic_dec(&cd->readers);
 	}
 	module_put(cd->owner);
 	return 0;
@@ -1171,7 +1175,7 @@ static void warn_no_listener(struct cache_detail *detail)
 
 static bool cache_listeners_exist(struct cache_detail *detail)
 {
-	if (atomic_read(&detail->readers))
+	if (atomic_read(&detail->writers))
 		return true;
 	if (detail->last_close == 0)
 		/* This cache was never opened */
The sunrpc cache interface is susceptible to being fooled by a rogue
process just reading a 'channel' file. If this happens the kernel
may think a valid daemon exists to service the cache when it does not.
For example, the following may fool the kernel:
  cat /proc/net/rpc/auth.unix.gid/channel

Change the tracking of readers to writers when considering whether a
listener exists as all valid daemon processes either open a channel
file O_RDWR or O_WRONLY. While this does not prevent a rogue process
from "stealing" a message from the kernel, it does at least improve
the kernels perception of whether a valid process servicing the cache
exists.

Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
---
 include/linux/sunrpc/cache.h |  6 +++---
 net/sunrpc/cache.c           | 12 ++++++++----
 2 files changed, 11 insertions(+), 7 deletions(-)
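For illustration, a minimal userspace sketch of the two kinds of opener distinguished above. This is not mountd source; the channel path is just the example from the commit message, and the cache-specific request/reply line handling is left as a placeholder.

/*
 * Minimal sketch: a daemon-style opener versus a read-only peek.
 * Only the open mode matters for the accounting changed by this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *channel = "/proc/net/rpc/auth.unix.gid/channel";
	char req[8192];
	ssize_t n;

	/*
	 * A real daemon opens read-write: it reads upcall requests and
	 * writes replies back (typically driven by poll() on the fd).
	 * With this patch, only such an open is counted in cd->writers,
	 * so only it makes cache_listeners_exist() return true.
	 */
	int daemon_fd = open(channel, O_RDWR);
	if (daemon_fd < 0) {
		perror("open O_RDWR");
		return 1;
	}
	n = read(daemon_fd, req, sizeof(req));	/* a pending request, if any */
	if (n > 0) {
		/* ... parse req and write a cache-specific reply line ... */
	}

	/*
	 * A merely inquisitive (or rogue) process opens read-only, as
	 * "cat .../channel" does.  Before the patch this incremented
	 * cd->readers and looked like a listener; now it is not counted.
	 */
	int peek_fd = open(channel, O_RDONLY);
	if (peek_fd >= 0)
		close(peek_fd);

	close(daemon_fd);
	return 0;
}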