Message ID | 20250317100719.134558-1-liuyerd@163.com (mailing list archive) |
---|---|
State | New |
Series | [v2] fs/proc/page: Refactoring to reduce code duplication. |
On 17.03.25 11:07, Liu Ye wrote:
> From: Liu Ye <liuye@kylinos.cn>
>
> The function kpageflags_read and kpagecgroup_read is quite similar
> to kpagecount_read. Consider refactoring common code into a helper
> function to reduce code duplication.
>
> Signed-off-by: Liu Ye <liuye@kylinos.cn>
>
> ---
> V2 : Use an enumeration to indicate the operation to be performed
> to avoid passing functions.
> ---
> ---
>  fs/proc/page.c | 166 +++++++++++++++++--------------------------------
>  1 file changed, 58 insertions(+), 108 deletions(-)
>
> diff --git a/fs/proc/page.c b/fs/proc/page.c
> index a55f5acefa97..66f454330a87 100644
> --- a/fs/proc/page.c
> +++ b/fs/proc/page.c
> @@ -22,6 +22,14 @@
>  #define KPMMASK (KPMSIZE - 1)
>  #define KPMBITS (KPMSIZE * BITS_PER_BYTE)
>
> +enum kpage_operation {
> +	KPAGE_FLAGS,
> +	KPAGE_COUNT,
> +#ifdef CONFIG_MEMCG
> +	KPAGE_CGROUP,
> +#endif
> +};
> +
>  static inline unsigned long get_max_dump_pfn(void)
>  {
>  #ifdef CONFIG_SPARSEMEM
> @@ -37,19 +45,17 @@ static inline unsigned long get_max_dump_pfn(void)
>  #endif
>  }
>
> -/* /proc/kpagecount - an array exposing page mapcounts
> - *
> - * Each entry is a u64 representing the corresponding
> - * physical page mapcount.
> - */
> -static ssize_t kpagecount_read(struct file *file, char __user *buf,
> -			size_t count, loff_t *ppos)
> +static ssize_t kpage_read(struct file *file, char __user *buf,
> +			size_t count, loff_t *ppos,
> +			enum kpage_operation op)
>  {
>  	const unsigned long max_dump_pfn = get_max_dump_pfn();
>  	u64 __user *out = (u64 __user *)buf;
> +	struct page *ppage;
>  	unsigned long src = *ppos;
>  	unsigned long pfn;
>  	ssize_t ret = 0;
> +	u64 info;
>
>  	pfn = src / KPMSIZE;
>  	if (src & KPMMASK || count & KPMMASK)
> @@ -59,19 +65,29 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
>  	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
>
>  	while (count > 0) {
> -		struct page *page;
> -		u64 mapcount = 0;
> -
> -		/*
> -		 * TODO: ZONE_DEVICE support requires to identify
> -		 * memmaps that were actually initialized.
> -		 */
> -		page = pfn_to_online_page(pfn);
> -		if (page)
> -			mapcount = folio_precise_page_mapcount(page_folio(page),
> -						page);
> -
> -		if (put_user(mapcount, out)) {
> +		ppage = pfn_to_online_page(pfn);
> +
> +		if (ppage) {
> +			switch (op) {
> +			case KPAGE_FLAGS:
> +				info = stable_page_flags(ppage);
> +				break;
> +			case KPAGE_COUNT:
> +				info = folio_precise_page_mapcount(page_folio(ppage), ppage);
> +				break;
> +#ifdef CONFIG_MEMCG
> +			case KPAGE_CGROUP:
> +				info = page_cgroup_ino(ppage);
> +				break;
> +#endif

In general, LGTM.

I do wonder if we should just get rid of the two "#ifdef CONFIG_MEMCG" by adding
a stub for page_cgroup_ino().

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 57664e2a8fb7b..24248f4dcc971 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1788,6 +1788,11 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
 {
 }
 
+static inline ino_t page_cgroup_ino(struct page *page)
+{
+	return 0;
+}
+
 #endif /* CONFIG_MEMCG */
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
On 2025/3/17 18:24, David Hildenbrand wrote:
> On 17.03.25 11:07, Liu Ye wrote:
>> From: Liu Ye <liuye@kylinos.cn>
>>
>> The function kpageflags_read and kpagecgroup_read is quite similar
>> to kpagecount_read. Consider refactoring common code into a helper
>> function to reduce code duplication.
>>
>> Signed-off-by: Liu Ye <liuye@kylinos.cn>
>>
>> ---
>> V2 : Use an enumeration to indicate the operation to be performed
>> to avoid passing functions.
>> ---
>> ---
>>  fs/proc/page.c | 166 +++++++++++++++++--------------------------------
>>  1 file changed, 58 insertions(+), 108 deletions(-)
>>
>> diff --git a/fs/proc/page.c b/fs/proc/page.c
>> index a55f5acefa97..66f454330a87 100644
>> --- a/fs/proc/page.c
>> +++ b/fs/proc/page.c
>> @@ -22,6 +22,14 @@
>>  #define KPMMASK (KPMSIZE - 1)
>>  #define KPMBITS (KPMSIZE * BITS_PER_BYTE)
>>
>> +enum kpage_operation {
>> +	KPAGE_FLAGS,
>> +	KPAGE_COUNT,
>> +#ifdef CONFIG_MEMCG
>> +	KPAGE_CGROUP,
>> +#endif
>> +};
>> +
>>  static inline unsigned long get_max_dump_pfn(void)
>>  {
>>  #ifdef CONFIG_SPARSEMEM
>> @@ -37,19 +45,17 @@ static inline unsigned long get_max_dump_pfn(void)
>>  #endif
>>  }
>>
>> -/* /proc/kpagecount - an array exposing page mapcounts
>> - *
>> - * Each entry is a u64 representing the corresponding
>> - * physical page mapcount.
>> - */
>> -static ssize_t kpagecount_read(struct file *file, char __user *buf,
>> -			size_t count, loff_t *ppos)
>> +static ssize_t kpage_read(struct file *file, char __user *buf,
>> +			size_t count, loff_t *ppos,
>> +			enum kpage_operation op)
>>  {
>>  	const unsigned long max_dump_pfn = get_max_dump_pfn();
>>  	u64 __user *out = (u64 __user *)buf;
>> +	struct page *ppage;
>>  	unsigned long src = *ppos;
>>  	unsigned long pfn;
>>  	ssize_t ret = 0;
>> +	u64 info;
>>
>>  	pfn = src / KPMSIZE;
>>  	if (src & KPMMASK || count & KPMMASK)
>> @@ -59,19 +65,29 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
>>  	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
>>
>>  	while (count > 0) {
>> -		struct page *page;
>> -		u64 mapcount = 0;
>> -
>> -		/*
>> -		 * TODO: ZONE_DEVICE support requires to identify
>> -		 * memmaps that were actually initialized.
>> -		 */
>> -		page = pfn_to_online_page(pfn);
>> -		if (page)
>> -			mapcount = folio_precise_page_mapcount(page_folio(page),
>> -						page);
>> -
>> -		if (put_user(mapcount, out)) {
>> +		ppage = pfn_to_online_page(pfn);
>> +
>> +		if (ppage) {
>> +			switch (op) {
>> +			case KPAGE_FLAGS:
>> +				info = stable_page_flags(ppage);
>> +				break;
>> +			case KPAGE_COUNT:
>> +				info = folio_precise_page_mapcount(page_folio(ppage), ppage);
>> +				break;
>> +#ifdef CONFIG_MEMCG
>> +			case KPAGE_CGROUP:
>> +				info = page_cgroup_ino(ppage);
>> +				break;
>> +#endif
> 
> In general, LGTM.
> 
> I do wonder if we should just get rid of the two "#ifdef CONFIG_MEMCG" by adding
> a stub for page_cgroup_ino().
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 57664e2a8fb7b..24248f4dcc971 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1788,6 +1788,11 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
>  {
>  }
>  
> +static inline ino_t page_cgroup_ino(struct page *page)
> +{
> +	return 0;
> +}
> +
>  #endif /* CONFIG_MEMCG */
>  
>  #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
> 
> 

Agreed. I'll add a stub for page_cgroup_ino() and remove the #ifdef CONFIG_MEMCG, like this:
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 66f454330a87..cbadbf9568a1 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -25,9 +25,7 @@
 enum kpage_operation {
 	KPAGE_FLAGS,
 	KPAGE_COUNT,
-#ifdef CONFIG_MEMCG
 	KPAGE_CGROUP,
-#endif
 };
 
 static inline unsigned long get_max_dump_pfn(void)
@@ -75,11 +73,9 @@ static ssize_t kpage_read(struct file *file, char __user *buf,
 			case KPAGE_COUNT:
 				info = folio_precise_page_mapcount(page_folio(ppage), ppage);
 				break;
-#ifdef CONFIG_MEMCG
 			case KPAGE_CGROUP:
 				info = page_cgroup_ino(ppage);
 				break;
-#endif
 			default:
 				info = 0;
 				break;
@@ -262,7 +258,6 @@ static const struct proc_ops kpageflags_proc_ops = {
 };
 
 #ifdef CONFIG_MEMCG
-
 static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
 				size_t count, loff_t *ppos)
 {
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6e74b8254d9b..e806e2ebf5b8 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1794,6 +1794,10 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
 {
 }
 
+static inline ino_t page_cgroup_ino(struct page *page)
+{
+	return 0;
+}
 #endif /* CONFIG_MEMCG */
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
diff --git a/fs/proc/page.c b/fs/proc/page.c
index a55f5acefa97..66f454330a87 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -22,6 +22,14 @@
 #define KPMMASK (KPMSIZE - 1)
 #define KPMBITS (KPMSIZE * BITS_PER_BYTE)
 
+enum kpage_operation {
+	KPAGE_FLAGS,
+	KPAGE_COUNT,
+#ifdef CONFIG_MEMCG
+	KPAGE_CGROUP,
+#endif
+};
+
 static inline unsigned long get_max_dump_pfn(void)
 {
 #ifdef CONFIG_SPARSEMEM
@@ -37,19 +45,17 @@ static inline unsigned long get_max_dump_pfn(void)
 #endif
 }
 
-/* /proc/kpagecount - an array exposing page mapcounts
- *
- * Each entry is a u64 representing the corresponding
- * physical page mapcount.
- */
-static ssize_t kpagecount_read(struct file *file, char __user *buf,
-			size_t count, loff_t *ppos)
+static ssize_t kpage_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos,
+			enum kpage_operation op)
 {
 	const unsigned long max_dump_pfn = get_max_dump_pfn();
 	u64 __user *out = (u64 __user *)buf;
+	struct page *ppage;
 	unsigned long src = *ppos;
 	unsigned long pfn;
 	ssize_t ret = 0;
+	u64 info;
 
 	pfn = src / KPMSIZE;
 	if (src & KPMMASK || count & KPMMASK)
@@ -59,19 +65,29 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
 
 	while (count > 0) {
-		struct page *page;
-		u64 mapcount = 0;
-
-		/*
-		 * TODO: ZONE_DEVICE support requires to identify
-		 * memmaps that were actually initialized.
-		 */
-		page = pfn_to_online_page(pfn);
-		if (page)
-			mapcount = folio_precise_page_mapcount(page_folio(page),
-						page);
-
-		if (put_user(mapcount, out)) {
+		ppage = pfn_to_online_page(pfn);
+
+		if (ppage) {
+			switch (op) {
+			case KPAGE_FLAGS:
+				info = stable_page_flags(ppage);
+				break;
+			case KPAGE_COUNT:
+				info = folio_precise_page_mapcount(page_folio(ppage), ppage);
+				break;
+#ifdef CONFIG_MEMCG
+			case KPAGE_CGROUP:
+				info = page_cgroup_ino(ppage);
+				break;
+#endif
+			default:
+				info = 0;
+				break;
+			}
+		} else
+			info = 0;
+
+		if (put_user(info, out)) {
 			ret = -EFAULT;
 			break;
 		}
@@ -89,17 +105,23 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 	return ret;
 }
 
+/* /proc/kpagecount - an array exposing page mapcounts
+ *
+ * Each entry is a u64 representing the corresponding
+ * physical page mapcount.
+ */
+static ssize_t kpagecount_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	return kpage_read(file, buf, count, ppos, KPAGE_COUNT);
+}
+
 static const struct proc_ops kpagecount_proc_ops = {
 	.proc_flags	= PROC_ENTRY_PERMANENT,
 	.proc_lseek	= mem_lseek,
 	.proc_read	= kpagecount_read,
 };
 
-/* /proc/kpageflags - an array exposing page flags
- *
- * Each entry is a u64 representing the corresponding
- * physical page flags.
- */
 
 static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
 {
@@ -220,47 +242,17 @@ u64 stable_page_flags(const struct page *page)
 #endif
 	return u;
-};
+}
 
+/* /proc/kpageflags - an array exposing page flags
+ *
+ * Each entry is a u64 representing the corresponding
+ * physical page flags.
+ */
 static ssize_t kpageflags_read(struct file *file, char __user *buf,
-		size_t count, loff_t *ppos)
+			size_t count, loff_t *ppos)
 {
-	const unsigned long max_dump_pfn = get_max_dump_pfn();
-	u64 __user *out = (u64 __user *)buf;
-	unsigned long src = *ppos;
-	unsigned long pfn;
-	ssize_t ret = 0;
-
-	pfn = src / KPMSIZE;
-	if (src & KPMMASK || count & KPMMASK)
-		return -EINVAL;
-	if (src >= max_dump_pfn * KPMSIZE)
-		return 0;
-	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
-
-	while (count > 0) {
-		/*
-		 * TODO: ZONE_DEVICE support requires to identify
-		 * memmaps that were actually initialized.
-		 */
-		struct page *page = pfn_to_online_page(pfn);
-
-		if (put_user(stable_page_flags(page), out)) {
-			ret = -EFAULT;
-			break;
-		}
-
-		pfn++;
-		out++;
-		count -= KPMSIZE;
-
-		cond_resched();
-	}
-
-	*ppos += (char __user *)out - buf;
-	if (!ret)
-		ret = (char __user *)out - buf;
-	return ret;
+	return kpage_read(file, buf, count, ppos, KPAGE_FLAGS);
 }
 
 static const struct proc_ops kpageflags_proc_ops = {
@@ -270,54 +262,12 @@ static const struct proc_ops kpageflags_proc_ops = {
 };
 
 #ifdef CONFIG_MEMCG
+
 static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
-		size_t count, loff_t *ppos)
+			size_t count, loff_t *ppos)
 {
-	const unsigned long max_dump_pfn = get_max_dump_pfn();
-	u64 __user *out = (u64 __user *)buf;
-	struct page *ppage;
-	unsigned long src = *ppos;
-	unsigned long pfn;
-	ssize_t ret = 0;
-	u64 ino;
-
-	pfn = src / KPMSIZE;
-	if (src & KPMMASK || count & KPMMASK)
-		return -EINVAL;
-	if (src >= max_dump_pfn * KPMSIZE)
-		return 0;
-	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);
-
-	while (count > 0) {
-		/*
-		 * TODO: ZONE_DEVICE support requires to identify
-		 * memmaps that were actually initialized.
-		 */
-		ppage = pfn_to_online_page(pfn);
-
-		if (ppage)
-			ino = page_cgroup_ino(ppage);
-		else
-			ino = 0;
-
-		if (put_user(ino, out)) {
-			ret = -EFAULT;
-			break;
-		}
-
-		pfn++;
-		out++;
-		count -= KPMSIZE;
-
-		cond_resched();
-	}
-
-	*ppos += (char __user *)out - buf;
-	if (!ret)
-		ret = (char __user *)out - buf;
-	return ret;
+	return kpage_read(file, buf, count, ppos, KPAGE_CGROUP);
 }
-
 static const struct proc_ops kpagecgroup_proc_ops = {
 	.proc_flags	= PROC_ENTRY_PERMANENT,
 	.proc_lseek	= mem_lseek,
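
For context, the files unified by this series — /proc/kpagecount, /proc/kpageflags and (with CONFIG_MEMCG) /proc/kpagecgroup — each expose one u64 entry per PFN, which is why kpage_read() rejects offsets and lengths that are not multiples of 8 (the KPMSIZE/KPMMASK checks). A minimal userspace sketch of reading one entry, not part of the patch and shown only as an illustration of the interface (typically requires root; see Documentation/admin-guide/mm/pagemap.rst):

/*
 * Illustration only (not from the patch): read the 64-bit entry for one
 * PFN from /proc/kpagecount.  Each entry is a u64, so the file offset is
 * pfn * sizeof(u64), and reads must be multiples of 8 bytes -- the same
 * constraint kpage_read() enforces via KPMSIZE/KPMMASK.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
	uint64_t mapcount;
	int fd = open("/proc/kpagecount", O_RDONLY);

	if (fd < 0) {
		perror("open /proc/kpagecount");
		return 1;
	}
	if (pread(fd, &mapcount, sizeof(mapcount),
		  (off_t)pfn * sizeof(mapcount)) != sizeof(mapcount)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("pfn 0x%lx mapcount %llu\n", pfn, (unsigned long long)mapcount);
	close(fd);
	return 0;
}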