Message ID | 20200120163915.1469-1-cai@lca.pw (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [-mm,v3] mm/page_isolation: fix potential warning from user | expand |
On 20.01.20 17:39, Qian Cai wrote: > It makes sense to call the WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE) > from start_isolate_page_range(), but should avoid triggering it from > userspace, i.e, from is_mem_section_removable() because it could crash > the system by a non-root user if warn_on_panic is set. > > While at it, simplify the code a bit by removing an unnecessary jump > label. > > Suggested-by: Michal Hocko <mhocko@kernel.org> > Signed-off-by: Qian Cai <cai@lca.pw> > --- > > v3: Drop the page_isolation.c cleanup change. > v2: Improve the commit log. > Warn for all start_isolate_page_range() users not just offlining. > > mm/page_alloc.c | 11 ++++------- > mm/page_isolation.c | 18 +++++++++++------- > 2 files changed, 15 insertions(+), 14 deletions(-) > > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > index 621716a25639..3c4eb750a199 100644 > --- a/mm/page_alloc.c > +++ b/mm/page_alloc.c > @@ -8231,7 +8231,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, > if (is_migrate_cma(migratetype)) > return NULL; > > - goto unmovable; > + return page; > } > > for (; iter < pageblock_nr_pages; iter++) { > @@ -8241,7 +8241,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, > page = pfn_to_page(pfn + iter); > > if (PageReserved(page)) > - goto unmovable; > + return page; > > /* > * If the zone is movable and we have ruled out all reserved > @@ -8261,7 +8261,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, > unsigned int skip_pages; > > if (!hugepage_migration_supported(page_hstate(head))) > - goto unmovable; > + return page; > > skip_pages = compound_nr(head) - (page - head); > iter += skip_pages - 1; > @@ -8303,12 +8303,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, > * is set to both of a memory hole page and a _used_ kernel > * page at boot. 
> */ > - goto unmovable; > + return page; > } > return NULL; > -unmovable: > - WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); > - return pfn_to_page(pfn + iter); > } > > #ifdef CONFIG_CONTIG_ALLOC > diff --git a/mm/page_isolation.c b/mm/page_isolation.c > index e70586523ca3..a9fd7c740c23 100644 > --- a/mm/page_isolation.c > +++ b/mm/page_isolation.c > @@ -54,14 +54,18 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ > > out: > spin_unlock_irqrestore(&zone->lock, flags); > - if (!ret) > + if (!ret) { > drain_all_pages(zone); > - else if ((isol_flags & REPORT_FAILURE) && unmovable) > - /* > - * printk() with zone->lock held will guarantee to trigger a > - * lockdep splat, so defer it here. > - */ > - dump_page(unmovable, "unmovable page"); > + } else { > + WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); > + > + if ((isol_flags & REPORT_FAILURE) && unmovable) > + /* > + * printk() with zone->lock held will likely trigger a > + * lockdep splat, so defer it here. > + */ > + dump_page(unmovable, "unmovable page"); > + } > > return ret; > } > Thanks! Reviewed-by: David Hildenbrand <david@redhat.com>
On Mon 20-01-20 11:39:15, Qian Cai wrote: > It makes sense to call the WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE) > from start_isolate_page_range(), but should avoid triggering it from > userspace, i.e, from is_mem_section_removable() because it could crash > the system by a non-root user if warn_on_panic is set. > > While at it, simplify the code a bit by removing an unnecessary jump > label. > > Suggested-by: Michal Hocko <mhocko@kernel.org> > Signed-off-by: Qian Cai <cai@lca.pw> Acked-by: Michal Hocko <mhocko@suse.com> Thanks! > --- > > v3: Drop the page_isolation.c cleanup change. > v2: Improve the commit log. > Warn for all start_isolate_page_range() users not just offlining. > > mm/page_alloc.c | 11 ++++------- > mm/page_isolation.c | 18 +++++++++++------- > 2 files changed, 15 insertions(+), 14 deletions(-) > > diff --git a/mm/page_alloc.c b/mm/page_alloc.c > index 621716a25639..3c4eb750a199 100644 > --- a/mm/page_alloc.c > +++ b/mm/page_alloc.c > @@ -8231,7 +8231,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, > if (is_migrate_cma(migratetype)) > return NULL; > > - goto unmovable; > + return page; > } > > for (; iter < pageblock_nr_pages; iter++) { > @@ -8241,7 +8241,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, > page = pfn_to_page(pfn + iter); > > if (PageReserved(page)) > - goto unmovable; > + return page; > > /* > * If the zone is movable and we have ruled out all reserved > @@ -8261,7 +8261,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, > unsigned int skip_pages; > > if (!hugepage_migration_supported(page_hstate(head))) > - goto unmovable; > + return page; > > skip_pages = compound_nr(head) - (page - head); > iter += skip_pages - 1; > @@ -8303,12 +8303,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, > * is set to both of a memory hole page and a _used_ kernel > * page at boot. 
> */ > - goto unmovable; > + return page; > } > return NULL; > -unmovable: > - WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); > - return pfn_to_page(pfn + iter); > } > > #ifdef CONFIG_CONTIG_ALLOC > diff --git a/mm/page_isolation.c b/mm/page_isolation.c > index e70586523ca3..a9fd7c740c23 100644 > --- a/mm/page_isolation.c > +++ b/mm/page_isolation.c > @@ -54,14 +54,18 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ > > out: > spin_unlock_irqrestore(&zone->lock, flags); > - if (!ret) > + if (!ret) { > drain_all_pages(zone); > - else if ((isol_flags & REPORT_FAILURE) && unmovable) > - /* > - * printk() with zone->lock held will guarantee to trigger a > - * lockdep splat, so defer it here. > - */ > - dump_page(unmovable, "unmovable page"); > + } else { > + WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); > + > + if ((isol_flags & REPORT_FAILURE) && unmovable) > + /* > + * printk() with zone->lock held will likely trigger a > + * lockdep splat, so defer it here. > + */ > + dump_page(unmovable, "unmovable page"); > + } > > return ret; > } > -- > 2.21.0 (Apple Git-122.2)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 621716a25639..3c4eb750a199 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -8231,7 +8231,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, if (is_migrate_cma(migratetype)) return NULL; - goto unmovable; + return page; } for (; iter < pageblock_nr_pages; iter++) { @@ -8241,7 +8241,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, page = pfn_to_page(pfn + iter); if (PageReserved(page)) - goto unmovable; + return page; /* * If the zone is movable and we have ruled out all reserved @@ -8261,7 +8261,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, unsigned int skip_pages; if (!hugepage_migration_supported(page_hstate(head))) - goto unmovable; + return page; skip_pages = compound_nr(head) - (page - head); iter += skip_pages - 1; @@ -8303,12 +8303,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page, * is set to both of a memory hole page and a _used_ kernel * page at boot. */ - goto unmovable; + return page; } return NULL; -unmovable: - WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); - return pfn_to_page(pfn + iter); } #ifdef CONFIG_CONTIG_ALLOC diff --git a/mm/page_isolation.c b/mm/page_isolation.c index e70586523ca3..a9fd7c740c23 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -54,14 +54,18 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ out: spin_unlock_irqrestore(&zone->lock, flags); - if (!ret) + if (!ret) { drain_all_pages(zone); - else if ((isol_flags & REPORT_FAILURE) && unmovable) - /* - * printk() with zone->lock held will guarantee to trigger a - * lockdep splat, so defer it here. - */ - dump_page(unmovable, "unmovable page"); + } else { + WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE); + + if ((isol_flags & REPORT_FAILURE) && unmovable) + /* + * printk() with zone->lock held will likely trigger a + * lockdep splat, so defer it here. 
+ */ + dump_page(unmovable, "unmovable page"); + } return ret; }
It makes sense to call the WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE) from start_isolate_page_range(), but should avoid triggering it from userspace, i.e., from is_mem_section_removable(), because it could allow a non-root user to crash the system if panic_on_warn is set. While at it, simplify the code a bit by removing an unnecessary jump label. Suggested-by: Michal Hocko <mhocko@kernel.org> Signed-off-by: Qian Cai <cai@lca.pw> --- v3: Drop the page_isolation.c cleanup change. v2: Improve the commit log. Warn for all start_isolate_page_range() users not just offlining. mm/page_alloc.c | 11 ++++------- mm/page_isolation.c | 18 +++++++++++------- 2 files changed, 15 insertions(+), 14 deletions(-)