Message ID | 1590561903-13186-6-git-send-email-iamjoonsoo.kim@lge.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | clean-up the migration target allocation functions | expand |
On Wed 27-05-20 15:44:56, Joonsoo Kim wrote: [...] > -/* page migration callback function */ > struct page *alloc_huge_page_nodemask(struct hstate *h, > struct alloc_control *ac) > { > ac->gfp_mask |= htlb_alloc_mask(h); > + if (ac->nid == NUMA_NO_NODE) > + ac->gfp_mask &= ~__GFP_THISNODE; Is this really needed? alloc_huge_page_node is currently only called from numa migration code and the target node should always be defined. > > spin_lock(&hugetlb_lock); > if (h->free_huge_pages - h->resv_huge_pages > 0) { > diff --git a/mm/mempolicy.c b/mm/mempolicy.c > index 3b6b551..e705efd 100644 > --- a/mm/mempolicy.c > +++ b/mm/mempolicy.c > @@ -1073,9 +1073,10 @@ struct page *alloc_new_node_page(struct page *page, unsigned long node) > struct alloc_control ac = { > .nid = node, > .nmask = NULL, > + .gfp_mask = __GFP_THISNODE, > }; > > - return alloc_huge_page_node(h, &ac); > + return alloc_huge_page_nodemask(h, &ac); > } else if (PageTransHuge(page)) { > struct page *thp; > > -- > 2.7.4 >
2020년 6월 9일 (화) 오후 10:43, Michal Hocko <mhocko@kernel.org>님이 작성: > > On Wed 27-05-20 15:44:56, Joonsoo Kim wrote: > [...] > > -/* page migration callback function */ > > struct page *alloc_huge_page_nodemask(struct hstate *h, > > struct alloc_control *ac) > > { > > ac->gfp_mask |= htlb_alloc_mask(h); > > + if (ac->nid == NUMA_NO_NODE) > > + ac->gfp_mask &= ~__GFP_THISNODE; > > Is this really needed? alloc_huge_page_node is currently only called > from numa migration code and the target node should always be defined. Thanks! When I read the code, I was not sure that the target node is always defined, so I left this code in. However, if that's true, this code isn't needed at all. I will consider your suggestion in the next version. Thanks.
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 15c8fb8..f482563 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -505,8 +505,6 @@ struct huge_bootmem_page { struct page *alloc_migrate_huge_page(struct hstate *h, struct alloc_control *ac); -struct page *alloc_huge_page_node(struct hstate *h, - struct alloc_control *ac); struct page *alloc_huge_page_nodemask(struct hstate *h, struct alloc_control *ac); struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, @@ -755,12 +753,6 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, struct hstate {}; static inline struct page * -alloc_huge_page_node(struct hstate *h, struct alloc_control *ac) -{ - return NULL; -} - -static inline struct page * alloc_huge_page_nodemask(struct hstate *h, struct alloc_control *ac) { return NULL; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index dabe460..8132985 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1980,31 +1980,12 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, } /* page migration callback function */ -struct page *alloc_huge_page_node(struct hstate *h, - struct alloc_control *ac) -{ - struct page *page = NULL; - - ac->gfp_mask |= htlb_alloc_mask(h); - if (ac->nid != NUMA_NO_NODE) - ac->gfp_mask |= __GFP_THISNODE; - - spin_lock(&hugetlb_lock); - if (h->free_huge_pages - h->resv_huge_pages > 0) - page = dequeue_huge_page_nodemask(h, ac); - spin_unlock(&hugetlb_lock); - - if (!page) - page = alloc_migrate_huge_page(h, ac); - - return page; -} - -/* page migration callback function */ struct page *alloc_huge_page_nodemask(struct hstate *h, struct alloc_control *ac) { ac->gfp_mask |= htlb_alloc_mask(h); + if (ac->nid == NUMA_NO_NODE) + ac->gfp_mask &= ~__GFP_THISNODE; spin_lock(&hugetlb_lock); if (h->free_huge_pages - h->resv_huge_pages > 0) { diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3b6b551..e705efd 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1073,9 +1073,10 
@@ struct page *alloc_new_node_page(struct page *page, unsigned long node) struct alloc_control ac = { .nid = node, .nmask = NULL, + .gfp_mask = __GFP_THISNODE, }; - return alloc_huge_page_node(h, &ac); + return alloc_huge_page_nodemask(h, &ac); } else if (PageTransHuge(page)) { struct page *thp;