Message ID | 20220611084731.55155-4-linmiaohe@huawei.com (mailing list archive) |
---|---|
State | New |
Series | A few cleanup patches for khugepaged |
On 11 Jun 16:47, Miaohe Lin wrote:
> Fix some typos and tweak the code to meet codestyle. No functional
> change intended.
>
> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
> [...]

Reviewed-by: Zach O'Keefe <zokeefe@google.com>
On Sat, Jun 11, 2022 at 1:47 AM Miaohe Lin <linmiaohe@huawei.com> wrote:
>
> Fix some typos and tweak the code to meet codestyle. No functional
> change intended.
>
> Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>

Reviewed-by: Yang Shi <shy828301@gmail.com>

> [...]
Fix some typos and tweak the code to meet codestyle. No functional
change intended.

Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
 mm/khugepaged.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a8adb2d1e9c6..1b5dd3820eac 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -260,7 +260,7 @@ static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 	unsigned long max_ptes_none;
 
 	err = kstrtoul(buf, 10, &max_ptes_none);
-	if (err || max_ptes_none > HPAGE_PMD_NR-1)
+	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
 		return -EINVAL;
 
 	khugepaged_max_ptes_none = max_ptes_none;
@@ -286,7 +286,7 @@ static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
 	unsigned long max_ptes_swap;
 
 	err = kstrtoul(buf, 10, &max_ptes_swap);
-	if (err || max_ptes_swap > HPAGE_PMD_NR-1)
+	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
 		return -EINVAL;
 
 	khugepaged_max_ptes_swap = max_ptes_swap;
@@ -313,7 +313,7 @@ static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
 	unsigned long max_ptes_shared;
 
 	err = kstrtoul(buf, 10, &max_ptes_shared);
-	if (err || max_ptes_shared > HPAGE_PMD_NR-1)
+	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
 		return -EINVAL;
 
 	khugepaged_max_ptes_shared = max_ptes_shared;
@@ -599,7 +599,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
 	bool writable = false;
 
-	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
+	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
 	     _pte++, address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
 		if (pte_none(pteval) || (pte_present(pteval) &&
@@ -1216,7 +1216,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 
 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
+	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
 	     _pte++, _address += PAGE_SIZE) {
 		pte_t pteval = *_pte;
 		if (is_swap_pte(pteval)) {
@@ -1306,7 +1306,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 	/*
 	 * Check if the page has any GUP (or other external) pins.
 	 *
-	 * Here the check is racy it may see totmal_mapcount > refcount
+	 * Here the check is racy it may see total_mapcount > refcount
 	 * in some cases.
 	 * For example, one process with one forked child process.
 	 * The parent has the PMD split due to MADV_DONTNEED, then
@@ -1557,7 +1557,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 	 * mmap_write_lock(mm) as PMD-mapping is likely to be split
 	 * later.
 	 *
-	 * Not that vma->anon_vma check is racy: it can be set up after
+	 * Note that vma->anon_vma check is racy: it can be set up after
 	 * the check but before we took mmap_lock by the fault path.
 	 * But page lock would prevent establishing any new ptes of the
 	 * page, so we are safe.
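The whitespace tweaks follow Documentation/process/coding-style.rst, which asks for a space on each side of most binary operators, so expressions like HPAGE_PMD_NR-1 become HPAGE_PMD_NR - 1. As a sanity check, the in-tree style checker can be run against the pre-patch file; a run along these lines (output abbreviated and illustrative rather than verbatim) should flag each of the lines this patch touches:

  $ ./scripts/checkpatch.pl --strict -f mm/khugepaged.c
  CHECK: spaces preferred around that '-' (ctx:VxV)
  #260: FILE: mm/khugepaged.c:260:
  +	if (err || max_ptes_none > HPAGE_PMD_NR-1)

With the patch applied, the same invocation should no longer emit these CHECK warnings for the hunks changed here, which matches the "no functional change" claim: only spacing and comment text differ.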