[v2,1/2] mm: Make pte_range_none() return number of empty PTEs

Message ID: 20240916110754.1236200-2-dev.jain@arm.com (mailing list archive)
State: New
Series: Compute contiguous empty PTEs for mTHP efficiently

Commit Message

Dev Jain Sept. 16, 2024, 11:07 a.m. UTC
In preparation for the second patch, make pte_range_none() return
the number of contiguous empty PTEs at the start of the range, i.e.
the index of the first non-empty PTE (or nr_pages if the whole range
is empty), instead of a boolean.

Signed-off-by: Dev Jain <dev.jain@arm.com>
---
 mm/memory.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
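
For reference, the new semantics can be modelled outside the kernel: the
function now reports how many PTEs at the start of the range are empty
(equivalently, the index of the first non-empty PTE), so the old boolean
check becomes a comparison against nr_pages. Below is a minimal userspace
sketch of that contract, not kernel code: the int array and the
zero-means-empty convention are hypothetical stand-ins for a pte_t range
and pte_none().

/*
 * Minimal userspace model of the new pte_range_none() semantics.
 * An int array stands in for a PTE range, with 0 meaning "empty"
 * (what pte_none() would report). Only the scan loop mirrors the
 * patch; everything else is a hypothetical test harness.
 */
#include <stdio.h>

static int pte_range_none(const int *ptes, int nr_pages)
{
	int i;

	/* Return the index of the first non-empty entry, or
	 * nr_pages if the whole range is empty. */
	for (i = 0; i < nr_pages; i++)
		if (ptes[i] != 0)
			return i;

	return nr_pages;
}

int main(void)
{
	int empty[4]   = { 0, 0, 0, 0 };
	int partial[4] = { 0, 0, 7, 0 };

	/* The old boolean check becomes a comparison against nr_pages. */
	printf("fully empty: %d\n", pte_range_none(empty, 4) == 4);	/* 1 */
	printf("empty PTEs:  %d\n", pte_range_none(partial, 4));	/* 2 */
	return 0;
}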

Comments

Baolin Wang Sept. 18, 2024, 7:03 a.m. UTC | #1
On 2024/9/16 19:07, Dev Jain wrote:
> In preparation for the second patch, make pte_range_none() return
> the number of contiguous empty PTEs at the start of the range, i.e.
> the index of the first non-empty PTE (or nr_pages if the whole range
> is empty), instead of a boolean.
> 
> Signed-off-by: Dev Jain <dev.jain@arm.com>

LGTM.
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 6469ac99f2f7..8bb1236de93c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4617,16 +4617,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	return ret;
 }
 
-static bool pte_range_none(pte_t *pte, int nr_pages)
+static int pte_range_none(pte_t *pte, int nr_pages)
 {
 	int i;
 
 	for (i = 0; i < nr_pages; i++) {
 		if (!pte_none(ptep_get_lockless(pte + i)))
-			return false;
+			return i;
 	}
 
-	return true;
+	return nr_pages;
 }
 
 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
@@ -4671,7 +4671,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 	order = highest_order(orders);
 	while (orders) {
 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
-		if (pte_range_none(pte + pte_index(addr), 1 << order))
+		if (pte_range_none(pte + pte_index(addr), 1 << order) == 1 << order)
 			break;
 		order = next_order(&orders, order);
 	}
@@ -4787,7 +4787,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	if (nr_pages == 1 && vmf_pte_changed(vmf)) {
 		update_mmu_tlb(vma, addr, vmf->pte);
 		goto release;
-	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+	} else if (nr_pages > 1 && pte_range_none(vmf->pte, nr_pages) != nr_pages) {
 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
 		goto release;
 	}
@@ -5121,7 +5121,7 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 		update_mmu_tlb(vma, addr, vmf->pte);
 		ret = VM_FAULT_NOPAGE;
 		goto unlock;
-	} else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) {
+	} else if (nr_pages > 1 && pte_range_none(vmf->pte, nr_pages) != nr_pages) {
 		update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages);
 		ret = VM_FAULT_NOPAGE;
 		goto unlock;
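
A note on the design choice: returning the count rather than a bool means
a caller that scans a range of 1 << order PTEs and finds it partially
occupied already knows how many leading PTEs were empty, which is
presumably what the second patch in the series builds on. Below is a
hedged userspace sketch of one way that count could steer an order walk;
it mirrors the shape of alloc_anon_folio()'s loop, but the fixed scan
start and the plain order countdown are simplifications (the real code
realigns the address per order via ALIGN_DOWN() and walks orders with
highest_order()/next_order()).

/*
 * Hedged userspace sketch, not code from this series: how a caller
 * might use the returned count while trying successively smaller
 * orders. Same int-array stand-in for PTEs as above, 0 == empty.
 */
#include <stdio.h>

static int pte_range_none(const int *ptes, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		if (ptes[i] != 0)
			return i;

	return nr_pages;
}

int main(void)
{
	int ptes[8] = { 0, 0, 0, 0, 0, 1, 0, 0 };	/* busy PTE at index 5 */
	int order;

	for (order = 3; order >= 0; order--) {
		int nr = pte_range_none(ptes, 1 << order);

		if (nr == 1 << order) {
			printf("order %d fits: %d empty PTEs\n", order, nr);
			break;
		}
		/* nr < 1 << order: entries [0, nr) are empty and entry nr
		 * is busy; a caller could in principle jump straight to
		 * the largest order that fits within those nr entries
		 * instead of retrying every lower order blindly. */
		printf("order %d blocked after %d empty PTEs\n", order, nr);
	}
	return 0;
}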