diff mbox series

mm: add access/dirty bit on numa page fault

Message ID 20220316010836.1137084-1-maobibo@loongson.cn (mailing list archive)
State New
Headers show
Series mm: add access/dirty bit on numa page fault | expand

Commit Message

bibo mao March 16, 2022, 1:08 a.m. UTC
During a NUMA page fault, the dirty bit can be set on the old PTE if
migration fails on a write fault. If migration succeeds, the access
bit can be set on the migrated new PTE, and the dirty bit can also be
set on a write fault.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 mm/memory.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

Comments

Anshuman Khandual March 16, 2022, 6:43 a.m. UTC | #1
On 3/16/22 06:38, Bibo Mao wrote:
> During numa page fault, dirty bit can be added for old pte if
> fail to migrate on write fault. And if it succeeds to migrate,
> access bit can be added for migrated new pte, also dirty bit
> can be added for write fault.

The current code does not set the access and dirty bits whenever
applicable, i.e. on FAULT_FLAG_WRITE, on the PTE (the old one if
migration fails, the new one if it succeeds)? Did this not cause any
problem earlier? I am wondering how this might have gone unnoticed.

> 
> Signed-off-by: Bibo Mao <maobibo@loongson.cn>
> ---
>  mm/memory.c | 21 ++++++++++++++++++++-
>  1 file changed, 20 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/memory.c b/mm/memory.c
> index c125c4969913..65813bec9c06 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4404,6 +4404,22 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>  	if (migrate_misplaced_page(page, vma, target_nid)) {
>  		page_nid = target_nid;
>  		flags |= TNF_MIGRATED;
> +
> +		/*
> +		 * update pte entry with access bit, and dirty bit for
> +		 * write fault
> +		 */
> +		spin_lock(vmf->ptl);
> +		pte = *vmf->pte;
> +		pte = pte_mkyoung(pte);
> +		if (was_writable) {
> +			pte = pte_mkwrite(pte);
> +			if (vmf->flags & FAULT_FLAG_WRITE)
> +				pte = pte_mkdirty(pte);
> +		}
> +		set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
> +		update_mmu_cache(vma, vmf->address, vmf->pte);
> +		pte_unmap_unlock(vmf->pte, vmf->ptl);
>  	} else {
>  		flags |= TNF_MIGRATE_FAIL;
>  		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
> @@ -4427,8 +4443,11 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>  	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
>  	pte = pte_modify(old_pte, vma->vm_page_prot);
>  	pte = pte_mkyoung(pte);
> -	if (was_writable)
> +	if (was_writable) {
>  		pte = pte_mkwrite(pte);
> +		if (vmf->flags & FAULT_FLAG_WRITE)
> +			pte = pte_mkdirty(pte);
> +	}
>  	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
>  	update_mmu_cache(vma, vmf->address, vmf->pte);
>  	pte_unmap_unlock(vmf->pte, vmf->ptl);
bibo mao March 16, 2022, 7:03 a.m. UTC | #2
On 03/16/2022 02:43 PM, Anshuman Khandual wrote:
> 
> 
> On 3/16/22 06:38, Bibo Mao wrote:
>> During numa page fault, dirty bit can be added for old pte if
>> fail to migrate on write fault. And if it succeeds to migrate,
>> access bit can be added for migrated new pte, also dirty bit
>> can be added for write fault.
> 
> The current code does not set the access and dirty bits when ever
> applicable i.e on FAULT_FLAG_WRITE, on the pte (old if migration
> fails, new if migration succeeds) ? Did not this cause any problem
> earlier ? I am wondering how this might have gone unnoticed.

On arm/x86 platforms the hardware sets the access/dirty bits
automatically; however, on the MIPS platform the access/dirty bits are
set by software on the next page fault, so the behavior is relatively
easier to observe on MIPS.

regards
bibo,mao

> 
>>
>> Signed-off-by: Bibo Mao <maobibo@loongson.cn>
>> ---
>>  mm/memory.c | 21 ++++++++++++++++++++-
>>  1 file changed, 20 insertions(+), 1 deletion(-)
>>
>> diff --git a/mm/memory.c b/mm/memory.c
>> index c125c4969913..65813bec9c06 100644
>> --- a/mm/memory.c
>> +++ b/mm/memory.c
>> @@ -4404,6 +4404,22 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>  	if (migrate_misplaced_page(page, vma, target_nid)) {
>>  		page_nid = target_nid;
>>  		flags |= TNF_MIGRATED;
>> +
>> +		/*
>> +		 * update pte entry with access bit, and dirty bit for
>> +		 * write fault
>> +		 */
>> +		spin_lock(vmf->ptl);
>> +		pte = *vmf->pte;
>> +		pte = pte_mkyoung(pte);
>> +		if (was_writable) {
>> +			pte = pte_mkwrite(pte);
>> +			if (vmf->flags & FAULT_FLAG_WRITE)
>> +				pte = pte_mkdirty(pte);
>> +		}
>> +		set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
>> +		update_mmu_cache(vma, vmf->address, vmf->pte);
>> +		pte_unmap_unlock(vmf->pte, vmf->ptl);
>>  	} else {
>>  		flags |= TNF_MIGRATE_FAIL;
>>  		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
>> @@ -4427,8 +4443,11 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
>>  	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
>>  	pte = pte_modify(old_pte, vma->vm_page_prot);
>>  	pte = pte_mkyoung(pte);
>> -	if (was_writable)
>> +	if (was_writable) {
>>  		pte = pte_mkwrite(pte);
>> +		if (vmf->flags & FAULT_FLAG_WRITE)
>> +			pte = pte_mkdirty(pte);
>> +	}
>>  	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
>>  	update_mmu_cache(vma, vmf->address, vmf->pte);
>>  	pte_unmap_unlock(vmf->pte, vmf->ptl);
Anshuman Khandual March 16, 2022, 9:42 a.m. UTC | #3
On 3/16/22 12:33, maobibo wrote:
> 
> On 03/16/2022 02:43 PM, Anshuman Khandual wrote:
>>
>> On 3/16/22 06:38, Bibo Mao wrote:
>>> During numa page fault, dirty bit can be added for old pte if
>>> fail to migrate on write fault. And if it succeeds to migrate,
>>> access bit can be added for migrated new pte, also dirty bit
>>> can be added for write fault.
>> The current code does not set the access and dirty bits when ever
>> applicable i.e on FAULT_FLAG_WRITE, on the pte (old if migration
>> fails, new if migration succeeds) ? Did not this cause any problem
>> earlier ? I am wondering how this might have gone unnoticed.

> On arm/x86 platform hw will set access/dirty bits automatically,
> however on MIPS platform access/dirty bits are set by software in next
> page fault, it is relatively easier to watch on MIPS platform.

Could you please update this in the commit message as well ?
bibo mao March 17, 2022, 1:09 a.m. UTC | #4
On 03/16/2022 05:42 PM, Anshuman Khandual wrote:
> 
> 
> On 3/16/22 12:33, maobibo wrote:
>>
>> On 03/16/2022 02:43 PM, Anshuman Khandual wrote:
>>>
>>> On 3/16/22 06:38, Bibo Mao wrote:
>>>> During numa page fault, dirty bit can be added for old pte if
>>>> fail to migrate on write fault. And if it succeeds to migrate,
>>>> access bit can be added for migrated new pte, also dirty bit
>>>> can be added for write fault.
>>> The current code does not set the access and dirty bits when ever
>>> applicable i.e on FAULT_FLAG_WRITE, on the pte (old if migration
>>> fails, new if migration succeeds) ? Did not this cause any problem
>>> earlier ? I am wondering how this might have gone unnoticed.
> 
>> On arm/x86 platform hw will set access/dirty bits automatically,
>> however on MIPS platform access/dirty bits are set by software in next
>> page fault, it is relatively easier to watch on MIPS platform.
> 
> Could you please update this in the commit message as well ?

sure, will do. 

regards
bibo, mao
diff mbox series

Patch

diff --git a/mm/memory.c b/mm/memory.c
index c125c4969913..65813bec9c06 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4404,6 +4404,22 @@  static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	if (migrate_misplaced_page(page, vma, target_nid)) {
 		page_nid = target_nid;
 		flags |= TNF_MIGRATED;
+
+		/*
+		 * update pte entry with access bit, and dirty bit for
+		 * write fault
+		 */
+		spin_lock(vmf->ptl);
+		pte = *vmf->pte;
+		pte = pte_mkyoung(pte);
+		if (was_writable) {
+			pte = pte_mkwrite(pte);
+			if (vmf->flags & FAULT_FLAG_WRITE)
+				pte = pte_mkdirty(pte);
+		}
+		set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
+		update_mmu_cache(vma, vmf->address, vmf->pte);
+		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	} else {
 		flags |= TNF_MIGRATE_FAIL;
 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
@@ -4427,8 +4443,11 @@  static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
 	pte = pte_modify(old_pte, vma->vm_page_prot);
 	pte = pte_mkyoung(pte);
-	if (was_writable)
+	if (was_writable) {
 		pte = pte_mkwrite(pte);
+		if (vmf->flags & FAULT_FLAG_WRITE)
+			pte = pte_mkdirty(pte);
+	}
 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
 	update_mmu_cache(vma, vmf->address, vmf->pte);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);