[v6,04/19] powerpc: mm: Add p?d_large() definitions

Message ID 20190326162624.20736-5-steven.price@arm.com (mailing list archive)
State New, archived
Series Convert x86 & arm64 to use generic page walk

Commit Message

Steven Price March 26, 2019, 4:26 p.m. UTC
walk_page_range() is going to be allowed to walk page tables other than
those of user space. For this it needs to know when it has reached a
'leaf' entry in the page tables. This information is provided by the
p?d_large() functions/macros.
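
As an illustration (a minimal sketch, not code from this series), a
walker stops descending once an entry maps memory directly instead of
pointing at a lower-level table:

/*
 * Hypothetical pmd-level walk loop, assuming the usual pmd_none() and
 * PMD_SIZE plus the pmd_large() helper added by this patch. A set
 * pmd_large() means a leaf: there is no pte table underneath.
 */
static int walk_pmd_range(pmd_t *pmd, unsigned long addr,
			  unsigned long end)
{
	do {
		if (pmd_none(*pmd))
			continue;
		if (pmd_large(*pmd)) {
			/* Leaf entry: handle the mapping at this level. */
			continue;
		}
		/* Table entry: descend and walk the pte level here. */
	} while (pmd++, addr += PMD_SIZE, addr != end);
	return 0;
}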

For powerpc, pmd_large() was already implemented, so hoist it out of the
CONFIG_TRANSPARENT_HUGEPAGE condition and implement the other levels.

Also, since pmd_large() is now always implemented, we can drop the
pmd_is_leaf() function.
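
The #define pud_large pud_large lines below follow the kernel's usual
self-define idiom for overridable helpers: the macro makes the
architecture's implementation visible to the preprocessor, so a generic
header can test for it and only supply a fallback when it is absent. A
minimal sketch of the generic side (an assumption for illustration, not
taken from this patch):

#ifndef pud_large
/*
 * Generic fallback for architectures that do not self-define
 * pud_large: report "never a leaf" so walkers always descend.
 */
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif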

CC: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: Paul Mackerras <paulus@samba.org>
CC: Michael Ellerman <mpe@ellerman.id.au>
CC: linuxppc-dev@lists.ozlabs.org
CC: kvm-ppc@vger.kernel.org
Signed-off-by: Steven Price <steven.price@arm.com>
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 30 ++++++++++++++------
 arch/powerpc/kvm/book3s_64_mmu_radix.c       | 12 ++------
 2 files changed, 24 insertions(+), 18 deletions(-)

Comments

Christophe Leroy March 26, 2019, 4:58 p.m. UTC | #1
On 26/03/2019 at 17:26, Steven Price wrote:
> walk_page_range() is going to be allowed to walk page tables other than
> those of user space. For this it needs to know when it has reached a
> 'leaf' entry in the page tables. This information is provided by the
> p?d_large() functions/macros.
> 
> For powerpc, pmd_large() was already implemented, so hoist it out of the
> CONFIG_TRANSPARENT_HUGEPAGE condition and implement the other levels.
> 
> Also, since pmd_large() is now always implemented, we can drop the
> pmd_is_leaf() function.

Wouldn't it be better to drop pmd_is_leaf() in a second patch?

Christophe

Steven Price March 28, 2019, 11 a.m. UTC | #2
On 26/03/2019 16:58, Christophe Leroy wrote:
> 
> 
> On 26/03/2019 at 17:26, Steven Price wrote:
>> walk_page_range() is going to be allowed to walk page tables other than
>> those of user space. For this it needs to know when it has reached a
>> 'leaf' entry in the page tables. This information is provided by the
>> p?d_large() functions/macros.
>>
>> For powerpc, pmd_large() was already implemented, so hoist it out of the
>> CONFIG_TRANSPARENT_HUGEPAGE condition and implement the other levels.
>>
>> Also, since pmd_large() is now always implemented, we can drop the
>> pmd_is_leaf() function.
> 
> Wouldn't it be better to drop pmd_is_leaf() in a second patch?

Fair point, I'll split this patch.

Thanks for the review,

Steve

Patch

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 581f91be9dd4..f6d1ac8b832e 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -897,6 +897,12 @@  static inline int pud_present(pud_t pud)
 	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
 }
 
+#define pud_large	pud_large
+static inline int pud_large(pud_t pud)
+{
+	return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
+}
+
 extern struct page *pud_page(pud_t pud);
 extern struct page *pmd_page(pmd_t pmd);
 static inline pte_t pud_pte(pud_t pud)
@@ -940,6 +946,12 @@  static inline int pgd_present(pgd_t pgd)
 	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
 }
 
+#define pgd_large	pgd_large
+static inline int pgd_large(pgd_t pgd)
+{
+	return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
+}
+
 static inline pte_t pgd_pte(pgd_t pgd)
 {
 	return __pte_raw(pgd_raw(pgd));
@@ -1093,6 +1105,15 @@  static inline bool pmd_access_permitted(pmd_t pmd, bool write)
 	return pte_access_permitted(pmd_pte(pmd), write);
 }
 
+#define pmd_large	pmd_large
+/*
+ * returns true for pmd migration entries, THP, devmap, hugetlb
+ */
+static inline int pmd_large(pmd_t pmd)
+{
+	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
 extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
@@ -1119,15 +1140,6 @@  pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
 	return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
 }
 
-/*
- * returns true for pmd migration entries, THP, devmap, hugetlb
- * But compile time dependent on THP config
- */
-static inline int pmd_large(pmd_t pmd)
-{
-	return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
-}
-
 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 {
 	return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index f55ef071883f..1b57b4e3f819 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -363,12 +363,6 @@  static void kvmppc_pte_free(pte_t *ptep)
 	kmem_cache_free(kvm_pte_cache, ptep);
 }
 
-/* Like pmd_huge() and pmd_large(), but works regardless of config options */
-static inline int pmd_is_leaf(pmd_t pmd)
-{
-	return !!(pmd_val(pmd) & _PAGE_PTE);
-}
-
 static pmd_t *kvmppc_pmd_alloc(void)
 {
 	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
@@ -460,7 +454,7 @@  static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
 		if (!pmd_present(*p))
 			continue;
-		if (pmd_is_leaf(*p)) {
+		if (pmd_large(*p)) {
 			if (full) {
 				pmd_clear(p);
 			} else {
@@ -593,7 +587,7 @@  int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 	else if (level <= 1)
 		new_pmd = kvmppc_pmd_alloc();
 
-	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
+	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_large(*pmd)))
 		new_ptep = kvmppc_pte_alloc();
 
 	/* Check if we might have been invalidated; let the guest retry if so */
@@ -662,7 +656,7 @@  int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pmd = NULL;
 	}
 	pmd = pmd_offset(pud, gpa);
-	if (pmd_is_leaf(*pmd)) {
+	if (pmd_large(*pmd)) {
 		unsigned long lgpa = gpa & PMD_MASK;
 
 		/* Check if we raced and someone else has set the same thing */