
[v4,2/3] arm64: Implement page table free interfaces

Message ID 1521546314-31753-3-git-send-email-cpandya@codeaurora.org (mailing list archive)

Commit Message

Chintan Pandya March 20, 2018, 11:45 a.m. UTC
Implement pud_free_pmd_page() and pmd_free_pte_page().

Implementation requires:
 1) Freeing the unused next-level page tables
 2) Clearing the current pud/pmd entry
 3) Invalidating the TLB, which may still hold a
    previously valid but now stale entry

Signed-off-by: Chintan Pandya <cpandya@codeaurora.org>
---
 arch/arm64/mm/mmu.c | 40 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 38 insertions(+), 2 deletions(-)
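
[Editorial context, hedged: these interfaces are consumed by the generic
ioremap path when it wants to install a huge pud/pmd mapping over a region
that previously had a next-level table. A simplified sketch of the pmd-level
caller, assuming the 4.17-era lib/ioremap.c pattern with the addr argument
added by patch 1/3 of this series; not the exact upstream code:]

	/*
	 * Sketch: a huge pmd may only be installed once the stale
	 * pte-level table has been freed and the TLB invalidated.
	 * If pmd_free_pte_page() returns 0, the caller falls back
	 * to mapping at the pte level.
	 */
	if (ioremap_pmd_enabled() &&
	    ((next - addr) == PMD_SIZE) &&
	    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
	    pmd_free_pte_page(pmd, addr)) {
		if (pmd_set_huge(pmd, phys_addr + addr, prot))
			continue;
	}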

Comments

Mark Rutland March 26, 2018, 9:55 a.m. UTC | #1
On Tue, Mar 20, 2018 at 05:15:13PM +0530, Chintan Pandya wrote:
> +static int __pmd_free_pte_page(pmd_t *pmd, unsigned long addr, bool tlb_inv)
> +{
> +	pmd_t *table;
> +
> +	if (pmd_val(*pmd)) {
> +		table = __va(pmd_val(*pmd));
> +		pmd_clear(pmd);
> +		/*
> +		 * FIXME: __flush_tlb_pgtable(&init_mm, addr) would be
> +		 *        the ideal candidate here, as it flushes
> +		 *        exactly the intermediate pgtable entries.
> +		 *        But it is broken (evident from tests), so
> +		 *        use a safe TLB op until that is fixed.
> +		 */
> +		if (tlb_inv)
> +			flush_tlb_kernel_range(addr, addr + PMD_SIZE);

I don't think that __flush_tlb_pgtable() is broken. It's only valid to
call it for user page tables, since it doesn't affect all ASIDs.

We can add a similar helper for kernel mappings, which affects all ASIDs,
e.g.

static inline void __flush_tlb_kernel_pgtable(unsigned long addr)
{
	addr >>= 12;
	__tlbi(vaae1is, addr);
	dsb(ish);
}

Thanks,
Mark.
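
[Editorial note, hedged: the other arm64 kernel TLB maintenance helpers,
e.g. flush_tlb_kernel_range(), also issue a dsb(ishst) before the TLBI so
that the preceding page-table update (here, the pmd_clear()) is visible to
the table walker. A sketch of the helper with that barrier added:]

static inline void __flush_tlb_kernel_pgtable(unsigned long addr)
{
	/* Make the earlier page-table store visible before the TLBI. */
	dsb(ishst);
	addr >>= 12;
	/* VA-based invalidation, covering all ASIDs. */
	__tlbi(vaae1is, addr);
	dsb(ish);
}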
Chintan Pandya March 27, 2018, 4:38 a.m. UTC | #2
On 3/26/2018 3:25 PM, Mark Rutland wrote:
> On Tue, Mar 20, 2018 at 05:15:13PM +0530, Chintan Pandya wrote:
>> +static int __pmd_free_pte_page(pmd_t *pmd, unsigned long addr, bool tlb_inv)
>> +{
>> +	pmd_t *table;
>> +
>> +	if (pmd_val(*pmd)) {
>> +		table = __va(pmd_val(*pmd));
>> +		pmd_clear(pmd);
>> +		/*
>> +		 * FIXME: __flush_tlb_pgtable(&init_mm, addr) would be
>> +		 *        the ideal candidate here, as it flushes
>> +		 *        exactly the intermediate pgtable entries.
>> +		 *        But it is broken (evident from tests), so
>> +		 *        use a safe TLB op until that is fixed.
>> +		 */
>> +		if (tlb_inv)
>> +			flush_tlb_kernel_range(addr, addr + PMD_SIZE);
> 
> I don't think that __flush_tlb_pgtable() is broken. It's only valid to
> call it for user page tables, since it doesn't affect all ASIDs.
> 
> We can add a similar helper for kernel mappings, which affects all ASIDs,
> e.g.
> 
Okay. I will test it and update in v5.

> static inline void __flush_tlb_kernel_pgtable(unsigned long addr)
> {
> 	addr >>= 12;
> 	__tlbi(vaae1is, addr);
> 	dsb(ish);
> }
> 
> Thanks,
> Mark.
> 

Chintan
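
[For illustration, an editorial sketch of what the v5 free path could look
like with the suggested helper in place; hypothetical, with names following
the v4 patch below:]

static int __pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pmd_t *table;

	if (pmd_val(*pmd)) {
		table = __va(pmd_val(*pmd));
		pmd_clear(pmd);
		/* Invalidate only the one stale intermediate entry. */
		__flush_tlb_kernel_pgtable(addr);
		free_page((unsigned long) table);
	}
	return 1;
}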

Patch

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index da98828..7be3106 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -45,6 +45,7 @@ 
 #include <asm/memblock.h>
 #include <asm/mmu_context.h>
 #include <asm/ptdump.h>
+#include <asm/tlbflush.h>
 
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
@@ -973,12 +974,47 @@  int pmd_clear_huge(pmd_t *pmdp)
 	return 1;
 }
 
+static int __pmd_free_pte_page(pmd_t *pmd, unsigned long addr, bool tlb_inv)
+{
+	pmd_t *table;
+
+	if (pmd_val(*pmd)) {
+		table = __va(pmd_val(*pmd));
+		pmd_clear(pmd);
+		/*
+		 * FIXME: __flush_tlb_pgtable(&init_mm, addr) would be
+		 *        the ideal candidate here, as it flushes
+		 *        exactly the intermediate pgtable entries.
+		 *        But it is broken (evident from tests), so
+		 *        use a safe TLB op until that is fixed.
+		 */
+		if (tlb_inv)
+			flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+
+		free_page((unsigned long) table);
+	}
+	return 1;
+}
+
 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	return pud_none(*pud);
+	pmd_t *table;
+	int i;
+
+	if (pud_val(*pud)) {
+		table = __va(pud_val(*pud));
+		for (i = 0; i < PTRS_PER_PMD; i++)
+			__pmd_free_pte_page(&table[i], addr + (i * PMD_SIZE),
+						false);
+
+		pud_clear(pud);
+		flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+		free_page((unsigned long) table);
+	}
+	return 1;
 }
 
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
-	return pmd_none(*pmd);
+	return __pmd_free_pte_page(pmd, addr, true);
 }
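
[A design note on the pud path above: each child table is freed with
tlb_inv=false so that a single flush_tlb_kernel_range() spanning PUD_SIZE
covers every intermediate entry at once, rather than issuing up to
PTRS_PER_PMD separate range flushes. Schematically:]

	/* Batched: one range invalidation for the whole pud region. */
	for (i = 0; i < PTRS_PER_PMD; i++)
		__pmd_free_pte_page(&table[i], addr + (i * PMD_SIZE), false);
	flush_tlb_kernel_range(addr, addr + PUD_SIZE);

	/* Unbatched alternative: PTRS_PER_PMD individual PMD_SIZE flushes. */
	for (i = 0; i < PTRS_PER_PMD; i++)
		__pmd_free_pte_page(&table[i], addr + (i * PMD_SIZE), true);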