Message ID | 20190215170029.15641-8-clg@kaod.org (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | ppc: add native hash and radix support for POWER9 | expand |
On Fri, Feb 15, 2019 at 06:00:24PM +0100, Cédric Le Goater wrote: > From: Benjamin Herrenschmidt <benh@kernel.crashing.org> > > POWER9 (arch v3) slightly changes the HPTE format. The B bits move > from the first to the second half of the HPTE, and the AVPN/ARPN > are slightly shorter. > > However, under SPAPR, the hypercalls still take the old format > (and probably will for the foreseeable future). > > The simplest way to support this is thus to convert the HPTEs from > new to old format when reading them if the MMU model is v3 and there > is no virtual hypervisor, leaving the rest of the code unchanged. > > Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> > Signed-off-by: Cédric Le Goater <clg@kaod.org> Kinda hacky, but definitely the easiest way in the short to medium term. > --- > target/ppc/mmu-book3s-v3.h | 12 ++++++++++++ > target/ppc/mmu-hash64.h | 5 +++++ > target/ppc/mmu-hash64.c | 5 +++++ > 3 files changed, 22 insertions(+) > > diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h > index 4e59742d7eac..216ff296c088 100644 > --- a/target/ppc/mmu-book3s-v3.h > +++ b/target/ppc/mmu-book3s-v3.h > @@ -56,6 +56,18 @@ static inline bool ppc64_v3_radix(PowerPCCPU *cpu) > return !!(cpu->env.spr[SPR_LPCR] & LPCR_HR); > } > > +static inline void ppc64_v3_new_to_old_hpte(target_ulong *pte0, > + target_ulong *pte1) > +{ > + /* Insert B into pte0 */ > + *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) | > + ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) << > + (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT)); > + > + /* Remove B from pte1 */ > + *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK; > +} > + > hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr); > > int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, > diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h > index f11efc9cbc1f..016d6b44ee75 100644 > --- a/target/ppc/mmu-hash64.h > +++ b/target/ppc/mmu-hash64.h > @@ -102,6 +102,11 @@ void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, > #define HPTE64_V_1TB_SEG 0x4000000000000000ULL > #define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL > > +/* Format changes for ARCH v3 */ > +#define HPTE64_V_COMMON_BITS 0x000fffffffffffffULL > +#define HPTE64_R_3_0_SSIZE_SHIFT 58 > +#define HPTE64_R_3_0_SSIZE_MASK (3ULL << HPTE64_R_3_0_SSIZE_SHIFT) > + > static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu) > { > if (cpu->vhyp) { > diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c > index b3c4d33faa55..9afaab8a177c 100644 > --- a/target/ppc/mmu-hash64.c > +++ b/target/ppc/mmu-hash64.c > @@ -514,6 +514,11 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, > smp_rmb(); > pte1 = ppc_hash64_hpte1(cpu, pteg, i); > > + /* Convert format if necessary */ > + if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) { > + ppc64_v3_new_to_old_hpte(&pte0, &pte1); > + } > + > /* This compares V, B, H (secondary) and the AVPN */ > if (HPTE64_V_COMPARE(pte0, ptem)) { > *pshift = hpte_page_shift(sps, pte0, pte1);
On Tue, Feb 19, 2019 at 03:05:29PM +1100, David Gibson wrote: > On Fri, Feb 15, 2019 at 06:00:24PM +0100, Cédric Le Goater wrote: > > From: Benjamin Herrenschmidt <benh@kernel.crashing.org> > > > > POWER9 (arch v3) slightly changes the HPTE format. The B bits move > > from the first to the second half of the HPTE, and the AVPN/ARPN > > are slightly shorter. > > > > However, under SPAPR, the hypercalls still take the old format > > (and probably will for the foreseeable future). > > > > The simplest way to support this is thus to convert the HPTEs from > > new to old format when reading them if the MMU model is v3 and there > > is no virtual hypervisor, leaving the rest of the code unchanged. > > > > Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> > > Signed-off-by: Cédric Le Goater <clg@kaod.org> > > Kinda hacky, but definitely the easiest way in the short to medium > term. So, this doesn't compile as-is without the include rearrangement I objected to earlier in the series. But I'm not actually seeing any reason (here or later in the series) that ppc64_v3_new_to_old_hpte() really needs to be in the header. So I'm applying this with it moved into mmu-hash64.c.
> > > --- > > target/ppc/mmu-book3s-v3.h | 12 ++++++++++++ > > target/ppc/mmu-hash64.h | 5 +++++ > > target/ppc/mmu-hash64.c | 5 +++++ > > 3 files changed, 22 insertions(+) > > > > diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h > > index 4e59742d7eac..216ff296c088 100644 > > --- a/target/ppc/mmu-book3s-v3.h > > +++ b/target/ppc/mmu-book3s-v3.h > > @@ -56,6 +56,18 @@ static inline bool ppc64_v3_radix(PowerPCCPU *cpu) > > return !!(cpu->env.spr[SPR_LPCR] & LPCR_HR); > > } > > > > +static inline void ppc64_v3_new_to_old_hpte(target_ulong *pte0, > > + target_ulong *pte1) > > +{ > > + /* Insert B into pte0 */ > > + *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) | > > + ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) << > > + (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT)); > > + > > + /* Remove B from pte1 */ > > + *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK; > > +} > > + > > hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr); > > > > int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, > > diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h > > index f11efc9cbc1f..016d6b44ee75 100644 > > --- a/target/ppc/mmu-hash64.h > > +++ b/target/ppc/mmu-hash64.h > > @@ -102,6 +102,11 @@ void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, > > #define HPTE64_V_1TB_SEG 0x4000000000000000ULL > > #define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL > > > > +/* Format changes for ARCH v3 */ > > +#define HPTE64_V_COMMON_BITS 0x000fffffffffffffULL > > +#define HPTE64_R_3_0_SSIZE_SHIFT 58 > > +#define HPTE64_R_3_0_SSIZE_MASK (3ULL << HPTE64_R_3_0_SSIZE_SHIFT) > > + > > static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu) > > { > > if (cpu->vhyp) { > > diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c > > index b3c4d33faa55..9afaab8a177c 100644 > > --- a/target/ppc/mmu-hash64.c > > +++ b/target/ppc/mmu-hash64.c > > @@ -514,6 +514,11 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, > > smp_rmb(); > > pte1 = ppc_hash64_hpte1(cpu, pteg, i); > > > > + /* Convert format if necessary */ > > + if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) { > > + ppc64_v3_new_to_old_hpte(&pte0, &pte1); > > + } > > + > > /* This compares V, B, H (secondary) and the AVPN */ > > if (HPTE64_V_COMPARE(pte0, ptem)) { > > *pshift = hpte_page_shift(sps, pte0, pte1); >
diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h index 4e59742d7eac..216ff296c088 100644 --- a/target/ppc/mmu-book3s-v3.h +++ b/target/ppc/mmu-book3s-v3.h @@ -56,6 +56,18 @@ static inline bool ppc64_v3_radix(PowerPCCPU *cpu) return !!(cpu->env.spr[SPR_LPCR] & LPCR_HR); } +static inline void ppc64_v3_new_to_old_hpte(target_ulong *pte0, + target_ulong *pte1) +{ + /* Insert B into pte0 */ + *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) | + ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) << + (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT)); + + /* Remove B from pte1 */ + *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK; +} + hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr); int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h index f11efc9cbc1f..016d6b44ee75 100644 --- a/target/ppc/mmu-hash64.h +++ b/target/ppc/mmu-hash64.h @@ -102,6 +102,11 @@ void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu, #define HPTE64_V_1TB_SEG 0x4000000000000000ULL #define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL +/* Format changes for ARCH v3 */ +#define HPTE64_V_COMMON_BITS 0x000fffffffffffffULL +#define HPTE64_R_3_0_SSIZE_SHIFT 58 +#define HPTE64_R_3_0_SSIZE_MASK (3ULL << HPTE64_R_3_0_SSIZE_SHIFT) + static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu) { if (cpu->vhyp) { diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index b3c4d33faa55..9afaab8a177c 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -514,6 +514,11 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash, smp_rmb(); pte1 = ppc_hash64_hpte1(cpu, pteg, i); + /* Convert format if necessary */ + if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) { + ppc64_v3_new_to_old_hpte(&pte0, &pte1); + } + /* This compares V, B, H (secondary) and the AVPN */ if (HPTE64_V_COMPARE(pte0, ptem)) { *pshift = hpte_page_shift(sps, pte0, pte1);