diff mbox

target-ppc/pseries: Clean up handling of KVM managed external HPTs

Message ID 1457059242-6673-1-git-send-email-david@gibson.dropbear.id.au (mailing list archive)
State New, archived
Headers show

Commit Message

David Gibson March 4, 2016, 2:40 a.m. UTC
fa48b43 "target-ppc: Remove hack for ppc_hash64_load_hpte*() with HV KVM"
purports to remove a hack in the handling of hash page tables (HPTs)
managed by KVM instead of qemu.  However, it makes the wrong call.

That patch requires anything looking for an external HPT (that is one not
managed by the guest itself) to check both env->external_htab (for a qemu
managed HPT) and kvmppc_kern_htab (for a KVM managed HPT).  That's a
problem because kvmppc_kern_htab is local to mmu-hash64.c, but some places
which need to check for an external HPT are outside that, such as
kvm_arch_get_registers().  The latter was subtly broken by the earlier
patch such that gdbstub can no longer access memory.

Basically a KVM managed HPT is much more like a qemu managed HPT than it is
like a guest managed HPT, so the original "hack" was actually on the right
track.

This partially reverts fa48b43, marking a KVM managed external HPT by
putting a special but non-NULL value in env->external_htab.  It then goes
further, using that marker to eliminate the kvmppc_kern_htab global
entirely, and adding a ppc_hash64_set_external_hpt() helper function to
reduce the amount of intimate knowledge of the cpu code that the pseries
machine type needs to set this up correctly.

This also has some flow-on changes to the HPT access helpers, required by
the above changes.

Reported-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
---
 hw/ppc/spapr.c          |  6 ++----
 hw/ppc/spapr_hcall.c    | 10 +++++-----
 target-ppc/mmu-hash64.c | 46 +++++++++++++++++++++++++---------------------
 target-ppc/mmu-hash64.h |  9 ++++-----
 4 files changed, 36 insertions(+), 35 deletions(-)

Comments

David Gibson March 4, 2016, 5:10 a.m. UTC | #1
On Fri, Mar 04, 2016 at 01:40:42PM +1100, David Gibson wrote:
> fa48b43 "target-ppc: Remove hack for ppc_hash64_load_hpte*() with HV KVM"
> purports to remove a hack in the handling of hash page tables (HPTs)
> managed by KVM instead of qemu.  However, it makes the wrong call.
> 
> That patch requires anything looking for an external HPT (that is one not
> managed by the guest itself) to check both env->external_htab (for a qemu
> managed HPT) and kvmppc_kern_htab (for a KVM managed HPT).  That's a
> problem because kvmppc_kern_htab is local to mmu-hash64.c, but some places
> which need to check for an external HPT are outside that, such as
> kvm_arch_get_registers().  The latter was subtly broken by the earlier
> patch such that gdbstub can no longer access memory.
> 
> Basically a KVM managed HPT is much more like a qemu managed HPT than it is
> like a guest managed HPT, so the original "hack" was actually on the right
> track.
> 
> This partially reverts fa48b43, marking a KVM managed external HPT by
> putting a special but non-NULL value in env->external_htab.  It then goes
> further, using that marker to eliminate the kvmppc_kern_htab global
> entirely, and adding a ppc_hash64_set_external_hpt() helper function to
> reduce the amount of intimate knowledge of the cpu code that the pseries
> machine type needs to set this up correctly.
> 
> This also has some flow-on changes to the HPT access helpers, required by
> the above changes.
> 
> Reported-by: Greg Kurz <gkurz@linux.vnet.ibm.com>
> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>

Self NACK, sorry.  Realised this has a stupid omission (and also
partially overlaps with another patch I was working on).

> ---
>  hw/ppc/spapr.c          |  6 ++----
>  hw/ppc/spapr_hcall.c    | 10 +++++-----
>  target-ppc/mmu-hash64.c | 46 +++++++++++++++++++++++++---------------------
>  target-ppc/mmu-hash64.h |  9 ++++-----
>  4 files changed, 36 insertions(+), 35 deletions(-)
> 
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index e9d4abf..d8b749c 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -1091,7 +1091,7 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
>          }
>  
>          spapr->htab_shift = shift;
> -        kvmppc_kern_htab = true;
> +        spapr->htab = NULL;
>      } else {
>          /* kernel-side HPT not needed, allocate in userspace instead */
>          size_t size = 1ULL << shift;
> @@ -1106,7 +1106,6 @@ static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
>  
>          memset(spapr->htab, 0, size);
>          spapr->htab_shift = shift;
> -        kvmppc_kern_htab = false;
>  
>          for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
>              DIRTY_HPTE(HPTE(spapr->htab, i));
> @@ -1196,8 +1195,7 @@ static void spapr_cpu_reset(void *opaque)
>  
>      env->spr[SPR_HIOR] = 0;
>  
> -    env->external_htab = (uint8_t *)spapr->htab;
> -    env->htab_base = -1;
> +    ppc_hash64_set_external_hpt(cpu, spapr->htab);
>      /*
>       * htab_mask is the mask used to normalize hash value to PTEG index.
>       * htab_shift is log2 of hash table size.
> diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
> index 1733482..b2b1b93 100644
> --- a/hw/ppc/spapr_hcall.c
> +++ b/hw/ppc/spapr_hcall.c
> @@ -122,17 +122,17 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
>                  break;
>              }
>          }
> -        ppc_hash64_stop_access(token);
> +        ppc_hash64_stop_access(cpu, token);
>          if (index == 8) {
>              return H_PTEG_FULL;
>          }
>      } else {
>          token = ppc_hash64_start_access(cpu, pte_index);
>          if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
> -            ppc_hash64_stop_access(token);
> +            ppc_hash64_stop_access(cpu, token);
>              return H_PTEG_FULL;
>          }
> -        ppc_hash64_stop_access(token);
> +        ppc_hash64_stop_access(cpu, token);
>      }
>  
>      ppc_hash64_store_hpte(cpu, pte_index + index,
> @@ -165,7 +165,7 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
>      token = ppc_hash64_start_access(cpu, ptex);
>      v = ppc_hash64_load_hpte0(cpu, token, 0);
>      r = ppc_hash64_load_hpte1(cpu, token, 0);
> -    ppc_hash64_stop_access(token);
> +    ppc_hash64_stop_access(cpu, token);
>  
>      if ((v & HPTE64_V_VALID) == 0 ||
>          ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
> @@ -288,7 +288,7 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
>      token = ppc_hash64_start_access(cpu, pte_index);
>      v = ppc_hash64_load_hpte0(cpu, token, 0);
>      r = ppc_hash64_load_hpte1(cpu, token, 0);
> -    ppc_hash64_stop_access(token);
> +    ppc_hash64_stop_access(cpu, token);
>  
>      if ((v & HPTE64_V_VALID) == 0 ||
>          ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
> diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
> index 9c58fbf..88d4296 100644
> --- a/target-ppc/mmu-hash64.c
> +++ b/target-ppc/mmu-hash64.c
> @@ -36,10 +36,11 @@
>  #endif
>  
>  /*
> - * Used to indicate whether we have allocated htab in the
> - * host kernel
> - * Used to indicate that a CPU has its hash page table (HPT) managed
> + * within the host kernel
>   */
> -bool kvmppc_kern_htab;
> +#define MMU_HASH64_KVM_MANAGED_HPT      ((void *)-1)
> +
>  /*
>   * SLB handling
>   */
> @@ -259,6 +260,18 @@ target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
>   * 64-bit hash table MMU handling
>   */
>  
> +void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt)
> +{
> +    CPUPPCState *env = &cpu->env;
> +
> +    if (hpt) {
> +        env->external_htab = hpt;
> +    } else {
> +        env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
> +    }
> +    env->htab_base = -1;
> +}
> +
>  static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
>                                 ppc_slb_t *slb, ppc_hash_pte64_t pte)
>  {
> @@ -353,25 +366,16 @@ uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
>      hwaddr pte_offset;
>  
>      pte_offset = pte_index * HASH_PTE_SIZE_64;
> -    if (kvmppc_kern_htab) {
> +    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
>          /*
>           * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
>           */
>          token = kvmppc_hash64_read_pteg(cpu, pte_index);
> -        if (token) {
> -            return token;
> -        }
> +    } else if (cpu->env.external_htab) {
>          /*
> -         * pteg read failed, even though we have allocated htab via
> -         * kvmppc_reset_htab.
> +         * HTAB is controlled by QEMU. Just point to the internally
> +         * accessible PTEG.
>           */
> -        return 0;
> -    }
> -    /*
> -     * HTAB is controlled by QEMU. Just point to the internally
> -     * accessible PTEG.
> -     */
> -    if (cpu->env.external_htab) {
>          token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
>      } else if (cpu->env.htab_base) {
>          token = cpu->env.htab_base + pte_offset;
> @@ -379,9 +383,9 @@ uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
>      return token;
>  }
>  
> -void ppc_hash64_stop_access(uint64_t token)
> +void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
>  {
> -    if (kvmppc_kern_htab) {
> +    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
>          kvmppc_hash64_free_pteg(token);
>      }
>  }
> @@ -410,11 +414,11 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
>              && HPTE64_V_COMPARE(pte0, ptem)) {
>              pte->pte0 = pte0;
>              pte->pte1 = pte1;
> -            ppc_hash64_stop_access(token);
> +            ppc_hash64_stop_access(cpu, token);
>              return (pte_index + i) * HASH_PTE_SIZE_64;
>          }
>      }
> -    ppc_hash64_stop_access(token);
> +    ppc_hash64_stop_access(cpu, token);
>      /*
>       * We didn't find a valid entry.
>       */
> @@ -729,7 +733,7 @@ void ppc_hash64_store_hpte(PowerPCCPU *cpu,
>  {
>      CPUPPCState *env = &cpu->env;
>  
> -    if (kvmppc_kern_htab) {
> +    if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
>          kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
>          return;
>      }
> diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
> index e7d9925..f7b1f0d 100644
> --- a/target-ppc/mmu-hash64.h
> +++ b/target-ppc/mmu-hash64.h
> @@ -90,10 +90,9 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
>  #define HPTE64_V_1TB_SEG        0x4000000000000000ULL
>  #define HPTE64_V_VRMA_MASK      0x4001ffffff000000ULL
>  
> -
> -extern bool kvmppc_kern_htab;
> +void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt);
>  uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index);
> -void ppc_hash64_stop_access(uint64_t token);
> +void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token);
>  
>  static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
>                                                   uint64_t token, int index)
> @@ -102,7 +101,7 @@ static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
>      uint64_t addr;
>  
>      addr = token + (index * HASH_PTE_SIZE_64);
> -    if (kvmppc_kern_htab || env->external_htab) {
> +    if (env->external_htab) {
>          return  ldq_p((const void *)(uintptr_t)addr);
>      } else {
>          return ldq_phys(CPU(cpu)->as, addr);
> @@ -116,7 +115,7 @@ static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu,
>      uint64_t addr;
>  
>      addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2;
> -    if (kvmppc_kern_htab || env->external_htab) {
> +    if (env->external_htab) {
>          return  ldq_p((const void *)(uintptr_t)addr);
>      } else {
>          return ldq_phys(CPU(cpu)->as, addr);
diff mbox

Patch

diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index e9d4abf..d8b749c 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1091,7 +1091,7 @@  static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
         }
 
         spapr->htab_shift = shift;
-        kvmppc_kern_htab = true;
+        spapr->htab = NULL;
     } else {
         /* kernel-side HPT not needed, allocate in userspace instead */
         size_t size = 1ULL << shift;
@@ -1106,7 +1106,6 @@  static void spapr_reallocate_hpt(sPAPRMachineState *spapr, int shift,
 
         memset(spapr->htab, 0, size);
         spapr->htab_shift = shift;
-        kvmppc_kern_htab = false;
 
         for (i = 0; i < size / HASH_PTE_SIZE_64; i++) {
             DIRTY_HPTE(HPTE(spapr->htab, i));
@@ -1196,8 +1195,7 @@  static void spapr_cpu_reset(void *opaque)
 
     env->spr[SPR_HIOR] = 0;
 
-    env->external_htab = (uint8_t *)spapr->htab;
-    env->htab_base = -1;
+    ppc_hash64_set_external_hpt(cpu, spapr->htab);
     /*
      * htab_mask is the mask used to normalize hash value to PTEG index.
      * htab_shift is log2 of hash table size.
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 1733482..b2b1b93 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -122,17 +122,17 @@  static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                 break;
             }
         }
-        ppc_hash64_stop_access(token);
+        ppc_hash64_stop_access(cpu, token);
         if (index == 8) {
             return H_PTEG_FULL;
         }
     } else {
         token = ppc_hash64_start_access(cpu, pte_index);
         if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
-            ppc_hash64_stop_access(token);
+            ppc_hash64_stop_access(cpu, token);
             return H_PTEG_FULL;
         }
-        ppc_hash64_stop_access(token);
+        ppc_hash64_stop_access(cpu, token);
     }
 
     ppc_hash64_store_hpte(cpu, pte_index + index,
@@ -165,7 +165,7 @@  static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
     token = ppc_hash64_start_access(cpu, ptex);
     v = ppc_hash64_load_hpte0(cpu, token, 0);
     r = ppc_hash64_load_hpte1(cpu, token, 0);
-    ppc_hash64_stop_access(token);
+    ppc_hash64_stop_access(cpu, token);
 
     if ((v & HPTE64_V_VALID) == 0 ||
         ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
@@ -288,7 +288,7 @@  static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     token = ppc_hash64_start_access(cpu, pte_index);
     v = ppc_hash64_load_hpte0(cpu, token, 0);
     r = ppc_hash64_load_hpte1(cpu, token, 0);
-    ppc_hash64_stop_access(token);
+    ppc_hash64_stop_access(cpu, token);
 
     if ((v & HPTE64_V_VALID) == 0 ||
         ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 9c58fbf..88d4296 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -36,10 +36,11 @@ 
 #endif
 
 /*
- * Used to indicate whether we have allocated htab in the
- * host kernel
- * Used to indicate that a CPU has its hash page table (HPT) managed
+ * within the host kernel
  */
-bool kvmppc_kern_htab;
+#define MMU_HASH64_KVM_MANAGED_HPT      ((void *)-1)
+
 /*
  * SLB handling
  */
@@ -259,6 +260,18 @@  target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
  * 64-bit hash table MMU handling
  */
 
+void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt)
+{
+    CPUPPCState *env = &cpu->env;
+
+    if (hpt) {
+        env->external_htab = hpt;
+    } else {
+        env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
+    }
+    env->htab_base = -1;
+}
+
 static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                                ppc_slb_t *slb, ppc_hash_pte64_t pte)
 {
@@ -353,25 +366,16 @@  uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
     hwaddr pte_offset;
 
     pte_offset = pte_index * HASH_PTE_SIZE_64;
-    if (kvmppc_kern_htab) {
+    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
         /*
          * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
          */
         token = kvmppc_hash64_read_pteg(cpu, pte_index);
-        if (token) {
-            return token;
-        }
+    } else if (cpu->env.external_htab) {
         /*
-         * pteg read failed, even though we have allocated htab via
-         * kvmppc_reset_htab.
+         * HTAB is controlled by QEMU. Just point to the internally
+         * accessible PTEG.
          */
-        return 0;
-    }
-    /*
-     * HTAB is controlled by QEMU. Just point to the internally
-     * accessible PTEG.
-     */
-    if (cpu->env.external_htab) {
         token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
     } else if (cpu->env.htab_base) {
         token = cpu->env.htab_base + pte_offset;
@@ -379,9 +383,9 @@  uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
     return token;
 }
 
-void ppc_hash64_stop_access(uint64_t token)
+void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
 {
-    if (kvmppc_kern_htab) {
+    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
         kvmppc_hash64_free_pteg(token);
     }
 }
@@ -410,11 +414,11 @@  static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
             && HPTE64_V_COMPARE(pte0, ptem)) {
             pte->pte0 = pte0;
             pte->pte1 = pte1;
-            ppc_hash64_stop_access(token);
+            ppc_hash64_stop_access(cpu, token);
             return (pte_index + i) * HASH_PTE_SIZE_64;
         }
     }
-    ppc_hash64_stop_access(token);
+    ppc_hash64_stop_access(cpu, token);
     /*
      * We didn't find a valid entry.
      */
@@ -729,7 +733,7 @@  void ppc_hash64_store_hpte(PowerPCCPU *cpu,
 {
     CPUPPCState *env = &cpu->env;
 
-    if (kvmppc_kern_htab) {
+    if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
         kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
         return;
     }
diff --git a/target-ppc/mmu-hash64.h b/target-ppc/mmu-hash64.h
index e7d9925..f7b1f0d 100644
--- a/target-ppc/mmu-hash64.h
+++ b/target-ppc/mmu-hash64.h
@@ -90,10 +90,9 @@  unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
 #define HPTE64_V_1TB_SEG        0x4000000000000000ULL
 #define HPTE64_V_VRMA_MASK      0x4001ffffff000000ULL
 
-
-extern bool kvmppc_kern_htab;
+void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt);
 uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index);
-void ppc_hash64_stop_access(uint64_t token);
+void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token);
 
 static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
                                                  uint64_t token, int index)
@@ -102,7 +101,7 @@  static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
     uint64_t addr;
 
     addr = token + (index * HASH_PTE_SIZE_64);
-    if (kvmppc_kern_htab || env->external_htab) {
+    if (env->external_htab) {
         return  ldq_p((const void *)(uintptr_t)addr);
     } else {
         return ldq_phys(CPU(cpu)->as, addr);
@@ -116,7 +115,7 @@  static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu,
     uint64_t addr;
 
     addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2;
-    if (kvmppc_kern_htab || env->external_htab) {
+    if (env->external_htab) {
         return  ldq_p((const void *)(uintptr_t)addr);
     } else {
         return ldq_phys(CPU(cpu)->as, addr);