[v4] hw/ppc/spapr: Implement the h_page_init hypercall

Message ID 1455786954-20625-1-git-send-email-thuth@redhat.com (mailing list archive)
State New, archived

Commit Message

Thomas Huth Feb. 18, 2016, 9:15 a.m. UTC
This hypercall either initializes a page with zeros, or copies
another page.
According to LoPAPR, the i-cache of the page should also be
flushed if using H_ICACHE_INVALIDATE or H_ICACHE_SYNCHRONIZE,
and the d-cache should be synchronized to the RAM if the
H_ICACHE_SYNCHRONIZE flag is used. For this, two new functions
are introduced, kvmppc_dcbst_range() and kvmppc_icbi_range(), which
use the corresponding assembler instructions to flush the caches
when running with KVM on Power. When running with TCG instead,
the code only uses tb_flush(), assuming that this will be
enough for synchronization.

Signed-off-by: Thomas Huth <thuth@redhat.com>
---
 v4:
 - Re-arranged the H_COPY_PAGE code block for better readability
   and to avoid a wrong compiler warning with newer versions of GCC

 hw/ppc/spapr_hcall.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 target-ppc/kvm_ppc.h | 36 +++++++++++++++++++++++++++++--
 2 files changed, 94 insertions(+), 2 deletions(-)
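
For illustration, a guest could drive the new hypercall roughly as
follows. This is a minimal sketch, assuming Linux's plpar_hcall_norets()
wrapper and the H_* flag constants from asm/hvcall.h; the helper names
hv_zero_page()/hv_copy_code_page() are made up for this example, and
dst_ra/src_ra must be page-aligned guest real addresses.

    #include <asm/hvcall.h>

    /* Clear one page in the hypervisor instead of memset()ing it. */
    static long hv_zero_page(unsigned long dst_ra)
    {
        return plpar_hcall_norets(H_PAGE_INIT, H_ZERO_PAGE, dst_ra, 0);
    }

    /* Copy a page that may contain code; also request an i-cache sync. */
    static long hv_copy_code_page(unsigned long dst_ra, unsigned long src_ra)
    {
        return plpar_hcall_norets(H_PAGE_INIT,
                                  H_COPY_PAGE | H_ICACHE_SYNCHRONIZE,
                                  dst_ra, src_ra);
    }

Both calls return H_SUCCESS on success and H_PARAMETER for unaligned
or non-RAM addresses, matching the checks in h_page_init() below.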

Comments

David Gibson Feb. 19, 2016, 12:13 a.m. UTC | #1
On Thu, Feb 18, 2016 at 10:15:54AM +0100, Thomas Huth wrote:
> This hypercall either initializes a page with zeros, or copies
> another page.
> According to LoPAPR, the i-cache of the page should also be
> flushed if using H_ICACHE_INVALIDATE or H_ICACHE_SYNCHRONIZE,
> and the d-cache should be synchronized to the RAM if the
> H_ICACHE_SYNCHRONIZE flag is used. For this, two new functions
> are introduced, kvmppc_dcbst_range() and kvmppc_icbi_range(), which
> use the corresponding assembler instructions to flush the caches
> when running with KVM on Power. When running with TCG instead,
> the code only uses tb_flush(), assuming that this will be
> enough for synchronization.
> 
> Signed-off-by: Thomas Huth <thuth@redhat.com>

Applied to ppc-for-2.6, thanks.


Patch

diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 6e9b6be..1733482 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -386,6 +386,65 @@  static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     return H_SUCCESS;
 }
 
+static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
+                                target_ulong opcode, target_ulong *args)
+{
+    target_ulong flags = args[0];
+    hwaddr dst = args[1];
+    hwaddr src = args[2];
+    hwaddr len = TARGET_PAGE_SIZE;
+    uint8_t *pdst, *psrc;
+    target_long ret = H_SUCCESS;
+
+    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
+                  | H_COPY_PAGE | H_ZERO_PAGE)) {
+        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx "\n",
+                      flags);
+        return H_PARAMETER;
+    }
+
+    /* Map-in destination */
+    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
+        return H_PARAMETER;
+    }
+    pdst = cpu_physical_memory_map(dst, &len, 1);
+    if (!pdst || len != TARGET_PAGE_SIZE) {
+        return H_PARAMETER;
+    }
+
+    if (flags & H_COPY_PAGE) {
+        /* Map-in source, copy to destination, and unmap source again */
+        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
+            ret = H_PARAMETER;
+            goto unmap_out;
+        }
+        psrc = cpu_physical_memory_map(src, &len, 0);
+        if (!psrc || len != TARGET_PAGE_SIZE) {
+            ret = H_PARAMETER;
+            goto unmap_out;
+        }
+        memcpy(pdst, psrc, len);
+        cpu_physical_memory_unmap(psrc, len, 0, len);
+    } else if (flags & H_ZERO_PAGE) {
+        memset(pdst, 0, len);          /* Just clear the destination page */
+    }
+
+    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
+        kvmppc_dcbst_range(cpu, pdst, len);
+    }
+    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
+        if (kvm_enabled()) {
+            kvmppc_icbi_range(cpu, pdst, len);
+        } else {
+            tb_flush(CPU(cpu));
+        }
+    }
+
+unmap_out:
+    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
+    return ret;
+}
+
 #define FLAGS_REGISTER_VPA         0x0000200000000000ULL
 #define FLAGS_REGISTER_DTL         0x0000400000000000ULL
 #define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
@@ -1045,6 +1104,7 @@  static void hypercall_register_types(void)
     spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
     spapr_register_hypercall(H_SET_DABR, h_set_dabr);
     spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
+    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
     spapr_register_hypercall(H_SET_MODE, h_set_mode);
 
     /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index aaa828c..fd64c44 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -249,15 +249,47 @@  static inline int kvmppc_enable_hwrng(void)
 #endif
 
 #ifndef CONFIG_KVM
+
 #define kvmppc_eieio() do { } while (0)
-#else
+
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+#else   /* CONFIG_KVM */
+
 #define kvmppc_eieio() \
     do {                                          \
         if (kvm_enabled()) {                          \
             asm volatile("eieio" : : : "memory"); \
         } \
     } while (0)
-#endif
+
+/* Store data cache blocks back to memory */
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+    uint8_t *p;
+
+    for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
+        asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
+    }
+}
+
+/* Invalidate instruction cache blocks */
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+    uint8_t *p;
+
+    for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
+        asm volatile("icbi 0,%0" : : "r"(p));
+    }
+}
+
+#endif  /* CONFIG_KVM */
 
 #ifndef KVM_INTERRUPT_SET
 #define KVM_INTERRUPT_SET -1
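
As background on the cache maintenance done per cache line by
kvmppc_dcbst_range() and kvmppc_icbi_range(), the sketch below is an
illustrative, self-contained variant of the same walk that follows the
Power ISA self-modifying-code sequence (dcbst, sync, icbi, isync). It
is not part of the patch: a fixed 128-byte line size is assumed instead
of reading dcache_line_size/icache_line_size from the CPU state, and
the function name is made up for this example.

    #include <stdint.h>

    #define CACHE_LINE_SIZE 128   /* assumed; the patch reads it from the CPU env */

    /* Write modified data back to memory, then discard stale i-cache blocks. */
    static inline void flush_icache_range(uint8_t *start, uint8_t *end)
    {
        uint8_t *p;

        for (p = start; p < end; p += CACHE_LINE_SIZE) {
            asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
        }
        asm volatile("sync" : : : "memory");
        for (p = start; p < end; p += CACHE_LINE_SIZE) {
            asm volatile("icbi 0,%0" : : "r"(p) : "memory");
        }
        asm volatile("isync" : : : "memory");
    }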