[RFC,v7,02/16] softmmu: Simplify helper_*_st_name, wrap unaligned code

Message ID 1454059965-23402-3-git-send-email-a.rigo@virtualopensystems.com (mailing list archive)
State New, archived

Commit Message

Alvise Rigo Jan. 29, 2016, 9:32 a.m. UTC
To simplify the helper_*_st_name helpers, wrap the
do_unaligned_access code in an inline function. This also removes
the goto statement.

Based on this work, Alex proposed the following patch series
https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg01136.html
that reduces code duplication across the softmmu helpers.

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 softmmu_template.h | 96 ++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 60 insertions(+), 36 deletions(-)
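
For anyone who does not have softmmu_template.h paged in, the pattern being
factored out is an N-byte store emitted as N single-byte stores (little-endian
extract, highest offset first), which is what lets a store that straddles a
page boundary complete one byte at a time. Below is a minimal, self-contained
sketch of that loop; store_byte() and the flat mem[] array are illustrative
stand-ins for the real glue(helper_ret_stb, MMUSUFFIX) recursion, not QEMU
code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the per-byte store helper the patch
 * recurses into (glue(helper_ret_stb, MMUSUFFIX) in the real code). */
static void store_byte(uint8_t *mem, uintptr_t addr, uint8_t val)
{
    mem[addr] = val;
}

/* The loop the patch wraps into an inline function: little-endian
 * extract, one byte per iteration, stored at addr + i. */
static void do_unaligned_store_le(uint8_t *mem, uintptr_t addr,
                                  uint64_t val, int size)
{
    for (int i = size - 1; i >= 0; i--) {
        store_byte(mem, addr + i, (uint8_t)(val >> (i * 8)));
    }
}

int main(void)
{
    uint8_t mem[8] = { 0 };

    /* A 4-byte store at the unaligned address 1. */
    do_unaligned_store_le(mem, 1, 0xa1b2c3d4u, 4);
    for (int i = 0; i < 8; i++) {
        printf("%02x ", mem[i]);
    }
    printf("\n");
    return 0;
}

Compiled and run, this prints 00 d4 c3 b2 a1 00 00 00: the four bytes of the
value laid out little-endian starting at the unaligned address.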

Comments

Alex Bennée Feb. 11, 2016, 1:07 p.m. UTC | #1
Alvise Rigo <a.rigo@virtualopensystems.com> writes:

> To simplify the helper_*_st_name helpers, wrap the
> do_unaligned_access code in an inline function. This also removes
> the goto statement.

How are you generating your CC list? get_maintainer.pl shows Peter
Crosthwaite (CC'ed) should also be CC'ed on these patches. If we want to
get any of this patch series merged before soft freeze we'll need some
signoffs from the maintainers ;-)

>
> Based on this work, Alex proposed the following patch series
> https://lists.gnu.org/archive/html/qemu-devel/2016-01/msg01136.html
> that reduces code duplication across the softmmu helpers.
>
> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
> ---
>  softmmu_template.h | 96 ++++++++++++++++++++++++++++++++++--------------------
>  1 file changed, 60 insertions(+), 36 deletions(-)
>
> diff --git a/softmmu_template.h b/softmmu_template.h
> index 208f808..7029a03 100644
> --- a/softmmu_template.h
> +++ b/softmmu_template.h
> @@ -370,6 +370,32 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
>                                   iotlbentry->attrs);
>  }
>
> +static inline void glue(helper_le_st_name, _do_unl_access)(CPUArchState *env,
> +                                                           DATA_TYPE val,
> +                                                           target_ulong addr,
> +                                                           TCGMemOpIdx oi,
> +                                                           unsigned mmu_idx,
> +                                                           uintptr_t retaddr)
> +{
> +    int i;
> +
> +    if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
> +        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                             mmu_idx, retaddr);
> +    }
> +    /* XXX: not efficient, but simple */
> +    /* Note: relies on the fact that tlb_fill() does not remove the
> +     * previous page from the TLB cache.  */
> +    for (i = DATA_SIZE - 1; i >= 0; i--) {
> +        /* Little-endian extract.  */
> +        uint8_t val8 = val >> (i * 8);
> +        /* Note the adjustment at the beginning of the function.
> +           Undo that for the recursion.  */
> +        glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
> +                                        oi, retaddr + GETPC_ADJ);
> +    }
> +}
> +

I still think there is some mileage in combining the unaligned stuff, but
as no maintainer spoke out for or against it last time, I'll leave that
for future clean-ups.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

>  void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>                         TCGMemOpIdx oi, uintptr_t retaddr)
>  {
> @@ -399,7 +425,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
>          CPUIOTLBEntry *iotlbentry;
>          if ((addr & (DATA_SIZE - 1)) != 0) {
> -            goto do_unaligned_access;
> +            glue(helper_le_st_name, _do_unl_access)(env, val, addr, oi,
> +                                                    mmu_idx, retaddr);
>          }
>          iotlbentry = &env->iotlb[mmu_idx][index];
>
> @@ -414,23 +441,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>      if (DATA_SIZE > 1
>          && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>                       >= TARGET_PAGE_SIZE)) {
> -        int i;
> -    do_unaligned_access:
> -        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
> -            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> -                                 mmu_idx, retaddr);
> -        }
> -        /* XXX: not efficient, but simple */
> -        /* Note: relies on the fact that tlb_fill() does not remove the
> -         * previous page from the TLB cache.  */
> -        for (i = DATA_SIZE - 1; i >= 0; i--) {
> -            /* Little-endian extract.  */
> -            uint8_t val8 = val >> (i * 8);
> -            /* Note the adjustment at the beginning of the function.
> -               Undo that for the recursion.  */
> -            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
> -                                            oi, retaddr + GETPC_ADJ);
> -        }
> +        glue(helper_le_st_name, _do_unl_access)(env, val, addr, oi,
> +                                                mmu_idx, retaddr);
>          return;
>      }
>
> @@ -450,6 +462,32 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>  }
>
>  #if DATA_SIZE > 1
> +static inline void glue(helper_be_st_name, _do_unl_access)(CPUArchState *env,
> +                                                           DATA_TYPE val,
> +                                                           target_ulong addr,
> +                                                           TCGMemOpIdx oi,
> +                                                           unsigned mmu_idx,
> +                                                           uintptr_t retaddr)
> +{
> +    int i;
> +
> +    if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
> +        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> +                             mmu_idx, retaddr);
> +    }
> +    /* XXX: not efficient, but simple */
> +    /* Note: relies on the fact that tlb_fill() does not remove the
> +     * previous page from the TLB cache.  */
> +    for (i = DATA_SIZE - 1; i >= 0; i--) {
> +        /* Big-endian extract.  */
> +        uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
> +        /* Note the adjustment at the beginning of the function.
> +           Undo that for the recursion.  */
> +        glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
> +                                        oi, retaddr + GETPC_ADJ);
> +    }
> +}
> +
>  void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>                         TCGMemOpIdx oi, uintptr_t retaddr)
>  {
> @@ -479,7 +517,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
>          CPUIOTLBEntry *iotlbentry;
>          if ((addr & (DATA_SIZE - 1)) != 0) {
> -            goto do_unaligned_access;
> +            glue(helper_be_st_name, _do_unl_access)(env, val, addr, oi,
> +                                                    mmu_idx, retaddr);
>          }
>          iotlbentry = &env->iotlb[mmu_idx][index];
>
> @@ -494,23 +533,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>      if (DATA_SIZE > 1
>          && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
>                       >= TARGET_PAGE_SIZE)) {
> -        int i;
> -    do_unaligned_access:
> -        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
> -            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
> -                                 mmu_idx, retaddr);
> -        }
> -        /* XXX: not efficient, but simple */
> -        /* Note: relies on the fact that tlb_fill() does not remove the
> -         * previous page from the TLB cache.  */
> -        for (i = DATA_SIZE - 1; i >= 0; i--) {
> -            /* Big-endian extract.  */
> -            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
> -            /* Note the adjustment at the beginning of the function.
> -               Undo that for the recursion.  */
> -            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
> -                                            oi, retaddr + GETPC_ADJ);
> -        }
> +        glue(helper_be_st_name, _do_unl_access)(env, val, addr, oi,
> +                                                mmu_idx, retaddr);
>          return;
>      }


--
Alex Bennée
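
On the "combining the unaligned stuff" point above: the little-endian and
big-endian loops in this patch differ only in the shift used for the byte
extract, so a single loop parameterized on endianness could serve both
helpers. A rough sketch of that idea follows, with illustrative names and a
bare mem[] rather than the real helper recursion; this is not code from
Alex's follow-up series.

#include <stdbool.h>
#include <stdint.h>

/* One unaligned-store loop instead of two near-identical copies.
 * LE extract: the byte stored at addr + i is val >> (i * 8).
 * BE extract: the byte stored at addr + i is
 * val >> (((size - 1) - i) * 8), which is the patch's
 * ((DATA_SIZE - 1) * 8) - (i * 8) rearranged. */
static inline void do_unaligned_store(uint8_t *mem, uintptr_t addr,
                                      uint64_t val, int size,
                                      bool big_endian)
{
    for (int i = size - 1; i >= 0; i--) {
        int shift = big_endian ? ((size - 1) - i) * 8 : i * 8;
        mem[addr + i] = (uint8_t)(val >> shift);
    }
}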

Patch

diff --git a/softmmu_template.h b/softmmu_template.h
index 208f808..7029a03 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -370,6 +370,32 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                  iotlbentry->attrs);
 }
 
+static inline void glue(helper_le_st_name, _do_unl_access)(CPUArchState *env,
+                                                           DATA_TYPE val,
+                                                           target_ulong addr,
+                                                           TCGMemOpIdx oi,
+                                                           unsigned mmu_idx,
+                                                           uintptr_t retaddr)
+{
+    int i;
+
+    if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+    /* XXX: not efficient, but simple */
+    /* Note: relies on the fact that tlb_fill() does not remove the
+     * previous page from the TLB cache.  */
+    for (i = DATA_SIZE - 1; i >= 0; i--) {
+        /* Little-endian extract.  */
+        uint8_t val8 = val >> (i * 8);
+        /* Note the adjustment at the beginning of the function.
+           Undo that for the recursion.  */
+        glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+                                        oi, retaddr + GETPC_ADJ);
+    }
+}
+
 void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
@@ -399,7 +425,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            goto do_unaligned_access;
+            glue(helper_le_st_name, _do_unl_access)(env, val, addr, oi,
+                                                    mmu_idx, retaddr);
         }
         iotlbentry = &env->iotlb[mmu_idx][index];
 
@@ -414,23 +441,8 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                      >= TARGET_PAGE_SIZE)) {
-        int i;
-    do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-        }
-        /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache.  */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
-            /* Little-endian extract.  */
-            uint8_t val8 = val >> (i * 8);
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion.  */
-            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
-        }
+        glue(helper_le_st_name, _do_unl_access)(env, val, addr, oi,
+                                                mmu_idx, retaddr);
         return;
     }
 
@@ -450,6 +462,32 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 }
 
 #if DATA_SIZE > 1
+static inline void glue(helper_be_st_name, _do_unl_access)(CPUArchState *env,
+                                                           DATA_TYPE val,
+                                                           target_ulong addr,
+                                                           TCGMemOpIdx oi,
+                                                           unsigned mmu_idx,
+                                                           uintptr_t retaddr)
+{
+    int i;
+
+    if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
+                             mmu_idx, retaddr);
+    }
+    /* XXX: not efficient, but simple */
+    /* Note: relies on the fact that tlb_fill() does not remove the
+     * previous page from the TLB cache.  */
+    for (i = DATA_SIZE - 1; i >= 0; i--) {
+        /* Big-endian extract.  */
+        uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
+        /* Note the adjustment at the beginning of the function.
+           Undo that for the recursion.  */
+        glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
+                                        oi, retaddr + GETPC_ADJ);
+    }
+}
+
 void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                        TCGMemOpIdx oi, uintptr_t retaddr)
 {
@@ -479,7 +517,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
         CPUIOTLBEntry *iotlbentry;
         if ((addr & (DATA_SIZE - 1)) != 0) {
-            goto do_unaligned_access;
+            glue(helper_be_st_name, _do_unl_access)(env, val, addr, oi,
+                                                    mmu_idx, retaddr);
         }
         iotlbentry = &env->iotlb[mmu_idx][index];
 
@@ -494,23 +533,8 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if (DATA_SIZE > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                      >= TARGET_PAGE_SIZE)) {
-        int i;
-    do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
-            cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
-        }
-        /* XXX: not efficient, but simple */
-        /* Note: relies on the fact that tlb_fill() does not remove the
-         * previous page from the TLB cache.  */
-        for (i = DATA_SIZE - 1; i >= 0; i--) {
-            /* Big-endian extract.  */
-            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
-            /* Note the adjustment at the beginning of the function.
-               Undo that for the recursion.  */
-            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            oi, retaddr + GETPC_ADJ);
-        }
+        glue(helper_be_st_name, _do_unl_access)(env, val, addr, oi,
+                                                mmu_idx, retaddr);
         return;
     }
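
Since every definition and call site above goes through glue(), it may help
to see the token pasting spelled out. The sketch below uses simplified
versions of QEMU's glue()/xglue() macros and assumes the DATA_SIZE == 4
instantiation (SUFFIX l, MMUSUFFIX _mmu); the helper_le_st_name definition
mirrors the one in softmmu_template.h.

#include <stdio.h>

/* Simplified versions of QEMU's token-pasting macros. The two-level
 * indirection forces arguments to be macro-expanded before pasting. */
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)

/* Assumed per-instantiation values for the DATA_SIZE == 4 case. */
#define SUFFIX l
#define MMUSUFFIX _mmu
#define helper_le_st_name glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)

/* glue(helper_le_st_name, _do_unl_access) therefore defines: */
static void glue(helper_le_st_name, _do_unl_access)(void)
{
    puts("helper_le_stl_mmu_do_unl_access");
}

int main(void)
{
    /* The pasted name can be written out directly. */
    helper_le_stl_mmu_do_unl_access();
    return 0;
}

The double indirection in glue() is what makes helper_le_st_name expand to
helper_le_stl_mmu before the final paste, so the new inline function ends up
named helper_le_stl_mmu_do_unl_access.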