
[12/18] accel/tcg: Move user-related declarations out of 'exec/cpu-all.h' (2/4)

Message ID 20241212185341.2857-13-philmd@linaro.org (mailing list archive)
State New
Series accel/tcg: Extract user APIs out of 'exec/[cpu, exec]-all.h'

Commit Message

Philippe Mathieu-Daudé Dec. 12, 2024, 6:53 p.m. UTC
Move declarations related to page protection under user
emulation from "exec/cpu-all.h" to "user/page-protection.h".

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 bsd-user/bsd-mem.h             |  1 +
 include/exec/cpu-all.h         | 55 --------------------------------
 include/user/page-protection.h | 57 ++++++++++++++++++++++++++++++++++
 target/arm/tcg/mte_helper.c    |  3 ++
 4 files changed, 61 insertions(+), 55 deletions(-)
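
A minimal sketch of the resulting include pattern (illustrative only, not part of this series; file names are hypothetical):

    /* Source file built only for user emulation, e.g. under bsd-user/ */
    #include "qemu/osdep.h"
    #include "exec/page-protection.h"   /* common page-protection definitions */
    #include "user/page-protection.h"   /* user-mode helpers such as page_set_flags() */

    /* Source file built for both system and user mode */
    #ifdef CONFIG_USER_ONLY
    #include "user/page-protection.h"
    #endif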

Comments

Pierrick Bouvier Dec. 12, 2024, 7:26 p.m. UTC | #1
On 12/12/24 10:53, Philippe Mathieu-Daudé wrote:
> Move declarations related to page protection under user
> emulation from "exec/cpu-all.h" to "user/page-protection.h".
> 
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
>   bsd-user/bsd-mem.h             |  1 +
>   include/exec/cpu-all.h         | 55 --------------------------------
>   include/user/page-protection.h | 57 ++++++++++++++++++++++++++++++++++
>   target/arm/tcg/mte_helper.c    |  3 ++
>   4 files changed, 61 insertions(+), 55 deletions(-)
> 

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>

Patch

diff --git a/bsd-user/bsd-mem.h b/bsd-user/bsd-mem.h
index eef6b222d9e..f5ec0de24ca 100644
--- a/bsd-user/bsd-mem.h
+++ b/bsd-user/bsd-mem.h
@@ -57,6 +57,7 @@ 
 
 #include "qemu-bsd.h"
 #include "exec/page-protection.h"
+#include "user/page-protection.h"
 
 extern struct bsd_shm_regions bsd_shm_regions[];
 extern abi_ulong target_brk;
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 3d97323893b..86cd40020c9 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -130,21 +130,6 @@  extern const TargetPageBits target_page;
 
 int page_get_flags(target_ulong address);
 
-/**
- * page_set_flags:
- * @start: first byte of range
- * @last: last byte of range
- * @flags: flags to set
- * Context: holding mmap lock
- *
- * Modify the flags of a page and invalidate the code if necessary.
- * The flag PAGE_WRITE_ORG is positioned automatically depending
- * on PAGE_WRITE.  The mmap_lock should already be held.
- */
-void page_set_flags(target_ulong start, target_ulong last, int flags);
-
-void page_reset_target_data(target_ulong start, target_ulong last);
-
 /**
  * page_check_range
  * @start: first byte of range
@@ -157,46 +142,6 @@  void page_reset_target_data(target_ulong start, target_ulong last);
  */
 bool page_check_range(target_ulong start, target_ulong last, int flags);
 
-/**
- * page_check_range_empty:
- * @start: first byte of range
- * @last: last byte of range
- * Context: holding mmap lock
- *
- * Return true if the entire range [@start, @last] is unmapped.
- * The memory lock must be held so that the caller can ensure
- * the result stays true until a new mapping can be installed.
- */
-bool page_check_range_empty(target_ulong start, target_ulong last);
-
-/**
- * page_find_range_empty
- * @min: first byte of search range
- * @max: last byte of search range
- * @len: size of the hole required
- * @align: alignment of the hole required (power of 2)
- *
- * If there is a range [x, x+@len) within [@min, @max] such that
- * x % @align == 0, then return x.  Otherwise return -1.
- * The memory lock must be held, as the caller will want to ensure
- * the returned range stays empty until a new mapping can be installed.
- */
-target_ulong page_find_range_empty(target_ulong min, target_ulong max,
-                                   target_ulong len, target_ulong align);
-
-/**
- * page_get_target_data(address)
- * @address: guest virtual address
- *
- * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
- * with the guest page at @address, allocating it if necessary.  The
- * caller should already have verified that the address is valid.
- *
- * The memory will be freed when the guest page is deallocated,
- * e.g. with the munmap system call.
- */
-void *page_get_target_data(target_ulong address)
-    __attribute__((returns_nonnull));
 #endif
 
 CPUArchState *cpu_copy(CPUArchState *env);
diff --git a/include/user/page-protection.h b/include/user/page-protection.h
index ea11cf9e328..d21fab1aaf9 100644
--- a/include/user/page-protection.h
+++ b/include/user/page-protection.h
@@ -18,6 +18,63 @@ 
 
 void page_protect(tb_page_addr_t page_addr);
 int page_unprotect(tb_page_addr_t address, uintptr_t pc);
+
+/**
+ * page_set_flags:
+ * @start: first byte of range
+ * @last: last byte of range
+ * @flags: flags to set
+ * Context: holding mmap lock
+ *
+ * Modify the flags of a page and invalidate the code if necessary.
+ * The flag PAGE_WRITE_ORG is positioned automatically depending
+ * on PAGE_WRITE.  The mmap_lock should already be held.
+ */
+void page_set_flags(target_ulong start, target_ulong last, int flags);
+
+void page_reset_target_data(target_ulong start, target_ulong last);
+
+/**
+ * page_check_range_empty:
+ * @start: first byte of range
+ * @last: last byte of range
+ * Context: holding mmap lock
+ *
+ * Return true if the entire range [@start, @last] is unmapped.
+ * The memory lock must be held so that the caller can ensure
+ * the result stays true until a new mapping can be installed.
+ */
+bool page_check_range_empty(target_ulong start, target_ulong last);
+
+/**
+ * page_find_range_empty
+ * @min: first byte of search range
+ * @max: last byte of search range
+ * @len: size of the hole required
+ * @align: alignment of the hole required (power of 2)
+ *
+ * If there is a range [x, x+@len) within [@min, @max] such that
+ * x % @align == 0, then return x.  Otherwise return -1.
+ * The memory lock must be held, as the caller will want to ensure
+ * the returned range stays empty until a new mapping can be installed.
+ */
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+                                   target_ulong len, target_ulong align);
+
+/**
+ * page_get_target_data(address)
+ * @address: guest virtual address
+ *
+ * Return TARGET_PAGE_DATA_SIZE bytes of out-of-band data to associate
+ * with the guest page at @address, allocating it if necessary.  The
+ * caller should already have verified that the address is valid.
+ *
+ * The memory will be freed when the guest page is deallocated,
+ * e.g. with the munmap system call.
+ */
+__attribute__((returns_nonnull))
+void *page_get_target_data(target_ulong address);
+
 typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                       target_ulong, unsigned long);
 
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 9d2ba287eeb..ae037dc9143 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -23,6 +23,9 @@ 
 #include "internals.h"
 #include "exec/exec-all.h"
 #include "exec/page-protection.h"
+#ifdef CONFIG_USER_ONLY
+#include "user/page-protection.h"
+#endif
 #include "exec/ram_addr.h"
 #include "exec/cpu_ldst.h"
 #include "exec/helper-proto.h"