diff mbox series

[RFC,V3,39/43] rv64ilp32_abi: sysinfo: Adapt sysinfo structure to lp64 uapi

Message ID 20250325121624.523258-40-guoren@kernel.org (mailing list archive)
State New
Headers show
Series rv64ilp32_abi: Build CONFIG_64BIT kernel-self with ILP32 ABI | expand

Commit Message

Guo Ren March 25, 2025, 12:16 p.m. UTC
From: "Guo Ren (Alibaba DAMO Academy)" <guoren@kernel.org>

The RISC-V 64ilp32 ABI leverages the LP64 uapi and accommodates LP64
ABI userspace directly, necessitating changes to the sysinfo struct
that replace its unsigned long fields and arrays with u64.

Signed-off-by: Guo Ren (Alibaba DAMO Academy) <guoren@kernel.org>
---
 fs/proc/loadavg.c             | 10 +++++++---
 include/linux/sched/loadavg.h |  4 ++++
 include/uapi/linux/sysinfo.h  | 20 ++++++++++++++++++++
 kernel/sched/loadavg.c        |  4 ++++
 4 files changed, 35 insertions(+), 3 deletions(-)
diff mbox series

Patch

diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index 817981e57223..643e06de3446 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -13,14 +13,18 @@ 
 
 static int loadavg_proc_show(struct seq_file *m, void *v)
 {
+#if defined(CONFIG_64BIT) && (BITS_PER_LONG == 32)
+	unsigned long long avnrun[3];
+#else
 	unsigned long avnrun[3];
+#endif
 
 	get_avenrun(avnrun, FIXED_1/200, 0);
 
 	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %u/%d %d\n",
-		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
-		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
-		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
+		LOAD_INT((ulong)avnrun[0]), LOAD_FRAC((ulong)avnrun[0]),
+		LOAD_INT((ulong)avnrun[1]), LOAD_FRAC((ulong)avnrun[1]),
+		LOAD_INT((ulong)avnrun[2]), LOAD_FRAC((ulong)avnrun[2]),
 		nr_running(), nr_threads,
 		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
 	return 0;
diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h
index 83ec54b65e79..8f2d6a827ee9 100644
--- a/include/linux/sched/loadavg.h
+++ b/include/linux/sched/loadavg.h
@@ -13,7 +13,11 @@ 
  *    11 bit fractions.
  */
 extern unsigned long avenrun[];		/* Load averages */
+#if defined(CONFIG_64BIT) && (BITS_PER_LONG == 32)
+extern void get_avenrun(unsigned long long *loads, unsigned long offset, int shift);
+#else
 extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
+#endif
 
 #define FSHIFT		11		/* nr of bits of precision */
 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
diff --git a/include/uapi/linux/sysinfo.h b/include/uapi/linux/sysinfo.h
index 435d5c23f0c0..cd29a3d3cd10 100644
--- a/include/uapi/linux/sysinfo.h
+++ b/include/uapi/linux/sysinfo.h
@@ -5,6 +5,25 @@ 
 #include <linux/types.h>
 
 #define SI_LOAD_SHIFT	16
+
+#if (__riscv_xlen == 64) && (__BITS_PER_LONG == 32)
+struct sysinfo {
+	__s64 uptime;		/* Seconds since boot */
+	__u64 loads[3];		/* 1, 5, and 15 minute load averages */
+	__u64 totalram;		/* Total usable main memory size */
+	__u64 freeram;		/* Available memory size */
+	__u64 sharedram;	/* Amount of shared memory */
+	__u64 bufferram;	/* Memory used by buffers */
+	__u64 totalswap;	/* Total swap space size */
+	__u64 freeswap;		/* swap space still available */
+	__u16 procs;	   	/* Number of current processes */
+	__u16 pad;	   	/* Explicit padding for m68k */
+	__u64 totalhigh;	/* Total high memory size */
+	__u64 freehigh;		/* Available high memory size */
+	__u32 mem_unit;		/* Memory unit size in bytes */
+	char _f[20-2*sizeof(__u64)-sizeof(__u32)];	/* Padding: libc5 uses this.. */
+};
+#else
 struct sysinfo {
 	__kernel_long_t uptime;		/* Seconds since boot */
 	__kernel_ulong_t loads[3];	/* 1, 5, and 15 minute load averages */
@@ -21,5 +40,6 @@  struct sysinfo {
 	__u32 mem_unit;			/* Memory unit size in bytes */
 	char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)];	/* Padding: libc5 uses this.. */
 };
+#endif
 
 #endif /* _LINUX_SYSINFO_H */
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index c48900b856a2..f1f5abc64dea 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -68,7 +68,11 @@  EXPORT_SYMBOL(avenrun); /* should be removed */
  *
  * These values are estimates at best, so no need for locking.
  */
+#if defined(CONFIG_64BIT) && (BITS_PER_LONG == 32)
+void get_avenrun(unsigned long long *loads, unsigned long offset, int shift)
+#else
 void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+#endif
 {
 	loads[0] = (avenrun[0] + offset) << shift;
 	loads[1] = (avenrun[1] + offset) << shift;