[12/12] xen/cpupool: make per-cpupool sched-gran hypfs node writable

Message ID 20201026091316.25680-13-jgross@suse.com
State Superseded
Series xen: support per-cpupool scheduling granularity

Commit Message

Jürgen Groß Oct. 26, 2020, 9:13 a.m. UTC
Make /cpupool/<id>/sched-gran in hypfs writable. This will enable a
per-cpupool selectable scheduling granularity.

Writing this node is allowed only when no cpu is assigned to the cpupool.
Allowed values are "cpu", "core" and "socket".
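For illustration only (not part of this patch), a minimal dom0 tools-side
sketch of driving the new writable node; the libxenhypfs function names used
below (xenhypfs_open()/xenhypfs_write()/xenhypfs_close()) are assumptions
about the tools API, not something introduced by this series:

    /*
     * Illustrative sketch: set the scheduling granularity of cpupool 1 via
     * the new writable hypfs node.  The cpupool must have no cpus assigned,
     * otherwise the hypervisor refuses the write.
     */
    #include <stdio.h>
    #include <xenhypfs.h>

    int main(void)
    {
        xenhypfs_handle *hdl = xenhypfs_open(NULL, 0);

        if ( !hdl )
        {
            perror("xenhypfs_open");
            return 1;
        }

        /* Accepted values per the commit message: "cpu", "core", "socket". */
        if ( xenhypfs_write(hdl, "/cpupool/1/sched-gran", "core") )
            perror("xenhypfs_write");

        xenhypfs_close(hdl);

        return 0;
    }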

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 docs/misc/hypfs-paths.pandoc |  5 ++-
 xen/common/sched/cpupool.c   | 75 +++++++++++++++++++++++++++++++-----
 2 files changed, 69 insertions(+), 11 deletions(-)

Comments

Jan Beulich Oct. 29, 2020, 2:58 p.m. UTC | #1
On 26.10.2020 10:13, Juergen Gross wrote:
> @@ -1088,13 +1098,58 @@ static int cpupool_gran_read(const struct hypfs_entry *entry,
>      return copy_to_guest(uaddr, name, strlen(name) + 1) ? -EFAULT : 0;
>  }
>  
> +static int cpupool_gran_write(struct hypfs_entry_leaf *leaf,
> +                              XEN_GUEST_HANDLE_PARAM(void) uaddr,
> +                              unsigned int ulen)
> +{
> +    const struct hypfs_dyndir_id *data;
> +    struct cpupool *cpupool;
> +    enum sched_gran gran;
> +    unsigned int sched_gran;
> +    char name[SCHED_GRAN_NAME_LEN];
> +    int ret = 0;
> +
> +    if ( ulen > SCHED_GRAN_NAME_LEN )
> +        return -ENOSPC;
> +
> +    if ( copy_from_guest(name, uaddr, ulen) )
> +        return -EFAULT;
> +
> +    sched_gran = sched_gran_get(name, &gran) ? 0
> +                                             : cpupool_check_granularity(gran);
> +    if ( memchr(name, 0, ulen) != (name + ulen - 1) || sched_gran == 0 )
> +        return -EINVAL;

I guess the memchr() check wants to happen before the call to
sched_gran_get()?

Jan
Jürgen Groß Oct. 29, 2020, 2:59 p.m. UTC | #2
On 29.10.20 15:58, Jan Beulich wrote:
> On 26.10.2020 10:13, Juergen Gross wrote:
>> @@ -1088,13 +1098,58 @@ static int cpupool_gran_read(const struct hypfs_entry *entry,
>>       return copy_to_guest(uaddr, name, strlen(name) + 1) ? -EFAULT : 0;
>>   }
>>   
>> +static int cpupool_gran_write(struct hypfs_entry_leaf *leaf,
>> +                              XEN_GUEST_HANDLE_PARAM(void) uaddr,
>> +                              unsigned int ulen)
>> +{
>> +    const struct hypfs_dyndir_id *data;
>> +    struct cpupool *cpupool;
>> +    enum sched_gran gran;
>> +    unsigned int sched_gran;
>> +    char name[SCHED_GRAN_NAME_LEN];
>> +    int ret = 0;
>> +
>> +    if ( ulen > SCHED_GRAN_NAME_LEN )
>> +        return -ENOSPC;
>> +
>> +    if ( copy_from_guest(name, uaddr, ulen) )
>> +        return -EFAULT;
>> +
>> +    sched_gran = sched_gran_get(name, &gran) ? 0
>> +                                             : cpupool_check_granularity(gran);
>> +    if ( memchr(name, 0, ulen) != (name + ulen - 1) || sched_gran == 0 )
>> +        return -EINVAL;
> 
> I guess the memchr() check wants to happen before the call to
> sched_gran_get()?

Yes.


Juergen
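A minimal sketch of the reordering being asked for, reusing the names from
the quoted hunk (the rest of cpupool_gran_write() stays unchanged); this is
an illustration of the suggested fix, not the actual follow-up patch:

    /* Validate the buffer first: it must be a single NUL-terminated string. */
    if ( memchr(name, 0, ulen) != (name + ulen - 1) )
        return -EINVAL;

    /* Only then parse the name and check the granularity is usable. */
    sched_gran = sched_gran_get(name, &gran) ? 0
                                             : cpupool_check_granularity(gran);
    if ( sched_gran == 0 )
        return -EINVAL;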

Patch

diff --git a/docs/misc/hypfs-paths.pandoc b/docs/misc/hypfs-paths.pandoc
index f1ce24d7fe..e86f7d0dbe 100644
--- a/docs/misc/hypfs-paths.pandoc
+++ b/docs/misc/hypfs-paths.pandoc
@@ -184,10 +184,13 @@  A directory of all current cpupools.
 The individual cpupools. Each entry is a directory with the name being the
 cpupool-id (e.g. /cpupool/0/).
 
-#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket")
+#### /cpupool/*/sched-gran = ("cpu" | "core" | "socket") [w]
 
 The scheduling granularity of a cpupool.
 
+Writing a value is allowed only for cpupools with no cpu assigned and if the
+architecture supports different scheduling granularities.
+
 #### /params/
 
 A directory of runtime parameters.
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 8674ac0fdd..d0c61fb720 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -78,7 +78,7 @@  static void sched_gran_print(enum sched_gran mode, unsigned int gran)
 }
 
 #ifdef CONFIG_HAS_SCHED_GRANULARITY
-static int __init sched_select_granularity(const char *str)
+static int sched_gran_get(const char *str, enum sched_gran *mode)
 {
     unsigned int i;
 
@@ -86,36 +86,43 @@  static int __init sched_select_granularity(const char *str)
     {
         if ( strcmp(sg_name[i].name, str) == 0 )
         {
-            opt_sched_granularity = sg_name[i].mode;
+            *mode = sg_name[i].mode;
             return 0;
         }
     }
 
     return -EINVAL;
 }
+
+static int __init sched_select_granularity(const char *str)
+{
+    return sched_gran_get(str, &opt_sched_granularity);
+}
 custom_param("sched-gran", sched_select_granularity);
+#else
+static int sched_gran_get(const char *str, enum sched_gran *mode)
+{
+    return -EINVAL;
+}
 #endif
 
-static unsigned int __init cpupool_check_granularity(void)
+static unsigned int cpupool_check_granularity(enum sched_gran mode)
 {
     unsigned int cpu;
     unsigned int siblings, gran = 0;
 
-    if ( opt_sched_granularity == SCHED_GRAN_cpu )
+    if ( mode == SCHED_GRAN_cpu )
         return 1;
 
     for_each_online_cpu ( cpu )
     {
-        siblings = cpumask_weight(sched_get_opt_cpumask(opt_sched_granularity,
-                                                        cpu));
+        siblings = cpumask_weight(sched_get_opt_cpumask(mode, cpu));
         if ( gran == 0 )
             gran = siblings;
         else if ( gran != siblings )
             return 0;
     }
 
-    sched_disable_smt_switching = true;
-
     return gran;
 }
 
@@ -127,7 +134,7 @@  static void __init cpupool_gran_init(void)
 
     while ( gran == 0 )
     {
-        gran = cpupool_check_granularity();
+        gran = cpupool_check_granularity(opt_sched_granularity);
 
         if ( gran == 0 )
         {
@@ -153,6 +160,9 @@  static void __init cpupool_gran_init(void)
     if ( fallback )
         warning_add(fallback);
 
+    if ( opt_sched_granularity != SCHED_GRAN_cpu )
+        sched_disable_smt_switching = true;
+
     sched_granularity = gran;
     sched_gran_print(opt_sched_granularity, sched_granularity);
 }
@@ -1088,13 +1098,58 @@  static int cpupool_gran_read(const struct hypfs_entry *entry,
     return copy_to_guest(uaddr, name, strlen(name) + 1) ? -EFAULT : 0;
 }
 
+static int cpupool_gran_write(struct hypfs_entry_leaf *leaf,
+                              XEN_GUEST_HANDLE_PARAM(void) uaddr,
+                              unsigned int ulen)
+{
+    const struct hypfs_dyndir_id *data;
+    struct cpupool *cpupool;
+    enum sched_gran gran;
+    unsigned int sched_gran;
+    char name[SCHED_GRAN_NAME_LEN];
+    int ret = 0;
+
+    if ( ulen > SCHED_GRAN_NAME_LEN )
+        return -ENOSPC;
+
+    if ( copy_from_guest(name, uaddr, ulen) )
+        return -EFAULT;
+
+    sched_gran = sched_gran_get(name, &gran) ? 0
+                                             : cpupool_check_granularity(gran);
+    if ( memchr(name, 0, ulen) != (name + ulen - 1) || sched_gran == 0 )
+        return -EINVAL;
+
+    data = hypfs_get_dyndata();
+    if ( !data )
+        return -ENOENT;
+
+    spin_lock(&cpupool_lock);
+
+    cpupool = __cpupool_find_by_id(data->id, true);
+    if ( !cpupool )
+        ret = -ENOENT;
+    else if ( !cpumask_empty(cpupool->cpu_valid) )
+        ret = -EBUSY;
+    else
+    {
+        cpupool->gran = gran;
+        cpupool->sched_gran = sched_gran;
+    }
+
+    spin_unlock(&cpupool_lock);
+
+    return ret;
+}
+
 static struct hypfs_funcs cpupool_gran_funcs = {
     .read = cpupool_gran_read,
+    .write = cpupool_gran_write,
     .getsize = hypfs_getsize,
 };
 
 static HYPFS_VARSIZE_INIT(cpupool_gran, XEN_HYPFS_TYPE_STRING, "sched-gran",
-                          0, &cpupool_gran_funcs);
+                          SCHED_GRAN_NAME_LEN, &cpupool_gran_funcs);
 static char granstr[SCHED_GRAN_NAME_LEN] = {
     [0 ... SCHED_GRAN_NAME_LEN - 2] = '?',
     [SCHED_GRAN_NAME_LEN - 1] = 0