
[v1,1/1] block/gluster: memory usage: use one glfs instance per volume

Message ID 1477565163-10475-1-git-send-email-prasanna.kalever@redhat.com
State New, archived

Commit Message

Prasanna Kumar Kalever Oct. 27, 2016, 10:46 a.m. UTC
Currently, for every drive accessed via gfapi we create a new glfs
instance (a glfs_new() call followed by glfs_init()), which can
consume a few hundred MB of memory; from the table below it looks
like each instance consumes ~300 MB of VSZ.

Before:
-------
Disks   VSZ     RSS
1       1098728 187756
2       1430808 198656
3       1764932 199704
4       2084728 202684

This patch maintains a list of pre-opened glfs objects. On adding
a new drive belonging to the same gluster volume, we just reuse the
existing glfs object by updating its refcount.

With this approach we avoid the unwanted memory consumption and the
extra glfs_new/glfs_init calls when accessing a disk (file) that
belongs to an already-opened volume.
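
In outline (a sketch using the helpers this patch introduces):

    /* on open: reuse the cached connection for this volume if any */
    glfs = glfs_find_preopened(gconf->volume);   /* hit: refcount++ */
    if (!glfs) {
        glfs = glfs_new(gconf->volume);          /* first user of volume */
        glfs_set_preopened(gconf->volume, glfs); /* cache with ref = 1 */
    }

    /* on close: drop one reference; glfs_fini() runs only at ref == 0 */
    glfs_clear_preopened(glfs);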

From the table below, notice that the memory usage after adding a
disk (which now reuses the existing glfs object) is negligible
compared to before.

After:
------
Disks   VSZ     RSS
1       1101964 185768
2       1109604 194920
3       1114012 196036
4       1114496 199868

Disks: number of -drive
VSZ: virtual memory size of the process in KiB
RSS: resident set size, the non-swapped physical memory (in KiB)

VSZ and RSS are analyzed using the 'ps aux' utility.
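
As a cross-check, the same figures can be read directly from
/proc/<pid>/status; a minimal sketch (not part of the patch), where
pid is the QEMU process to inspect:

    /* Print VmSize (VSZ) and VmRSS (RSS), both reported in KiB. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    static void print_mem_usage(pid_t pid)
    {
        char path[64], line[256];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%d/status", (int)pid);
        f = fopen(path, "r");
        if (!f) {
            perror("fopen");
            return;
        }
        while (fgets(line, sizeof(line), f)) {
            if (strncmp(line, "VmSize:", 7) == 0 ||
                strncmp(line, "VmRSS:", 6) == 0) {
                fputs(line, stdout);
            }
        }
        fclose(f);
    }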

Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
---
 block/gluster.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 91 insertions(+), 14 deletions(-)

Comments

Jeff Cody Oct. 27, 2016, 1:08 p.m. UTC | #1
On Thu, Oct 27, 2016 at 04:16:03PM +0530, Prasanna Kumar Kalever wrote:
> Currently, for every drive accessed via gfapi we create a new glfs
> instance (a glfs_new() call followed by glfs_init()), which can
> consume a few hundred MB of memory; from the table below it looks
> like each instance consumes ~300 MB of VSZ.
> 
> Before:
> -------
> Disks   VSZ     RSS
> 1       1098728 187756
> 2       1430808 198656
> 3       1764932 199704
> 4       2084728 202684
> 
> This patch maintains a list of pre-opened glfs objects. On adding
> a new drive belonging to the same gluster volume, we just reuse the
> existing glfs object by updating its refcount.
> 
> With this approach we avoid the unwanted memory consumption and the
> extra glfs_new/glfs_init calls when accessing a disk (file) that
> belongs to an already-opened volume.
> 
> From the table below, notice that the memory usage after adding a
> disk (which now reuses the existing glfs object) is negligible
> compared to before.
> 
> After:
> ------
> Disks   VSZ     RSS
> 1       1101964 185768
> 2       1109604 194920
> 3       1114012 196036
> 4       1114496 199868
> 
> Disks: number of -drive
> VSZ: virtual memory size of the process in KiB
> RSS: resident set size, the non-swapped physical memory (in KiB)
> 
> VSZ and RSS are analyzed using the 'ps aux' utility.
> 
> Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
> ---
>  block/gluster.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 91 insertions(+), 14 deletions(-)
> 
> diff --git a/block/gluster.c b/block/gluster.c
> index 01b479f..367d692 100644
> --- a/block/gluster.c
> +++ b/block/gluster.c
> @@ -54,6 +54,19 @@ typedef struct BDRVGlusterReopenState {
>  } BDRVGlusterReopenState;
>  
>  
> +typedef struct GlfsPreopened {
> +    char *volume;
> +    glfs_t *fs;
> +    int ref;
> +} GlfsPreopened;
> +
> +typedef struct ListElement {
> +    QLIST_ENTRY(ListElement) list;
> +    GlfsPreopened saved;
> +} ListElement;
> +
> +static QLIST_HEAD(glfs_list, ListElement) glfs_list;
> +
>  static QemuOptsList qemu_gluster_create_opts = {
>      .name = "qemu-gluster-create-opts",
>      .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
> @@ -182,6 +195,63 @@ static QemuOptsList runtime_tcp_opts = {
>      },
>  };
>  
> +static int glfs_set_preopened(const char *volume, glfs_t *fs)
> +{
> +    ListElement *entry = NULL;
> +
> +    entry = g_new(ListElement, 1);
> +    if (!entry) {

This NULL check is not needed; g_new() is guaranteed to succeed if it
returns. If any glib allocation function fails, it will abort and
terminate the application (it is considered a fatal error).  So the QEMU
coding convention is to not check for NULL after glib allocation
functions.  The exceptions to this are the g_try_* functions, of course.
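
For illustration, a minimal sketch (not QEMU code) of the difference
between the two families:

    #include <glib.h>

    typedef struct Example {
        int value;
    } Example;

    static void demo(void)
    {
        /* g_new() never returns NULL: glib aborts on allocation failure */
        Example *a = g_new(Example, 1);

        /* g_try_new() returns NULL on failure and must be checked */
        Example *b = g_try_new(Example, 1);
        if (!b) {
            g_free(a);
            return;
        }

        g_free(a);
        g_free(b);
    }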


> +        errno = ENOMEM;
> +        return -1;
> +    }
> +
> +    entry->saved.volume = g_strdup(volume);
> +    if (!entry->saved.volume) {

Same here.

> +        g_free(entry->saved.volume);
> +        errno = ENOMEM;
> +        return -1;
> +    }
> +
> +    entry->saved.fs = fs;
> +    entry->saved.ref = 1;
> +
> +    QLIST_INSERT_HEAD(&glfs_list, entry, list);
> +
> +    return 0;

Without the need for memory error checking, this function can just return
void.
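
With both suggestions applied, the function might shrink to something
like this (a sketch assuming the ListElement and glfs_list definitions
above, not the committed code):

    static void glfs_set_preopened(const char *volume, glfs_t *fs)
    {
        ListElement *entry = g_new(ListElement, 1);

        entry->saved.volume = g_strdup(volume);
        entry->saved.fs = fs;
        entry->saved.ref = 1;

        QLIST_INSERT_HEAD(&glfs_list, entry, list);
    }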

> +}
> +
> +static glfs_t *glfs_find_preopened(const char *volume)
> +{
> +    ListElement *entry = NULL;
> +
> +     QLIST_FOREACH(entry, &glfs_list, list) {
> +        if (strcmp(entry->saved.volume, volume) == 0) {
> +            entry->saved.ref++;
> +            return entry->saved.fs;
> +        }
> +     }
> +
> +    return NULL;
> +}
> +
> +static void glfs_clear_preopened(glfs_t *fs)
> +{
> +    ListElement *entry = NULL;
> +

Maybe short circuit with a return if fs==NULL?

> +    QLIST_FOREACH(entry, &glfs_list, list) {
> +        if (entry->saved.fs == fs) {
> +            if (--entry->saved.ref) {
> +                return;
> +            }
> +
> +            QLIST_REMOVE(entry, list);
> +
> +            glfs_fini(entry->saved.fs);

You need to g_free(entry->saved.volume) as well.
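
Folding that in, together with the NULL short-circuit suggested above
and a return once the entry is freed (QLIST_FOREACH would otherwise
read the freed entry's next pointer), a corrected version might look
like this sketch:

    static void glfs_clear_preopened(glfs_t *fs)
    {
        ListElement *entry = NULL;

        if (fs == NULL) {
            return;
        }

        QLIST_FOREACH(entry, &glfs_list, list) {
            if (entry->saved.fs == fs) {
                if (--entry->saved.ref) {
                    return;
                }

                QLIST_REMOVE(entry, list);

                glfs_fini(entry->saved.fs);
                g_free(entry->saved.volume);
                g_free(entry);
                return; /* entry is freed; stop iterating */
            }
        }
    }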

> +            g_free(entry);


> +        }
> +    }
> +}
> +
>  static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
>  {
>      char *p, *q;
> @@ -319,11 +389,23 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
>      int old_errno;
>      GlusterServerList *server;
>  
> +    glfs = glfs_find_preopened(gconf->volume);
> +    if (glfs) {
> +        return glfs;
> +    }
> +
>      glfs = glfs_new(gconf->volume);
>      if (!glfs) {
>          goto out;
>      }
>  
> +    ret = glfs_set_preopened(gconf->volume, glfs);
> +    if (ret < 0) {
> +        error_setg(errp, "glfs_set_preopened: Failed to register volume (%s)",
> +                   gconf->volume);
> +        goto out;
> +    }
> +
>      for (server = gconf->server; server; server = server->next) {
>          if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
>              ret = glfs_set_volfile_server(glfs,
> @@ -375,7 +457,7 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
>  out:
>      if (glfs) {
>          old_errno = errno;
> -        glfs_fini(glfs);
> +        glfs_clear_preopened(glfs);
>          errno = old_errno;
>      }
>      return NULL;
> @@ -741,9 +823,9 @@ out:
>      if (s->fd) {
>          glfs_close(s->fd);
>      }
> -    if (s->glfs) {
> -        glfs_fini(s->glfs);
> -    }
> +
> +    glfs_clear_preopened(s->glfs);
> +
>      return ret;
>  }
>  
> @@ -808,9 +890,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState *state)
>      if (s->fd) {
>          glfs_close(s->fd);
>      }
> -    if (s->glfs) {
> -        glfs_fini(s->glfs);
> -    }
> +
> +    glfs_clear_preopened(s->glfs);
>  
>      /* use the newly opened image / connection */
>      s->fd         = reop_s->fd;
> @@ -835,9 +916,7 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state)
>          glfs_close(reop_s->fd);
>      }
>  
> -    if (reop_s->glfs) {
> -        glfs_fini(reop_s->glfs);
> -    }
> +    glfs_clear_preopened(reop_s->glfs);
>  
>      g_free(state->opaque);
>      state->opaque = NULL;
> @@ -955,9 +1034,7 @@ static int qemu_gluster_create(const char *filename,
>  out:
>      g_free(tmp);
>      qapi_free_BlockdevOptionsGluster(gconf);
> -    if (glfs) {
> -        glfs_fini(glfs);
> -    }
> +    glfs_clear_preopened(glfs);
>      return ret;
>  }
>  
> @@ -1029,7 +1106,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
>          glfs_close(s->fd);
>          s->fd = NULL;
>      }
> -    glfs_fini(s->glfs);
> +    glfs_clear_preopened(s->glfs);
>  }
>  
>  static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
> -- 
> 2.7.4
>
Prasanna Kalever Oct. 27, 2016, 3:16 p.m. UTC | #2
On Thu, Oct 27, 2016 at 6:38 PM, Jeff Cody <jcody@redhat.com> wrote:
> On Thu, Oct 27, 2016 at 04:16:03PM +0530, Prasanna Kumar Kalever wrote:
>> Currently, for every drive accessed via gfapi we create a new glfs
>> instance (a glfs_new() call followed by glfs_init()), which can
>> consume a few hundred MB of memory; from the table below it looks
>> like each instance consumes ~300 MB of VSZ.
>>
>> Before:
>> -------
>> Disks   VSZ     RSS
>> 1       1098728 187756
>> 2       1430808 198656
>> 3       1764932 199704
>> 4       2084728 202684
>>
>> This patch maintains a list of pre-opened glfs objects. On adding
>> a new drive belonging to the same gluster volume, we just reuse the
>> existing glfs object by updating its refcount.
>>
>> With this approach we avoid the unwanted memory consumption and the
>> extra glfs_new/glfs_init calls when accessing a disk (file) that
>> belongs to an already-opened volume.
>>
>> From the table below, notice that the memory usage after adding a
>> disk (which now reuses the existing glfs object) is negligible
>> compared to before.
>>
>> After:
>> ------
>> Disks   VSZ     RSS
>> 1       1101964 185768
>> 2       1109604 194920
>> 3       1114012 196036
>> 4       1114496 199868
>>
>> Disks: number of -drive
>> VSZ: virtual memory size of the process in KiB
>> RSS: resident set size, the non-swapped physical memory (in KiB)
>>
>> VSZ and RSS are analyzed using the 'ps aux' utility.
>>
>> Signed-off-by: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
>> ---
>>  block/gluster.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++++--------
>>  1 file changed, 91 insertions(+), 14 deletions(-)
>>
>> diff --git a/block/gluster.c b/block/gluster.c
>> index 01b479f..367d692 100644
>> --- a/block/gluster.c
>> +++ b/block/gluster.c
>> @@ -54,6 +54,19 @@ typedef struct BDRVGlusterReopenState {
>>  } BDRVGlusterReopenState;
>>
>>
>> +typedef struct GlfsPreopened {
>> +    char *volume;
>> +    glfs_t *fs;
>> +    int ref;
>> +} GlfsPreopened;
>> +
>> +typedef struct ListElement {
>> +    QLIST_ENTRY(ListElement) list;
>> +    GlfsPreopened saved;
>> +} ListElement;
>> +
>> +static QLIST_HEAD(glfs_list, ListElement) glfs_list;
>> +
>>  static QemuOptsList qemu_gluster_create_opts = {
>>      .name = "qemu-gluster-create-opts",
>>      .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
>> @@ -182,6 +195,63 @@ static QemuOptsList runtime_tcp_opts = {
>>      },
>>  };
>>
>> +static int glfs_set_preopened(const char *volume, glfs_t *fs)
>> +{
>> +    ListElement *entry = NULL;
>> +
>> +    entry = g_new(ListElement, 1);
>> +    if (!entry) {
>
> This NULL check is not needed; g_new() is guaranteed to succeed if it
> returns. If any glib allocation function fails, it will abort and
> terminate the application (it is considered a fatal error).  So the QEMU
> coding convention is to not check for NULL after glib allocation
> functions.  The exceptions to this are the g_try_* functions, of course.

Perfect!
Will remove the defensive checks for glib allocations throughout the patch.

>
>
>> +        errno = ENOMEM;
>> +        return -1;
>> +    }
>> +
>> +    entry->saved.volume = g_strdup(volume);
>> +    if (!entry->saved.volume) {
>
> Same here.
>
>> +        g_free(entry->saved.volume);
>> +        errno = ENOMEM;
>> +        return -1;
>> +    }
>> +
>> +    entry->saved.fs = fs;
>> +    entry->saved.ref = 1;
>> +
>> +    QLIST_INSERT_HEAD(&glfs_list, entry, list);
>> +
>> +    return 0;
>
> Without the need for memory error checking, this function can just return
> void.

yes.

>
>> +}
>> +
>> +static glfs_t *glfs_find_preopened(const char *volume)
>> +{
>> +    ListElement *entry = NULL;
>> +
>> +     QLIST_FOREACH(entry, &glfs_list, list) {
>> +        if (strcmp(entry->saved.volume, volume) == 0) {
>> +            entry->saved.ref++;
>> +            return entry->saved.fs;
>> +        }
>> +     }
>> +
>> +    return NULL;
>> +}
>> +
>> +static void glfs_clear_preopened(glfs_t *fs)
>> +{
>> +    ListElement *entry = NULL;
>> +
>
> Maybe short circuit with a return if fs==NULL?

Worth adding.

>
>> +    QLIST_FOREACH(entry, &glfs_list, list) {
>> +        if (entry->saved.fs == fs) {
>> +            if (--entry->saved.ref) {
>> +                return;
>> +            }
>> +
>> +            QLIST_REMOVE(entry, list);
>> +
>> +            glfs_fini(entry->saved.fs);
>
> You need to g_free(entry->saved.volume) as well.

You caught it ;)

Thanks Jeff, will accommodate all the suggestions in the next spin.

--
Prasanna


Patch

diff --git a/block/gluster.c b/block/gluster.c
index 01b479f..367d692 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -54,6 +54,19 @@ typedef struct BDRVGlusterReopenState {
 } BDRVGlusterReopenState;
 
 
+typedef struct GlfsPreopened {
+    char *volume;
+    glfs_t *fs;
+    int ref;
+} GlfsPreopened;
+
+typedef struct ListElement {
+    QLIST_ENTRY(ListElement) list;
+    GlfsPreopened saved;
+} ListElement;
+
+static QLIST_HEAD(glfs_list, ListElement) glfs_list;
+
 static QemuOptsList qemu_gluster_create_opts = {
     .name = "qemu-gluster-create-opts",
     .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -182,6 +195,63 @@ static QemuOptsList runtime_tcp_opts = {
     },
 };
 
+static int glfs_set_preopened(const char *volume, glfs_t *fs)
+{
+    ListElement *entry = NULL;
+
+    entry = g_new(ListElement, 1);
+    if (!entry) {
+        errno = ENOMEM;
+        return -1;
+    }
+
+    entry->saved.volume = g_strdup(volume);
+    if (!entry->saved.volume) {
+        g_free(entry->saved.volume);
+        errno = ENOMEM;
+        return -1;
+    }
+
+    entry->saved.fs = fs;
+    entry->saved.ref = 1;
+
+    QLIST_INSERT_HEAD(&glfs_list, entry, list);
+
+    return 0;
+}
+
+static glfs_t *glfs_find_preopened(const char *volume)
+{
+    ListElement *entry = NULL;
+
+     QLIST_FOREACH(entry, &glfs_list, list) {
+        if (strcmp(entry->saved.volume, volume) == 0) {
+            entry->saved.ref++;
+            return entry->saved.fs;
+        }
+     }
+
+    return NULL;
+}
+
+static void glfs_clear_preopened(glfs_t *fs)
+{
+    ListElement *entry = NULL;
+
+    QLIST_FOREACH(entry, &glfs_list, list) {
+        if (entry->saved.fs == fs) {
+            if (--entry->saved.ref) {
+                return;
+            }
+
+            QLIST_REMOVE(entry, list);
+
+            glfs_fini(entry->saved.fs);
+            g_free(entry);
+        }
+    }
+}
+
 static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
     char *p, *q;
@@ -319,11 +389,23 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
     int old_errno;
     GlusterServerList *server;
 
+    glfs = glfs_find_preopened(gconf->volume);
+    if (glfs) {
+        return glfs;
+    }
+
     glfs = glfs_new(gconf->volume);
     if (!glfs) {
         goto out;
     }
 
+    ret = glfs_set_preopened(gconf->volume, glfs);
+    if (ret < 0) {
+        error_setg(errp, "glfs_set_preopened: Failed to register volume (%s)",
+                   gconf->volume);
+        goto out;
+    }
+
     for (server = gconf->server; server; server = server->next) {
         if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
             ret = glfs_set_volfile_server(glfs,
@@ -375,7 +457,7 @@ static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 out:
     if (glfs) {
         old_errno = errno;
-        glfs_fini(glfs);
+        glfs_clear_preopened(glfs);
         errno = old_errno;
     }
     return NULL;
@@ -741,9 +823,9 @@ out:
     if (s->fd) {
         glfs_close(s->fd);
     }
-    if (s->glfs) {
-        glfs_fini(s->glfs);
-    }
+
+    glfs_clear_preopened(s->glfs);
+
     return ret;
 }
 
@@ -808,9 +890,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState *state)
     if (s->fd) {
         glfs_close(s->fd);
     }
-    if (s->glfs) {
-        glfs_fini(s->glfs);
-    }
+
+    glfs_clear_preopened(s->glfs);
 
     /* use the newly opened image / connection */
     s->fd         = reop_s->fd;
@@ -835,9 +916,7 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state)
         glfs_close(reop_s->fd);
     }
 
-    if (reop_s->glfs) {
-        glfs_fini(reop_s->glfs);
-    }
+    glfs_clear_preopened(reop_s->glfs);
 
     g_free(state->opaque);
     state->opaque = NULL;
@@ -955,9 +1034,7 @@ static int qemu_gluster_create(const char *filename,
 out:
     g_free(tmp);
     qapi_free_BlockdevOptionsGluster(gconf);
-    if (glfs) {
-        glfs_fini(glfs);
-    }
+    glfs_clear_preopened(glfs);
     return ret;
 }
 
@@ -1029,7 +1106,7 @@ static void qemu_gluster_close(BlockDriverState *bs)
         glfs_close(s->fd);
         s->fd = NULL;
     }
-    glfs_fini(s->glfs);
+    glfs_clear_preopened(s->glfs);
 }
 
 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)