
[v2,15/19] tools/xenstore: switch hashtable to use the talloc framework

Message ID 20221213160045.28170-16-jgross@suse.com (mailing list archive)
State Superseded
Series tools/xenstore: do some cleanup and fixes

Commit Message

Jürgen Groß Dec. 13, 2022, 4 p.m. UTC
Instead of using malloc() and friends, let the hashtable implementation
use the talloc framework.

This is more consistent with the rest of xenstored, and it allows tracking
memory usage via "xenstore-control memreport".

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 tools/xenstore/hashtable.c        | 86 ++++++++++++-------------------
 tools/xenstore/hashtable.h        |  3 +-
 tools/xenstore/xenstored_core.c   |  5 +-
 tools/xenstore/xenstored_domain.c |  2 +-
 4 files changed, 39 insertions(+), 57 deletions(-)
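
For context, the key property of talloc used by this patch: every allocation
hangs off a parent context, so a whole tree of allocations can be freed, or
accounted for, with a single call. A minimal sketch of the idiom, using only
the public talloc API (illustrative only, not part of the patch):

    #include <stdio.h>
    #include "talloc.h"

    struct entry { int v; };

    int main(void)
    {
        void *ctx = talloc_new(NULL);                     /* parent context */
        struct entry *e = talloc_zero(ctx, struct entry); /* was calloc() */
        char *k = talloc_strdup(ctx, "key");       /* was malloc()+strcpy() */

        (void)e; (void)k;
        /* Per-context accounting; the kind of report memreport builds on. */
        talloc_report_full(ctx, stdout);
        talloc_free(ctx);                /* frees ctx, e and k in one go */
        return 0;
    }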

Comments

Julien Grall Dec. 20, 2022, 9:50 p.m. UTC | #1
Hi Juergen,

On 13/12/2022 16:00, Juergen Gross wrote:
> @@ -115,47 +117,32 @@ hashtable_expand(struct hashtable *h)
>       if (h->primeindex == (prime_table_length - 1)) return 0;
>       newsize = primes[++(h->primeindex)];
>   
> -    newtable = (struct entry **)calloc(newsize, sizeof(struct entry*));
> -    if (NULL != newtable)
> +    newtable = talloc_realloc(h, h->table, struct entry *, newsize);
> +    if (!newtable)
>       {
> -        /* This algorithm is not 'stable'. ie. it reverses the list
> -         * when it transfers entries between the tables */
> -        for (i = 0; i < h->tablelength; i++) {
> -            while (NULL != (e = h->table[i])) {
> -                h->table[i] = e->next;
> -                index = indexFor(newsize,e->h);
> +        h->primeindex--;
> +        return 0;
> +    }
> +
> +    h->table = newtable;
> +    memset(newtable + h->tablelength, 0,
> +           (newsize - h->tablelength) * sizeof(*newtable));
> +    for (i = 0; i < h->tablelength; i++) {

I understand this code is taken from the realloc path. However, isn't 
this algorithm also not 'stable'? If so, I think we should move the 
comment here.

> +        for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
> +            index = indexFor(newsize,e->h);

Missing space after ",".

> +            if (index == i)
> +            {
> +                pE = &(e->next);
> +            }
> +            else
> +            {
> +                *pE = e->next;
>                   e->next = newtable[index];
>                   newtable[index] = e;
>               }
>           }
> [...]
> @@ -280,25 +267,20 @@ void
>   hashtable_destroy(struct hashtable *h)
>   {
>       unsigned int i;
> -    struct entry *e, *f;
> +    struct entry *e;
>       struct entry **table = h->table;
>   
>       for (i = 0; i < h->tablelength; i++)
>       {
> -        e = table[i];
> -        while (NULL != e)
> +        for (e = table[i]; e; e = e->next)
>           {
> -            f = e;
> -            e = e->next;
>               if (h->flags & HASHTABLE_FREE_KEY)
> -                free(f->k);
> +                talloc_free(e->k);
>               if (h->flags & HASHTABLE_FREE_VALUE)
> -                free(f->v);
> -            free(f);

AFAIU, the loop is reworked so that you let talloc free each element with 
the parent. Using a for loop is definitely cleaner, but now you will end 
up with two separate loops over the elements.

There is a risk that the overall performance of hashtable_destroy() will 
be worse, as the data accessed in the first loop may not fit in the cache, 
so you will have to reload it for the second loop.

Therefore, I think it would be better to keep the loop as-is.

Cheers,
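
For illustration, keeping the original single-pass structure under talloc
would look roughly like this (a sketch only, not code from the series):

    for (i = 0; i < h->tablelength; i++)
    {
        e = table[i];
        while (e)
        {
            f = e;
            e = e->next;
            /* Entry, key and value are freed while still cache-hot;
             * talloc_free() also unlinks each child from its parent. */
            if (h->flags & HASHTABLE_FREE_KEY)
                talloc_free(f->k);
            if (h->flags & HASHTABLE_FREE_VALUE)
                talloc_free(f->v);
            talloc_free(f);
        }
    }
    talloc_free(h);
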
Jürgen Groß Jan. 11, 2023, 9:27 a.m. UTC | #2
On 20.12.22 22:50, Julien Grall wrote:
> Hi Juergen,
> 
> On 13/12/2022 16:00, Juergen Gross wrote:
>> @@ -115,47 +117,32 @@ hashtable_expand(struct hashtable *h)
>>       if (h->primeindex == (prime_table_length - 1)) return 0;
>>       newsize = primes[++(h->primeindex)];
>> -    newtable = (struct entry **)calloc(newsize, sizeof(struct entry*));
>> -    if (NULL != newtable)
>> +    newtable = talloc_realloc(h, h->table, struct entry *, newsize);
>> +    if (!newtable)
>>       {
>> -        /* This algorithm is not 'stable'. ie. it reverses the list
>> -         * when it transfers entries between the tables */
>> -        for (i = 0; i < h->tablelength; i++) {
>> -            while (NULL != (e = h->table[i])) {
>> -                h->table[i] = e->next;
>> -                index = indexFor(newsize,e->h);
>> +        h->primeindex--;
>> +        return 0;
>> +    }
>> +
>> +    h->table = newtable;
>> +    memset(newtable + h->tablelength, 0,
>> +           (newsize - h->tablelength) * sizeof(*newtable));
>> +    for (i = 0; i < h->tablelength; i++) {
> 
> I understand this code is taken from the realloc path. However, isn't this 
> algorithm also not 'stable'? If so, I think we should move the comment here.

I'm fine with that, even though I don't see how it would matter. There is no
guarantee regarding the order of entries for a given index.

> 
>> +        for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
>> +            index = indexFor(newsize,e->h);
> 
> Missing space after ",".

Will fix.

> 
>> [...]
>> @@ -280,25 +267,20 @@ void
>>   hashtable_destroy(struct hashtable *h)
>>   {
>>       unsigned int i;
>> -    struct entry *e, *f;
>> +    struct entry *e;
>>       struct entry **table = h->table;
>>       for (i = 0; i < h->tablelength; i++)
>>       {
>> -        e = table[i];
>> -        while (NULL != e)
>> +        for (e = table[i]; e; e = e->next)
>>           {
>> -            f = e;
>> -            e = e->next;
>>               if (h->flags & HASHTABLE_FREE_KEY)
>> -                free(f->k);
>> +                talloc_free(e->k);
>>               if (h->flags & HASHTABLE_FREE_VALUE)
>> -                free(f->v);
>> -            free(f);
> 
> AFAIU, the loop is reworked so that you let talloc free each element with 
> the parent. Using a for loop is definitely cleaner, but now you will end 
> up with two separate loops over the elements.
> 
> There is a risk that the overall performance of hashtable_destroy() will 
> be worse, as the data accessed in the first loop may not fit in the cache, 
> so you will have to reload it for the second loop.
> 
> Therefore, I think it would be better to keep the loop as-is.

What about a completely different approach? I could make the key and the
value talloc children of e when _inserting_ the element, provided the
related flag is set. This would reduce hashtable_destroy() to a single
talloc_free().


Juergen
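
A rough sketch of that idea, assuming the standard talloc_steal() call to
reparent an existing allocation (illustrative only; it also assumes callers
allocate keys and values with talloc, as remember_string() does in the patch
below):

    /* In hashtable_insert(), after allocating the entry: */
    e = talloc_zero(h, struct entry);
    if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
    if (h->flags & HASHTABLE_FREE_KEY)
        talloc_steal(e, k);   /* key is now freed together with e */
    if (h->flags & HASHTABLE_FREE_VALUE)
        talloc_steal(e, v);   /* likewise for the value */

    /* hashtable_destroy() then collapses to: */
    talloc_free(h);
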
Julien Grall Jan. 11, 2023, 5:50 p.m. UTC | #3
Hi,

On 11/01/2023 09:27, Juergen Gross wrote:
> On 20.12.22 22:50, Julien Grall wrote:
>> Hi Juergen,
>>
>> On 13/12/2022 16:00, Juergen Gross wrote:
>>> @@ -115,47 +117,32 @@ hashtable_expand(struct hashtable *h)
>>>       if (h->primeindex == (prime_table_length - 1)) return 0;
>>>       newsize = primes[++(h->primeindex)];
>>> -    newtable = (struct entry **)calloc(newsize, sizeof(struct entry*));
>>> -    if (NULL != newtable)
>>> +    newtable = talloc_realloc(h, h->table, struct entry *, newsize);
>>> +    if (!newtable)
>>>       {
>>> -        /* This algorithm is not 'stable'. ie. it reverses the list
>>> -         * when it transfers entries between the tables */
>>> -        for (i = 0; i < h->tablelength; i++) {
>>> -            while (NULL != (e = h->table[i])) {
>>> -                h->table[i] = e->next;
>>> -                index = indexFor(newsize,e->h);
>>> +        h->primeindex--;
>>> +        return 0;
>>> +    }
>>> +
>>> +    h->table = newtable;
>>> +    memset(newtable + h->tablelength, 0,
>>> +           (newsize - h->tablelength) * sizeof(*newtable));
>>> +    for (i = 0; i < h->tablelength; i++) {
>>
>> I understand this code is taken from the realloc path. However, isn't 
>> this algorithm also not 'stable'? If so, I think we should move the 
>> comment here.
> 
> I'm fine with that, even though I don't see how it would matter. There is
> no guarantee regarding the order of entries for a given index.

That's a fair point. Feel free to ignore my comment then :).

>>> [...]
>>> @@ -280,25 +267,20 @@ void
>>>   hashtable_destroy(struct hashtable *h)
>>>   {
>>>       unsigned int i;
>>> -    struct entry *e, *f;
>>> +    struct entry *e;
>>>       struct entry **table = h->table;
>>>       for (i = 0; i < h->tablelength; i++)
>>>       {
>>> -        e = table[i];
>>> -        while (NULL != e)
>>> +        for (e = table[i]; e; e = e->next)
>>>           {
>>> -            f = e;
>>> -            e = e->next;
>>>               if (h->flags & HASHTABLE_FREE_KEY)
>>> -                free(f->k);
>>> +                talloc_free(e->k);
>>>               if (h->flags & HASHTABLE_FREE_VALUE)
>>> -                free(f->v);
>>> -            free(f);
>>
>> AFAIU, the loop is reworked so that you let talloc free each element 
>> with the parent. Using a for loop is definitely cleaner, but now you 
>> will end up with two separate loops over the elements.
>>
>> There is a risk that the overall performance of hashtable_destroy() 
>> will be worse, as the data accessed in the first loop may not fit in 
>> the cache, so you will have to reload it for the second loop.
>>
>> Therefore, I think it would be better to keep the loop as-is.
> 
> What about a completely different approach? I could make the key and the
> value talloc children of e when _inserting_ the element, provided the
> related flag is set. This would reduce hashtable_destroy() to a single
> talloc_free().

I am fine with that.

Cheers,

Patch

diff --git a/tools/xenstore/hashtable.c b/tools/xenstore/hashtable.c
index 299549c51e..3b6745b692 100644
--- a/tools/xenstore/hashtable.c
+++ b/tools/xenstore/hashtable.c
@@ -6,6 +6,8 @@ 
 #include <string.h>
 #include <math.h>
 #include <stdint.h>
+#include <stdarg.h>
+#include "talloc.h"
 
 struct entry
 {
@@ -50,7 +52,7 @@  indexFor(unsigned int tablelength, unsigned int hashvalue) {
 
 /*****************************************************************************/
 struct hashtable *
-create_hashtable(unsigned int minsize,
+create_hashtable(const void *ctx, unsigned int minsize,
                  unsigned int (*hashf) (void*),
                  int (*eqf) (void*,void*),
                  unsigned int flags)
@@ -66,10 +68,10 @@  create_hashtable(unsigned int minsize,
         if (primes[pindex] > minsize) { size = primes[pindex]; break; }
     }
 
-    h = (struct hashtable *)calloc(1, sizeof(struct hashtable));
+    h = talloc_zero(ctx, struct hashtable);
     if (NULL == h)
         goto err0;
-    h->table = (struct entry **)calloc(size, sizeof(struct entry *));
+    h->table = talloc_zero_array(h, struct entry *, size);
     if (NULL == h->table)
         goto err1;
 
@@ -83,7 +85,7 @@  create_hashtable(unsigned int minsize,
     return h;
 
 err1:
-   free(h);
+   talloc_free(h);
 err0:
    return NULL;
 }
@@ -115,47 +117,32 @@  hashtable_expand(struct hashtable *h)
     if (h->primeindex == (prime_table_length - 1)) return 0;
     newsize = primes[++(h->primeindex)];
 
-    newtable = (struct entry **)calloc(newsize, sizeof(struct entry*));
-    if (NULL != newtable)
+    newtable = talloc_realloc(h, h->table, struct entry *, newsize);
+    if (!newtable)
     {
-        /* This algorithm is not 'stable'. ie. it reverses the list
-         * when it transfers entries between the tables */
-        for (i = 0; i < h->tablelength; i++) {
-            while (NULL != (e = h->table[i])) {
-                h->table[i] = e->next;
-                index = indexFor(newsize,e->h);
+        h->primeindex--;
+        return 0;
+    }
+
+    h->table = newtable;
+    memset(newtable + h->tablelength, 0,
+           (newsize - h->tablelength) * sizeof(*newtable));
+    for (i = 0; i < h->tablelength; i++) {
+        for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
+            index = indexFor(newsize,e->h);
+            if (index == i)
+            {
+                pE = &(e->next);
+            }
+            else
+            {
+                *pE = e->next;
                 e->next = newtable[index];
                 newtable[index] = e;
             }
         }
-        free(h->table);
-        h->table = newtable;
-    }
-    /* Plan B: realloc instead */
-    else 
-    {
-        newtable = (struct entry **)
-                   realloc(h->table, newsize * sizeof(struct entry *));
-        if (NULL == newtable) { (h->primeindex)--; return 0; }
-        h->table = newtable;
-        memset(newtable + h->tablelength, 0,
-               (newsize - h->tablelength) * sizeof(*newtable));
-        for (i = 0; i < h->tablelength; i++) {
-            for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
-                index = indexFor(newsize,e->h);
-                if (index == i)
-                {
-                    pE = &(e->next);
-                }
-                else
-                {
-                    *pE = e->next;
-                    e->next = newtable[index];
-                    newtable[index] = e;
-                }
-            }
-        }
     }
+
     h->tablelength = newsize;
     h->loadlimit   = (unsigned int)
         (((uint64_t)newsize * max_load_factor) / 100);
@@ -184,7 +171,7 @@  hashtable_insert(struct hashtable *h, void *k, void *v)
          * element may be ok. Next time we insert, we'll try expanding again.*/
         hashtable_expand(h);
     }
-    e = (struct entry *)calloc(1, sizeof(struct entry));
+    e = talloc_zero(h, struct entry);
     if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
     e->h = hash(h,k);
     index = indexFor(h->tablelength,e->h);
@@ -238,8 +225,8 @@  hashtable_remove(struct hashtable *h, void *k)
             h->entrycount--;
             v = e->v;
             if (h->flags & HASHTABLE_FREE_KEY)
-                free(e->k);
-            free(e);
+                talloc_free(e->k);
+            talloc_free(e);
             return v;
         }
         pE = &(e->next);
@@ -280,25 +267,20 @@  void
 hashtable_destroy(struct hashtable *h)
 {
     unsigned int i;
-    struct entry *e, *f;
+    struct entry *e;
     struct entry **table = h->table;
 
     for (i = 0; i < h->tablelength; i++)
     {
-        e = table[i];
-        while (NULL != e)
+        for (e = table[i]; e; e = e->next)
         {
-            f = e;
-            e = e->next;
             if (h->flags & HASHTABLE_FREE_KEY)
-                free(f->k);
+                talloc_free(e->k);
             if (h->flags & HASHTABLE_FREE_VALUE)
-                free(f->v);
-            free(f);
+                talloc_free(e->v);
         }
     }
-    free(h->table);
-    free(h);
+    talloc_free(h);
 }
 
 /*
diff --git a/tools/xenstore/hashtable.h b/tools/xenstore/hashtable.h
index 6d65431f96..becec73092 100644
--- a/tools/xenstore/hashtable.h
+++ b/tools/xenstore/hashtable.h
@@ -9,6 +9,7 @@  struct hashtable;
  * create_hashtable
    
  * @name                    create_hashtable
+ * @param   ctx             talloc context to use for allocations
  * @param   minsize         minimum initial size of hashtable
  * @param   hashfunction    function for hashing keys
  * @param   key_eq_fn       function for determining key equality
@@ -22,7 +23,7 @@  struct hashtable;
 #define HASHTABLE_FREE_KEY   (1U << 1)
 
 struct hashtable *
-create_hashtable(unsigned int minsize,
+create_hashtable(const void *ctx, unsigned int minsize,
                  unsigned int (*hashfunction) (void*),
                  int (*key_eq_fn) (void*,void*),
                  unsigned int flags
diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index 6c9d22b8a2..60433265ed 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -2419,11 +2419,10 @@  static int keys_equal_fn(void *key1, void *key2)
 
 int remember_string(struct hashtable *hash, const char *str)
 {
-	char *k = malloc(strlen(str) + 1);
+	char *k = talloc_strdup(NULL, str);
 
 	if (!k)
 		return 0;
-	strcpy(k, str);
 	return hashtable_insert(hash, k, (void *)1);
 }
 
@@ -2518,7 +2517,7 @@  void check_store(void)
 	};
 
 	/* Don't free values (they are all void *1) */
-	reachable = create_hashtable(16, hash_from_key_fn, keys_equal_fn,
+	reachable = create_hashtable(NULL, 16, hash_from_key_fn, keys_equal_fn,
 				     HASHTABLE_FREE_KEY);
 	if (!reachable) {
 		log("check_store: ENOMEM");
diff --git a/tools/xenstore/xenstored_domain.c b/tools/xenstore/xenstored_domain.c
index 65e94e3822..4d735a5951 100644
--- a/tools/xenstore/xenstored_domain.c
+++ b/tools/xenstore/xenstored_domain.c
@@ -929,7 +929,7 @@  void domain_init(int evtfd)
 	int rc;
 
 	/* Start with a random rather low domain count for the hashtable. */
-	domhash = create_hashtable(8, domhash_fn, domeq_fn, 0);
+	domhash = create_hashtable(NULL, 8, domhash_fn, domeq_fn, 0);
 	if (!domhash)
 		barf_perror("Failed to allocate domain hashtable");