
[2/3] qht: fix unlock-after-free segfault upon resizing

Message ID 1475706880-10667-3-git-send-email-cota@braap.org (mailing list archive)
State New, archived

Commit Message

Emilio Cota Oct. 5, 2016, 10:34 p.m. UTC
The old map's bucket locks are being unlocked *after*
that same old map has been passed to RCU for destruction.
This is a bug that can cause a segfault, since there's
no guarantee that the deletion will be deferred (e.g.
there may be no concurrent readers).
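
In other words, the pre-patch flow in the callers looked roughly
like this (a simplified sketch, not the exact code):

    qht_map_lock_buckets(old);
    qht_do_resize(ht, new);      /* ends with call_rcu(old, qht_map_destroy, rcu) */
    qht_map_unlock_buckets(old); /* if reclamation is not deferred, 'old' (and
                                    its bucket locks) may already be gone */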

The segfault is easily triggered in RHEL6/CentOS6 with test-qht,
particularly on a single-core system or by pinning test-qht
to a single core.

Fix it by unlocking the map's bucket locks right after having
published the new map, and (crucially) before marking the map
for deletion via call_rcu().
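
With this fix, the ordering at the end of a resize (see the
qht_do_resize_reset() hunk below) becomes:

    atomic_rcu_set(&ht->map, new);       /* publish the new map to readers */
    qht_map_unlock_buckets(old);         /* 'old' is still valid here */
    call_rcu(old, qht_map_destroy, rcu); /* 'old' is reclaimed only after this */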

While at it, expand qht_do_resize() to atomically do (1) a reset,
(2) a resize, or (3) a reset+resize. This simplifies the calling
code, since the new function (qht_do_resize_reset()) acquires
and releases the buckets' locks.

Note that no qht_do_reset inline is provided, since it would have
no users--qht_reset() already performs a reset without taking
ht->lock.
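
For reference, the three modes map onto the new helper as follows
(call patterns only, assuming ht->lock is held by the caller):

    qht_do_resize_reset(ht, new, false);  /* (2) resize only, via qht_do_resize() */
    qht_do_resize_reset(ht, new, true);   /* (3) reset+resize, via qht_do_resize_and_reset() */
    qht_do_resize_reset(ht, NULL, true);  /* (1) reset only: a NULL @new just resets
                                             and drops the bucket locks */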

Reported-by: Peter Maydell <peter.maydell@linaro.org>
Reported-by: Daniel P. Berrange <berrange@redhat.com>
Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 util/qht.c | 49 ++++++++++++++++++++++++++++---------------------
 1 file changed, 28 insertions(+), 21 deletions(-)

Patch

diff --git a/util/qht.c b/util/qht.c
index af8da3c..6c61aca 100644
--- a/util/qht.c
+++ b/util/qht.c
@@ -133,7 +133,8 @@ struct qht_map {
 /* trigger a resize when n_added_buckets > n_buckets / div */
 #define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
 
-static void qht_do_resize(struct qht *ht, struct qht_map *new);
+static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
+                                bool reset);
 static void qht_grow_maybe(struct qht *ht);
 
 #ifdef QHT_DEBUG
@@ -408,6 +409,16 @@ void qht_reset(struct qht *ht)
     qht_map_unlock_buckets(map);
 }
 
+static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
+{
+    qht_do_resize_reset(ht, new, false);
+}
+
+static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
+{
+    qht_do_resize_reset(ht, new, true);
+}
+
 bool qht_reset_size(struct qht *ht, size_t n_elems)
 {
     struct qht_map *new = NULL;
@@ -421,13 +432,7 @@ bool qht_reset_size(struct qht *ht, size_t n_elems)
     if (n_buckets != map->n_buckets) {
         new = qht_map_create(n_buckets);
     }
-
-    qht_map_lock_buckets(map);
-    qht_map_reset__all_locked(map);
-    if (new) {
-        qht_do_resize(ht, new);
-    }
-    qht_map_unlock_buckets(map);
+    qht_do_resize_and_reset(ht, new);
     qemu_mutex_unlock(&ht->lock);
 
     return !!new;
@@ -559,9 +564,7 @@ static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
     if (qht_map_needs_resize(map)) {
         struct qht_map *new = qht_map_create(map->n_buckets * 2);
 
-        qht_map_lock_buckets(map);
         qht_do_resize(ht, new);
-        qht_map_unlock_buckets(map);
     }
     qemu_mutex_unlock(&ht->lock);
 }
@@ -737,24 +740,31 @@ static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
 }
 
 /*
- * Call with ht->lock and all bucket locks held.
- *
- * Creating the @new map here would add unnecessary delay while all the locks
- * are held--holding up the bucket locks is particularly bad, since no writes
- * can occur while these are held. Thus, we let callers create the new map,
- * hopefully without the bucket locks held.
+ * Atomically perform a resize and/or reset.
+ * Call with ht->lock held.
  */
-static void qht_do_resize(struct qht *ht, struct qht_map *new)
+static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
 {
     struct qht_map *old;
 
     old = ht->map;
-    g_assert_cmpuint(new->n_buckets, !=, old->n_buckets);
+    qht_map_lock_buckets(old);
 
+    if (reset) {
+        qht_map_reset__all_locked(old);
+    }
+
+    if (new == NULL) {
+        qht_map_unlock_buckets(old);
+        return;
+    }
+
+    g_assert_cmpuint(new->n_buckets, !=, old->n_buckets);
     qht_map_iter__all_locked(ht, old, qht_map_copy, new);
     qht_map_debug__all_locked(new);
 
     atomic_rcu_set(&ht->map, new);
+    qht_map_unlock_buckets(old);
     call_rcu(old, qht_map_destroy, rcu);
 }
 
@@ -766,12 +776,9 @@ bool qht_resize(struct qht *ht, size_t n_elems)
     qemu_mutex_lock(&ht->lock);
     if (n_buckets != ht->map->n_buckets) {
         struct qht_map *new;
-        struct qht_map *old = ht->map;
 
         new = qht_map_create(n_buckets);
-        qht_map_lock_buckets(old);
         qht_do_resize(ht, new);
-        qht_map_unlock_buckets(old);
         ret = true;
     }
     qemu_mutex_unlock(&ht->lock);