
[PULL,13/17] qcow2: Use bs->bl.request_alignment when updating an L1 entry

Message ID 20200206125132.594625-14-mreitz@redhat.com (mailing list archive)
State New, archived
Series [PULL,01/17] qcow2: Assert that host cluster offsets fit in L2 table entries

Commit Message

Max Reitz Feb. 6, 2020, 12:51 p.m. UTC
From: Alberto Garcia <berto@igalia.com>

When updating an L1 entry the qcow2 driver writes a (512-byte) sector
worth of data to avoid a read-modify-write cycle. Instead of always
writing 512 bytes we should follow the alignment requirements of the
storage backend.

(The only exception is when the alignment is larger than the cluster
size, because then we could be overwriting data after the L1 table.)

Signed-off-by: Alberto Garcia <berto@igalia.com>
Message-id: 71f34d4ae4b367b32fb36134acbf4f4f7ee681f4.1579374329.git.berto@igalia.com
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
---
 block/qcow2-cluster.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

Patch

diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 0384fb2339..1947f13a2d 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -216,26 +216,31 @@  static int l2_load(BlockDriverState *bs, uint64_t offset,
 }
 
 /*
- * Writes one sector of the L1 table to the disk (can't update single entries
- * and we really don't want bdrv_pread to perform a read-modify-write)
+ * Writes an L1 entry to disk (note that depending on the alignment
+ * requirements this function may write more than just one entry in
+ * order to prevent bdrv_pwrite from performing a read-modify-write)
  */
-#define L1_ENTRIES_PER_SECTOR (512 / 8)
 int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
 {
     BDRVQcow2State *s = bs->opaque;
-    uint64_t buf[L1_ENTRIES_PER_SECTOR] = { 0 };
     int l1_start_index;
     int i, ret;
+    int bufsize = MAX(sizeof(uint64_t),
+                      MIN(bs->file->bs->bl.request_alignment, s->cluster_size));
+    int nentries = bufsize / sizeof(uint64_t);
+    g_autofree uint64_t *buf = g_try_new0(uint64_t, nentries);
 
-    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
-    for (i = 0; i < L1_ENTRIES_PER_SECTOR && l1_start_index + i < s->l1_size;
-         i++)
-    {
+    if (buf == NULL) {
+        return -ENOMEM;
+    }
+
+    l1_start_index = QEMU_ALIGN_DOWN(l1_index, nentries);
+    for (i = 0; i < MIN(nentries, s->l1_size - l1_start_index); i++) {
         buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
     }
 
     ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L1,
-            s->l1_table_offset + 8 * l1_start_index, sizeof(buf), false);
+            s->l1_table_offset + 8 * l1_start_index, bufsize, false);
     if (ret < 0) {
         return ret;
     }
@@ -243,7 +248,7 @@  int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
     BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
     ret = bdrv_pwrite_sync(bs->file,
                            s->l1_table_offset + 8 * l1_start_index,
-                           buf, sizeof(buf));
+                           buf, bufsize);
     if (ret < 0) {
         return ret;
     }
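
For readers outside QEMU, here is a minimal standalone sketch of the sizing
arithmetic used above. It is only an illustration: the MIN/MAX/align_down
helpers and the sample request_alignment, cluster_size and l1_index values
are placeholders, not part of the patch or of QEMU's own macros.

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Round index down to a multiple of n */
static int align_down(int index, int n)
{
    return index - (index % n);
}

int main(void)
{
    int request_alignment = 4096;   /* stands in for bs->file->bs->bl.request_alignment */
    int cluster_size = 65536;       /* stands in for s->cluster_size */
    int l1_index = 70;              /* L1 entry being updated */

    /* Write at least one 8-byte entry, at most one cluster, and ideally
     * exactly the backend's alignment so no read-modify-write is needed. */
    int bufsize = MAX(sizeof(uint64_t),
                      MIN(request_alignment, cluster_size));
    int nentries = bufsize / sizeof(uint64_t);
    int l1_start_index = align_down(l1_index, nentries);

    printf("bufsize=%d nentries=%d start=%d\n",
           bufsize, nentries, l1_start_index);
    return 0;
}

With a 4 KiB backend alignment this yields a 4096-byte buffer of 512 entries
starting at entry 0, i.e. exactly one aligned block, so the subsequent
bdrv_pwrite_sync() does not force the backend into a read-modify-write cycle.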