@@ -777,6 +777,10 @@ int qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
         (cluster_offset + compressed_size - 1) / QCOW2_COMPRESSED_SECTOR_SIZE -
         (cluster_offset / QCOW2_COMPRESSED_SECTOR_SIZE);
+    /* The offset and size must fit in their fields of the L2 table entry */
+    assert((cluster_offset & s->cluster_offset_mask) == cluster_offset);
+    assert((nb_csectors & s->csize_mask) == nb_csectors);
+
     cluster_offset |= QCOW_OFLAG_COMPRESSED |
                       ((uint64_t)nb_csectors << s->csize_shift);
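These two assertions use the masks that qcow2 derives from the cluster size when the image is opened: the compressed cluster descriptor stores the host offset in its low csize_shift bits (csize_shift = 62 - (cluster_bits - 8)) and the 512-byte sector count in the bits just below the flag bits. The following is a minimal standalone sketch of that "does the value fit in its field?" test; the variable values are invented for illustration and this is not QEMU code.

/*
 * Standalone sketch (not QEMU code) of the field-fit check for a
 * compressed cluster descriptor.  The masks are derived the same way
 * qcow2 derives csize_shift/csize_mask/cluster_offset_mask from
 * cluster_bits; the sample values are made up.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define QCOW_OFLAG_COMPRESSED (1ULL << 62)

int main(void)
{
    int cluster_bits = 16;                         /* 64 KiB clusters */
    int csize_shift = 62 - (cluster_bits - 8);     /* width of the offset field */
    uint64_t csize_mask = (1ULL << (cluster_bits - 8)) - 1;
    uint64_t cluster_offset_mask = (1ULL << csize_shift) - 1;

    uint64_t cluster_offset = 0x12345600;          /* host offset in bytes */
    uint64_t nb_csectors = 42;                     /* 512-byte sectors */

    /* A value fits in its field iff masking it changes nothing. */
    assert((cluster_offset & cluster_offset_mask) == cluster_offset);
    assert((nb_csectors & csize_mask) == nb_csectors);

    uint64_t l2_entry = QCOW_OFLAG_COMPRESSED |
                        (nb_csectors << csize_shift) |
                        cluster_offset;
    printf("compressed descriptor: 0x%016" PRIx64 "\n", l2_entry);
    return 0;
}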
@@ -972,6 +976,7 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
     assert(l2_index + m->nb_clusters <= s->l2_slice_size);
     for (i = 0; i < m->nb_clusters; i++) {
+        uint64_t offset = cluster_offset + (i << s->cluster_bits);
         /* if two concurrent writes happen to the same unallocated cluster
          * each write allocates separate cluster and writes data concurrently.
          * The first one to complete updates l2 table with pointer to its
@@ -982,8 +987,10 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
             old_cluster[j++] = l2_slice[l2_index + i];
         }
-        l2_slice[l2_index + i] = cpu_to_be64((cluster_offset +
-                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
+        /* The offset must fit in the offset field of the L2 table entry */
+        assert((offset & L2E_OFFSET_MASK) == offset);
+
+        l2_slice[l2_index + i] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
     }
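For standard (non-compressed) clusters the host offset occupies bits 9-55 of the entry, which is exactly what L2E_OFFSET_MASK (0x00fffffffffffe00) covers, so the same masking trick rejects both unaligned offsets and offsets that need more than 56 bits. A small self-contained illustration follows; the offsets and the helper name offset_fits_l2_entry() are hypothetical.

/*
 * Standalone illustration (not QEMU code) of what
 * (offset & L2E_OFFSET_MASK) == offset rules out: the mask covers
 * bits 9..55 of a standard L2 entry, so the check fails both for
 * unaligned offsets and for offsets beyond 56 bits.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define L2E_OFFSET_MASK   0x00fffffffffffe00ULL
#define QCOW_OFLAG_COPIED (1ULL << 63)

static bool offset_fits_l2_entry(uint64_t offset)
{
    return (offset & L2E_OFFSET_MASK) == offset;
}

int main(void)
{
    uint64_t ok        = 0x0000000000020000ULL;  /* 512-byte aligned, < 2^56 */
    uint64_t unaligned = 0x0000000000020001ULL;  /* low bits set             */
    uint64_t too_big   = 0x0100000000000000ULL;  /* needs bit 56             */

    printf("%d %d %d\n", offset_fits_l2_entry(ok),
           offset_fits_l2_entry(unaligned), offset_fits_l2_entry(too_big));

    /* An offset that passes the check can be stored with the COPIED flag. */
    uint64_t l2_entry = ok | QCOW_OFLAG_COPIED;
    (void)l2_entry;
    return 0;
}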
@@ -1913,6 +1920,9 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
                 goto fail;
             }
+            /* The offset must fit in the offset field */
+            assert((offset & L2E_OFFSET_MASK) == offset);
+
             if (l2_refcount > 1) {
                 /* For shared L2 tables, set the refcount accordingly
                  * (it is already 1 and needs to be l2_refcount) */
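The same check appears here because expand_zero_clusters_in_l1() also writes freshly allocated host offsets into L2 entries. The sketch below (invented values, not QEMU code) shows the failure mode the assertions guard against: an offset that does not fit no longer round-trips through the entry's offset field, so assert() aborts instead of letting a corrupt entry reach the image.

/*
 * Hedged sketch of the failure mode: the oversized bit spills into the
 * reserved part of the entry, and reading the offset back gives a
 * different value.  The sample offset is made up.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define L2E_OFFSET_MASK   0x00fffffffffffe00ULL
#define QCOW_OFLAG_COPIED (1ULL << 63)

int main(void)
{
    uint64_t offset = (1ULL << 56) | 0x20000;      /* needs 57 bits */
    uint64_t entry  = offset | QCOW_OFLAG_COPIED;  /* entry that would be stored */

    /* Reading the offset back yields a different value: corruption. */
    uint64_t read_back = entry & L2E_OFFSET_MASK;
    printf("stored 0x%016" PRIx64 ", read back 0x%016" PRIx64 "\n",
           offset, read_back);

    /* The added assertion aborts here, before the entry is written. */
    assert((offset & L2E_OFFSET_MASK) == offset);
    return 0;
}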