
kvm tool: check the cluster boundary in the qcow read code.

Message ID 1302809009-3177-1-git-send-email-prasadjoshi124@gmail.com (mailing list archive)
State New, archived

Commit Message

Prasad Joshi April 14, 2011, 7:23 p.m. UTC
Changed the function names from sect_to_l1_offset() and sect_to_l2_offset() to
get_l1_index() and get_l2_index(), as they return an index into their respective tables.

Signed-off-by: Prasad Joshi <prasadjoshi124@gmail.com>
---
 tools/kvm/qcow.c |  103 ++++++++++++++++++++++++++++++------------------------
 1 files changed, 57 insertions(+), 46 deletions(-)
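
For reference, this is the arithmetic the renamed helpers perform. A minimal,
self-contained sketch (cluster_bits = 12 and l2_bits = 9 are only assumed here
as typical qcow1 values; the real ones come from the image header, and since
get_l2_index()'s body is not visible in the hunk below, its expression here is
the standard qcow1 formula):

#include <stdint.h>
#include <stdio.h>

#define CLUSTER_BITS	12	/* assumed: 4 KiB clusters */
#define L2_BITS		9	/* assumed: 512 entries per L2 table */

int main(void)
{
	uint64_t offset = 0x12345678ULL;	/* arbitrary guest byte offset */

	/* get_l1_index(): which L1 entry covers this offset */
	uint64_t l1_idx = offset >> (L2_BITS + CLUSTER_BITS);

	/* get_l2_index(): which entry within that L2 table */
	uint64_t l2_idx = (offset >> CLUSTER_BITS) & ((1ULL << L2_BITS) - 1);

	/* sect_to_cluster_offset(): byte offset within the cluster */
	uint64_t clust_offset = offset & ((1ULL << CLUSTER_BITS) - 1);

	printf("l1_idx=%llu l2_idx=%llu clust_offset=%llu\n",
	       (unsigned long long)l1_idx, (unsigned long long)l2_idx,
	       (unsigned long long)clust_offset);
	return 0;
}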

Comments

Pekka Enberg April 14, 2011, 7:59 p.m. UTC | #1
On Thu, 2011-04-14 at 20:23 +0100, Prasad Joshi wrote:
> Changed the function names from sect_to_l1_offset() and sect_to_l2_offset() to
> get_l1_index() and get_l2_index(), as they return an index into their respective tables.

This patch does a lot more than what's described above. Please split the
changes into two separate patches.

> +	uint32_t length;
> +	uint32_t tmp;
> +	char *buf = dst;
> +
> +	clust_size = 1 << header->cluster_bits;
> +	length = 0;
> +
> +	while (length < dst_len) {
> +		offset = sector << SECTOR_SHIFT;
> +		if (offset >= header->size)
> +			goto out_error;
> +
> +		l1_idx = get_l1_index(self->priv, offset);
> +		if (l1_idx >= q->table.table_size)
> +			goto out_error;
> +
> +		l2_table_offset	= be64_to_cpu(q->table.l1_table[l1_idx]);
> +		if (!l2_table_offset) {
> +			tmp = clust_size;
> +			memset(buf, 0, tmp);
> +			goto next_cluster;
> +		}
> +
> +		l2_table_size = 1 << header->l2_bits;
> +
> +		l2_table = calloc(l2_table_size, sizeof(uint64_t));
> +		if (!l2_table)
> +			goto out_error;
> +
> +		if (pread_in_full(q->fd, l2_table, sizeof(uint64_t) * l2_table_size, l2_table_offset) < 0)
> +			goto out_error_free_l2;
> +
> +		l2_idx = get_l2_index(self->priv, offset);
> +		if (l2_idx >= l2_table_size)
> +			goto out_error_free_l2;
> +
> +		clust_start = be64_to_cpu(l2_table[l2_idx]);
> +		free(l2_table);
> +		if (!clust_start) {
> +			tmp = clust_size;
> +			memset(buf, 0, tmp);
> +		} else {
> +			clust_offset = sect_to_cluster_offset(self->priv, offset);
> +			tmp = clust_size - clust_offset;
> +
> +			if (pread_in_full(q->fd, buf, tmp, clust_start + clust_offset) < 0)
> +				goto out_error;
> +		}
> +
> +next_cluster:
> +		buf     += tmp;
> +		sector  += (tmp >> SECTOR_SHIFT);
> +		length  += tmp;
> +	}

Well, the loop is not exactly making the code any better. If you're
worried about reads that cross over a single cluster, it's probably
better to rename the current function to qcow1_read_cluster() or
something and use that in a loop that makes sure we never cross over a
cluster.
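
For what it's worth, a rough, untested sketch of that split (the names and the
elided lookup are only illustrative; the existing L1/L2 walk from
qcow1_read_sector() would move into the helper unchanged):

/*
 * Read at most up to the next cluster boundary; return the number of
 * bytes handled, or -1 on error.
 */
static ssize_t qcow1_read_cluster(struct disk_image *self, uint64_t offset,
				  void *dst, size_t dst_len)
{
	struct qcow *q = self->priv;
	struct qcow1_header *header = q->header;
	uint64_t clust_size   = 1ULL << header->cluster_bits;
	uint64_t clust_offset = offset & (clust_size - 1);
	size_t len            = clust_size - clust_offset;

	if (len > dst_len)
		len = dst_len;	/* never read past the caller's buffer */

	/* ... L1/L2 lookup for 'offset', then pread_in_full() or memset() ... */

	return len;
}

static int qcow1_read_sectors(struct disk_image *self, uint64_t sector,
			      void *dst, uint32_t dst_len)
{
	uint64_t offset = sector << SECTOR_SHIFT;
	char *buf = dst;
	uint32_t nr = 0;

	while (nr < dst_len) {
		ssize_t ret = qcow1_read_cluster(self, offset + nr,
						 buf + nr, dst_len - nr);
		if (ret < 0)
			return -1;
		nr += ret;
	}

	return 0;
}

That way each pread_in_full() stays within one cluster and the caller never
has to care about the boundary.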

Btw, I think our ->read_sector and ->write_sector functions need
renaming to ->read_sectors and ->write_sectors, with a little bit of
commentary on what they can expect to receive.
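
Roughly something like this in the ops struct (disk_image_operations is
assumed here to be the existing ops table; the comment wording is only a
suggestion):

struct disk_image_operations {
	/*
	 * Read/write 'dst_len'/'src_len' bytes starting at 'sector'.
	 * The request may span several sectors and cross cluster
	 * boundaries, so implementations must not assume the range is
	 * contiguous in the backing file.
	 */
	int (*read_sectors)(struct disk_image *self, uint64_t sector,
			    void *dst, uint32_t dst_len);
	int (*write_sectors)(struct disk_image *self, uint64_t sector,
			     void *src, uint32_t src_len);
	/* ... other operations unchanged ... */
};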

			Pekka

Prasad Joshi April 15, 2011, 12:04 a.m. UTC | #2
On Thu, Apr 14, 2011 at 8:59 PM, Pekka Enberg <penberg@kernel.org> wrote:
> On Thu, 2011-04-14 at 20:23 +0100, Prasad Joshi wrote:
>> Changed the function names from sect_to_l1_offset() and sect_to_l2_offset() to
>> get_l1_index() and get_l2_index(), as they return an index into their respective tables.
>
> This patch does a lot more than what's described above. Please split the
> changes in two separate patches.

Okay.

>
>> +     uint32_t length;
>> +     uint32_t tmp;
>> +     char *buf = dst;
>> +
>> +     clust_size = 1 << header->cluster_bits;
>> +     length = 0;
>> +
>> +     while (length < dst_len) {
>> +             offset = sector << SECTOR_SHIFT;
>> +             if (offset >= header->size)
>> +                     goto out_error;
>> +
>> +             l1_idx = get_l1_index(self->priv, offset);
>> +             if (l1_idx >= q->table.table_size)
>> +                     goto out_error;
>> +
>> +             l2_table_offset = be64_to_cpu(q->table.l1_table[l1_idx]);
>> +             if (!l2_table_offset) {
>> +                     tmp = clust_size;
>> +                     memset(buf, 0, tmp);
>> +                     goto next_cluster;
>> +             }
>> +
>> +             l2_table_size = 1 << header->l2_bits;
>> +
>> +             l2_table = calloc(l2_table_size, sizeof(uint64_t));
>> +             if (!l2_table)
>> +                     goto out_error;
>> +
>> +             if (pread_in_full(q->fd, l2_table, sizeof(uint64_t) * l2_table_size, l2_table_offset) < 0)
>> +                     goto out_error_free_l2;
>> +
>> +             l2_idx = get_l2_index(self->priv, offset);
>> +             if (l2_idx >= l2_table_size)
>> +                     goto out_error_free_l2;
>> +
>> +             clust_start = be64_to_cpu(l2_table[l2_idx]);
>> +             free(l2_table);
>> +             if (!clust_start) {
>> +                     tmp = clust_size;
>> +                     memset(buf, 0, tmp);
>> +             } else {
>> +                     clust_offset = sect_to_cluster_offset(self->priv, offset);
>> +                     tmp = clust_size - clust_offset;
>> +
>> +                     if (pread_in_full(q->fd, buf, tmp, clust_start + clust_offset) < 0)
>> +                             goto out_error;
>> +             }
>> +
>> +next_cluster:
>> +             buf     += tmp;
>> +             sector  += (tmp >> SECTOR_SHIFT);
>> +             length  += tmp;
>> +     }
>
> Well, the loop is not exactly making the code any better.

More than clarity, the loop is mandatory. Consecutive guest sectors are not
guaranteed to be adjacent in the backing file; in fact, a cluster in between
might even be holding a level 2 table.
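
A contrived example (offsets made up, 4 KiB clusters assumed):

/*
 * guest cluster 0 (0x0000-0x0fff) -> clust_start = 0x00005000
 * guest cluster 1 (0x1000-0x1fff) -> clust_start = 0x00123000 (or 0: unallocated)
 *
 * A single 8 KiB pread_in_full() starting at 0x00005000 would return
 * the wrong data for the second half, so the read has to be broken up
 * at every cluster boundary and each piece looked up separately.
 */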

> If you're
> worried about reads that cross over a single cluster, it's probably
> better to rename the current function to qcow1_read_cluster() or
> something and use that in a loop that makes sure we never cross over a
> cluster.

Okay.

>
> Btw, I think our ->read_sector and ->write_sector functions need
> renaming to ->read_sectors and ->write_sectors with little bit
> commentary on what they can expect to receive.
>

Okay.

Thanks and Regards,
Prasad

>                        Pekka
>
>

Patch

diff --git a/tools/kvm/qcow.c b/tools/kvm/qcow.c
index c4e3e48..3afd3fb 100644
--- a/tools/kvm/qcow.c
+++ b/tools/kvm/qcow.c
@@ -15,14 +15,14 @@ 
 #include <linux/byteorder.h>
 #include <linux/types.h>
 
-static inline uint64_t sect_to_l1_offset(struct qcow *q, uint64_t offset)
+static inline uint64_t get_l1_index(struct qcow *q, uint64_t offset)
 {
 	struct qcow1_header *header = q->header;
 
 	return offset >> (header->l2_bits + header->cluster_bits);
 }
 
-static inline uint64_t sect_to_l2_offset(struct qcow *q, uint64_t offset)
+static inline uint64_t get_l2_index(struct qcow *q, uint64_t offset)
 {
 	struct qcow1_header *header = q->header;
 
@@ -44,54 +44,65 @@  static int qcow1_read_sector(struct disk_image *self, uint64_t sector, void *dst
 	uint64_t l2_table_size;
 	uint64_t clust_offset;
 	uint64_t clust_start;
+	uint64_t clust_size;
 	uint64_t *l2_table;
 	uint64_t l1_idx;
 	uint64_t l2_idx;
 	uint64_t offset;
-
-	offset		= sector << SECTOR_SHIFT;
-	if (offset >= header->size)
-		goto out_error;
-
-	l1_idx		= sect_to_l1_offset(self->priv, offset);
-
-	if (l1_idx >= q->table.table_size)
-		goto out_error;
-
-	l2_table_offset	= be64_to_cpu(q->table.l1_table[l1_idx]);
-	if (!l2_table_offset)
-		goto zero_sector;
-
-	l2_table_size	= 1 << header->l2_bits;
-
-	l2_table	= calloc(l2_table_size, sizeof(uint64_t));
-	if (!l2_table)
-		goto out_error;
-
-	if (pread_in_full(q->fd, l2_table, sizeof(uint64_t) * l2_table_size, l2_table_offset) < 0)
-		goto out_error_free_l2;
-
-	l2_idx		= sect_to_l2_offset(self->priv, offset);
-
-	if (l2_idx >= l2_table_size)
-		goto out_error_free_l2;
-
-	clust_start	= be64_to_cpu(l2_table[l2_idx]);
-
-	if (!clust_start)
-		goto zero_sector;
-
-	clust_offset	= sect_to_cluster_offset(self->priv, offset);
-
-	if (pread_in_full(q->fd, dst, dst_len, clust_start + clust_offset) < 0)
-		goto out_error_free_l2;
-
-	free(l2_table);
-
-	return 0;
-
-zero_sector:
-	memset(dst, 0, dst_len);
+	uint32_t length;
+	uint32_t tmp;
+	char *buf = dst;
+
+	clust_size = 1 << header->cluster_bits;
+	length = 0;
+
+	while (length < dst_len) {
+		offset = sector << SECTOR_SHIFT;
+		if (offset >= header->size)
+			goto out_error;
+
+		l1_idx = get_l1_index(self->priv, offset);
+		if (l1_idx >= q->table.table_size)
+			goto out_error;
+
+		l2_table_offset	= be64_to_cpu(q->table.l1_table[l1_idx]);
+		if (!l2_table_offset) {
+			tmp = clust_size;
+			memset(buf, 0, tmp);
+			goto next_cluster;
+		}
+
+		l2_table_size = 1 << header->l2_bits;
+
+		l2_table = calloc(l2_table_size, sizeof(uint64_t));
+		if (!l2_table)
+			goto out_error;
+
+		if (pread_in_full(q->fd, l2_table, sizeof(uint64_t) * l2_table_size, l2_table_offset) < 0)
+			goto out_error_free_l2;
+
+		l2_idx = get_l2_index(self->priv, offset);
+		if (l2_idx >= l2_table_size)
+			goto out_error_free_l2;
+
+		clust_start = be64_to_cpu(l2_table[l2_idx]);
+		free(l2_table);
+		if (!clust_start) {
+			tmp = clust_size;
+			memset(buf, 0, tmp);
+		} else {
+			clust_offset = sect_to_cluster_offset(self->priv, offset);
+			tmp = clust_size - clust_offset;
+
+			if (pread_in_full(q->fd, buf, tmp, clust_start + clust_offset) < 0)
+				goto out_error;
+		}
+
+next_cluster:
+		buf     += tmp;
+		sector  += (tmp >> SECTOR_SHIFT);
+		length  += tmp;
+	}
 
 	return 0;