From patchwork Thu Apr 14 19:23:29 2011
X-Patchwork-Submitter: Prasad Joshi
X-Patchwork-Id: 709451
From: Prasad Joshi
To: prasadjoshi124@gmail.com
Cc: mingo@elte.hu, kvm@vger.kernel.org, penberg@kernel.org,
	asias.hejun@gmail.com, gorcunov@gmail.com, levinsasha928@gmail.com,
	kwolf@redhat.com, stefanha@linux.vnet.ibm.com,
	chaitanyakulkarni15@gmail.com
Subject: [PATCH] kvm tool: check the cluster boundary in the qcow read code.
Date: Thu, 14 Apr 2011 20:23:29 +0100
Message-Id: <1302809009-3177-1-git-send-email-prasadjoshi124@gmail.com>
X-Mailer: git-send-email 1.7.1
X-Mailing-List: kvm@vger.kernel.org

The read path now works through the request one cluster at a time: each
iteration reads at most up to the end of the current cluster, or zero-fills
it when the cluster is not allocated, so a request that crosses a cluster
boundary is no longer serviced with a single read past that boundary.

Also renamed sect_to_l1_offset() and sect_to_l2_offset() to get_l1_index()
and get_l2_index(), as they return an index into their respective tables.
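For reference, a minimal standalone sketch (not part of the patch) of how a
guest byte offset decomposes into the qcow1 lookup values used below. The L1
shift mirrors get_l1_index() in the diff; the L2 mask and the in-cluster
offset are assumptions about the usual qcow1 layout, since get_l2_index()
and sect_to_cluster_offset() are not fully visible in this hunk.

/*
 * Illustrative only: decompose a guest byte offset into the qcow1
 * L1 index, L2 index and offset within the cluster.  The cluster_bits
 * and l2_bits values are example numbers, not taken from a real image.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t cluster_bits = 12;                 /* 4 KiB clusters (example) */
	uint64_t l2_bits      = 9;                  /* 512 L2 entries (example) */
	uint64_t offset       = (5ULL << 12) + 100; /* arbitrary guest offset */

	uint64_t l1_idx       = offset >> (l2_bits + cluster_bits);                /* as in get_l1_index() */
	uint64_t l2_idx       = (offset >> cluster_bits) & ((1ULL << l2_bits) - 1); /* assumed */
	uint64_t clust_offset = offset & ((1ULL << cluster_bits) - 1);              /* assumed */

	printf("l1=%" PRIu64 " l2=%" PRIu64 " in-cluster=%" PRIu64 "\n",
	       l1_idx, l2_idx, clust_offset);
	return 0;
}

With these example values the offset 5*4096+100 lands in L1 entry 0, L2
entry 5, byte 100 of the cluster.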
Signed-off-by: Prasad Joshi
---
 tools/kvm/qcow.c |  103 ++++++++++++++++++++++++++++++------------------
 1 files changed, 57 insertions(+), 46 deletions(-)

diff --git a/tools/kvm/qcow.c b/tools/kvm/qcow.c
index c4e3e48..3afd3fb 100644
--- a/tools/kvm/qcow.c
+++ b/tools/kvm/qcow.c
@@ -15,14 +15,14 @@
 #include
 #include
 
-static inline uint64_t sect_to_l1_offset(struct qcow *q, uint64_t offset)
+static inline uint64_t get_l1_index(struct qcow *q, uint64_t offset)
 {
 	struct qcow1_header *header = q->header;
 
 	return offset >> (header->l2_bits + header->cluster_bits);
 }
 
-static inline uint64_t sect_to_l2_offset(struct qcow *q, uint64_t offset)
+static inline uint64_t get_l2_index(struct qcow *q, uint64_t offset)
 {
 	struct qcow1_header *header = q->header;
 
@@ -44,54 +44,65 @@ static int qcow1_read_sector(struct disk_image *self, uint64_t sector, void *dst
 	uint64_t l2_table_size;
 	uint64_t clust_offset;
 	uint64_t clust_start;
+	uint64_t clust_size;
 	uint64_t *l2_table;
 	uint64_t l1_idx;
 	uint64_t l2_idx;
 	uint64_t offset;
-
-	offset = sector << SECTOR_SHIFT;
-	if (offset >= header->size)
-		goto out_error;
-
-	l1_idx = sect_to_l1_offset(self->priv, offset);
-
-	if (l1_idx >= q->table.table_size)
-		goto out_error;
-
-	l2_table_offset = be64_to_cpu(q->table.l1_table[l1_idx]);
-	if (!l2_table_offset)
-		goto zero_sector;
-
-	l2_table_size = 1 << header->l2_bits;
-
-	l2_table = calloc(l2_table_size, sizeof(uint64_t));
-	if (!l2_table)
-		goto out_error;
-
-	if (pread_in_full(q->fd, l2_table, sizeof(uint64_t) * l2_table_size, l2_table_offset) < 0)
-		goto out_error_free_l2;
-
-	l2_idx = sect_to_l2_offset(self->priv, offset);
-
-	if (l2_idx >= l2_table_size)
-		goto out_error_free_l2;
-
-	clust_start = be64_to_cpu(l2_table[l2_idx]);
-
-	if (!clust_start)
-		goto zero_sector;
-
-	clust_offset = sect_to_cluster_offset(self->priv, offset);
-
-	if (pread_in_full(q->fd, dst, dst_len, clust_start + clust_offset) < 0)
-		goto out_error_free_l2;
-
-	free(l2_table);
-
-	return 0;
-
-zero_sector:
-	memset(dst, 0, dst_len);
+	uint32_t length;
+	uint32_t tmp;
+	char *buf = dst;
+
+	clust_size = 1 << header->cluster_bits;
+	length = 0;
+
+	while (length < dst_len) {
+		offset = sector << SECTOR_SHIFT;
+		if (offset >= header->size)
+			goto out_error;
+
+		l1_idx = get_l1_index(self->priv, offset);
+		if (l1_idx >= q->table.table_size)
+			goto out_error;
+
+		l2_table_offset = be64_to_cpu(q->table.l1_table[l1_idx]);
+		if (!l2_table_offset) {
+			tmp = clust_size;
+			memset(buf, 0, tmp);
+			goto next_cluster;
+		}
+
+		l2_table_size = 1 << header->l2_bits;
+
+		l2_table = calloc(l2_table_size, sizeof(uint64_t));
+		if (!l2_table)
+			goto out_error;
+
+		if (pread_in_full(q->fd, l2_table, sizeof(uint64_t) * l2_table_size, l2_table_offset) < 0)
+			goto out_error_free_l2;
+
+		l2_idx = get_l2_index(self->priv, offset);
+		if (l2_idx >= l2_table_size)
+			goto out_error_free_l2;
+
+		clust_start = be64_to_cpu(l2_table[l2_idx]);
+		free(l2_table);
+		if (!clust_start) {
+			tmp = clust_size;
+			memset(buf, 0, tmp);
+		} else {
+			clust_offset = sect_to_cluster_offset(self->priv, offset);
+			tmp = clust_size - clust_offset;
+
+			if (pread_in_full(q->fd, buf, tmp, clust_start + clust_offset) < 0)
+				goto out_error;
+		}
+
+next_cluster:
+		buf += tmp;
+		sector += (tmp >> SECTOR_SHIFT);
+		length += tmp;
+	}
 
 	return 0;