From patchwork Wed Jul 18 15:07:28 2012
X-Patchwork-Submitter: Stefan Hajnoczi
X-Patchwork-Id: 1211541
From: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
Cc: Anthony Liguori, Kevin Wolf, Paolo Bonzini, "Michael S. Tsirkin",
    Asias He, Khoa Huynh, Stefan Hajnoczi
Subject: [RFC v9 01/27] virtio-blk: Remove virtqueue request handling code
Date: Wed, 18 Jul 2012 16:07:28 +0100
Message-Id: <1342624074-24650-2-git-send-email-stefanha@linux.vnet.ibm.com>
X-Mailer: git-send-email 1.7.10.4
In-Reply-To: <1342624074-24650-1-git-send-email-stefanha@linux.vnet.ibm.com>
References: <1342624074-24650-1-git-send-email-stefanha@linux.vnet.ibm.com>
X-Mailing-List: kvm@vger.kernel.org

Start with a clean slate: a virtio-blk device that supports virtio
lifecycle operations and configuration but doesn't do any actual I/O.
The I/O will happen in a separate, optimized data plane thread.
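For reference, after this patch the only virtqueue handler left in
hw/virtio-blk.c is a guard that aborts if it is ever invoked, since
requests are meant to be consumed by the data plane thread introduced
later in this series. This sketch simply mirrors the new code added in
the hunk below:

    static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
    {
        /* Request processing now lives in the data plane thread; reaching
         * this handler indicates a wiring bug, so fail loudly.
         */
        fprintf(stderr, "virtio_blk_handle_output: should never get here,"
                "data plane thread should process requests\n");
        exit(1);
    }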
Signed-off-by: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
---
 hw/virtio-blk.c | 496 +------------------------------------------------------
 1 file changed, 3 insertions(+), 493 deletions(-)

diff --git a/hw/virtio-blk.c b/hw/virtio-blk.c
index 49990f8..a627427 100644
--- a/hw/virtio-blk.c
+++ b/hw/virtio-blk.c
@@ -16,18 +16,12 @@
 #include "trace.h"
 #include "blockdev.h"
 #include "virtio-blk.h"
-#include "scsi-defs.h"
-#ifdef __linux__
-# include <scsi/sg.h>
-#endif
 
 typedef struct VirtIOBlock
 {
     VirtIODevice vdev;
     BlockDriverState *bs;
     VirtQueue *vq;
-    void *rq;
-    QEMUBH *bh;
     BlockConf *conf;
     char *serial;
     unsigned short sector_mask;
@@ -39,439 +33,11 @@ static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
     return (VirtIOBlock *)vdev;
 }
 
-typedef struct VirtIOBlockReq
-{
-    VirtIOBlock *dev;
-    VirtQueueElement elem;
-    struct virtio_blk_inhdr *in;
-    struct virtio_blk_outhdr *out;
-    struct virtio_scsi_inhdr *scsi;
-    QEMUIOVector qiov;
-    struct VirtIOBlockReq *next;
-    BlockAcctCookie acct;
-} VirtIOBlockReq;
-
-static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
-{
-    VirtIOBlock *s = req->dev;
-
-    trace_virtio_blk_req_complete(req, status);
-
-    stb_p(&req->in->status, status);
-    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
-    virtio_notify(&s->vdev, s->vq);
-}
-
-static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
-    int is_read)
-{
-    BlockErrorAction action = bdrv_get_on_error(req->dev->bs, is_read);
-    VirtIOBlock *s = req->dev;
-
-    if (action == BLOCK_ERR_IGNORE) {
-        bdrv_emit_qmp_error_event(s->bs, BDRV_ACTION_IGNORE, is_read);
-        return 0;
-    }
-
-    if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
-        || action == BLOCK_ERR_STOP_ANY) {
-        req->next = s->rq;
-        s->rq = req;
-        bdrv_emit_qmp_error_event(s->bs, BDRV_ACTION_STOP, is_read);
-        vm_stop(RUN_STATE_IO_ERROR);
-        bdrv_iostatus_set_err(s->bs, error);
-    } else {
-        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
-        bdrv_acct_done(s->bs, &req->acct);
-        g_free(req);
-        bdrv_emit_qmp_error_event(s->bs, BDRV_ACTION_REPORT, is_read);
-    }
-
-    return 1;
-}
-
-static void virtio_blk_rw_complete(void *opaque, int ret)
-{
-    VirtIOBlockReq *req = opaque;
-
-    trace_virtio_blk_rw_complete(req, ret);
-
-    if (ret) {
-        int is_read = !(ldl_p(&req->out->type) & VIRTIO_BLK_T_OUT);
-        if (virtio_blk_handle_rw_error(req, -ret, is_read))
-            return;
-    }
-
-    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
-    bdrv_acct_done(req->dev->bs, &req->acct);
-    g_free(req);
-}
-
-static void virtio_blk_flush_complete(void *opaque, int ret)
-{
-    VirtIOBlockReq *req = opaque;
-
-    if (ret) {
-        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
-            return;
-        }
-    }
-
-    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
-    bdrv_acct_done(req->dev->bs, &req->acct);
-    g_free(req);
-}
-
-static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
-{
-    VirtIOBlockReq *req = g_malloc(sizeof(*req));
-    req->dev = s;
-    req->qiov.size = 0;
-    req->next = NULL;
-    return req;
-}
-
-static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
-{
-    VirtIOBlockReq *req = virtio_blk_alloc_request(s);
-
-    if (req != NULL) {
-        if (!virtqueue_pop(s->vq, &req->elem)) {
-            g_free(req);
-            return NULL;
-        }
-    }
-
-    return req;
-}
-
-#ifdef __linux__
-static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
-{
-    struct sg_io_hdr hdr;
-    int ret;
-    int status;
-    int i;
-
-    if ((req->dev->vdev.guest_features & (1 << VIRTIO_BLK_F_SCSI)) == 0) {
-        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
-        g_free(req);
-        return;
-    }
-
-    /*
-     * We require at least one output segment each for the virtio_blk_outhdr
-     * and the SCSI command block.
-     *
-     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
-     * and the sense buffer pointer in the input segments.
-     */
-    if (req->elem.out_num < 2 || req->elem.in_num < 3) {
-        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
-        g_free(req);
-        return;
-    }
-
-    /*
-     * No support for bidirection commands yet.
-     */
-    if (req->elem.out_num > 2 && req->elem.in_num > 3) {
-        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
-        g_free(req);
-        return;
-    }
-
-    /*
-     * The scsi inhdr is placed in the second-to-last input segment, just
-     * before the regular inhdr.
-     */
-    req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
-
-    memset(&hdr, 0, sizeof(struct sg_io_hdr));
-    hdr.interface_id = 'S';
-    hdr.cmd_len = req->elem.out_sg[1].iov_len;
-    hdr.cmdp = req->elem.out_sg[1].iov_base;
-    hdr.dxfer_len = 0;
-
-    if (req->elem.out_num > 2) {
-        /*
-         * If there are more than the minimally required 2 output segments
-         * there is write payload starting from the third iovec.
-         */
-        hdr.dxfer_direction = SG_DXFER_TO_DEV;
-        hdr.iovec_count = req->elem.out_num - 2;
-
-        for (i = 0; i < hdr.iovec_count; i++)
-            hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;
-
-        hdr.dxferp = req->elem.out_sg + 2;
-
-    } else if (req->elem.in_num > 3) {
-        /*
-         * If we have more than 3 input segments the guest wants to actually
-         * read data.
-         */
-        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
-        hdr.iovec_count = req->elem.in_num - 3;
-        for (i = 0; i < hdr.iovec_count; i++)
-            hdr.dxfer_len += req->elem.in_sg[i].iov_len;
-
-        hdr.dxferp = req->elem.in_sg;
-    } else {
-        /*
-         * Some SCSI commands don't actually transfer any data.
-         */
-        hdr.dxfer_direction = SG_DXFER_NONE;
-    }
-
-    hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
-    hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;
-
-    ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
-    if (ret) {
-        status = VIRTIO_BLK_S_UNSUPP;
-        hdr.status = ret;
-        hdr.resid = hdr.dxfer_len;
-    } else if (hdr.status) {
-        status = VIRTIO_BLK_S_IOERR;
-    } else {
-        status = VIRTIO_BLK_S_OK;
-    }
-
-    /*
-     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
-     * clear the masked_status field [hence status gets cleared too, see
-     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
-     * status has occurred. However they do set DRIVER_SENSE in driver_status
-     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
-     */
-    if (hdr.status == 0 && hdr.sb_len_wr > 0) {
-        hdr.status = CHECK_CONDITION;
-    }
-
-    stl_p(&req->scsi->errors,
-          hdr.status | (hdr.msg_status << 8) |
-          (hdr.host_status << 16) | (hdr.driver_status << 24));
-    stl_p(&req->scsi->residual, hdr.resid);
-    stl_p(&req->scsi->sense_len, hdr.sb_len_wr);
-    stl_p(&req->scsi->data_len, hdr.dxfer_len);
-
-    virtio_blk_req_complete(req, status);
-    g_free(req);
-}
-#else
-static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
-{
-    virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
-    g_free(req);
-}
-#endif /* __linux__ */
-
-typedef struct MultiReqBuffer {
-    BlockRequest blkreq[32];
-    unsigned int num_writes;
-} MultiReqBuffer;
-
-static void virtio_submit_multiwrite(BlockDriverState *bs, MultiReqBuffer *mrb)
-{
-    int i, ret;
-
-    if (!mrb->num_writes) {
-        return;
-    }
-
-    ret = bdrv_aio_multiwrite(bs, mrb->blkreq, mrb->num_writes);
-    if (ret != 0) {
-        for (i = 0; i < mrb->num_writes; i++) {
-            if (mrb->blkreq[i].error) {
-                virtio_blk_rw_complete(mrb->blkreq[i].opaque, -EIO);
-            }
-        }
-    }
-
-    mrb->num_writes = 0;
-}
-
-static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
-{
-    bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);
-
-    /*
-     * Make sure all outstanding writes are posted to the backing device.
-     */
-    virtio_submit_multiwrite(req->dev->bs, mrb);
-    bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
-}
-
-static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
-{
-    BlockRequest *blkreq;
-    uint64_t sector;
-
-    sector = ldq_p(&req->out->sector);
-
-    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);
-
-    trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);
-
-    if (sector & req->dev->sector_mask) {
-        virtio_blk_rw_complete(req, -EIO);
-        return;
-    }
-    if (req->qiov.size % req->dev->conf->logical_block_size) {
-        virtio_blk_rw_complete(req, -EIO);
-        return;
-    }
-
-    if (mrb->num_writes == 32) {
-        virtio_submit_multiwrite(req->dev->bs, mrb);
-    }
-
-    blkreq = &mrb->blkreq[mrb->num_writes];
-    blkreq->sector = sector;
-    blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
-    blkreq->qiov = &req->qiov;
-    blkreq->cb = virtio_blk_rw_complete;
-    blkreq->opaque = req;
-    blkreq->error = 0;
-
-    mrb->num_writes++;
-}
-
-static void virtio_blk_handle_read(VirtIOBlockReq *req)
-{
-    uint64_t sector;
-
-    sector = ldq_p(&req->out->sector);
-
-    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);
-
-    trace_virtio_blk_handle_read(req, sector, req->qiov.size / 512);
-
-    if (sector & req->dev->sector_mask) {
-        virtio_blk_rw_complete(req, -EIO);
-        return;
-    }
-    if (req->qiov.size % req->dev->conf->logical_block_size) {
-        virtio_blk_rw_complete(req, -EIO);
-        return;
-    }
-    bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
-                   req->qiov.size / BDRV_SECTOR_SIZE,
-                   virtio_blk_rw_complete, req);
-}
-
-static void virtio_blk_handle_request(VirtIOBlockReq *req,
-    MultiReqBuffer *mrb)
-{
-    uint32_t type;
-
-    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
-        error_report("virtio-blk missing headers");
-        exit(1);
-    }
-
-    if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
-        req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
-        error_report("virtio-blk header not in correct element");
-        exit(1);
-    }
-
-    req->out = (void *)req->elem.out_sg[0].iov_base;
-    req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;
-
-    type = ldl_p(&req->out->type);
-
-    if (type & VIRTIO_BLK_T_FLUSH) {
-        virtio_blk_handle_flush(req, mrb);
-    } else if (type & VIRTIO_BLK_T_SCSI_CMD) {
-        virtio_blk_handle_scsi(req);
-    } else if (type & VIRTIO_BLK_T_GET_ID) {
-        VirtIOBlock *s = req->dev;
-
-        /*
-         * NB: per existing s/n string convention the string is
-         * terminated by '\0' only when shorter than buffer.
-         */
-        strncpy(req->elem.in_sg[0].iov_base,
-                s->serial ? s->serial : "",
-                MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
-        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
-        g_free(req);
-    } else if (type & VIRTIO_BLK_T_OUT) {
-        qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
-                                 req->elem.out_num - 1);
-        virtio_blk_handle_write(req, mrb);
-    } else {
-        qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
-                                 req->elem.in_num - 1);
-        virtio_blk_handle_read(req);
-    }
-}
-
 static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
 {
-    VirtIOBlock *s = to_virtio_blk(vdev);
-    VirtIOBlockReq *req;
-    MultiReqBuffer mrb = {
-        .num_writes = 0,
-    };
-
-    while ((req = virtio_blk_get_request(s))) {
-        virtio_blk_handle_request(req, &mrb);
-    }
-
-    virtio_submit_multiwrite(s->bs, &mrb);
-
-    /*
-     * FIXME: Want to check for completions before returning to guest mode,
-     * so cached reads and writes are reported as quickly as possible. But
-     * that should be done in the generic block layer.
-     */
-}
-
-static void virtio_blk_dma_restart_bh(void *opaque)
-{
-    VirtIOBlock *s = opaque;
-    VirtIOBlockReq *req = s->rq;
-    MultiReqBuffer mrb = {
-        .num_writes = 0,
-    };
-
-    qemu_bh_delete(s->bh);
-    s->bh = NULL;
-
-    s->rq = NULL;
-
-    while (req) {
-        virtio_blk_handle_request(req, &mrb);
-        req = req->next;
-    }
-
-    virtio_submit_multiwrite(s->bs, &mrb);
-}
-
-static void virtio_blk_dma_restart_cb(void *opaque, int running,
-                                      RunState state)
-{
-    VirtIOBlock *s = opaque;
-
-    if (!running)
-        return;
-
-    if (!s->bh) {
-        s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
-        qemu_bh_schedule(s->bh);
-    }
-}
-
-static void virtio_blk_reset(VirtIODevice *vdev)
-{
-    /*
-     * This should cancel pending requests, but can't do nicely until there
-     * are per-device request lists.
-     */
-    bdrv_drain_all();
+    fprintf(stderr, "virtio_blk_handle_output: should never get here,"
+            "data plane thread should process requests\n");
+    exit(1);
 }
 
 /* coalesce internal state, copy to pci i/o region 0
@@ -519,61 +85,11 @@ static uint32_t virtio_blk_get_features(VirtIODevice *vdev, uint32_t features)
     return features;
 }
 
-static void virtio_blk_save(QEMUFile *f, void *opaque)
-{
-    VirtIOBlock *s = opaque;
-    VirtIOBlockReq *req = s->rq;
-
-    virtio_save(&s->vdev, f);
-
-    while (req) {
-        qemu_put_sbyte(f, 1);
-        qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
-        req = req->next;
-    }
-    qemu_put_sbyte(f, 0);
-}
-
-static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
-{
-    VirtIOBlock *s = opaque;
-
-    if (version_id != 2)
-        return -EINVAL;
-
-    virtio_load(&s->vdev, f);
-    while (qemu_get_sbyte(f)) {
-        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
-        qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
-        req->next = s->rq;
-        s->rq = req;
-
-        virtqueue_map_sg(req->elem.in_sg, req->elem.in_addr,
-                         req->elem.in_num, 1);
-        virtqueue_map_sg(req->elem.out_sg, req->elem.out_addr,
-                         req->elem.out_num, 0);
-    }
-
-    return 0;
-}
-
-static void virtio_blk_resize(void *opaque)
-{
-    VirtIOBlock *s = opaque;
-
-    virtio_notify_config(&s->vdev);
-}
-
-static const BlockDevOps virtio_block_ops = {
-    .resize_cb = virtio_blk_resize,
-};
-
 VirtIODevice *virtio_blk_init(DeviceState *dev, BlockConf *conf,
                               char **serial)
 {
     VirtIOBlock *s;
     int cylinders, heads, secs;
-    static int virtio_blk_id;
     DriveInfo *dinfo;
 
     if (!conf->bs) {
@@ -599,21 +115,15 @@ VirtIODevice *virtio_blk_init(DeviceState *dev, BlockConf *conf,
     s->vdev.get_config = virtio_blk_update_config;
     s->vdev.get_features = virtio_blk_get_features;
-    s->vdev.reset = virtio_blk_reset;
     s->bs = conf->bs;
     s->conf = conf;
     s->serial = *serial;
-    s->rq = NULL;
     s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;
     bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
 
     s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);
 
-    qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
     s->qdev = dev;
-    register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,
-                    virtio_blk_save, virtio_blk_load, s);
-    bdrv_set_dev_ops(s->bs, &virtio_block_ops, s);
    bdrv_set_buffer_alignment(s->bs, conf->logical_block_size);
 
    bdrv_iostatus_enable(s->bs);