From patchwork Fri May 17 08:42:30 2019
X-Patchwork-Submitter: Klaus Jensen
X-Patchwork-Id: 10947501
From: Klaus Birkelund Jensen
To: qemu-block@nongnu.org
Cc: Keith Busch, Kevin Wolf, qemu-devel@nongnu.org, Max Reitz
Date: Fri, 17 May 2019 10:42:30 +0200
Message-Id: <20190517084234.26923-5-klaus@birkelund.eu>
In-Reply-To: <20190517084234.26923-1-klaus@birkelund.eu>
References: <20190517084234.26923-1-klaus@birkelund.eu>
Subject: [Qemu-devel] [PATCH 4/8] nvme: allow multiple i/o's per request

Introduce a new NvmeBlockBackendRequest and move the QEMUSGList and
QEMUIOVector from the NvmeRequest. This is in preparation for metadata
support and makes it easier to handle multiple block backend requests
to different offsets.
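The key behavioural change is in completion handling: an NvmeRequest now
carries a tail queue of NvmeBlockBackendRequest entries and is only posted
to its completion queue once the last entry in that queue has finished (see
nvme_rw_cb below). A minimal standalone sketch of that rule, using plain
<sys/queue.h> and hypothetical names rather than the QEMU types:

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Hypothetical stand-ins for NvmeBlockBackendRequest / NvmeRequest. */
struct backend_req {
    int id;
    TAILQ_ENTRY(backend_req) entry;
};

struct request {
    TAILQ_HEAD(, backend_req) backend_reqs;
};

/* Mirrors the nvme_rw_cb() rule: complete the request only when the
 * last outstanding backend request has been removed from the queue. */
static void backend_req_done(struct request *req, struct backend_req *breq)
{
    TAILQ_REMOVE(&req->backend_reqs, breq, entry);
    free(breq);

    if (TAILQ_EMPTY(&req->backend_reqs)) {
        printf("request complete\n");
    }
}

int main(void)
{
    struct request req;
    TAILQ_INIT(&req.backend_reqs);

    for (int i = 0; i < 3; i++) {
        struct backend_req *breq = calloc(1, sizeof(*breq));
        breq->id = i;
        TAILQ_INSERT_TAIL(&req.backend_reqs, breq, entry);
    }

    /* Simulate the three backend I/Os finishing in order; only the
     * last one triggers completion. */
    while (!TAILQ_EMPTY(&req.backend_reqs)) {
        backend_req_done(&req, TAILQ_FIRST(&req.backend_reqs));
    }

    return 0;
}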
Signed-off-by: Klaus Birkelund Jensen
---
 hw/block/nvme.c       | 319 ++++++++++++++++++++++++++++++++----------
 hw/block/nvme.h       |  47 +++++--
 hw/block/trace-events |   2 +
 3 files changed, 286 insertions(+), 82 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 453213f9abb4..c514f93f3867 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -322,6 +322,88 @@ static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
     return err;
 }
 
+static void nvme_blk_req_destroy(NvmeBlockBackendRequest *blk_req)
+{
+    if (blk_req->qsg.nalloc) {
+        qemu_sglist_destroy(&blk_req->qsg);
+    }
+
+    if (blk_req->iov.nalloc) {
+        qemu_iovec_destroy(&blk_req->iov);
+    }
+
+    g_free(blk_req);
+}
+
+static void nvme_blk_req_put(NvmeCtrl *n, NvmeBlockBackendRequest *blk_req)
+{
+    nvme_blk_req_destroy(blk_req);
+}
+
+static NvmeBlockBackendRequest *nvme_blk_req_get(NvmeCtrl *n, NvmeRequest *req,
+    QEMUSGList *qsg)
+{
+    NvmeBlockBackendRequest *blk_req = g_malloc0(sizeof(*blk_req));
+
+    blk_req->req = req;
+
+    if (qsg) {
+        pci_dma_sglist_init(&blk_req->qsg, &n->parent_obj, qsg->nsg);
+        memcpy(blk_req->qsg.sg, qsg->sg, qsg->nsg * sizeof(ScatterGatherEntry));
+
+        blk_req->qsg.nsg = qsg->nsg;
+        blk_req->qsg.size = qsg->size;
+    }
+
+    return blk_req;
+}
+
+static uint16_t nvme_blk_setup(NvmeCtrl *n, NvmeNamespace *ns, QEMUSGList *qsg,
+    uint64_t blk_offset, uint32_t unit_len, NvmeRequest *req)
+{
+    NvmeBlockBackendRequest *blk_req = nvme_blk_req_get(n, req, qsg);
+    if (!blk_req) {
+        NVME_GUEST_ERR(nvme_err_internal_dev_error, "nvme_blk_req_get: %s",
+            "could not allocate memory");
+        return NVME_INTERNAL_DEV_ERROR;
+    }
+
+    blk_req->slba = req->slba;
+    blk_req->nlb = req->nlb;
+    blk_req->blk_offset = blk_offset + req->slba * unit_len;
+
+    QTAILQ_INSERT_TAIL(&req->blk_req_tailq, blk_req, tailq_entry);
+
+    return NVME_SUCCESS;
+}
+
+static uint16_t nvme_blk_map(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+{
+    NvmeNamespace *ns = req->ns;
+    uint16_t err;
+
+    QEMUSGList qsg;
+
+    uint32_t unit_len = nvme_ns_lbads_bytes(ns);
+    uint32_t len = req->nlb * unit_len;
+    uint64_t prp1 = le64_to_cpu(cmd->prp1);
+    uint64_t prp2 = le64_to_cpu(cmd->prp2);
+
+    err = nvme_map_prp(n, &qsg, prp1, prp2, len, req);
+    if (err) {
+        return err;
+    }
+
+    err = nvme_blk_setup(n, ns, &qsg, ns->blk_offset, unit_len, req);
+    if (err) {
+        return err;
+    }
+
+    qemu_sglist_destroy(&qsg);
+
+    return NVME_SUCCESS;
+}
+
 static void nvme_post_cqe(NvmeCQueue *cq, NvmeRequest *req)
 {
     NvmeCtrl *n = cq->ctrl;
@@ -447,114 +529,190 @@ static void nvme_process_aers(void *opaque)
 
 static void nvme_rw_cb(void *opaque, int ret)
 {
-    NvmeRequest *req = opaque;
+    NvmeBlockBackendRequest *blk_req = opaque;
+    NvmeRequest *req = blk_req->req;
     NvmeSQueue *sq = req->sq;
     NvmeCtrl *n = sq->ctrl;
     NvmeCQueue *cq = n->cq[sq->cqid];
+    NvmeNamespace *ns = req->ns;
+
+    QTAILQ_REMOVE(&req->blk_req_tailq, blk_req, tailq_entry);
+
+    trace_nvme_rw_cb(req->cqe.cid, ns->id);
 
     if (!ret) {
-        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
-        req->status = NVME_SUCCESS;
+        block_acct_done(blk_get_stats(n->conf.blk), &blk_req->acct);
     } else {
-        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
-        req->status = NVME_INTERNAL_DEV_ERROR;
+        block_acct_failed(blk_get_stats(n->conf.blk), &blk_req->acct);
+        NVME_GUEST_ERR(nvme_err_internal_dev_error, "block request failed: %s",
+            strerror(-ret));
+        req->status = NVME_INTERNAL_DEV_ERROR | NVME_DNR;
     }
 
-    if (req->qsg.nalloc) {
-        qemu_sglist_destroy(&req->qsg);
-    }
-    if (req->iov.nalloc) {
-        qemu_iovec_destroy(&req->iov);
+    if (QTAILQ_EMPTY(&req->blk_req_tailq)) {
+        nvme_enqueue_req_completion(cq, req);
     }
 
-    nvme_enqueue_req_completion(cq, req);
+    nvme_blk_req_put(n, blk_req);
 }
 
-static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
-    NvmeRequest *req)
+static uint16_t nvme_flush(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
-    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
+    NvmeBlockBackendRequest *blk_req = nvme_blk_req_get(n, req, NULL);
+    if (!blk_req) {
+        NVME_GUEST_ERR(nvme_err_internal_dev_error, "nvme_blk_req_get: %s",
+            "could not allocate memory");
+        return NVME_INTERNAL_DEV_ERROR;
+    }
+
+    block_acct_start(blk_get_stats(n->conf.blk), &blk_req->acct, 0,
         BLOCK_ACCT_FLUSH);
-    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);
+    blk_req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, blk_req);
+
+    QTAILQ_INSERT_TAIL(&req->blk_req_tailq, blk_req, tailq_entry);
 
     return NVME_NO_COMPLETE;
 }
 
-static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
-    NvmeRequest *req)
+static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
-    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
-    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].lbads;
+    NvmeBlockBackendRequest *blk_req;
+    const uint8_t lbads = nvme_ns_lbads(req->ns);
     uint64_t slba = le64_to_cpu(rw->slba);
     uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
-    uint64_t offset = slba << data_shift;
-    uint32_t count = nlb << data_shift;
+    uint64_t offset = slba << lbads;
+    uint32_t count = nlb << lbads;
 
-    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
-        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
+    if (unlikely(slba + nlb > req->ns->id_ns.nsze)) {
+        trace_nvme_err_invalid_lba_range(slba, nlb, req->ns->id_ns.nsze);
         return NVME_LBA_RANGE | NVME_DNR;
     }
 
-    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
-        BLOCK_ACCT_WRITE);
-    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
-        BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
+    blk_req = nvme_blk_req_get(n, req, NULL);
+    if (!blk_req) {
+        NVME_GUEST_ERR(nvme_err_internal_dev_error, "nvme_blk_req_get: %s",
+            "could not allocate memory");
+        return NVME_INTERNAL_DEV_ERROR;
+    }
+
+    block_acct_start(blk_get_stats(n->conf.blk), &blk_req->acct, 0,
+        BLOCK_ACCT_WRITE);
+
+    blk_req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
+        BDRV_REQ_MAY_UNMAP, nvme_rw_cb, blk_req);
+
+    QTAILQ_INSERT_TAIL(&req->blk_req_tailq, blk_req, tailq_entry);
+
+    return NVME_NO_COMPLETE;
+}
+
+static uint16_t nvme_rw_check_req(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
+{
+    NvmeNamespace *ns = req->ns;
+    NvmeRwCmd *rw = (NvmeRwCmd *) cmd;
+
+    uint16_t ctrl = le16_to_cpu(rw->control);
+    uint32_t data_size = req->nlb << nvme_ns_lbads(ns);
+
+    if (n->params.mdts && data_size > n->page_size * (1 << n->params.mdts)) {
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+
+    if ((ctrl & NVME_RW_PRINFO_PRACT) && !(ns->id_ns.dps & DPS_TYPE_MASK)) {
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+
+    return NVME_SUCCESS;
+}
+
+static void nvme_blk_submit_dma(NvmeCtrl *n, NvmeBlockBackendRequest *blk_req,
+    BlockCompletionFunc *cb)
+{
+    NvmeRequest *req = blk_req->req;
+
+    if (req->is_write) {
+        dma_acct_start(n->conf.blk, &blk_req->acct, &blk_req->qsg,
+            BLOCK_ACCT_WRITE);
+
+        blk_req->aiocb = dma_blk_write(n->conf.blk, &blk_req->qsg,
+            blk_req->blk_offset, BDRV_SECTOR_SIZE, cb, blk_req);
+    } else {
+        dma_acct_start(n->conf.blk, &blk_req->acct, &blk_req->qsg,
+            BLOCK_ACCT_READ);
+
+        blk_req->aiocb = dma_blk_read(n->conf.blk, &blk_req->qsg,
+            blk_req->blk_offset, BDRV_SECTOR_SIZE, cb, blk_req);
+    }
+}
+
+static void nvme_blk_submit_cmb(NvmeCtrl *n, NvmeBlockBackendRequest *blk_req,
+    BlockCompletionFunc *cb)
+{
+    NvmeRequest *req = blk_req->req;
+
+    qemu_iovec_init(&blk_req->iov, blk_req->qsg.nsg);
+    dma_to_cmb(n, &blk_req->qsg, &blk_req->iov);
+
+    if (req->is_write) {
+        block_acct_start(blk_get_stats(n->conf.blk), &blk_req->acct,
+            blk_req->iov.size, BLOCK_ACCT_WRITE);
+
+        blk_req->aiocb = blk_aio_pwritev(n->conf.blk, blk_req->blk_offset,
+            &blk_req->iov, 0, cb, blk_req);
+    } else {
+        block_acct_start(blk_get_stats(n->conf.blk), &blk_req->acct,
+            blk_req->iov.size, BLOCK_ACCT_READ);
+
+        blk_req->aiocb = blk_aio_preadv(n->conf.blk, blk_req->blk_offset,
+            &blk_req->iov, 0, cb, blk_req);
+    }
+}
+
+static uint16_t nvme_blk_submit_io(NvmeCtrl *n, NvmeRequest *req,
+    BlockCompletionFunc *cb)
+{
+    NvmeBlockBackendRequest *blk_req;
+
+    if (QTAILQ_EMPTY(&req->blk_req_tailq)) {
+        return NVME_SUCCESS;
+    }
+
+    QTAILQ_FOREACH(blk_req, &req->blk_req_tailq, tailq_entry) {
+        if (req->is_cmb) {
+            nvme_blk_submit_cmb(n, blk_req, cb);
+        } else {
+            nvme_blk_submit_dma(n, blk_req, cb);
+        }
+    }
 
     return NVME_NO_COMPLETE;
 }
 
-static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
-    NvmeRequest *req)
+static uint16_t nvme_rw(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
     uint32_t nlb = le32_to_cpu(rw->nlb) + 1;
     uint64_t slba = le64_to_cpu(rw->slba);
-    uint64_t prp1 = le64_to_cpu(rw->prp1);
-    uint64_t prp2 = le64_to_cpu(rw->prp2);
-    uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
-    uint8_t data_shift = ns->id_ns.lbaf[lba_index].lbads;
-    uint64_t data_size = (uint64_t)nlb << data_shift;
-    uint64_t data_offset = ns->blk_offset + (slba << data_shift);
-    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
-    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
+    req->is_write = nvme_rw_is_write(req);
 
-    trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
+    trace_nvme_rw(req->is_write ? "write" : "read", nlb,
+        nlb << nvme_ns_lbads(req->ns), slba);
 
-    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
-        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
-        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
-        return NVME_LBA_RANGE | NVME_DNR;
+    int err = nvme_blk_map(n, cmd, req);
+    if (err) {
+        return err;
     }
 
-    if (nvme_map_prp(n, &req->qsg, prp1, prp2, data_size, req)) {
-        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
-        return NVME_INVALID_FIELD | NVME_DNR;
-    }
-
-    dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
-    if (!req->is_cmb) {
-        req->aiocb = is_write ?
-            dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
-                nvme_rw_cb, req) :
-            dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
-                nvme_rw_cb, req);
-    } else {
-        qemu_iovec_init(&req->iov, req->qsg.nsg);
-        dma_to_cmb(n, &req->qsg, &req->iov);
-        req->aiocb = is_write ?
-            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
-                req) :
-            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
-                req);
-    }
-
-    return NVME_NO_COMPLETE;
+    return nvme_blk_submit_io(n, req, nvme_rw_cb);
 }
 
 static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
-    NvmeNamespace *ns;
+    NvmeRwCmd *rw;
+    int err;
+
     uint32_t nsid = le32_to_cpu(cmd->nsid);
 
     if (unlikely(nsid == 0 || nsid > n->params.num_ns)) {
@@ -562,15 +720,26 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         return NVME_INVALID_NSID | NVME_DNR;
     }
 
-    ns = &n->namespaces[nsid - 1];
+    req->ns = &n->namespaces[nsid - 1];
+
     switch (cmd->opcode) {
     case NVME_CMD_FLUSH:
-        return nvme_flush(n, ns, cmd, req);
+        return nvme_flush(n, cmd, req);
     case NVME_CMD_WRITE_ZEROS:
-        return nvme_write_zeros(n, ns, cmd, req);
+        return nvme_write_zeros(n, cmd, req);
     case NVME_CMD_WRITE:
     case NVME_CMD_READ:
-        return nvme_rw(n, ns, cmd, req);
+        rw = (NvmeRwCmd *)cmd;
+
+        req->nlb = le16_to_cpu(rw->nlb) + 1;
+        req->slba = le64_to_cpu(rw->slba);
+
+        err = nvme_rw_check_req(n, cmd, req);
+        if (err) {
+            return err;
+        }
+
+        return nvme_rw(n, cmd, req);
     default:
         trace_nvme_err_invalid_opc(cmd->opcode);
         return NVME_INVALID_OPCODE | NVME_DNR;
@@ -595,6 +764,7 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
     NvmeRequest *req, *next;
     NvmeSQueue *sq;
     NvmeCQueue *cq;
+    NvmeBlockBackendRequest *blk_req;
     uint16_t qid = le16_to_cpu(c->qid);
 
     if (unlikely(!qid || nvme_check_sqid(n, qid))) {
@@ -607,8 +777,11 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
     sq = n->sq[qid];
     while (!QTAILQ_EMPTY(&sq->out_req_list)) {
         req = QTAILQ_FIRST(&sq->out_req_list);
-        assert(req->aiocb);
-        blk_aio_cancel(req->aiocb);
+        while (!QTAILQ_EMPTY(&req->blk_req_tailq)) {
+            blk_req = QTAILQ_FIRST(&req->blk_req_tailq);
+            assert(blk_req->aiocb);
+            blk_aio_cancel(blk_req->aiocb);
+        }
     }
     if (!nvme_check_cqid(n, sq->cqid)) {
         cq = n->cq[sq->cqid];
@@ -645,6 +818,7 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
     QTAILQ_INIT(&sq->out_req_list);
     for (i = 0; i < sq->size; i++) {
         sq->io_req[i].sq = sq;
+        QTAILQ_INIT(&(sq->io_req[i].blk_req_tailq));
         QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
     }
     sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);
@@ -1842,6 +2016,7 @@ static void nvme_init_ctrl(NvmeCtrl *n)
     id->ieee[1] = 0x02;
     id->ieee[2] = 0xb3;
     id->cmic = 0;
+    id->mdts = params->mdts;
     id->ver = cpu_to_le32(0x00010300);
     id->oacs = cpu_to_le16(0);
     id->acl = 3;
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index 05217257ca3f..711ca249eac5 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -7,12 +7,14 @@
     DEFINE_PROP_STRING("serial", _state, _props.serial), \
     DEFINE_PROP_UINT32("cmb_size_mb", _state, _props.cmb_size_mb, 0), \
     DEFINE_PROP_UINT32("num_queues", _state, _props.num_queues, 64), \
-    DEFINE_PROP_UINT32("num_ns", _state, _props.num_ns, 1)
+    DEFINE_PROP_UINT32("num_ns", _state, _props.num_ns, 1), \
+    DEFINE_PROP_UINT8("mdts", _state, _props.mdts, 7)
 
 typedef struct NvmeParams {
     char *serial;
     uint32_t num_queues;
     uint32_t num_ns;
+    uint8_t mdts;
     uint32_t cmb_size_mb;
 } NvmeParams;
 
@@ -21,16 +23,36 @@ typedef struct NvmeAsyncEvent {
     NvmeAerResult result;
 } NvmeAsyncEvent;
 
+typedef struct NvmeBlockBackendRequest {
+    uint64_t slba;
+    uint16_t nlb;
+    uint64_t blk_offset;
+
+    struct NvmeRequest *req;
+
+    BlockAIOCB *aiocb;
+    BlockAcctCookie acct;
+
+    QEMUSGList qsg;
+    QEMUIOVector iov;
+
+    QTAILQ_ENTRY(NvmeBlockBackendRequest) tailq_entry;
+    QSLIST_ENTRY(NvmeBlockBackendRequest) slist_entry;
+} NvmeBlockBackendRequest;
+
 typedef struct NvmeRequest {
-    struct NvmeSQueue *sq;
-    BlockAIOCB *aiocb;
-    uint16_t status;
-    bool is_cmb;
-    uint8_t cmd_opcode;
-    NvmeCqe cqe;
-    BlockAcctCookie acct;
-    QEMUSGList qsg;
-    QEMUIOVector iov;
+    struct NvmeSQueue *sq;
+    struct NvmeNamespace *ns;
+    NvmeCqe cqe;
+
+    uint64_t slba;
+    uint16_t nlb;
+    uint16_t status;
+    bool is_cmb;
+    bool is_write;
+    uint8_t cmd_opcode;
+
+    QTAILQ_HEAD(, NvmeBlockBackendRequest) blk_req_tailq;
     QTAILQ_ENTRY(NvmeRequest)entry;
 } NvmeRequest;
 
@@ -116,6 +138,11 @@ typedef struct NvmeCtrl {
     NvmeIdCtrl id_ctrl;
 } NvmeCtrl;
 
+static inline bool nvme_rw_is_write(NvmeRequest *req)
+{
+    return req->cmd_opcode == NVME_CMD_WRITE;
+}
+
 static inline uint8_t nvme_ns_lbads(NvmeNamespace *ns)
 {
     NvmeIdNs *id = &ns->id_ns;
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 676a3a615c9d..56fec40d130c 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -37,6 +37,7 @@ nvme_irq_masked(void) "IRQ is masked"
 nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
 nvme_map_prp(uint8_t cmd_opcode, uint64_t trans_len, uint32_t len, uint64_t prp1, uint64_t prp2, int num_prps) "cmd_opcode=0x%"PRIx8", trans_len=%"PRIu64", len=%"PRIu32", prp1=0x%"PRIx64", prp2=0x%"PRIx64", num_prps=%d"
 nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
+nvme_rw_cb(uint16_t cid, uint32_t nsid) "cid %"PRIu16" nsid %"PRIu32""
 nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
 nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
 nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
@@ -115,6 +116,7 @@ nvme_err_startfail_sqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_star
 nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the admin submission queue size is zero"
 nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero"
 nvme_err_startfail(void) "setting controller enable bit failed"
+nvme_err_internal_dev_error(const char *reason) "%s"
 
 # Traces for undefined behavior
 nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64""
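As a usage note, the new mdts property (reported in Identify Controller and
checked in nvme_rw_check_req) defaults to 7 and can be set when instantiating
the controller, e.g. (illustrative command line; the drive id, image name and
serial are arbitrary):

    -drive file=nvme.img,if=none,id=nvme0 \
    -device nvme,drive=nvme0,serial=deadbeef,mdts=7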