From patchwork Wed Apr 15 05:51:26 2020
X-Patchwork-Submitter: Klaus Jensen
X-Patchwork-Id: 11489829
From: Klaus Jensen
To: qemu-block@nongnu.org
Cc: Kevin Wolf, Beata Michalska, Klaus Jensen, qemu-devel@nongnu.org,
    Max Reitz, Klaus Jensen, Keith Busch, Javier Gonzalez, Maxim Levitsky
Subject: [PATCH v7 34/48] nvme: refactor NvmeRequest
Date: Wed, 15 Apr 2020 07:51:26 +0200
Message-Id: <20200415055140.466900-35-its@irrelevant.dk>
In-Reply-To: <20200415055140.466900-1-its@irrelevant.dk>
References: <20200415055140.466900-1-its@irrelevant.dk>

From: Klaus Jensen

Add a reference to the NvmeNamespace to the NvmeRequest structure and move
clearing of the request from "clear before use" to "clear after use".
Signed-off-by: Klaus Jensen
---
 hw/block/nvme.c | 38 +++++++++++++++++++++-----------------
 hw/block/nvme.h |  1 +
 2 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 94d42046149e..a7c5f93fc545 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -159,6 +159,12 @@ static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
     }
 }
 
+static void nvme_req_clear(NvmeRequest *req)
+{
+    req->ns = NULL;
+    memset(&req->cqe, 0x0, sizeof(req->cqe));
+}
+
 static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
                                   size_t len)
 {
@@ -404,6 +410,7 @@ static void nvme_post_cqes(void *opaque)
         nvme_inc_cq_tail(cq);
         pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
             sizeof(req->cqe));
+        nvme_req_clear(req);
         QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
     }
     if (cq->tail != cq->head) {
@@ -513,10 +520,10 @@ static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len,
     return NVME_SUCCESS;
 }
 
-static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
-                                         uint64_t slba, uint32_t nlb,
-                                         NvmeRequest *req)
+static inline uint16_t nvme_check_bounds(NvmeCtrl *n, uint64_t slba,
+                                         uint32_t nlb, NvmeRequest *req)
 {
+    NvmeNamespace *ns = req->ns;
     uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);
 
     if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
@@ -554,8 +561,7 @@ static void nvme_rw_cb(void *opaque, int ret)
     nvme_enqueue_req_completion(cq, req);
 }
 
-static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
-                           NvmeRequest *req)
+static uint16_t nvme_flush(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
         BLOCK_ACCT_FLUSH);
@@ -564,10 +570,10 @@ static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     return NVME_NO_COMPLETE;
 }
 
-static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
-                                  NvmeRequest *req)
+static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
+    NvmeNamespace *ns = req->ns;
     const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
     const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
     uint64_t slba = le64_to_cpu(rw->slba);
@@ -578,7 +584,7 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
 
     trace_nvme_dev_write_zeroes(nvme_cid(req), slba, nlb);
 
-    status = nvme_check_bounds(n, ns, slba, nlb, req);
+    status = nvme_check_bounds(n, slba, nlb, req);
     if (status) {
         return status;
     }
@@ -590,10 +596,10 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     return NVME_NO_COMPLETE;
 }
 
-static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
-                        NvmeRequest *req)
+static uint16_t nvme_rw(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
     NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
+    NvmeNamespace *ns = req->ns;
     uint32_t nlb = le32_to_cpu(rw->nlb) + 1;
     uint64_t slba = le64_to_cpu(rw->slba);
 
@@ -613,7 +619,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
         return status;
     }
 
-    status = nvme_check_bounds(n, ns, slba, nlb, req);
+    status = nvme_check_bounds(n, slba, nlb, req);
     if (status) {
         block_acct_invalid(blk_get_stats(n->conf.blk), acct);
         return status;
@@ -647,7 +653,6 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
 
 static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 {
-    NvmeNamespace *ns;
     uint32_t nsid = le32_to_cpu(cmd->nsid);
 
     trace_nvme_dev_io_cmd(nvme_cid(req), nsid, le16_to_cpu(req->sq->sqid),
@@ -658,15 +663,15 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
         return NVME_INVALID_NSID | NVME_DNR;
     }
 
-    ns = &n->namespaces[nsid - 1];
+    req->ns = &n->namespaces[nsid - 1];
     switch (cmd->opcode) {
     case NVME_CMD_FLUSH:
-        return nvme_flush(n, ns, cmd, req);
+        return nvme_flush(n, cmd, req);
     case NVME_CMD_WRITE_ZEROES:
-        return nvme_write_zeroes(n, ns, cmd, req);
+        return nvme_write_zeroes(n, cmd, req);
     case NVME_CMD_WRITE:
     case NVME_CMD_READ:
-        return nvme_rw(n, ns, cmd, req);
+        return nvme_rw(n, cmd, req);
     default:
         trace_nvme_dev_err_invalid_opc(cmd->opcode);
         return NVME_INVALID_OPCODE | NVME_DNR;
@@ -1463,7 +1468,6 @@ static void nvme_process_sq(void *opaque)
         req = QTAILQ_FIRST(&sq->req_list);
         QTAILQ_REMOVE(&sq->req_list, req, entry);
         QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
-        memset(&req->cqe, 0, sizeof(req->cqe));
         req->cqe.cid = cmd.cid;
 
         status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index a25568723d0d..11a42fa213ab 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -29,6 +29,7 @@ typedef struct NvmeAsyncEvent {
 
 typedef struct NvmeRequest {
     struct NvmeSQueue       *sq;
+    struct NvmeNamespace    *ns;
     BlockAIOCB              *aiocb;
     uint16_t                status;
     NvmeCqe                 cqe;
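
Not part of the patch: below is a minimal, self-contained sketch of the request
lifecycle this change produces, using simplified stand-in types. The names
Namespace, Cqe, Request, req_clear(), io_cmd() and post_cqe() are illustrative
only, not the actual QEMU definitions. The idea it demonstrates is the one in
the commit message: the namespace is resolved once at command dispatch, the
handlers read it through req->ns, and the request is reset only after its
completion entry has been posted.

/* Illustrative sketch only; simplified stand-ins, not hw/block/nvme.c. */
#include <stdio.h>
#include <string.h>

typedef struct Namespace { int id; } Namespace;
typedef struct Cqe { unsigned short cid; unsigned short status; } Cqe;

typedef struct Request {
    Namespace *ns;   /* resolved once per command */
    Cqe        cqe;
} Request;

/* "clear after use": reset the request when its completion has been posted,
 * instead of clearing the CQE just before the request is reused. */
static void req_clear(Request *req)
{
    req->ns = NULL;
    memset(&req->cqe, 0x0, sizeof(req->cqe));
}

static void io_cmd(Request *req, Namespace *namespaces, unsigned int nsid)
{
    req->ns = &namespaces[nsid - 1];               /* namespace lookup happens once */
    printf("I/O command on nsid %d\n", req->ns->id); /* handlers read req->ns */
}

static void post_cqe(Request *req)
{
    printf("posting cqe, cid %u\n", (unsigned)req->cqe.cid);
    req_clear(req);                                /* request returns to the free list clean */
}

int main(void)
{
    Namespace namespaces[1] = { { .id = 1 } };
    Request req = { 0 };

    req.cqe.cid = 42;
    io_cmd(&req, namespaces, 1);
    post_cqe(&req);
    return 0;
}

With clearing done after use, a request taken from the free list is already
pristine, which is why the memset of the CQE in nvme_process_sq() can be
dropped in the patch above.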