From patchwork Tue Jan 25 09:17:41 2022
X-Patchwork-Submitter: "Zhu, Lingshan"
X-Patchwork-Id: 12723576
X-Patchwork-Delegate: kuba@kernel.org
From: Zhu Lingshan
To: mst@redhat.com, jasowang@redhat.com
Cc: netdev@vger.kernel.org, virtualization@lists.linux-foundation.org, Zhu Lingshan
Subject: [PATCH V2 1/4] vDPA/ifcvf: implement IO read/write helpers in the header file
Date: Tue, 25 Jan 2022 17:17:41 +0800
Message-Id: <20220125091744.115996-2-lingshan.zhu@intel.com>
In-Reply-To: <20220125091744.115996-1-lingshan.zhu@intel.com>
References: <20220125091744.115996-1-lingshan.zhu@intel.com>
X-Mailing-List: netdev@vger.kernel.org

Re-implement the IO read/write helpers in the header file so that they can be shared across modules.
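A minimal usage sketch (not part of the patch; the helper below is hypothetical) of what moving the accessors into ifcvf_base.h enables: any file that includes the header can use the same ifc_ioread/ifc_iowrite wrappers on the device's common config space, for example:

#include "ifcvf_base.h"

/* Hypothetical illustration only: read back the size of a virtqueue
 * through the shared ifc_iowrite16()/ifc_ioread16() helpers.
 */
static u16 ifcvf_peek_queue_size(struct ifcvf_hw *hw, u16 qid)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	ifc_iowrite16(qid, &cfg->queue_select);
	return ifc_ioread16(&cfg->queue_size);
}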
Signed-off-by: Zhu Lingshan --- drivers/vdpa/ifcvf/ifcvf_base.c | 36 -------------------------------- drivers/vdpa/ifcvf/ifcvf_base.h | 37 +++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 36 deletions(-) diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 7d41dfe48ade..397692ae671c 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -10,42 +10,6 @@ #include "ifcvf_base.h" -static inline u8 ifc_ioread8(u8 __iomem *addr) -{ - return ioread8(addr); -} -static inline u16 ifc_ioread16 (__le16 __iomem *addr) -{ - return ioread16(addr); -} - -static inline u32 ifc_ioread32(__le32 __iomem *addr) -{ - return ioread32(addr); -} - -static inline void ifc_iowrite8(u8 value, u8 __iomem *addr) -{ - iowrite8(value, addr); -} - -static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr) -{ - iowrite16(value, addr); -} - -static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr) -{ - iowrite32(value, addr); -} - -static void ifc_iowrite64_twopart(u64 val, - __le32 __iomem *lo, __le32 __iomem *hi) -{ - ifc_iowrite32((u32)val, lo); - ifc_iowrite32(val >> 32, hi); -} - struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw) { return container_of(hw, struct ifcvf_adapter, vf); diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index c486873f370a..949b4fb9d554 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -42,6 +42,43 @@ #define ifcvf_private_to_vf(adapter) \ (&((struct ifcvf_adapter *)adapter)->vf) +static inline u8 ifc_ioread8(u8 __iomem *addr) +{ + return ioread8(addr); +} + +static inline u16 ifc_ioread16(__le16 __iomem *addr) +{ + return ioread16(addr); +} + +static inline u32 ifc_ioread32(__le32 __iomem *addr) +{ + return ioread32(addr); +} + +static inline void ifc_iowrite8(u8 value, u8 __iomem *addr) +{ + iowrite8(value, addr); +} + +static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr) +{ + iowrite16(value, addr); +} + +static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr) +{ + iowrite32(value, addr); +} + +static inline void ifc_iowrite64_twopart(u64 val, + __le32 __iomem *lo, __le32 __iomem *hi) +{ + ifc_iowrite32((u32)val, lo); + ifc_iowrite32(val >> 32, hi); +} + struct vring_info { u64 desc; u64 avail; From patchwork Tue Jan 25 09:17:42 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Zhu, Lingshan" X-Patchwork-Id: 12723577 X-Patchwork-Delegate: kuba@kernel.org Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id AC626C433F5 for ; Tue, 25 Jan 2022 09:37:11 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1574396AbiAYJhH (ORCPT ); Tue, 25 Jan 2022 04:37:07 -0500 Received: from mga12.intel.com ([192.55.52.136]:43254 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1574019AbiAYJb2 (ORCPT ); Tue, 25 Jan 2022 04:31:28 -0500 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1643103088; x=1674639088; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=/BarL4LjMS3Q++t6GST4rWeN1SsD5trpG6ZWkvycAl8=; b=lemRF52sy1QyMA0C5iLqsE7H6zy+h1tbYakZiHHmF8YnIHOjHKE7Ajlz 
iXZ8TSc3kAwSgM/ujZNj4bggXYABeSXOFB81SfoGh4zWQgCSvkvZH5VxG SMrwjcFyxavxzR7Cuw2/V75R+xKNqR6VRWifw2Y047YvPpj62NWTw2myq 9OmHV11Igchx+0i/HgA2efCRW/EhkErIuF6PEk+chI8ZtpS9Ysk5QAlyU eH47wBigNtnUek6trUiUtmawkZZbv70II0DL8P9KN9+KXzKWb9rR3GBwU 2Cxn7psW/8oqbFFvRUh0h3VrF5LBm/iVziGCpfc5Ms/ndxYRGlDLhTN/D A==; X-IronPort-AV: E=McAfee;i="6200,9189,10237"; a="226240759" X-IronPort-AV: E=Sophos;i="5.88,314,1635231600"; d="scan'208";a="226240759" Received: from orsmga007.jf.intel.com ([10.7.209.58]) by fmsmga106.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 25 Jan 2022 01:25:05 -0800 X-IronPort-AV: E=Sophos;i="5.88,314,1635231600"; d="scan'208";a="520318800" Received: from unknown (HELO cra01infra01.deacluster.intel.com) ([10.240.193.73]) by orsmga007-auth.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 25 Jan 2022 01:25:03 -0800 From: Zhu Lingshan To: mst@redhat.com, jasowang@redhat.com Cc: netdev@vger.kernel.org, virtualization@lists.linux-foundation.org, Zhu Lingshan Subject: [PATCH V2 2/4] vDPA/ifcvf: implement device MSIX vector allocator Date: Tue, 25 Jan 2022 17:17:42 +0800 Message-Id: <20220125091744.115996-3-lingshan.zhu@intel.com> X-Mailer: git-send-email 2.27.0 In-Reply-To: <20220125091744.115996-1-lingshan.zhu@intel.com> References: <20220125091744.115996-1-lingshan.zhu@intel.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org This commit implements a MSIX vector allocation helper for vqs and config interrupts. Signed-off-by: Zhu Lingshan --- drivers/vdpa/ifcvf/ifcvf_main.c | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index d1a6b5ab543c..7e2af2d2aaf5 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -58,14 +58,40 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues) ifcvf_free_irq_vectors(pdev); } +static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ifcvf_hw *vf = &adapter->vf; + u16 max_intr, ret; + + /* all queues and config interrupt */ + max_intr = vf->nr_vring + 1; + ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + + if (ret < 0) { + IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n"); + return ret; + } + + if (ret < max_intr) + IFCVF_INFO(pdev, + "Requested %u vectors, however only %u allocated, lower performance\n", + max_intr, ret); + + return ret; +} + static int ifcvf_request_irq(struct ifcvf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct ifcvf_hw *vf = &adapter->vf; int vector, i, ret, irq; - u16 max_intr; + u16 nvectors, max_intr; + + nvectors = ifcvf_alloc_vectors(adapter); + if (!(nvectors > 0)) + return nvectors; - /* all queues and config interrupt */ max_intr = vf->nr_vring + 1; ret = pci_alloc_irq_vectors(pdev, max_intr, From patchwork Tue Jan 25 09:17:43 2022 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Zhu, Lingshan" X-Patchwork-Id: 12723579 X-Patchwork-Delegate: kuba@kernel.org Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 95B7CC433EF for ; Tue, 25 Jan 2022 09:37:23 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1574422AbiAYJhL (ORCPT ); Tue, 25 Jan 2022 
From: Zhu Lingshan
To: mst@redhat.com, jasowang@redhat.com
Cc: netdev@vger.kernel.org, virtualization@lists.linux-foundation.org, Zhu Lingshan
Subject: [PATCH V2 3/4] vhost_vdpa: don't setup irq offloading when irq_num < 0
Date: Tue, 25 Jan 2022 17:17:43 +0800
Message-Id: <20220125091744.115996-4-lingshan.zhu@intel.com>
In-Reply-To: <20220125091744.115996-1-lingshan.zhu@intel.com>
References: <20220125091744.115996-1-lingshan.zhu@intel.com>
X-Mailing-List: netdev@vger.kernel.org

When the irq number is negative (e.g., -EINVAL), the virtqueue may be disabled, or the virtqueues may be sharing a device irq. In such a case, we should not set up irq offloading for the virtqueue.
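A minimal sketch (hypothetical parent-driver code, not part of this patch) of the convention this check relies on: a parent's get_vq_irq callback returns a negative errno when a virtqueue has no dedicated irq, for example because all virtqueues share one vector:

/* Hypothetical vDPA parent callback for illustration; the names
 * example_hw, vdpa_to_hw() and vqs_shared_irq are made up here.
 */
static int example_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct example_hw *hw = vdpa_to_hw(vdpa_dev);

	if (hw->vqs_shared_irq >= 0)
		return -EINVAL; /* no per-vq irq, skip irq offloading */

	return hw->vring[qid].irq;
}

With such a return value, vhost_vdpa skips the irq bypass setup for that virtqueue instead of failing it.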
Signed-off-by: Zhu Lingshan
Reported-by: kernel test robot
---
 drivers/vhost/vdpa.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 851539807bc9..909891d518e8 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -96,6 +96,9 @@ static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
 	if (!ops->get_vq_irq)
 		return;
 
+	if (irq < 0)
+		return;
+
 	irq = ops->get_vq_irq(vdpa, qid);
 	irq_bypass_unregister_producer(&vq->call_ctx.producer);
 	if (!vq->call_ctx.ctx || irq < 0)

From patchwork Tue Jan 25 09:17:44 2022
X-Patchwork-Submitter: "Zhu, Lingshan"
X-Patchwork-Id: 12723578
X-Patchwork-Delegate: kuba@kernel.org
From: Zhu Lingshan
To: mst@redhat.com, jasowang@redhat.com
Cc: netdev@vger.kernel.org, virtualization@lists.linux-foundation.org, Zhu Lingshan
Subject: [PATCH V2 4/4] vDPA/ifcvf: implement shared IRQ feature
Date: Tue, 25 Jan 2022 17:17:44 +0800
Message-Id: <20220125091744.115996-5-lingshan.zhu@intel.com>
In-Reply-To: <20220125091744.115996-1-lingshan.zhu@intel.com>
References: <20220125091744.115996-1-lingshan.zhu@intel.com>
X-Mailing-List: netdev@vger.kernel.org

On some platforms/devices, there may not be enough MSI vector slots allocated for the virtqueues and config changes. In such a case, the interrupt sources (virtqueues, config changes) must share an IRQ/vector to avoid initialization failures and keep the device functional.
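As a rough illustration of the sharing idea (a sketch reusing the ifcvf names, not the literal hunks below), a single handler can serve both interrupt sources by checking the ISR status byte for a pending config change and then invoking every virtqueue callback:

/* Sketch of a fully shared interrupt handler: one vector for the
 * config change interrupt and all virtqueues.
 */
static irqreturn_t shared_intr_sketch(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;
	int i;

	if (ifc_ioread8(vf->isr) & VIRTIO_PCI_ISR_CONFIG) {
		if (vf->config_cb.callback)
			vf->config_cb.callback(vf->config_cb.private);
	}

	for (i = 0; i < vf->nr_vring; i++) {
		if (vf->vring[i].cb.callback)
			vf->vring[i].cb.callback(vf->vring[i].cb.private);
	}

	return IRQ_HANDLED;
}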
This commit handles three cases: (1) number of the allocated vectors == the number of virtqueues + 1 (config changes), every virtqueue and the config interrupt has a separated vector/IRQ, the best and the most likely case. (2) number of the allocated vectors is less than the best case, but greater than 1. In this case, all virtqueues share a vector/IRQ, the config interrupt has a separated vector/IRQ (3) only one vector is allocated, in this case, the virtqueues and the config interrupt share a vector/IRQ. The worst and most unlikely case. Otherwise, it needs to fail. This commit introduces some helper functions: ifcvf_set_vq_vector() and ifcvf_set_config_vector() sets virtqueue vector and config vector in the device config space, so that the device can send interrupt DMA. This commit adds some fields in struct ifcvf_hw and re-placed the existed fields to be aligned with the cacheline. Signed-off-by: Zhu Lingshan --- drivers/vdpa/ifcvf/ifcvf_base.c | 47 +++++-- drivers/vdpa/ifcvf/ifcvf_base.h | 23 ++- drivers/vdpa/ifcvf/ifcvf_main.c | 240 +++++++++++++++++++++++++++----- 3 files changed, 253 insertions(+), 57 deletions(-) diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 397692ae671c..18dcb63ab1e3 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -15,6 +15,36 @@ struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw) return container_of(hw, struct ifcvf_adapter, vf); } +int ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector) +{ + struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; + struct ifcvf_adapter *ifcvf = vf_to_adapter(hw); + + ifc_iowrite16(qid, &cfg->queue_select); + ifc_iowrite16(vector, &cfg->queue_msix_vector); + if (ifc_ioread16(&cfg->queue_msix_vector) == VIRTIO_MSI_NO_VECTOR) { + IFCVF_ERR(ifcvf->pdev, "No msix vector for queue %u\n", qid); + return -EINVAL; + } + + return 0; +} + +int ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector) +{ + struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; + struct ifcvf_adapter *ifcvf = vf_to_adapter(hw); + + cfg = hw->common_cfg; + ifc_iowrite16(vector, &cfg->msix_config); + if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) { + IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n"); + return -EINVAL; + } + + return 0; +} + static void __iomem *get_cap_addr(struct ifcvf_hw *hw, struct virtio_pci_cap *cap) { @@ -140,6 +170,8 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev) hw->common_cfg, hw->notify_base, hw->isr, hw->dev_cfg, hw->notify_off_multiplier); + hw->vqs_shared_irq = -EINVAL; + return 0; } @@ -321,12 +353,6 @@ static int ifcvf_hw_enable(struct ifcvf_hw *hw) ifcvf = vf_to_adapter(hw); cfg = hw->common_cfg; - ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config); - - if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) { - IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n"); - return -EINVAL; - } for (i = 0; i < hw->nr_vring; i++) { if (!hw->vring[i].ready) @@ -340,15 +366,6 @@ static int ifcvf_hw_enable(struct ifcvf_hw *hw) ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo, &cfg->queue_used_hi); ifc_iowrite16(hw->vring[i].size, &cfg->queue_size); - ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector); - - if (ifc_ioread16(&cfg->queue_msix_vector) == - VIRTIO_MSI_NO_VECTOR) { - IFCVF_ERR(ifcvf->pdev, - "No msix vector for queue %u\n", i); - return -EINVAL; - } - ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx); ifc_iowrite16(1, &cfg->queue_enable); } 
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index 949b4fb9d554..d2a2a526f0fc 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -27,8 +27,6 @@ #define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE #define IFCVF_QUEUE_MAX 32768 -#define IFCVF_MSI_CONFIG_OFF 0 -#define IFCVF_MSI_QUEUE_OFF 1 #define IFCVF_PCI_MAX_RESOURCE 6 #define IFCVF_LM_CFG_SIZE 0x40 @@ -42,6 +40,13 @@ #define ifcvf_private_to_vf(adapter) \ (&((struct ifcvf_adapter *)adapter)->vf) +/* all vqs and config interrupt has its own vector */ +#define MSIX_VECTOR_PER_VQ_AND_CONFIG 1 +/* all vqs share a vector, and config interrupt has a separate vector */ +#define MSIX_VECTOR_SHARED_VQ_AND_CONFIG 2 +/* all vqs and config interrupt share a vector */ +#define MSIX_VECTOR_DEV_SHARED 3 + static inline u8 ifc_ioread8(u8 __iomem *addr) { return ioread8(addr); @@ -97,25 +102,27 @@ struct ifcvf_hw { u8 __iomem *isr; /* Live migration */ u8 __iomem *lm_cfg; - u16 nr_vring; /* Notification bar number */ u8 notify_bar; + u8 msix_vector_status; + /* virtio-net or virtio-blk device config size */ + u32 config_size; /* Notificaiton bar address */ void __iomem *notify_base; phys_addr_t notify_base_pa; u32 notify_off_multiplier; + u32 dev_type; u64 req_features; u64 hw_features; - u32 dev_type; struct virtio_pci_common_cfg __iomem *common_cfg; void __iomem *dev_cfg; struct vring_info vring[IFCVF_MAX_QUEUES]; void __iomem * const *base; char config_msix_name[256]; struct vdpa_callback config_cb; - unsigned int config_irq; - /* virtio-net or virtio-blk device config size */ - u32 config_size; + u32 config_irq; + u32 vqs_shared_irq; + u16 nr_vring; }; struct ifcvf_adapter { @@ -160,4 +167,6 @@ int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num); struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw); int ifcvf_probed_virtio_net(struct ifcvf_hw *hw); u32 ifcvf_get_config_size(struct ifcvf_hw *hw); +int ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector); +int ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector); #endif /* _IFCVF_H_ */ diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 7e2af2d2aaf5..c7070ff01776 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -17,6 +17,7 @@ #define DRIVER_AUTHOR "Intel Corporation" #define IFCVF_DRIVER_NAME "ifcvf" +/* handles config interrupt */ static irqreturn_t ifcvf_config_changed(int irq, void *arg) { struct ifcvf_hw *vf = arg; @@ -27,6 +28,7 @@ static irqreturn_t ifcvf_config_changed(int irq, void *arg) return IRQ_HANDLED; } +/* handles vqs interrupt */ static irqreturn_t ifcvf_intr_handler(int irq, void *arg) { struct vring_info *vring = arg; @@ -37,24 +39,77 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg) return IRQ_HANDLED; } +/* handls vqs shared interrupt */ +static irqreturn_t ifcvf_vq_shared_intr_handler(int irq, void *arg) +{ + struct ifcvf_hw *vf = arg; + struct vring_info *vring; + int i; + + for (i = 0; i < vf->nr_vring; i++) { + vring = &vf->vring[i]; + if (vring->cb.callback) + vf->vring->cb.callback(vring->cb.private); + } + + return IRQ_HANDLED; +} + +/* handles a shared interrupt for vqs and config */ +static irqreturn_t ifcvf_dev_shared_intr_handler(int irq, void *arg) +{ + struct ifcvf_hw *vf = arg; + u8 isr; + + isr = ifc_ioread8(vf->isr); + if (isr & VIRTIO_PCI_ISR_CONFIG) + ifcvf_config_changed(irq, arg); + + return ifcvf_vq_shared_intr_handler(irq, arg); +} + static void ifcvf_free_irq_vectors(void *data) { 
pci_free_irq_vectors(data); } -static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues) +static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter, int queues) { struct pci_dev *pdev = adapter->pdev; struct ifcvf_hw *vf = &adapter->vf; int i; + if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG) + for (i = 0; i < queues; i++) { + devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]); + vf->vring[i].irq = -EINVAL; + } + else { + devm_free_irq(&pdev->dev, vf->vqs_shared_irq, vf); + } vf->vqs_shared_irq = -EINVAL; +} - for (i = 0; i < queues; i++) { - devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]); - vf->vring[i].irq = -EINVAL; +static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ifcvf_hw *vf = &adapter->vf; + + /* If the irq is shared by all vqs and the config interrupt, + * it is already freed in ifcvf_free_vq_irq, so here only + * need to free config irq when msix_vector_status != MSIX_VECTOR_DEV_SHARED + */ + if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) { + devm_free_irq(&pdev->dev, vf->config_irq, vf); + vf->config_irq = -EINVAL; } +} + +static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues) +{ + struct pci_dev *pdev = adapter->pdev; - devm_free_irq(&pdev->dev, vf->config_irq, vf); + ifcvf_free_vq_irq(adapter, queues); + ifcvf_free_config_irq(adapter); ifcvf_free_irq_vectors(pdev); } @@ -81,58 +136,170 @@ static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter) return ret; } -static int ifcvf_request_irq(struct ifcvf_adapter *adapter) +static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct ifcvf_hw *vf = &adapter->vf; - int vector, i, ret, irq; - u16 nvectors, max_intr; + int i, vector, ret, irq; - nvectors = ifcvf_alloc_vectors(adapter); - if (!(nvectors > 0)) - return nvectors; + for (i = 0; i < vf->nr_vring; i++) { + snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", pci_name(pdev), i); + vector = i; + irq = pci_irq_vector(pdev, vector); + ret = devm_request_irq(&pdev->dev, irq, + ifcvf_intr_handler, 0, + vf->vring[i].msix_name, + &vf->vring[i]); + if (ret) { + IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i); + ifcvf_free_vq_irq(adapter, i); + } else { + vf->vring[i].irq = irq; + ifcvf_set_vq_vector(vf, i, vector); + } + } - max_intr = vf->nr_vring + 1; + vf->vqs_shared_irq = -EINVAL; + + return 0; +} + +static int ifcvf_request_shared_vq_irq(struct ifcvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ifcvf_hw *vf = &adapter->vf; + int i, vector, ret, irq; + + vector = 0; + irq = pci_irq_vector(pdev, vector); + ret = devm_request_irq(&pdev->dev, irq, + ifcvf_vq_shared_intr_handler, 0, + "ifcvf_vq_shared_irq", + vf); + if (ret) { + IFCVF_ERR(pdev, "Failed to request shared irq for vf\n"); + + return ret; + } + + vf->vqs_shared_irq = irq; + for (i = 0; i < vf->nr_vring; i++) { + vf->vring[i].irq = -EINVAL; + ifcvf_set_vq_vector(vf, i, vector); + } + + return 0; + +} + +static int ifcvf_request_dev_shared_irq(struct ifcvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ifcvf_hw *vf = &adapter->vf; + int i, vector, ret, irq; + + vector = 0; + irq = pci_irq_vector(pdev, vector); + ret = devm_request_irq(&pdev->dev, irq, + ifcvf_dev_shared_intr_handler, 0, + "ifcvf_dev_shared_irq", + vf); + if (ret) { + IFCVF_ERR(pdev, "Failed to request shared irq for vf\n"); - ret = pci_alloc_irq_vectors(pdev, max_intr, - max_intr, PCI_IRQ_MSIX); - if (ret < 
0) { - IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n"); return ret; } + vf->vqs_shared_irq = irq; + for (i = 0; i < vf->nr_vring; i++) { + vf->vring[i].irq = -EINVAL; + ifcvf_set_vq_vector(vf, i, vector); + } + + vf->config_irq = irq; + ifcvf_set_config_vector(vf, vector); + + return 0; + +} + +static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter) +{ + struct ifcvf_hw *vf = &adapter->vf; + int ret; + + if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG) + ret = ifcvf_request_per_vq_irq(adapter); + else + ret = ifcvf_request_shared_vq_irq(adapter); + + return ret; +} + +static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ifcvf_hw *vf = &adapter->vf; + int config_vector, ret; + + if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED) + return 0; + + if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG) + /* vector 0 ~ vf->nr_vring for vqs, num vf->nr_vring vector for config interrupt */ + config_vector = vf->nr_vring; + + if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG) + /* vector 0 for vqs and 1 for config interrupt */ + config_vector = 1; + snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n", pci_name(pdev)); - vector = 0; - vf->config_irq = pci_irq_vector(pdev, vector); + vf->config_irq = pci_irq_vector(pdev, config_vector); ret = devm_request_irq(&pdev->dev, vf->config_irq, ifcvf_config_changed, 0, vf->config_msix_name, vf); if (ret) { IFCVF_ERR(pdev, "Failed to request config irq\n"); + ifcvf_free_vq_irq(adapter, vf->nr_vring); return ret; } + ifcvf_set_config_vector(vf, config_vector); - for (i = 0; i < vf->nr_vring; i++) { - snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", - pci_name(pdev), i); - vector = i + IFCVF_MSI_QUEUE_OFF; - irq = pci_irq_vector(pdev, vector); - ret = devm_request_irq(&pdev->dev, irq, - ifcvf_intr_handler, 0, - vf->vring[i].msix_name, - &vf->vring[i]); - if (ret) { - IFCVF_ERR(pdev, - "Failed to request irq for vq %d\n", i); - ifcvf_free_irq(adapter, i); + return 0; +} - return ret; - } +static int ifcvf_request_irq(struct ifcvf_adapter *adapter) +{ + struct ifcvf_hw *vf = &adapter->vf; + u16 nvectors, max_intr; + int ret; - vf->vring[i].irq = irq; + nvectors = ifcvf_alloc_vectors(adapter); + if (!(nvectors > 0)) + return nvectors; + + vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG; + max_intr = vf->nr_vring + 1; + if (nvectors < max_intr) + vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG; + + if (nvectors == 1) { + vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED; + ret = ifcvf_request_dev_shared_irq(adapter); + + return ret; } + ret = ifcvf_request_vq_irq(adapter); + if (ret) + return ret; + + ret = ifcvf_request_config_irq(adapter); + + if (ret) + return ret; + return 0; } @@ -436,7 +603,10 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - return vf->vring[qid].irq; + if (vf->vqs_shared_irq < 0) + return vf->vring[qid].irq; + else + return -EINVAL; } static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,