From patchwork Fri Dec 10 17:54:52 2021
X-Patchwork-Submitter: Robin Murphy
X-Patchwork-Id: 12670559
From: Robin Murphy
To: joro@8bytes.org, will@kernel.org
Cc: iommu@lists.linux-foundation.org, suravee.suthikulpanit@amd.com,
    baolu.lu@linux.intel.com, willy@infradead.org, linux-kernel@vger.kernel.org,
    john.garry@huawei.com, linux-mm@kvack.org
Subject: [PATCH v2 11/11] iommu: Move flush queue data into iommu_dma_cookie
Date: Fri, 10 Dec 2021 17:54:52 +0000

Complete the move into iommu-dma by refactoring the flush queues
themselves to belong to the DMA cookie rather than the IOVA domain.

The refactoring may as well extend to some minor cosmetic aspects too, to
help us stay one step ahead of the style police.
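For readers new to this code, the standalone C sketch below models the ring
discipline used by the flush queues that this patch relocates. IOVA_FQ_SIZE,
head/tail and the start/finish flush counters mirror the patch; the simplified
entry type, the single global (non-per-CPU) queue, and the absence of locking
and of the flush timer are assumptions made purely for illustration, not the
kernel implementation.

#include <stdint.h>
#include <stdio.h>

#define IOVA_FQ_SIZE 8	/* kept small for the example; the patch uses 256 */

/* Simplified stand-in for struct iova_fq_entry: a PFN plus the flush
 * counter that was current when the entry was queued. */
struct fq_entry {
	unsigned long iova_pfn;
	uint64_t counter;
};

struct fq {
	struct fq_entry entries[IOVA_FQ_SIZE];
	unsigned int head, tail;
};

/* Model of fq_flush_start_cnt / fq_flush_finish_cnt */
static uint64_t flush_start_cnt, flush_finish_cnt;

static int fq_full(struct fq *fq)
{
	return ((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head;
}

/* Reclaim every entry whose IOTLB flush has since completed */
static void fq_ring_free(struct fq *fq)
{
	uint64_t counter = flush_finish_cnt;
	unsigned int idx;

	for (idx = fq->head; idx != fq->tail; idx = (idx + 1) % IOVA_FQ_SIZE) {
		if (fq->entries[idx].counter >= counter)
			break;
		printf("freeing IOVA pfn %lu\n", fq->entries[idx].iova_pfn);
		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

/* Model of fq_flush_iotlb(): bump start, "flush", bump finish */
static void fq_flush(void)
{
	flush_start_cnt++;
	/* a real implementation would invalidate the IOTLB here */
	flush_finish_cnt++;
}

static void queue_iova(struct fq *fq, unsigned long pfn)
{
	fq_ring_free(fq);		/* opportunistically reclaim finished entries */
	if (fq_full(fq)) {		/* ring full: force a flush, then reclaim */
		fq_flush();
		fq_ring_free(fq);
	}
	fq->entries[fq->tail].iova_pfn = pfn;
	fq->entries[fq->tail].counter = flush_start_cnt;
	fq->tail = (fq->tail + 1) % IOVA_FQ_SIZE;
}

int main(void)
{
	struct fq fq = { .head = 0, .tail = 0 };

	for (unsigned long pfn = 0; pfn < 20; pfn++)
		queue_iova(&fq, pfn);
	fq_flush();
	fq_ring_free(&fq);	/* what the 10ms timer would eventually do */
	return 0;
}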
Signed-off-by: Robin Murphy
Reported-by: kernel test robot
Reported-by: kernel test robot
Reviewed-by: John Garry
---

v2: Rebase with del_timer_sync() change

 drivers/iommu/dma-iommu.c | 171 +++++++++++++++++++++-----------------
 drivers/iommu/iova.c      |   2 -
 include/linux/iova.h      |  44 +---------
 3 files changed, 95 insertions(+), 122 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ab8818965b2f..a7cd3a875481 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -9,9 +9,12 @@
  */
 
 #include
+#include
+#include
 #include
-#include
+#include
 #include
+#include
 #include
 #include
 #include
@@ -20,11 +23,10 @@
 #include
 #include
 #include
-#include
 #include
+#include
+#include
 #include
-#include
-#include
 
 struct iommu_dma_msi_page {
 	struct list_head list;
@@ -41,7 +43,19 @@ struct iommu_dma_cookie {
 	enum iommu_dma_cookie_type	type;
 	union {
 		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
-		struct iova_domain	iovad;
+		struct {
+			struct iova_domain	iovad;
+
+			struct iova_fq __percpu *fq;	/* Flush queue */
+			/* Number of TLB flushes that have been started */
+			atomic64_t		fq_flush_start_cnt;
+			/* Number of TLB flushes that have been finished */
+			atomic64_t		fq_flush_finish_cnt;
+			/* Timer to regularily empty the flush queues */
+			struct timer_list	fq_timer;
+			/* 1 when timer is active, 0 when not */
+			atomic_t		fq_timer_on;
+		};
 		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
 		dma_addr_t		msi_iova;
 	};
@@ -65,6 +79,27 @@ static int __init iommu_dma_forcedac_setup(char *str)
 
 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 
+/* Number of entries per flush queue */
+#define IOVA_FQ_SIZE	256
+
+/* Timeout (in ms) after which entries are flushed from the queue */
+#define IOVA_FQ_TIMEOUT	10
+
+/* Flush queue entry for deferred flushing */
+struct iova_fq_entry {
+	unsigned long iova_pfn;
+	unsigned long pages;
+	struct list_head freelist;
+	u64 counter; /* Flush counter when this entry was added */
+};
+
+/* Per-CPU flush queue structure */
+struct iova_fq {
+	struct iova_fq_entry entries[IOVA_FQ_SIZE];
+	unsigned int head, tail;
+	spinlock_t lock;
+};
+
 #define fq_ring_for_each(i, fq) \
 	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
 
@@ -74,9 +109,9 @@ static inline bool fq_full(struct iova_fq *fq)
 	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
 }
 
-static inline unsigned fq_ring_add(struct iova_fq *fq)
+static inline unsigned int fq_ring_add(struct iova_fq *fq)
 {
-	unsigned idx = fq->tail;
+	unsigned int idx = fq->tail;
 
 	assert_spin_locked(&fq->lock);
 
@@ -85,10 +120,10 @@ static inline unsigned fq_ring_add(struct iova_fq *fq)
 	return idx;
 }
 
-static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
+static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
 {
-	u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
-	unsigned idx;
+	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
+	unsigned int idx;
 
 	assert_spin_locked(&fq->lock);
 
@@ -98,7 +133,7 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 			break;
 
 		put_pages_list(&fq->entries[idx].freelist);
-		free_iova_fast(iovad,
+		free_iova_fast(&cookie->iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);
 
@@ -106,50 +141,50 @@ static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
 	}
 }
 
-static void iova_domain_flush(struct iova_domain *iovad)
+static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
 {
-	atomic64_inc(&iovad->fq_flush_start_cnt);
-	iovad->fq_domain->ops->flush_iotlb_all(iovad->fq_domain);
-	atomic64_inc(&iovad->fq_flush_finish_cnt);
+	atomic64_inc(&cookie->fq_flush_start_cnt);
+	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
+	atomic64_inc(&cookie->fq_flush_finish_cnt);
 }
 
 static void fq_flush_timeout(struct timer_list *t)
 {
-	struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
+	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
 	int cpu;
 
-	atomic_set(&iovad->fq_timer_on, 0);
-	iova_domain_flush(iovad);
+	atomic_set(&cookie->fq_timer_on, 0);
+	fq_flush_iotlb(cookie);
 
 	for_each_possible_cpu(cpu) {
 		unsigned long flags;
 		struct iova_fq *fq;
 
-		fq = per_cpu_ptr(iovad->fq, cpu);
+		fq = per_cpu_ptr(cookie->fq, cpu);
 		spin_lock_irqsave(&fq->lock, flags);
-		fq_ring_free(iovad, fq);
+		fq_ring_free(cookie, fq);
 		spin_unlock_irqrestore(&fq->lock, flags);
 	}
 }
 
-void queue_iova(struct iova_domain *iovad,
+static void queue_iova(struct iommu_dma_cookie *cookie,
 		unsigned long pfn, unsigned long pages,
 		struct list_head *freelist)
 {
 	struct iova_fq *fq;
 	unsigned long flags;
-	unsigned idx;
+	unsigned int idx;
 
 	/*
 	 * Order against the IOMMU driver's pagetable update from unmapping
-	 * @pte, to guarantee that iova_domain_flush() observes that if called
+	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
 	 * from a different CPU before we release the lock below. Full barrier
 	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
 	 * written fq state here.
 	 */
 	smp_mb();
 
-	fq = raw_cpu_ptr(iovad->fq);
+	fq = raw_cpu_ptr(cookie->fq);
 	spin_lock_irqsave(&fq->lock, flags);
 
 	/*
@@ -157,65 +192,66 @@ void queue_iova(struct iova_domain *iovad,
 	 * flushed out on another CPU. This makes the fq_full() check below less
 	 * likely to be true.
 	 */
-	fq_ring_free(iovad, fq);
+	fq_ring_free(cookie, fq);
 
 	if (fq_full(fq)) {
-		iova_domain_flush(iovad);
-		fq_ring_free(iovad, fq);
+		fq_flush_iotlb(cookie);
+		fq_ring_free(cookie, fq);
 	}
 
 	idx = fq_ring_add(fq);
 
 	fq->entries[idx].iova_pfn = pfn;
 	fq->entries[idx].pages = pages;
-	fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
+	fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
 	list_splice(freelist, &fq->entries[idx].freelist);
 
 	spin_unlock_irqrestore(&fq->lock, flags);
 
 	/* Avoid false sharing as much as possible. */
-	if (!atomic_read(&iovad->fq_timer_on) &&
-	    !atomic_xchg(&iovad->fq_timer_on, 1))
-		mod_timer(&iovad->fq_timer,
+	if (!atomic_read(&cookie->fq_timer_on) &&
+	    !atomic_xchg(&cookie->fq_timer_on, 1))
+		mod_timer(&cookie->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
 }
 
-static void free_iova_flush_queue(struct iova_domain *iovad)
+static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
 {
 	int cpu, idx;
 
-	if (!iovad->fq)
+	if (!cookie->fq)
 		return;
 
-	del_timer_sync(&iovad->fq_timer);
-	/*
-	 * This code runs when the iova_domain is being detroyed, so don't
-	 * bother to free iovas, just free any remaining pagetable pages.
-	 */
+	del_timer_sync(&cookie->fq_timer);
+	/* The IOVAs will be torn down separately, so just free our queued pages */
 	for_each_possible_cpu(cpu) {
-		struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
+		struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
 
 		fq_ring_for_each(idx, fq)
 			put_pages_list(&fq->entries[idx].freelist);
 	}
 
-	free_percpu(iovad->fq);
-
-	iovad->fq = NULL;
-	iovad->fq_domain = NULL;
+	free_percpu(cookie->fq);
 }
 
-int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain)
+/* sysfs updates are serialised by the mutex of the group owning @domain */
+int iommu_dma_init_fq(struct iommu_domain *domain)
 {
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_fq __percpu *queue;
 	int i, cpu;
 
-	atomic64_set(&iovad->fq_flush_start_cnt, 0);
-	atomic64_set(&iovad->fq_flush_finish_cnt, 0);
+	if (cookie->fq_domain)
+		return 0;
+
+	atomic64_set(&cookie->fq_flush_start_cnt, 0);
+	atomic64_set(&cookie->fq_flush_finish_cnt, 0);
 
 	queue = alloc_percpu(struct iova_fq);
-	if (!queue)
+	if (!queue) {
+		pr_warn("iova flush queue initialization failed\n");
 		return -ENOMEM;
+	}
 
 	for_each_possible_cpu(cpu) {
 		struct iova_fq *fq = per_cpu_ptr(queue, cpu);
@@ -229,12 +265,16 @@ int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_dom
 		INIT_LIST_HEAD(&fq->entries[i].freelist);
 	}
 
-	iovad->fq_domain = fq_domain;
-	iovad->fq = queue;
-
-	timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
-	atomic_set(&iovad->fq_timer_on, 0);
+	cookie->fq = queue;
 
+	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
+	atomic_set(&cookie->fq_timer_on, 0);
+	/*
+	 * Prevent incomplete fq state being observable. Pairs with path from
+	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+	 */
+	smp_wmb();
+	WRITE_ONCE(cookie->fq_domain, domain);
 	return 0;
 }
 
@@ -320,7 +360,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
 		return;
 
 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
-		free_iova_flush_queue(&cookie->iovad);
+		iommu_dma_free_fq(cookie);
 		put_iova_domain(&cookie->iovad);
 	}
 
@@ -469,29 +509,6 @@ static bool dev_use_swiotlb(struct device *dev)
 	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
 }
 
-/* sysfs updates are serialised by the mutex of the group owning @domain */
-int iommu_dma_init_fq(struct iommu_domain *domain)
-{
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	int ret;
-
-	if (cookie->fq_domain)
-		return 0;
-
-	ret = init_iova_flush_queue(&cookie->iovad, domain);
-	if (ret) {
-		pr_warn("iova flush queue initialization failed\n");
-		return ret;
-	}
-	/*
-	 * Prevent incomplete iovad->fq being observable. Pairs with path from
-	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
-	 */
-	smp_wmb();
-	WRITE_ONCE(cookie->fq_domain, domain);
-	return 0;
-}
-
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -630,7 +647,7 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 		cookie->msi_iova -= size;
 	else if (gather && gather->queued)
-		queue_iova(iovad, iova_pfn(iovad, iova),
+		queue_iova(cookie, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad),
 				&gather->freelist);
 	else
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 6673dfa8e7c5..72ac25831584 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -61,8 +61,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
 	iovad->max32_alloc_size = iovad->dma_32bit_pfn;
-	iovad->fq_domain = NULL;
-	iovad->fq = NULL;
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 072a09c06e8a..0abd48c5e622 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -12,9 +12,6 @@
 #include
 #include
 #include
-#include
-#include
-#include
 
 /* iova structure */
 struct iova {
@@ -36,27 +33,6 @@ struct iova_rcache {
 	struct iova_cpu_rcache __percpu *cpu_rcaches;
 };
 
-/* Number of entries per Flush Queue */
-#define IOVA_FQ_SIZE	256
-
-/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
-#define IOVA_FQ_TIMEOUT	10
-
-/* Flush Queue entry for defered flushing */
-struct iova_fq_entry {
-	unsigned long iova_pfn;
-	unsigned long pages;
-	struct list_head freelist;
-	u64 counter; /* Flush counter when this entrie was added */
-};
-
-/* Per-CPU Flush Queue structure */
-struct iova_fq {
-	struct iova_fq_entry entries[IOVA_FQ_SIZE];
-	unsigned head, tail;
-	spinlock_t lock;
-};
-
 /* holds all the iova translations for a domain */
 struct iova_domain {
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
@@ -67,23 +43,9 @@ struct iova_domain {
 	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
 	unsigned long	max32_alloc_size; /* Size of last failed allocation */
-	struct iova_fq __percpu *fq;	/* Flush Queue */
-
-	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
-						   have been started */
-
-	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
-						   have been finished */
-
 	struct iova	anchor;		/* rbtree lookup anchor */
+
 	struct iova_rcache	rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
-
-	struct iommu_domain *fq_domain;
-
-	struct timer_list fq_timer;		/* Timer to regularily empty the
-						   flush-queues */
-	atomic_t fq_timer_on;			/* 1 when timer is active, 0
-						   when not */
 	struct hlist_node	cpuhp_dead;
 };
 
@@ -133,16 +95,12 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
 	bool size_aligned);
 void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
-void queue_iova(struct iova_domain *iovad,
-		unsigned long pfn, unsigned long pages,
-		struct list_head *freelist);
 unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	unsigned long start_pfn);
-int init_iova_flush_queue(struct iova_domain *iovad, struct iommu_domain *fq_domain);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
 #else
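To illustrate the publication ordering that the comments in iommu_dma_init_fq()
and queue_iova() describe, here is a rough user-space analogy using C11
atomics. The struct and function names are invented for the example, and the
release/acquire pair merely stands in for the kernel's smp_wmb()/WRITE_ONCE()
and smp_mb() combination rather than reproducing it exactly.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the two cookie fields involved in publication. */
struct fq_cookie {
	void *fq;			/* the per-CPU queues in the real code */
	_Atomic(void *) fq_domain;	/* non-NULL once the queues are usable */
};

/*
 * Writer side, as in iommu_dma_init_fq(): initialise all flush-queue state
 * first, then publish it by storing fq_domain last. The release store plays
 * the role of the kernel's smp_wmb() + WRITE_ONCE() pair.
 */
static void publish_fq(struct fq_cookie *cookie, void *domain, void *queues)
{
	cookie->fq = queues;
	atomic_store_explicit(&cookie->fq_domain, domain, memory_order_release);
}

/*
 * Reader side, as on the unmap path that ends up in queue_iova(): only touch
 * cookie->fq after observing a non-NULL fq_domain. The acquire load stands in
 * for the full barrier queue_iova() issues before dereferencing cookie->fq.
 */
static bool fq_published(struct fq_cookie *cookie)
{
	return atomic_load_explicit(&cookie->fq_domain,
				    memory_order_acquire) != NULL;
}

If fq_published() returns true in this model, the reader is guaranteed to see
the fully written queue state, which is the property the comment block added
to iommu_dma_init_fq() documents.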