From patchwork Wed Feb 22 08:21:32 2017
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Alexey Kardashevskiy <aik@ozlabs.ru>
X-Patchwork-Id: 9586377
From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: linuxppc-dev@lists.ozlabs.org
Cc: Alexey Kardashevskiy, Alex Williamson, David Gibson, Paul Mackerras,
    kvm-ppc@vger.kernel.org, kvm@vger.kernel.org
Subject: [PATCH kernel v5 09/10] KVM: PPC: iommu: Unify TCE checking
Date: Wed, 22 Feb 2017 19:21:32 +1100
Message-Id: <20170222082133.10277-10-aik@ozlabs.ru>
X-Mailer: git-send-email 2.11.0
In-Reply-To: <20170222082133.10277-1-aik@ozlabs.ru>
References: <20170222082133.10277-1-aik@ozlabs.ru>
X-Mailing-List: kvm@vger.kernel.org

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
 arch/powerpc/include/asm/iommu.h    | 20 +++++++++++++++-----
 arch/powerpc/include/asm/kvm_ppc.h  |  6 ++++--
 arch/powerpc/kernel/iommu.c         | 37 +++++++++++++------------------------
 arch/powerpc/kvm/book3s_64_vio_hv.c | 31 +++++++++++------------------------
 4 files changed, 39 insertions(+), 55 deletions(-)

diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 82e77ebf85f4..d4396d1d89df 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -296,11 +296,21 @@ static inline void iommu_restore(void)
 #endif
 
 /* The API to support IOMMU operations for VFIO */
-extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
-		unsigned long ioba, unsigned long tce_value,
-		unsigned long npages);
-extern int iommu_tce_put_param_check(struct iommu_table *tbl,
-		unsigned long ioba, unsigned long tce);
+extern int iommu_tce_check_ioba(unsigned long page_shift,
+		unsigned long offset, unsigned long size,
+		unsigned long ioba, unsigned long npages);
+extern int iommu_tce_check_tce(unsigned long page_shift,
+		unsigned long tce);
+
+#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
+		(iommu_tce_check_ioba((tbl)->it_page_shift, \
+				(tbl)->it_offset, (tbl)->it_size, \
+				(ioba), (npages)) || (tce_value))
+#define iommu_tce_put_param_check(tbl, ioba, tce) \
+		(iommu_tce_check_ioba((tbl)->it_page_shift, \
+				(tbl)->it_offset, (tbl)->it_size, \
+				(ioba), 1) || \
+		iommu_tce_check_tce((tbl)->it_page_shift, (tce)))
 
 extern void iommu_flush_tce(struct iommu_table *tbl);
 extern int iommu_take_ownership(struct iommu_table *tbl);
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 37bc9e7e90ba..e04b7fb8ccaa 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -168,8 +168,10 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 		struct kvm_create_spapr_tce_64 *args);
 extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
 		struct kvm *kvm, unsigned long liobn);
-extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
-		unsigned long ioba, unsigned long npages);
+#define kvmppc_ioba_validate(stt, ioba, npages) \
+		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
+				(stt)->size, (ioba), (npages)) ? \
+				H_PARAMETER : H_SUCCESS)
 extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
 		unsigned long tce);
 extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d02b8d22fb50..a419bd3997ba 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -960,47 +960,36 @@ void iommu_flush_tce(struct iommu_table *tbl)
 }
 EXPORT_SYMBOL_GPL(iommu_flush_tce);
 
-int iommu_tce_clear_param_check(struct iommu_table *tbl,
-		unsigned long ioba, unsigned long tce_value,
-		unsigned long npages)
+int iommu_tce_check_ioba(unsigned long page_shift,
+		unsigned long offset, unsigned long size,
+		unsigned long ioba, unsigned long npages)
 {
-	/* tbl->it_ops->clear() does not support any value but 0 */
-	if (tce_value)
-		return -EINVAL;
+	unsigned long mask = (1 << page_shift) - 1;
 
-	if (ioba & ~IOMMU_PAGE_MASK(tbl))
+	if (ioba & mask)
 		return -EINVAL;
 
-	ioba >>= tbl->it_page_shift;
-	if (ioba < tbl->it_offset)
+	ioba >>= page_shift;
+	if (ioba < offset)
 		return -EINVAL;
 
-	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
+	if ((ioba + 1) > (offset + size))
 		return -EINVAL;
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
+EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
 
-int iommu_tce_put_param_check(struct iommu_table *tbl,
-		unsigned long ioba, unsigned long tce)
+int iommu_tce_check_tce(unsigned long page_shift, unsigned long tce)
 {
-	if (tce & ~IOMMU_PAGE_MASK(tbl))
-		return -EINVAL;
-
-	if (ioba & ~IOMMU_PAGE_MASK(tbl))
-		return -EINVAL;
-
-	ioba >>= tbl->it_page_shift;
-	if (ioba < tbl->it_offset)
-		return -EINVAL;
+	unsigned long mask = (1 << page_shift) - 1;
 
-	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
+	if (tce & mask)
 		return -EINVAL;
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
+EXPORT_SYMBOL_GPL(iommu_tce_check_tce);
 
 long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 		unsigned long *hpa, enum dma_data_direction *direction)
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 0f145fc7a3a5..92d769f4eaea 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -62,27 +62,6 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
 EXPORT_SYMBOL_GPL(kvmppc_find_table);
 
 /*
- * Validates IO address.
- *
- * WARNING: This will be called in real-mode on HV KVM and virtual
- * mode on PR KVM
- */
-long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
-		unsigned long ioba, unsigned long npages)
-{
-	unsigned long mask = (1ULL << stt->page_shift) - 1;
-	unsigned long idx = ioba >> stt->page_shift;
-
-	if ((ioba & mask) || (idx < stt->offset) ||
-			(idx - stt->offset + npages > stt->size) ||
-			(idx + npages < idx))
-		return H_PARAMETER;
-
-	return H_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
-
-/*
  * Validates TCE address.
  * At the moment flags and page mask are validated.
  * As the host kernel does not access those addresses (just puts them
@@ -95,10 +74,14 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
  */
 long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
 {
-	unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
-	unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);
+	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+	enum dma_data_direction dir = iommu_tce_direction(tce);
 
-	if (tce & mask)
+	/* Allow userspace to poison TCE table */
+	if (dir == DMA_NONE)
+		return H_SUCCESS;
+
+	if (iommu_tce_check_tce(stt->page_shift, gpa))
 		return H_PARAMETER;
 
 	return H_SUCCESS;
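
For readers following along outside the kernel tree, the standalone user-space
sketch below mirrors the unified checks the patch introduces. It is an
illustration only, not kernel code: check_ioba()/check_tce(), main() and the
64K table geometry are made-up stand-ins modelled on iommu_tce_check_ioba(),
iommu_tce_check_tce() and the iommu_tce_put_param_check() macro from the hunks
above.

#include <stdio.h>

/* Mirrors iommu_tce_check_ioba() from the hunk above: the I/O bus address
 * must be page aligned and must land inside the table window. npages is
 * kept for parity with the kernel helper; as in the hunk, only the first
 * page is range-checked here. */
static int check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	(void) npages;

	if (ioba & mask)
		return -1;

	ioba >>= page_shift;
	if (ioba < offset)
		return -1;

	if ((ioba + 1) > (offset + size))
		return -1;

	return 0;
}

/* Mirrors iommu_tce_check_tce(): the address carried by the TCE must be
 * page aligned once the permission bits have been masked off. */
static int check_tce(unsigned long page_shift, unsigned long tce)
{
	unsigned long mask = (1UL << page_shift) - 1;

	return (tce & mask) ? -1 : 0;
}

int main(void)
{
	/* Hypothetical 64K-page table: entries 0..0x3fff, no offset. */
	unsigned long page_shift = 16, offset = 0, size = 0x4000;

	/* Rough equivalent of iommu_tce_put_param_check(tbl, ioba, tce):
	 * validate the ioba for a single page, then the TCE itself. */
	unsigned long ioba = 0x30000, tce = 0x7fff0000;
	int bad = check_ioba(page_shift, offset, size, ioba, 1) ||
			check_tce(page_shift, tce);

	printf("put_param_check(ioba=0x%lx, tce=0x%lx): %s\n",
			ioba, tce, bad ? "reject" : "ok");
	return 0;
}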