From patchwork Thu Sep 26 14:28:53 2019
X-Patchwork-Submitter: Jan Beulich
X-Patchwork-Id: 11162855
From: Jan Beulich
To: "xen-devel@lists.xenproject.org"
Cc: Andrew Cooper, Suravee Suthikulpanit, Paul Durrant
Date: Thu, 26 Sep 2019 16:28:53 +0200
Subject: [Xen-devel] [PATCH v7 1/3] AMD/IOMMU: allocate one device table per PCI segment

Having a single device table for all segments can't possibly be right.
(Even worse, the symbol wasn't static despite being used in just one
source file.)  Attach the device tables to their respective IVRS
mapping ones.

Signed-off-by: Jan Beulich
Reviewed-by: Paul Durrant
---
v6: New.
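
For reference, the new dt_alloc_size() helper introduced in the hunks
below rounds the device table up to a power-of-two number of 4K pages.
The following standalone sketch models that computation; it is not Xen
code: PAGE_SIZE, the 32-byte DTE size, the 0x10000-entry BDF space, and
get_order_from_bytes() are re-implemented or assumed here purely for
illustration.

/*
 * Standalone sketch -- NOT Xen code.  get_order_from_bytes() is
 * re-implemented below; PAGE_SIZE, the 32-byte DTE size and the
 * 0x10000-entry BDF space are illustrative assumptions only.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define IOMMU_DEV_TABLE_ENTRY_SIZE 32 /* one DTE is 256 bits wide */

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static unsigned int get_order_from_bytes(unsigned long size)
{
    unsigned int order = 0;

    while ( (PAGE_SIZE << order) < size )
        ++order;

    return order;
}

int main(void)
{
    unsigned long ivrs_bdf_entries = 0x10000; /* full per-segment BDF space */
    unsigned long sz = PAGE_SIZE << get_order_from_bytes(ivrs_bdf_entries *
                                                 IOMMU_DEV_TABLE_ENTRY_SIZE);

    printf("device table: %lu bytes\n", sz); /* 2097152 for these inputs */
    return 0;
}

Note, incidentally, that the helper drops the PAGE_ALIGN() the old code
wrapped around the byte count: it was redundant, since
get_order_from_bytes() already rounds up to a whole number of pages.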
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -39,7 +39,6 @@ unsigned int __read_mostly ivrs_bdf_entr
 u8 __read_mostly ivhd_type;
 static struct radix_tree_root ivrs_maps;
 LIST_HEAD_READ_MOSTLY(amd_iommu_head);
-struct table_struct device_table;
 bool_t iommuv2_enabled;
 
 static bool iommu_has_ht_flag(struct amd_iommu *iommu, u8 mask)
@@ -989,6 +988,12 @@ static void disable_iommu(struct amd_iom
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
+static unsigned int __init dt_alloc_size(void)
+{
+    return PAGE_SIZE << get_order_from_bytes(ivrs_bdf_entries *
+                                             IOMMU_DEV_TABLE_ENTRY_SIZE);
+}
+
 static void __init deallocate_buffer(void *buf, uint32_t sz)
 {
     int order = 0;
@@ -999,12 +1004,6 @@ static void __init deallocate_buffer(voi
     }
 }
 
-static void __init deallocate_device_table(struct table_struct *table)
-{
-    deallocate_buffer(table->buffer, table->alloc_size);
-    table->buffer = NULL;
-}
-
 static void __init deallocate_ring_buffer(struct ring_buffer *ring_buf)
 {
     deallocate_buffer(ring_buf->buffer, ring_buf->alloc_size);
@@ -1068,8 +1067,29 @@ static void * __init allocate_ppr_log(st
                                 IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log");
 }
 
+/*
+ * Within ivrs_mappings[] we allocate an extra array element to store
+ * - segment number,
+ * - device table.
+ */
+#define IVRS_MAPPINGS_SEG(m) (m)[ivrs_bdf_entries].dte_requestor_id
+#define IVRS_MAPPINGS_DEVTAB(m) (m)[ivrs_bdf_entries].intremap_table
+
+static void __init free_ivrs_mapping(void *ptr)
+{
+    const struct ivrs_mappings *ivrs_mappings = ptr;
+
+    if ( IVRS_MAPPINGS_DEVTAB(ivrs_mappings) )
+        deallocate_buffer(IVRS_MAPPINGS_DEVTAB(ivrs_mappings),
+                          dt_alloc_size());
+
+    xfree(ptr);
+}
+
 static int __init amd_iommu_init_one(struct amd_iommu *iommu, bool intr)
 {
+    const struct ivrs_mappings *ivrs_mappings;
+
     if ( allocate_cmd_buffer(iommu) == NULL )
         goto error_out;
 
@@ -1082,13 +1102,15 @@ static int __init amd_iommu_init_one(str
     if ( intr && !set_iommu_interrupt_handler(iommu) )
         goto error_out;
 
-    /* To make sure that device_table.buffer has been successfully allocated */
-    if ( device_table.buffer == NULL )
+    /* Make sure that the device table has been successfully allocated. */
+    ivrs_mappings = get_ivrs_mappings(iommu->seg);
+    if ( !IVRS_MAPPINGS_DEVTAB(ivrs_mappings) )
        goto error_out;
 
-    iommu->dev_table.alloc_size = device_table.alloc_size;
-    iommu->dev_table.entries = device_table.entries;
-    iommu->dev_table.buffer = device_table.buffer;
+    iommu->dev_table.alloc_size = dt_alloc_size();
+    iommu->dev_table.entries = iommu->dev_table.alloc_size /
+                               IOMMU_DEV_TABLE_ENTRY_SIZE;
+    iommu->dev_table.buffer = IVRS_MAPPINGS_DEVTAB(ivrs_mappings);
 
     enable_iommu(iommu);
     printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus );
@@ -1135,11 +1157,8 @@ static void __init amd_iommu_init_cleanu
         xfree(iommu);
     }
 
-    /* free device table */
-    deallocate_device_table(&device_table);
-
-    /* free ivrs_mappings[] */
-    radix_tree_destroy(&ivrs_maps, xfree);
+    /* Free ivrs_mappings[] and their device tables. */
+    radix_tree_destroy(&ivrs_maps, free_ivrs_mapping);
 
     iommu_enabled = 0;
     iommu_hwdom_passthrough = false;
@@ -1147,12 +1166,6 @@ static void __init amd_iommu_init_cleanu
     iommuv2_enabled = 0;
 }
 
-/*
- * We allocate an extra array element to store the segment number
- * (and in the future perhaps other global information).
- */
-#define IVRS_MAPPINGS_SEG(m) m[ivrs_bdf_entries].dte_requestor_id
-
 struct ivrs_mappings *get_ivrs_mappings(u16 seg)
 {
     return radix_tree_lookup(&ivrs_maps, seg);
@@ -1235,24 +1248,18 @@ static int __init alloc_ivrs_mappings(u1
 static int __init amd_iommu_setup_device_table(
     u16 seg, struct ivrs_mappings *ivrs_mappings)
 {
+    struct amd_iommu_dte *dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings);
     unsigned int bdf;
 
     BUG_ON( (ivrs_bdf_entries == 0) );
 
-    if ( !device_table.buffer )
+    if ( !dt )
     {
         /* allocate 'device table' on a 4K boundary */
-        device_table.alloc_size = PAGE_SIZE <<
-                                  get_order_from_bytes(
-                                  PAGE_ALIGN(ivrs_bdf_entries *
-                                             IOMMU_DEV_TABLE_ENTRY_SIZE));
-        device_table.entries = device_table.alloc_size /
-                               IOMMU_DEV_TABLE_ENTRY_SIZE;
-
-        device_table.buffer = allocate_buffer(device_table.alloc_size,
-                                              "Device Table");
+        dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings) =
+            allocate_buffer(dt_alloc_size(), "Device Table");
     }
-    if ( !device_table.buffer )
+    if ( !dt )
         return -ENOMEM;
 
     /* Add device table entries */
@@ -1260,12 +1267,10 @@ static int __init amd_iommu_setup_device
     {
         if ( ivrs_mappings[bdf].valid )
         {
-            void *dte;
             const struct pci_dev *pdev = NULL;
 
             /* add device table entry */
-            dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE);
-            iommu_dte_add_device_entry(dte, &ivrs_mappings[bdf]);
+            iommu_dte_add_device_entry(&dt[bdf], &ivrs_mappings[bdf]);
 
             if ( iommu_intremap &&
                  ivrs_mappings[bdf].dte_requestor_id == bdf &&
@@ -1308,7 +1313,7 @@ static int __init amd_iommu_setup_device
             }
 
             amd_iommu_set_intremap_table(
-                dte, ivrs_mappings[bdf].intremap_table,
+                &dt[bdf], ivrs_mappings[bdf].intremap_table,
                 ivrs_mappings[bdf].iommu, iommu_intremap);
         }
     }
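
The "extra array element" trick above is easier to see in isolation.
Below is a compilable toy model, not Xen code: the two-field struct and
the tiny entry count are stand-ins, and only the idiom of parking
per-segment state (segment number, device table pointer) in the
trailing ivrs_mappings[] element is being illustrated.

/*
 * Toy model -- NOT Xen code.  The real struct ivrs_mappings has many
 * more fields; the macros reuse two of them in element [N] to carry
 * per-segment data, with N == ivrs_bdf_entries.
 */
#include <stdio.h>
#include <stdlib.h>

struct ivrs_mappings {
    unsigned int dte_requestor_id; /* holds the segment number in [N] */
    void *intremap_table;          /* holds the device table in [N] */
};

static const unsigned int ivrs_bdf_entries = 4; /* tiny, demo only */

#define IVRS_MAPPINGS_SEG(m)    ((m)[ivrs_bdf_entries].dte_requestor_id)
#define IVRS_MAPPINGS_DEVTAB(m) ((m)[ivrs_bdf_entries].intremap_table)

int main(void)
{
    /* One extra element beyond the per-BDF ones, zero-initialised. */
    struct ivrs_mappings *m = calloc(ivrs_bdf_entries + 1, sizeof(*m));

    if ( !m )
        return 1;

    IVRS_MAPPINGS_SEG(m) = 0;                /* PCI segment 0 */
    IVRS_MAPPINGS_DEVTAB(m) = calloc(1, 64); /* stand-in device table */

    printf("segment %u, device table at %p\n",
           IVRS_MAPPINGS_SEG(m), IVRS_MAPPINGS_DEVTAB(m));

    free(IVRS_MAPPINGS_DEVTAB(m));
    free(m);
    return 0;
}

This keeps one device table per PCI segment alive exactly as long as
that segment's ivrs_mappings[], which is why the radix tree destructor
above can free both in one go.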
From patchwork Thu Sep 26 14:29:20 2019
X-Patchwork-Submitter: Jan Beulich
X-Patchwork-Id: 11162857
From: Jan Beulich
To: "xen-devel@lists.xenproject.org"
Cc: Andrew Cooper, Suravee Suthikulpanit, Paul Durrant
Date: Thu, 26 Sep 2019 16:29:20 +0200
Subject: [Xen-devel] [PATCH v7 2/3] AMD/IOMMU: allow callers to request allocate_buffer() to skip its memset()

The command ring buffer doesn't need clearing up front in any event.
Subsequently we'll also want to avoid clearing the device tables.

While playing with the functions' signatures, replace undue use of
fixed-width types at the same time, and extend this to
deallocate_buffer() as well.

Signed-off-by: Jan Beulich
---
v7: New.

--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -994,12 +994,12 @@ static unsigned int __init dt_alloc_size
                                              IOMMU_DEV_TABLE_ENTRY_SIZE);
 }
 
-static void __init deallocate_buffer(void *buf, uint32_t sz)
+static void __init deallocate_buffer(void *buf, unsigned long sz)
 {
-    int order = 0;
     if ( buf )
     {
-        order = get_order_from_bytes(sz);
+        unsigned int order = get_order_from_bytes(sz);
+
         __free_amd_iommu_tables(buf, order);
     }
 }
@@ -1012,10 +1012,11 @@ static void __init deallocate_ring_buffe
     ring_buf->tail = 0;
 }
 
-static void * __init allocate_buffer(uint32_t alloc_size, const char *name)
+static void *__init allocate_buffer(unsigned long alloc_size,
+                                    const char *name, bool clear)
 {
-    void * buffer;
-    int order = get_order_from_bytes(alloc_size);
+    void *buffer;
+    unsigned int order = get_order_from_bytes(alloc_size);
 
     buffer = __alloc_amd_iommu_tables(order);
 
@@ -1025,13 +1026,16 @@ static void * __init allocate_buffer(uin
         return NULL;
     }
 
-    memset(buffer, 0, PAGE_SIZE * (1UL << order));
+    if ( clear )
+        memset(buffer, 0, PAGE_SIZE << order);
+
     return buffer;
 }
 
-static void * __init allocate_ring_buffer(struct ring_buffer *ring_buf,
-                                          uint32_t entry_size,
-                                          uint64_t entries, const char *name)
+static void *__init allocate_ring_buffer(struct ring_buffer *ring_buf,
+                                         unsigned int entry_size,
+                                         unsigned long entries,
+                                         const char *name, bool clear)
 {
     ring_buf->head = 0;
     ring_buf->tail = 0;
@@ -1041,7 +1045,8 @@ static void * __init allocate_ring_buffe
     ring_buf->alloc_size = PAGE_SIZE <<
                            get_order_from_bytes(entries * entry_size);
     ring_buf->entries = ring_buf->alloc_size / entry_size;
-    ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name);
+    ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name, clear);
+
     return ring_buf->buffer;
 }
 
@@ -1050,21 +1055,23 @@ static void * __init allocate_cmd_buffer
     /* allocate 'command buffer' in power of 2 increments of 4K */
     return allocate_ring_buffer(&iommu->cmd_buffer, sizeof(cmd_entry_t),
                                 IOMMU_CMD_BUFFER_DEFAULT_ENTRIES,
-                                "Command Buffer");
+                                "Command Buffer", false);
 }
 
 static void * __init allocate_event_log(struct amd_iommu *iommu)
 {
     /* allocate 'event log' in power of 2 increments of 4K */
     return allocate_ring_buffer(&iommu->event_log, sizeof(event_entry_t),
-                                IOMMU_EVENT_LOG_DEFAULT_ENTRIES, "Event Log");
+                                IOMMU_EVENT_LOG_DEFAULT_ENTRIES, "Event Log",
+                                true);
 }
 
 static void * __init allocate_ppr_log(struct amd_iommu *iommu)
 {
     /* allocate 'ppr log' in power of 2 increments of 4K */
     return allocate_ring_buffer(&iommu->ppr_log, sizeof(ppr_entry_t),
-                                IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log");
Log"); + IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log", + true); } /* @@ -1257,7 +1264,7 @@ static int __init amd_iommu_setup_device { /* allocate 'device table' on a 4K boundary */ dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings) = - allocate_buffer(dt_alloc_size(), "Device Table"); + allocate_buffer(dt_alloc_size(), "Device Table", true); } if ( !dt ) return -ENOMEM; From patchwork Thu Sep 26 14:29:53 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Jan Beulich X-Patchwork-Id: 11162859 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 6885814E5 for ; Thu, 26 Sep 2019 14:31:22 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 4DC6E21A4A for ; Thu, 26 Sep 2019 14:31:22 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 4DC6E21A4A Authentication-Results: mail.kernel.org; dmarc=none (p=none dis=none) header.from=suse.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iDUmI-0008VI-Nt; Thu, 26 Sep 2019 14:29:54 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iDUmH-0008V2-TZ for xen-devel@lists.xenproject.org; Thu, 26 Sep 2019 14:29:53 +0000 X-Inumbo-ID: 1a399324-e06a-11e9-b588-bc764e2007e4 Received: from mx1.suse.de (unknown [195.135.220.15]) by localhost (Halon) with ESMTPS id 1a399324-e06a-11e9-b588-bc764e2007e4; Thu, 26 Sep 2019 14:29:52 +0000 (UTC) X-Virus-Scanned: by amavisd-new at test-mx.suse.de Received: from relay2.suse.de (unknown [195.135.220.254]) by mx1.suse.de (Postfix) with ESMTP id 03F90AFC3; Thu, 26 Sep 2019 14:29:52 +0000 (UTC) From: Jan Beulich To: "xen-devel@lists.xenproject.org" References: Message-ID: Date: Thu, 26 Sep 2019 16:29:53 +0200 User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:60.0) Gecko/20100101 Thunderbird/60.9.0 MIME-Version: 1.0 In-Reply-To: Content-Language: en-US Subject: [Xen-devel] [PATCH v7 3/3] AMD/IOMMU: pre-fill all DTEs right after table allocation X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Andrew Cooper , Suravee Suthikulpanit , Paul Durrant Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" Make sure we don't leave any DTEs unexpected requests through which would be passed through untranslated. Set V and IV right away (with all other fields left as zero), relying on the V and/or IV bits getting cleared only by amd_iommu_set_root_page_table() and amd_iommu_set_intremap_table() under special pass-through circumstances. Switch back to initial settings in amd_iommu_disable_domain_device(). Take the liberty and also make the latter function static, constifying its first parameter at the same time, at this occasion. Signed-off-by: Jan Beulich Reviewed-by: Paul Durrant Reviewed-by: Andrew Cooper --- v7: Avoid writing the DT twice during initial allocation. v6: New. 
From patchwork Thu Sep 26 14:29:53 2019
X-Patchwork-Submitter: Jan Beulich
X-Patchwork-Id: 11162859
From: Jan Beulich
To: "xen-devel@lists.xenproject.org"
Cc: Andrew Cooper, Suravee Suthikulpanit, Paul Durrant
Date: Thu, 26 Sep 2019 16:29:53 +0200
Subject: [Xen-devel] [PATCH v7 3/3] AMD/IOMMU: pre-fill all DTEs right after table allocation

Make sure we don't leave any DTEs through which unexpected requests
would be passed untranslated.  Set V and IV right away (with all other
fields left as zero), relying on the V and/or IV bits getting cleared
only by amd_iommu_set_root_page_table() and
amd_iommu_set_intremap_table() under special pass-through
circumstances.  Switch back to the initial settings in
amd_iommu_disable_domain_device().

Take the liberty and also make the latter function static, constifying
its first parameter at this occasion.

Signed-off-by: Jan Beulich
Reviewed-by: Paul Durrant
Reviewed-by: Andrew Cooper
---
v7: Avoid writing the DT twice during initial allocation.
v6: New.

--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -1262,12 +1262,28 @@ static int __init amd_iommu_setup_device
 
     if ( !dt )
     {
+        unsigned int size = dt_alloc_size();
+
         /* allocate 'device table' on a 4K boundary */
         dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings) =
-            allocate_buffer(dt_alloc_size(), "Device Table", true);
+            allocate_buffer(size, "Device Table", false);
+        if ( !dt )
+            return -ENOMEM;
+
+        /*
+         * Prefill every DTE such that all kinds of requests will get aborted.
+         * Besides the two bits set to true below this builds upon
+         * IOMMU_DEV_TABLE_SYS_MGT_DMA_ABORTED,
+         * IOMMU_DEV_TABLE_IO_CONTROL_ABORTED, as well as
+         * IOMMU_DEV_TABLE_INT_CONTROL_ABORTED all being zero, and us also
+         * wanting at least TV, GV, I, and EX set to false.
+         */
+        for ( bdf = 0, size /= sizeof(*dt); bdf < size; ++bdf )
+            dt[bdf] = (struct amd_iommu_dte){
+                          .v = true,
+                          .iv = true,
+                      };
     }
-    if ( !dt )
-        return -ENOMEM;
 
     /* Add device table entries */
     for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -267,9 +267,9 @@ static void __hwdom_init amd_iommu_hwdom
     setup_hwdom_pci_devices(d, amd_iommu_add_device);
 }
 
-void amd_iommu_disable_domain_device(struct domain *domain,
-                                     struct amd_iommu *iommu,
-                                     u8 devfn, struct pci_dev *pdev)
+static void amd_iommu_disable_domain_device(const struct domain *domain,
+                                            struct amd_iommu *iommu,
+                                            uint8_t devfn, struct pci_dev *pdev)
 {
     struct amd_iommu_dte *table, *dte;
     unsigned long flags;
@@ -284,9 +284,21 @@ void amd_iommu_disable_domain_device(str
     spin_lock_irqsave(&iommu->lock, flags);
     if ( dte->tv || dte->v )
     {
+        /* See the comment in amd_iommu_setup_device_table(). */
+        dte->int_ctl = IOMMU_DEV_TABLE_INT_CONTROL_ABORTED;
+        smp_wmb();
+        dte->iv = true;
         dte->tv = false;
-        dte->v = false;
+        dte->gv = false;
         dte->i = false;
+        dte->ex = false;
+        dte->sa = false;
+        dte->se = false;
+        dte->sd = false;
+        dte->sys_mgt = IOMMU_DEV_TABLE_SYS_MGT_DMA_ABORTED;
+        dte->ioctl = IOMMU_DEV_TABLE_IO_CONTROL_ABORTED;
+        smp_wmb();
+        dte->v = true;
 
         amd_iommu_flush_device(iommu, req_id);
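
To close, a toy model of the pre-fill pattern this patch introduces.
It is not Xen code: the two-field struct below is a drastic
simplification of the real 256-bit DTE, and only the compound-literal
loop is what is being illustrated. In the real disable path above, the
two smp_wmb() barriers additionally order the field updates so the
IOMMU never observes a half-updated entry with V or IV already set.

/*
 * Toy model -- NOT Xen code.  struct amd_iommu_dte is a stand-in for
 * the real 256-bit device table entry; the pattern shown is "V and IV
 * set, everything else zero", which aborts both DMA and interrupt
 * requests through as-yet-unconfigured entries.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct amd_iommu_dte {
    bool v;  /* DMA-remapping portion of the entry is valid */
    bool iv; /* interrupt-remapping portion of the entry is valid */
    /* ... dozens of further fields, all wanted zero initially ... */
};

int main(void)
{
    unsigned int bdf, entries = 8; /* tiny table, demo only */
    struct amd_iommu_dte *dt = malloc(entries * sizeof(*dt));

    if ( !dt )
        return 1;

    /*
     * Compound-literal assignment zeroes every field not named, so a
     * freshly allocated (and deliberately uncleared) table ends up in
     * the abort-everything state after a single pass.
     */
    for ( bdf = 0; bdf < entries; ++bdf )
        dt[bdf] = (struct amd_iommu_dte){ .v = true, .iv = true };

    printf("dt[3]: v=%d iv=%d\n", dt[3].v, dt[3].iv);
    free(dt);
    return 0;
}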