From patchwork Thu Sep 26 14:29:20 2019
X-Patchwork-Submitter: Jan Beulich
X-Patchwork-Id: 11162857
From: Jan Beulich
To: "xen-devel@lists.xenproject.org"
Cc: Andrew Cooper, Suravee Suthikulpanit, Paul Durrant
Date: Thu, 26 Sep 2019 16:29:20 +0200
Subject: [Xen-devel] [PATCH v7 2/3] AMD/IOMMU: allow callers to request
 allocate_buffer() to skip its memset()

The command ring buffer doesn't need clearing up front in any event.
Subsequently we'll also want to avoid clearing the device tables. While
adjusting the function signatures, replace undue use of fixed-width
types, and extend this to deallocate_buffer() as well.

Signed-off-by: Jan Beulich
---
v7: New.
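
[Editor's sketch, not part of the patch: a minimal, self-contained
user-space illustration of the "optional memset()" allocator pattern the
patch introduces. allocate_buffer() here only mimics the shape of the Xen
function; PAGE_SIZE, aligned_alloc() and the call sites in main() are
stand-ins for __alloc_amd_iommu_tables() and the page-order machinery,
not the Xen implementation.]

/* Illustration only: allocate a page-aligned buffer and zero it only
 * when the caller asks for that, mirroring the new 'clear' parameter. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL  /* stand-in for the Xen page size */

static void *allocate_buffer(unsigned long alloc_size, const char *name,
                             bool clear)
{
    /* aligned_alloc() requires alloc_size to be a multiple of PAGE_SIZE;
     * it stands in for the order-based page allocator used by Xen. */
    void *buffer = aligned_alloc(PAGE_SIZE, alloc_size);

    if ( !buffer )
    {
        printf("Error allocating %s\n", name);
        return NULL;
    }

    /* Skip the memset() when the caller doesn't need a zeroed buffer. */
    if ( clear )
        memset(buffer, 0, alloc_size);

    return buffer;
}

int main(void)
{
    /* A command-ring-like buffer is fully governed by its head/tail
     * pointers, so clearing it up front would be wasted work. */
    void *cmd_ring = allocate_buffer(8 * PAGE_SIZE, "Command Buffer", false);

    /* A device-table-like buffer is consumed as-is, so it must start
     * out zeroed. */
    void *dev_table = allocate_buffer(16 * PAGE_SIZE, "Device Table", true);

    free(cmd_ring);
    free(dev_table);
    return 0;
}
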
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -994,12 +994,12 @@ static unsigned int __init dt_alloc_size
                      IOMMU_DEV_TABLE_ENTRY_SIZE);
 }
 
-static void __init deallocate_buffer(void *buf, uint32_t sz)
+static void __init deallocate_buffer(void *buf, unsigned long sz)
 {
-    int order = 0;
     if ( buf )
     {
-        order = get_order_from_bytes(sz);
+        unsigned int order = get_order_from_bytes(sz);
+
         __free_amd_iommu_tables(buf, order);
     }
 }
@@ -1012,10 +1012,11 @@ static void __init deallocate_ring_buffe
     ring_buf->tail = 0;
 }
 
-static void * __init allocate_buffer(uint32_t alloc_size, const char *name)
+static void *__init allocate_buffer(unsigned long alloc_size,
+                                    const char *name, bool clear)
 {
-    void * buffer;
-    int order = get_order_from_bytes(alloc_size);
+    void *buffer;
+    unsigned int order = get_order_from_bytes(alloc_size);
 
     buffer = __alloc_amd_iommu_tables(order);
 
@@ -1025,13 +1026,16 @@ static void * __init allocate_buffer(uin
         return NULL;
     }
 
-    memset(buffer, 0, PAGE_SIZE * (1UL << order));
+    if ( clear )
+        memset(buffer, 0, PAGE_SIZE << order);
+
     return buffer;
 }
 
-static void * __init allocate_ring_buffer(struct ring_buffer *ring_buf,
-                                          uint32_t entry_size,
-                                          uint64_t entries, const char *name)
+static void *__init allocate_ring_buffer(struct ring_buffer *ring_buf,
+                                         unsigned int entry_size,
+                                         unsigned long entries,
+                                         const char *name, bool clear)
 {
     ring_buf->head = 0;
     ring_buf->tail = 0;
@@ -1041,7 +1045,8 @@ static void * __init allocate_ring_buffe
     ring_buf->alloc_size = PAGE_SIZE << get_order_from_bytes(entries *
                                                              entry_size);
     ring_buf->entries = ring_buf->alloc_size / entry_size;
-    ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name);
+    ring_buf->buffer = allocate_buffer(ring_buf->alloc_size, name, clear);
+
     return ring_buf->buffer;
 }
 
@@ -1050,21 +1055,23 @@ static void * __init allocate_cmd_buffer
     /* allocate 'command buffer' in power of 2 increments of 4K */
     return allocate_ring_buffer(&iommu->cmd_buffer, sizeof(cmd_entry_t),
                                 IOMMU_CMD_BUFFER_DEFAULT_ENTRIES,
-                                "Command Buffer");
+                                "Command Buffer", false);
 }
 
 static void * __init allocate_event_log(struct amd_iommu *iommu)
 {
     /* allocate 'event log' in power of 2 increments of 4K */
     return allocate_ring_buffer(&iommu->event_log, sizeof(event_entry_t),
-                                IOMMU_EVENT_LOG_DEFAULT_ENTRIES, "Event Log");
+                                IOMMU_EVENT_LOG_DEFAULT_ENTRIES, "Event Log",
+                                true);
 }
 
 static void * __init allocate_ppr_log(struct amd_iommu *iommu)
 {
     /* allocate 'ppr log' in power of 2 increments of 4K */
     return allocate_ring_buffer(&iommu->ppr_log, sizeof(ppr_entry_t),
-                                IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log");
+                                IOMMU_PPR_LOG_DEFAULT_ENTRIES, "PPR Log",
+                                true);
 }
 
 /*
@@ -1257,7 +1264,7 @@ static int __init amd_iommu_setup_device
     {
         /* allocate 'device table' on a 4K boundary */
         dt = IVRS_MAPPINGS_DEVTAB(ivrs_mappings) =
-            allocate_buffer(dt_alloc_size(), "Device Table");
+            allocate_buffer(dt_alloc_size(), "Device Table", true);
     }
     if ( !dt )
         return -ENOMEM;