From patchwork Wed Nov 2 20:52:41 2016
X-Patchwork-Submitter: Andrew Jones <drjones@redhat.com>
X-Patchwork-Id: 9409835
From: Andrew Jones <drjones@redhat.com>
To: kvm@vger.kernel.org
Cc: pbonzini@redhat.com, lvivier@redhat.com, thuth@redhat.com
Subject: [kvm-unit-tests PATCH v2 1/6] lib/x86/vm: collection of improvements
Date: Wed, 2 Nov 2016 21:52:41 +0100
Message-Id: <1478119966-13252-2-git-send-email-drjones@redhat.com>
In-Reply-To: <1478119966-13252-1-git-send-email-drjones@redhat.com>
References: <1478119966-13252-1-git-send-email-drjones@redhat.com>
X-Mailing-List: kvm@vger.kernel.org

Ensure addresses and sizes are page aligned (adding a PAGE_ALIGN macro),
protect the page allocator and the vmalloc region with spinlocks, and
just return if NULL is passed to vfree().
Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 lib/x86/asm/page.h |  2 ++
 lib/x86/vm.c       | 44 +++++++++++++++++++++++++++++++++++++-------
 2 files changed, 39 insertions(+), 7 deletions(-)

diff --git a/lib/x86/asm/page.h b/lib/x86/asm/page.h
index 5044a49ab0cc..dd999304f1f0 100644
--- a/lib/x86/asm/page.h
+++ b/lib/x86/asm/page.h
@@ -16,6 +16,8 @@
 
 #ifndef __ASSEMBLY__
 
+#define PAGE_ALIGN(addr)	ALIGN(addr, PAGE_SIZE)
+
 #ifdef __x86_64__
 #define LARGE_PAGE_SIZE	(512 * PAGE_SIZE)
 #else
diff --git a/lib/x86/vm.c b/lib/x86/vm.c
index f7e778b3779c..baea17e7f475 100644
--- a/lib/x86/vm.c
+++ b/lib/x86/vm.c
@@ -1,37 +1,54 @@
 #include "fwcfg.h"
 #include "vm.h"
 #include "libcflat.h"
+#include "asm/spinlock.h"
 
+static struct spinlock heap_lock;
+static struct spinlock vm_lock;
 static void *free = 0;
 static void *vfree_top = 0;
 
 static void free_memory(void *mem, unsigned long size)
 {
+	assert(!((unsigned long)mem & ~PAGE_MASK));
+
+	spin_lock(&heap_lock);
+
+	free = NULL;
+
 	while (size >= PAGE_SIZE) {
 		*(void **)mem = free;
 		free = mem;
 		mem += PAGE_SIZE;
 		size -= PAGE_SIZE;
 	}
+
+	spin_unlock(&heap_lock);
 }
 
 void *alloc_page()
 {
 	void *p;
 
+	spin_lock(&heap_lock);
+
 	if (!free)
-		return 0;
+		return NULL;
 
 	p = free;
 	free = *(void **)free;
 
+	spin_unlock(&heap_lock);
+
 	return p;
 }
 
 void free_page(void *page)
 {
+	spin_lock(&heap_lock);
 	*(void **)page = free;
 	free = page;
+	spin_unlock(&heap_lock);
 }
 
 extern char edata;
@@ -162,11 +179,13 @@ void *vmalloc(unsigned long size)
 	void *mem, *p;
 	unsigned pages;
 
-	size += sizeof(unsigned long);
+	size = PAGE_ALIGN(size + sizeof(unsigned long));
 
-	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+	spin_lock(&vm_lock);
 	vfree_top -= size;
 	mem = p = vfree_top;
+	spin_unlock(&vm_lock);
+
 	pages = size / PAGE_SIZE;
 	while (pages--) {
 		install_page(phys_to_virt(read_cr3()), virt_to_phys(alloc_page()), p);
@@ -179,12 +198,18 @@ void *vmalloc(unsigned long size)
 
 uint64_t virt_to_phys_cr3(void *mem)
 {
-	return (*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK) + ((ulong)mem & (PAGE_SIZE - 1));
+	return (*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK) + ((ulong)mem & ~PAGE_MASK);
 }
 
 void vfree(void *mem)
 {
-	unsigned long size = ((unsigned long *)mem)[-1];
+	unsigned long size;
+
+	if (mem == NULL)
+		return;
+
+	mem -= sizeof(unsigned long);
+	size = *(unsigned long *)mem;
 
 	while (size) {
 		free_page(phys_to_virt(*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK));
@@ -198,11 +223,14 @@ void *vmap(unsigned long long phys, unsigned long size)
 	void *mem, *p;
 	unsigned pages;
 
-	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
-	vfree_top -= size;
+	size = PAGE_ALIGN(size);
 	phys &= ~(unsigned long long)(PAGE_SIZE - 1);
 
+	spin_lock(&vm_lock);
+	vfree_top -= size;
 	mem = p = vfree_top;
+	spin_unlock(&vm_lock);
+
 	pages = size / PAGE_SIZE;
 	while (pages--) {
 		install_page(phys_to_virt(read_cr3()), phys, p);
@@ -214,7 +242,9 @@
 
 void *alloc_vpages(ulong nr)
 {
+	spin_lock(&vm_lock);
 	vfree_top -= PAGE_SIZE * nr;
+	spin_unlock(&vm_lock);
 	return vfree_top;
 }
 
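
Below is a minimal, hypothetical usage sketch (not part of the patch) of the
behavior the changes above provide, assuming the usual kvm-unit-tests harness
helpers setup_vm(), report() and report_summary() are available to the test:

/* Hypothetical test body, for illustration only. */
#include "libcflat.h"
#include "vm.h"

int main(void)
{
	void *p;

	setup_vm();		/* set up paging and seed the page allocator */

	vfree(NULL);		/* with this patch a NULL argument is simply ignored */

	p = alloc_page();	/* free-list updates are now serialized by heap_lock */
	report("alloc_page returns a page aligned address",
	       p && !((unsigned long)p & ~PAGE_MASK));
	free_page(p);

	p = vmalloc(123);	/* the size is rounded up internally with PAGE_ALIGN() */
	report("vmalloc succeeds for a small size", p != NULL);
	vfree(p);

	return report_summary();
}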