From patchwork Wed Sep 25 15:48:39 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160971 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 84B6E924 for ; Wed, 25 Sep 2019 15:51:00 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 5F64221D7B for ; Wed, 25 Sep 2019 15:51:00 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 5F64221D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xp-0000mg-Oc; Wed, 25 Sep 2019 15:49:33 +0000 Received: from all-amaz-eas1.inumbo.com ([34.197.232.57] helo=us1-amaz-eas2.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xo-0000mI-TW for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:32 +0000 X-Inumbo-ID: 0f5c2020-dfac-11e9-9637-12813bfff9fa Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 0f5c2020-dfac-11e9-9637-12813bfff9fa; Wed, 25 Sep 2019 15:49:30 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:28 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812624" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) 
([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:27 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:39 -0700 Message-Id: <07b30029c26f90d292d05d2e28944d9034f27fa7.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 01/18] x86: make hvm_{get/set}_param accessible X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Andrew Cooper , Tamas K Lengyel , Wei Liu , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" Currently the hvm parameters are only accessible via the HVMOP hypercalls. By exposing hvm_{get/set}_param it will be possible for VM forking to copy the parameters directly into the clone domain. Signed-off-by: Tamas K Lengyel --- xen/arch/x86/hvm/hvm.c | 169 ++++++++++++++++++++-------------- xen/include/asm-x86/hvm/hvm.h | 4 + 2 files changed, 106 insertions(+), 67 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index fdb1e17f59..667c830db5 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -4054,16 +4054,17 @@ static int hvmop_set_evtchn_upcall_vector( } static int hvm_allow_set_param(struct domain *d, - const struct xen_hvm_param *a) + uint32_t index, + uint64_t new_value) { - uint64_t value = d->arch.hvm.params[a->index]; + uint64_t value = d->arch.hvm.params[index]; int rc; rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param); if ( rc ) return rc; - switch ( a->index ) + switch ( index ) { /* The following parameters can be set by the guest. 
*/ case HVM_PARAM_CALLBACK_IRQ: @@ -4096,7 +4097,7 @@ static int hvm_allow_set_param(struct domain *d, if ( rc ) return rc; - switch ( a->index ) + switch ( index ) { /* The following parameters should only be changed once. */ case HVM_PARAM_VIRIDIAN: @@ -4106,7 +4107,7 @@ static int hvm_allow_set_param(struct domain *d, case HVM_PARAM_NR_IOREQ_SERVER_PAGES: case HVM_PARAM_ALTP2M: case HVM_PARAM_MCA_CAP: - if ( value != 0 && a->value != value ) + if ( value != 0 && new_value != value ) rc = -EEXIST; break; default: @@ -4116,13 +4117,11 @@ static int hvm_allow_set_param(struct domain *d, return rc; } -static int hvmop_set_param( +int hvmop_set_param( XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg) { - struct domain *curr_d = current->domain; struct xen_hvm_param a; struct domain *d; - struct vcpu *v; int rc; if ( copy_from_guest(&a, arg, 1) ) @@ -4142,23 +4141,42 @@ static int hvmop_set_param( if ( !is_hvm_domain(d) ) goto out; - rc = hvm_allow_set_param(d, &a); + rc = hvm_set_param(d, a.index, a.value); + + out: + rcu_unlock_domain(d); + return rc; +} + +int hvm_set_param( + struct domain *d, + uint32_t index, + uint64_t value) +{ + struct domain *curr_d = current->domain; + int rc; + struct vcpu *v; + + if ( index >= HVM_NR_PARAMS ) + return -EINVAL; + + rc = hvm_allow_set_param(d, index, value); if ( rc ) goto out; - switch ( a.index ) + switch ( index ) { case HVM_PARAM_CALLBACK_IRQ: - hvm_set_callback_via(d, a.value); + hvm_set_callback_via(d, value); hvm_latch_shinfo_size(d); break; case HVM_PARAM_TIMER_MODE: - if ( a.value > HVMPTM_one_missed_tick_pending ) + if ( value > HVMPTM_one_missed_tick_pending ) rc = -EINVAL; break; case HVM_PARAM_VIRIDIAN: - if ( (a.value & ~HVMPV_feature_mask) || - !(a.value & HVMPV_base_freq) ) + if ( (value & ~HVMPV_feature_mask) || + !(value & HVMPV_base_freq) ) rc = -EINVAL; break; case HVM_PARAM_IDENT_PT: @@ -4168,7 +4186,7 @@ static int hvmop_set_param( */ if ( !paging_mode_hap(d) || !cpu_has_vmx ) { - 
d->arch.hvm.params[a.index] = a.value; + d->arch.hvm.params[index] = value; break; } @@ -4183,7 +4201,7 @@ static int hvmop_set_param( rc = 0; domain_pause(d); - d->arch.hvm.params[a.index] = a.value; + d->arch.hvm.params[index] = value; for_each_vcpu ( d, v ) paging_update_cr3(v, false); domain_unpause(d); @@ -4192,23 +4210,23 @@ static int hvmop_set_param( break; case HVM_PARAM_DM_DOMAIN: /* The only value this should ever be set to is DOMID_SELF */ - if ( a.value != DOMID_SELF ) + if ( value != DOMID_SELF ) rc = -EINVAL; - a.value = curr_d->domain_id; + value = curr_d->domain_id; break; case HVM_PARAM_ACPI_S_STATE: rc = 0; - if ( a.value == 3 ) + if ( value == 3 ) hvm_s3_suspend(d); - else if ( a.value == 0 ) + else if ( value == 0 ) hvm_s3_resume(d); else rc = -EINVAL; break; case HVM_PARAM_ACPI_IOPORTS_LOCATION: - rc = pmtimer_change_ioport(d, a.value); + rc = pmtimer_change_ioport(d, value); break; case HVM_PARAM_MEMORY_EVENT_CR0: case HVM_PARAM_MEMORY_EVENT_CR3: @@ -4223,24 +4241,24 @@ static int hvmop_set_param( rc = xsm_hvm_param_nested(XSM_PRIV, d); if ( rc ) break; - if ( a.value > 1 ) + if ( value > 1 ) rc = -EINVAL; /* * Remove the check below once we have * shadow-on-shadow. */ - if ( !paging_mode_hap(d) && a.value ) + if ( !paging_mode_hap(d) && value ) rc = -EINVAL; - if ( a.value && + if ( value && d->arch.hvm.params[HVM_PARAM_ALTP2M] ) rc = -EINVAL; /* Set up NHVM state for any vcpus that are already up. 
*/ - if ( a.value && + if ( value && !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] ) for_each_vcpu(d, v) if ( rc == 0 ) rc = nestedhvm_vcpu_initialise(v); - if ( !a.value || rc ) + if ( !value || rc ) for_each_vcpu(d, v) nestedhvm_vcpu_destroy(v); break; @@ -4248,30 +4266,30 @@ static int hvmop_set_param( rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d); if ( rc ) break; - if ( a.value > XEN_ALTP2M_limited ) + if ( value > XEN_ALTP2M_limited ) rc = -EINVAL; - if ( a.value && + if ( value && d->arch.hvm.params[HVM_PARAM_NESTEDHVM] ) rc = -EINVAL; break; case HVM_PARAM_TRIPLE_FAULT_REASON: - if ( a.value > SHUTDOWN_MAX ) + if ( value > SHUTDOWN_MAX ) rc = -EINVAL; break; case HVM_PARAM_IOREQ_SERVER_PFN: - d->arch.hvm.ioreq_gfn.base = a.value; + d->arch.hvm.ioreq_gfn.base = value; break; case HVM_PARAM_NR_IOREQ_SERVER_PAGES: { unsigned int i; - if ( a.value == 0 || - a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 ) + if ( value == 0 || + value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 ) { rc = -EINVAL; break; } - for ( i = 0; i < a.value; i++ ) + for ( i = 0; i < value; i++ ) set_bit(i, &d->arch.hvm.ioreq_gfn.mask); break; @@ -4283,35 +4301,35 @@ static int hvmop_set_param( sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8); BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN > sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8); - if ( a.value ) - set_bit(a.index, &d->arch.hvm.ioreq_gfn.legacy_mask); + if ( value ) + set_bit(index, &d->arch.hvm.ioreq_gfn.legacy_mask); break; case HVM_PARAM_X87_FIP_WIDTH: - if ( a.value != 0 && a.value != 4 && a.value != 8 ) + if ( value != 0 && value != 4 && value != 8 ) { rc = -EINVAL; break; } - d->arch.x87_fip_width = a.value; + d->arch.x87_fip_width = value; break; case HVM_PARAM_VM86_TSS: /* Hardware would silently truncate high bits. */ - if ( a.value != (uint32_t)a.value ) + if ( value != (uint32_t)value ) { if ( d == curr_d ) domain_crash(d); rc = -EINVAL; } /* Old hvmloader binaries hardcode the size to 128 bytes. 
*/ - if ( a.value ) - a.value |= (128ULL << 32) | VM86_TSS_UPDATED; - a.index = HVM_PARAM_VM86_TSS_SIZED; + if ( value ) + value |= (128ULL << 32) | VM86_TSS_UPDATED; + index = HVM_PARAM_VM86_TSS_SIZED; break; case HVM_PARAM_VM86_TSS_SIZED: - if ( (a.value >> 32) < sizeof(struct tss32) ) + if ( (value >> 32) < sizeof(struct tss32) ) { if ( d == curr_d ) domain_crash(d); @@ -4322,34 +4340,33 @@ static int hvmop_set_param( * 256 bits interrupt redirection bitmap + 64k bits I/O bitmap * plus one padding byte). */ - if ( (a.value >> 32) > sizeof(struct tss32) + + if ( (value >> 32) > sizeof(struct tss32) + (0x100 / 8) + (0x10000 / 8) + 1 ) - a.value = (uint32_t)a.value | + value = (uint32_t)value | ((sizeof(struct tss32) + (0x100 / 8) + (0x10000 / 8) + 1) << 32); - a.value |= VM86_TSS_UPDATED; + value |= VM86_TSS_UPDATED; break; case HVM_PARAM_MCA_CAP: - rc = vmce_enable_mca_cap(d, a.value); + rc = vmce_enable_mca_cap(d, value); break; } if ( rc != 0 ) goto out; - d->arch.hvm.params[a.index] = a.value; + d->arch.hvm.params[index] = value; HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64, - a.index, a.value); + index, value); out: - rcu_unlock_domain(d); return rc; } static int hvm_allow_get_param(struct domain *d, - const struct xen_hvm_param *a) + uint32_t index) { int rc; @@ -4357,7 +4374,7 @@ static int hvm_allow_get_param(struct domain *d, if ( rc ) return rc; - switch ( a->index ) + switch ( index ) { /* The following parameters can be read by the guest. */ case HVM_PARAM_CALLBACK_IRQ: @@ -4411,42 +4428,60 @@ static int hvmop_get_param( if ( !is_hvm_domain(d) ) goto out; - rc = hvm_allow_get_param(d, &a); + rc = hvm_get_param(d, a.index, &a.value); if ( rc ) goto out; - switch ( a.index ) + rc = __copy_to_guest(arg, &a, 1) ? 
-EFAULT : 0; + + HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64, + a.index, a.value); + + out: + rcu_unlock_domain(d); + return rc; +} + +int hvm_get_param( + struct domain *d, + uint32_t index, + uint64_t *value) +{ + int rc; + + if ( index >= HVM_NR_PARAMS || !value ) + return -EINVAL; + + rc = hvm_allow_get_param(d, index); + if ( rc ) + return rc; + + switch ( index ) { case HVM_PARAM_ACPI_S_STATE: - a.value = d->arch.hvm.is_s3_suspended ? 3 : 0; + *value = d->arch.hvm.is_s3_suspended ? 3 : 0; break; case HVM_PARAM_VM86_TSS: - a.value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED]; + *value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED]; break; case HVM_PARAM_VM86_TSS_SIZED: - a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] & - ~VM86_TSS_UPDATED; + *value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] & + ~VM86_TSS_UPDATED; break; case HVM_PARAM_X87_FIP_WIDTH: - a.value = d->arch.x87_fip_width; + *value = d->arch.x87_fip_width; break; default: - a.value = d->arch.hvm.params[a.index]; + *value = d->arch.hvm.params[index]; break; } - rc = __copy_to_guest(arg, &a, 1) ? 
-EFAULT : 0; - - HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64, - a.index, a.value); + return 0; +}; - out: - rcu_unlock_domain(d); - return rc; -} /* * altp2m operations are envisioned as being used in several different diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index 4e72d0732e..7f000fa6b1 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -336,6 +336,10 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore); bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v), void *ctxt); +/* Caller must hold domain locks */ +int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value); +int hvm_set_param(struct domain *d, uint32_t index, uint64_t value); + #ifdef CONFIG_HVM #define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0) From patchwork Wed Sep 25 15:48:40 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160969 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 9FF9017D4 for ; Wed, 25 Sep 2019 15:50:59 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 8573521D7B for ; Wed, 25 Sep 2019 15:50:59 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 8573521D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xu-0000oM-7N; Wed, 25 Sep 2019 15:49:38 +0000 
Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xs-0000nw-MZ for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:36 +0000 X-Inumbo-ID: 107d8cd2-dfac-11e9-8628-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 107d8cd2-dfac-11e9-8628-bc764e2007e4; Wed, 25 Sep 2019 15:49:31 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:29 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812631" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:28 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:40 -0700 Message-Id: X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 02/18] xen/x86: Make hap_get_allocation accessible X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" During VM forking we'll copy the parent domain's parameters to the client, including the HAP shadow memory setting that is used for storing the domain's EPT. We'll copy this in the hypervisor instead of doing it during toolstack launch to allow the domain to start executing and unsharing memory before (or even completely without) the toolstack. 
Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/hap/hap.c | 3 +-- xen/include/asm-x86/hap.h | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c index 412a442b6a..83a67470cf 100644 --- a/xen/arch/x86/mm/hap/hap.c +++ b/xen/arch/x86/mm/hap/hap.c @@ -321,8 +321,7 @@ static void hap_free_p2m_page(struct domain *d, struct page_info *pg) } /* Return the size of the pool, rounded up to the nearest MB */ -static unsigned int -hap_get_allocation(struct domain *d) +unsigned int hap_get_allocation(struct domain *d) { unsigned int pg = d->arch.paging.hap.total_pages + d->arch.paging.hap.p2m_pages; diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h index b94bfb4ed0..1bf07e49fe 100644 --- a/xen/include/asm-x86/hap.h +++ b/xen/include/asm-x86/hap.h @@ -45,6 +45,7 @@ int hap_track_dirty_vram(struct domain *d, extern const struct paging_mode *hap_paging_get_mode(struct vcpu *); int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted); +unsigned int hap_get_allocation(struct domain *d); #endif /* XEN_HAP_H */ From patchwork Wed Sep 25 15:48:41 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160975 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 2372217D4 for ; Wed, 25 Sep 2019 15:51:06 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 092EE21D7B for ; Wed, 25 Sep 2019 15:51:06 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 092EE21D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: 
mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xz-0000rb-2T; Wed, 25 Sep 2019 15:49:43 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xx-0000qq-MD for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:41 +0000 X-Inumbo-ID: 104658e8-dfac-11e9-bf31-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 104658e8-dfac-11e9-bf31-bc764e2007e4; Wed, 25 Sep 2019 15:49:31 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:29 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812635" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:29 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:41 -0700 Message-Id: X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 03/18] tools/libxc: clean up memory sharing files X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Ian Jackson , Tamas K Lengyel , Wei Liu Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" No functional changes. 
Signed-off-by: Tamas K Lengyel Acked-by: Wei Liu --- tools/libxc/include/xenctrl.h | 24 ++++++++++++------------ tools/libxc/xc_memshr.c | 12 ++++++------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h index 7559e1bc69..b7c990aafd 100644 --- a/tools/libxc/include/xenctrl.h +++ b/tools/libxc/include/xenctrl.h @@ -2060,7 +2060,7 @@ int xc_monitor_emulate_each_rep(xc_interface *xch, uint32_t domain_id, * * Sharing is supported only on the x86 architecture in 64 bit mode, with * Hardware-Assisted Paging (i.e. Intel EPT, AMD NPT). Moreover, AMD NPT - * support is considered experimental. + * support is considered experimental. * Calls below return ENOSYS if not in the x86_64 architecture. * Calls below return ENODEV if the domain does not support HAP. @@ -2107,13 +2107,13 @@ int xc_memshr_control(xc_interface *xch, * EINVAL or EACCESS if the request is denied by the security policy */ -int xc_memshr_ring_enable(xc_interface *xch, +int xc_memshr_ring_enable(xc_interface *xch, uint32_t domid, uint32_t *port); /* Disable the ring for ENOMEM communication. * May fail with EINVAL if the ring was not enabled in the first place. */ -int xc_memshr_ring_disable(xc_interface *xch, +int xc_memshr_ring_disable(xc_interface *xch, uint32_t domid); /* @@ -2126,7 +2126,7 @@ int xc_memshr_ring_disable(xc_interface *xch, int xc_memshr_domain_resume(xc_interface *xch, uint32_t domid); -/* Select a page for sharing. +/* Select a page for sharing. * * A 64 bit opaque handle will be stored in handle. The hypervisor ensures * that if the page is modified, the handle will be invalidated, and future @@ -2155,7 +2155,7 @@ int xc_memshr_nominate_gref(xc_interface *xch, /* The three calls below may fail with * 10 (or -XENMEM_SHARING_OP_S_HANDLE_INVALID) if the handle passed as source - * is invalid. + * is invalid. * 9 (or -XENMEM_SHARING_OP_C_HANDLE_INVALID) if the handle passed as client is * invalid. 
*/ @@ -2168,7 +2168,7 @@ int xc_memshr_nominate_gref(xc_interface *xch, * * After successful sharing, the client handle becomes invalid. Both tuples point to the same mfn with the same handle, the one specified as - * source. Either 3-tuple can be specified later for further re-sharing. + * source. Either 3-tuple can be specified later for further re-sharing. */ int xc_memshr_share_gfns(xc_interface *xch, uint32_t source_domain, @@ -2193,7 +2193,7 @@ int xc_memshr_share_grefs(xc_interface *xch, /* Allows to add to the guest physmap of the client domain a shared frame * directly. * - * May additionally fail with + * May additionally fail with * 9 (-XENMEM_SHARING_OP_C_HANDLE_INVALID) if the physmap entry for the gfn is * not suitable. * ENOMEM if internal data structures cannot be allocated. @@ -2222,7 +2222,7 @@ int xc_memshr_range_share(xc_interface *xch, uint64_t last_gfn); /* Debug calls: return the number of pages referencing the shared frame backing - * the input argument. Should be one or greater. + * the input argument. Should be one or greater. * * May fail with EINVAL if there is no backing shared frame for the input * argument. @@ -2235,9 +2235,9 @@ int xc_memshr_debug_gref(xc_interface *xch, uint32_t domid, grant_ref_t gref); -/* Audits the share subsystem. - * - * Returns ENOSYS if not supported (may not be compiled into the hypervisor). +/* Audits the share subsystem. + * + * Returns ENOSYS if not supported (may not be compiled into the hypervisor). * * Returns the number of errors found during auditing otherwise. May be (should * be!) zero. @@ -2273,7 +2273,7 @@ long xc_sharing_freed_pages(xc_interface *xch); * should return 1. (And dominfo(d) for each of the two domains should return 1 * as well). * - * Note that some of these sharing_used_frames may be referenced by + * Note that some of these sharing_used_frames may be referenced by * a single domain page, and thus not realize any savings. 
The same * applies to some of the pages counted in dominfo(d)->shr_pages. */ diff --git a/tools/libxc/xc_memshr.c b/tools/libxc/xc_memshr.c index d5e135e0d9..5ef56a6933 100644 --- a/tools/libxc/xc_memshr.c +++ b/tools/libxc/xc_memshr.c @@ -41,7 +41,7 @@ int xc_memshr_control(xc_interface *xch, return do_domctl(xch, &domctl); } -int xc_memshr_ring_enable(xc_interface *xch, +int xc_memshr_ring_enable(xc_interface *xch, uint32_t domid, uint32_t *port) { @@ -57,7 +57,7 @@ int xc_memshr_ring_enable(xc_interface *xch, port); } -int xc_memshr_ring_disable(xc_interface *xch, +int xc_memshr_ring_disable(xc_interface *xch, uint32_t domid) { return xc_vm_event_control(xch, domid, @@ -85,11 +85,11 @@ int xc_memshr_nominate_gfn(xc_interface *xch, memset(&mso, 0, sizeof(mso)); mso.op = XENMEM_sharing_op_nominate_gfn; - mso.u.nominate.u.gfn = gfn; + mso.u.nominate.u.gfn = gfn; rc = xc_memshr_memop(xch, domid, &mso); - if (!rc) *handle = mso.u.nominate.handle; + if (!rc) *handle = mso.u.nominate.handle; return rc; } @@ -105,11 +105,11 @@ int xc_memshr_nominate_gref(xc_interface *xch, memset(&mso, 0, sizeof(mso)); mso.op = XENMEM_sharing_op_nominate_gref; - mso.u.nominate.u.grant_ref = gref; + mso.u.nominate.u.grant_ref = gref; rc = xc_memshr_memop(xch, domid, &mso); - if (!rc) *handle = mso.u.nominate.handle; + if (!rc) *handle = mso.u.nominate.handle; return rc; } From patchwork Wed Sep 25 15:48:42 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160983 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id C2CCC924 for ; Wed, 25 Sep 2019 15:51:10 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) 
with ESMTPS id 913FE21D7B for ; Wed, 25 Sep 2019 15:51:10 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 913FE21D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xv-0000pB-H7; Wed, 25 Sep 2019 15:49:39 +0000 Received: from all-amaz-eas1.inumbo.com ([34.197.232.57] helo=us1-amaz-eas2.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xt-0000oE-VZ for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:38 +0000 X-Inumbo-ID: 113e1fba-dfac-11e9-9637-12813bfff9fa Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 113e1fba-dfac-11e9-9637-12813bfff9fa; Wed, 25 Sep 2019 15:49:32 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:30 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812639" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:29 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:42 -0700 Message-Id: X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 04/18] x86/mem_sharing: cleanup code in various locations X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan 
Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" No functional changes. Signed-off-by: Tamas K Lengyel --- xen/arch/x86/hvm/hvm.c | 11 +- xen/arch/x86/mm/mem_sharing.c | 342 +++++++++++++++++------------- xen/arch/x86/mm/p2m.c | 17 +- xen/include/asm-x86/mem_sharing.h | 49 +++-- 4 files changed, 235 insertions(+), 184 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 667c830db5..d71d2ad5d7 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -1879,12 +1879,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, if ( npfec.write_access && (p2mt == p2m_ram_shared) ) { ASSERT(p2m_is_hostp2m(p2m)); - sharing_enomem = - (mem_sharing_unshare_page(currd, gfn, 0) < 0); + sharing_enomem = mem_sharing_unshare_page(currd, gfn, 0); rc = 1; goto out_put_gfn; } - + /* Spurious fault? PoD and log-dirty also take this path. */ if ( p2m_is_ram(p2mt) ) { @@ -1930,9 +1929,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, __put_gfn(p2m, gfn); __put_gfn(hostp2m, gfn); out: - /* All of these are delayed until we exit, since we might + /* + * All of these are delayed until we exit, since we might * sleep on event ring wait queues, and we must not hold - * locks in such circumstance */ + * locks in such circumstance. + */ if ( paged ) p2m_mem_paging_populate(currd, gfn); if ( sharing_enomem ) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index a5fe89e339..8ad6cf3850 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -59,8 +59,10 @@ static DEFINE_PER_CPU(pg_lock_data_t, __pld); #define RMAP_USES_HASHTAB(page) \ ((page)->sharing->hash_table.flag == NULL) #define RMAP_HEAVY_SHARED_PAGE RMAP_HASHTAB_SIZE -/* A bit of hysteresis. We don't want to be mutating between list and hash - * table constantly. */ +/* + * A bit of hysteresis. 
We don't want to be mutating between list and hash + * table constantly. + */ #define RMAP_LIGHT_SHARED_PAGE (RMAP_HEAVY_SHARED_PAGE >> 2) #if MEM_SHARING_AUDIT @@ -88,7 +90,7 @@ static inline void page_sharing_dispose(struct page_info *page) { /* Unlikely given our thresholds, but we should be careful. */ if ( unlikely(RMAP_USES_HASHTAB(page)) ) - free_xenheap_pages(page->sharing->hash_table.bucket, + free_xenheap_pages(page->sharing->hash_table.bucket, RMAP_HASHTAB_ORDER); spin_lock(&shr_audit_lock); @@ -105,7 +107,7 @@ static inline void page_sharing_dispose(struct page_info *page) { /* Unlikely given our thresholds, but we should be careful. */ if ( unlikely(RMAP_USES_HASHTAB(page)) ) - free_xenheap_pages(page->sharing->hash_table.bucket, + free_xenheap_pages(page->sharing->hash_table.bucket, RMAP_HASHTAB_ORDER); xfree(page->sharing); } @@ -122,8 +124,8 @@ static inline void page_sharing_dispose(struct page_info *page) * Nesting may happen when sharing (and locking) two pages. * Deadlock is avoided by locking pages in increasing order. * All memory sharing code paths take the p2m lock of the affected gfn before - * taking the lock for the underlying page. We enforce ordering between page_lock - * and p2m_lock using an mm-locks.h construct. + * taking the lock for the underlying page. We enforce ordering between + * page_lock and p2m_lock using an mm-locks.h construct. * * TODO: Investigate if PGT_validated is necessary. 
*/ @@ -168,7 +170,7 @@ static inline bool mem_sharing_page_lock(struct page_info *pg) if ( rc ) { preempt_disable(); - page_sharing_mm_post_lock(&pld->mm_unlock_level, + page_sharing_mm_post_lock(&pld->mm_unlock_level, &pld->recurse_count); } return rc; @@ -178,7 +180,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg) { pg_lock_data_t *pld = &(this_cpu(__pld)); - page_sharing_mm_unlock(pld->mm_unlock_level, + page_sharing_mm_unlock(pld->mm_unlock_level, &pld->recurse_count); preempt_enable(); _page_unlock(pg); @@ -186,7 +188,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg) static inline shr_handle_t get_next_handle(void) { - /* Get the next handle get_page style */ + /* Get the next handle get_page style */ uint64_t x, y = next_handle; do { x = y; @@ -198,24 +200,26 @@ static inline shr_handle_t get_next_handle(void) #define mem_sharing_enabled(d) \ (is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled) -static atomic_t nr_saved_mfns = ATOMIC_INIT(0); +static atomic_t nr_saved_mfns = ATOMIC_INIT(0); static atomic_t nr_shared_mfns = ATOMIC_INIT(0); -/** Reverse map **/ -/* Every shared frame keeps a reverse map (rmap) of tuples that +/* + * Reverse map + * + * Every shared frame keeps a reverse map (rmap) of tuples that * this shared frame backs. For pages with a low degree of sharing, a O(n) * search linked list is good enough. For pages with higher degree of sharing, - * we use a hash table instead. */ + * we use a hash table instead. + */ typedef struct gfn_info { unsigned long gfn; - domid_t domain; + domid_t domain; struct list_head list; } gfn_info_t; -static inline void -rmap_init(struct page_info *page) +static inline void rmap_init(struct page_info *page) { /* We always start off as a doubly linked list. */ INIT_LIST_HEAD(&page->sharing->gfns); @@ -225,10 +229,11 @@ rmap_init(struct page_info *page) #define HASH(domain, gfn) \ (((gfn) + (domain)) % RMAP_HASHTAB_SIZE) -/* Conversions. Tuned by the thresholds. 
Should only happen twice - * (once each) during the lifetime of a shared page */ -static inline int -rmap_list_to_hash_table(struct page_info *page) +/* + * Conversions. Tuned by the thresholds. Should only happen twice + * (once each) during the lifetime of a shared page. + */ +static inline int rmap_list_to_hash_table(struct page_info *page) { unsigned int i; struct list_head *pos, *tmp, *b = @@ -254,8 +259,7 @@ rmap_list_to_hash_table(struct page_info *page) return 0; } -static inline void -rmap_hash_table_to_list(struct page_info *page) +static inline void rmap_hash_table_to_list(struct page_info *page) { unsigned int i; struct list_head *bucket = page->sharing->hash_table.bucket; @@ -276,8 +280,7 @@ rmap_hash_table_to_list(struct page_info *page) } /* Generic accessors to the rmap */ -static inline unsigned long -rmap_count(struct page_info *pg) +static inline unsigned long rmap_count(struct page_info *pg) { unsigned long count; unsigned long t = read_atomic(&pg->u.inuse.type_info); @@ -287,11 +290,13 @@ rmap_count(struct page_info *pg) return count; } -/* The page type count is always decreased after removing from the rmap. - * Use a convert flag to avoid mutating the rmap if in the middle of an - * iterator, or if the page will be soon destroyed anyways. */ -static inline void -rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert) +/* + * The page type count is always decreased after removing from the rmap. + * Use a convert flag to avoid mutating the rmap if in the middle of an + * iterator, or if the page will be soon destroyed anyways. + */ +static inline +void rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert) { if ( RMAP_USES_HASHTAB(page) && convert && (rmap_count(page) <= RMAP_LIGHT_SHARED_PAGE) ) @@ -302,8 +307,7 @@ rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert) } /* The page type count is always increased before adding to the rmap. 
*/ -static inline void -rmap_add(gfn_info_t *gfn_info, struct page_info *page) +static inline void rmap_add(gfn_info_t *gfn_info, struct page_info *page) { struct list_head *head; @@ -314,7 +318,7 @@ rmap_add(gfn_info_t *gfn_info, struct page_info *page) (void)rmap_list_to_hash_table(page); head = (RMAP_USES_HASHTAB(page)) ? - page->sharing->hash_table.bucket + + page->sharing->hash_table.bucket + HASH(gfn_info->domain, gfn_info->gfn) : &page->sharing->gfns; @@ -322,9 +326,9 @@ rmap_add(gfn_info_t *gfn_info, struct page_info *page) list_add(&gfn_info->list, head); } -static inline gfn_info_t * -rmap_retrieve(uint16_t domain_id, unsigned long gfn, - struct page_info *page) +static inline +gfn_info_t *rmap_retrieve(uint16_t domain_id, unsigned long gfn, + struct page_info *page) { gfn_info_t *gfn_info; struct list_head *le, *head; @@ -364,18 +368,18 @@ struct rmap_iterator { unsigned int bucket; }; -static inline void -rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri) +static inline +void rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri) { ri->curr = (RMAP_USES_HASHTAB(page)) ? page->sharing->hash_table.bucket : &page->sharing->gfns; - ri->next = ri->curr->next; + ri->next = ri->curr->next; ri->bucket = 0; } -static inline gfn_info_t * -rmap_iterate(struct page_info *page, struct rmap_iterator *ri) +static inline +gfn_info_t *rmap_iterate(struct page_info *page, struct rmap_iterator *ri) { struct list_head *head = (RMAP_USES_HASHTAB(page)) ? 
page->sharing->hash_table.bucket + ri->bucket : @@ -405,14 +409,14 @@ retry: return list_entry(ri->curr, gfn_info_t, list); } -static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page, - struct domain *d, - unsigned long gfn) +static inline +gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page, struct domain *d, + unsigned long gfn) { gfn_info_t *gfn_info = xmalloc(gfn_info_t); if ( gfn_info == NULL ) - return NULL; + return NULL; gfn_info->gfn = gfn; gfn_info->domain = d->domain_id; @@ -425,9 +429,9 @@ static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page, return gfn_info; } -static inline void mem_sharing_gfn_destroy(struct page_info *page, - struct domain *d, - gfn_info_t *gfn_info) +static inline +void mem_sharing_gfn_destroy(struct page_info *page, struct domain *d, + gfn_info_t *gfn_info) { /* Decrement the number of pages. */ atomic_dec(&d->shr_pages); @@ -437,25 +441,29 @@ static inline void mem_sharing_gfn_destroy(struct page_info *page, xfree(gfn_info); } -static struct page_info* mem_sharing_lookup(unsigned long mfn) +static inline struct page_info* mem_sharing_lookup(unsigned long mfn) { - if ( mfn_valid(_mfn(mfn)) ) - { - struct page_info* page = mfn_to_page(_mfn(mfn)); - if ( page_get_owner(page) == dom_cow ) - { - /* Count has to be at least two, because we're called - * with the mfn locked (1) and this is supposed to be - * a shared page (1). */ - unsigned long t = read_atomic(&page->u.inuse.type_info); - ASSERT((t & PGT_type_mask) == PGT_shared_page); - ASSERT((t & PGT_count_mask) >= 2); - ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn))); - return page; - } - } + struct page_info* page; + unsigned long t; - return NULL; + if ( !mfn_valid(_mfn(mfn)) ) + return NULL; + + page = mfn_to_page(_mfn(mfn)); + if ( page_get_owner(page) != dom_cow ) + return NULL; + + /* + * Count has to be at least two, because we're called + * with the mfn locked (1) and this is supposed to be + * a shared page (1). 
+ */ + t = read_atomic(&page->u.inuse.type_info); + ASSERT((t & PGT_type_mask) == PGT_shared_page); + ASSERT((t & PGT_count_mask) >= 2); + ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn))); + + return page; } static int audit(void) @@ -492,7 +500,7 @@ static int audit(void) continue; } - /* Check if the MFN has correct type, owner and handle. */ + /* Check if the MFN has correct type, owner and handle. */ if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_shared_page ) { MEM_SHARING_DEBUG("mfn %lx in audit list, but not PGT_shared_page (%lx)!\n", @@ -545,7 +553,7 @@ static int audit(void) errors++; continue; } - o_mfn = get_gfn_query_unlocked(d, g->gfn, &t); + o_mfn = get_gfn_query_unlocked(d, g->gfn, &t); if ( !mfn_eq(o_mfn, mfn) ) { MEM_SHARING_DEBUG("Incorrect P2M for d=%hu, PFN=%lx." @@ -568,7 +576,7 @@ static int audit(void) { MEM_SHARING_DEBUG("Mismatched counts for MFN=%lx." "nr_gfns in list %lu, in type_info %lx\n", - mfn_x(mfn), nr_gfns, + mfn_x(mfn), nr_gfns, (pg->u.inuse.type_info & PGT_count_mask)); errors++; } @@ -603,7 +611,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, .u.mem_sharing.p2mt = p2m_ram_shared }; - if ( (rc = __vm_event_claim_slot(d, + if ( (rc = __vm_event_claim_slot(d, d->vm_event_share, allow_sleep)) < 0 ) return rc; @@ -629,9 +637,9 @@ unsigned int mem_sharing_get_nr_shared_mfns(void) } /* Functions that change a page's type and ownership */ -static int page_make_sharable(struct domain *d, - struct page_info *page, - int expected_refcnt) +static int page_make_sharable(struct domain *d, + struct page_info *page, + int expected_refcnt) { bool_t drop_dom_ref; @@ -658,8 +666,10 @@ static int page_make_sharable(struct domain *d, return -EEXIST; } - /* Check if the ref count is 2. The first from PGC_allocated, and - * the second from get_page_and_type at the top of this function */ + /* + * Check if the ref count is 2. The first from PGC_allocated, and + * the second from get_page_and_type at the top of this function. 
+ */ if ( page->count_info != (PGC_allocated | (2 + expected_refcnt)) ) { spin_unlock(&d->page_alloc_lock); @@ -675,6 +685,7 @@ static int page_make_sharable(struct domain *d, if ( drop_dom_ref ) put_domain(d); + return 0; } @@ -684,7 +695,7 @@ static int page_make_private(struct domain *d, struct page_info *page) if ( !get_page(page, dom_cow) ) return -EINVAL; - + spin_lock(&d->page_alloc_lock); if ( d->is_dying ) @@ -727,10 +738,13 @@ static inline struct page_info *__grab_shared_page(mfn_t mfn) if ( !mfn_valid(mfn) ) return NULL; + pg = mfn_to_page(mfn); - /* If the page is not validated we can't lock it, and if it's - * not validated it's obviously not shared. */ + /* + * If the page is not validated we can't lock it, and if it's + * not validated it's obviously not shared. + */ if ( !mem_sharing_page_lock(pg) ) return NULL; @@ -754,10 +768,10 @@ static int debug_mfn(mfn_t mfn) return -EINVAL; } - MEM_SHARING_DEBUG( + MEM_SHARING_DEBUG( "Debug page: MFN=%lx is ci=%lx, ti=%lx, owner_id=%d\n", - mfn_x(page_to_mfn(page)), - page->count_info, + mfn_x(page_to_mfn(page)), + page->count_info, page->u.inuse.type_info, page_get_owner(page)->domain_id); @@ -775,7 +789,7 @@ static int debug_gfn(struct domain *d, gfn_t gfn) mfn = get_gfn_query(d, gfn_x(gfn), &p2mt); - MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n", + MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n", d->domain_id, gfn_x(gfn)); num_refs = debug_mfn(mfn); put_gfn(d, gfn_x(gfn)); @@ -796,9 +810,9 @@ static int debug_gref(struct domain *d, grant_ref_t ref) d->domain_id, ref, rc); return rc; } - + MEM_SHARING_DEBUG( - "==> Grant [dom=%d,ref=%d], status=%x. ", + "==> Grant [dom=%d,ref=%d], status=%x. 
", d->domain_id, ref, status); return debug_gfn(d, gfn); @@ -824,15 +838,12 @@ static int nominate_page(struct domain *d, gfn_t gfn, goto out; /* Return the handle if the page is already shared */ - if ( p2m_is_shared(p2mt) ) { + if ( p2m_is_shared(p2mt) ) + { struct page_info *pg = __grab_shared_page(mfn); if ( !pg ) - { - gprintk(XENLOG_ERR, - "Shared p2m entry gfn %" PRI_gfn ", but could not grab mfn %" PRI_mfn " dom%d\n", - gfn_x(gfn), mfn_x(mfn), d->domain_id); BUG(); - } + *phandle = pg->sharing->handle; ret = 0; mem_sharing_page_unlock(pg); @@ -843,7 +854,6 @@ static int nominate_page(struct domain *d, gfn_t gfn, if ( !p2m_is_sharable(p2mt) ) goto out; -#ifdef CONFIG_HVM /* Check if there are mem_access/remapped altp2m entries for this page */ if ( altp2m_active(d) ) { @@ -872,42 +882,42 @@ static int nominate_page(struct domain *d, gfn_t gfn, altp2m_list_unlock(d); } -#endif /* Try to convert the mfn to the sharable type */ page = mfn_to_page(mfn); - ret = page_make_sharable(d, page, expected_refcnt); - if ( ret ) + ret = page_make_sharable(d, page, expected_refcnt); + if ( ret ) goto out; - /* Now that the page is validated, we can lock it. There is no - * race because we're holding the p2m entry, so no one else - * could be nominating this gfn */ + /* + * Now that the page is validated, we can lock it. There is no + * race because we're holding the p2m entry, so no one else + * could be nominating this gfn. 
+ */ ret = -ENOENT; if ( !mem_sharing_page_lock(page) ) goto out; /* Initialize the shared state */ ret = -ENOMEM; - if ( (page->sharing = - xmalloc(struct page_sharing_info)) == NULL ) + if ( !(page->sharing = xmalloc(struct page_sharing_info)) ) { /* Making a page private atomically unlocks it */ - BUG_ON(page_make_private(d, page) != 0); + BUG_ON(page_make_private(d, page)); goto out; } page->sharing->pg = page; rmap_init(page); /* Create the handle */ - page->sharing->handle = get_next_handle(); + page->sharing->handle = get_next_handle(); /* Create the local gfn info */ - if ( mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) == NULL ) + if ( !mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) ) { xfree(page->sharing); page->sharing = NULL; - BUG_ON(page_make_private(d, page) != 0); + BUG_ON(page_make_private(d, page)); goto out; } @@ -946,15 +956,19 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh, get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn, cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg); - /* This tricky business is to avoid two callers deadlocking if - * grabbing pages in opposite client/source order */ + /* + * This tricky business is to avoid two callers deadlocking if + * grabbing pages in opposite client/source order. + */ if ( mfn_eq(smfn, cmfn) ) { - /* The pages are already the same. We could return some + /* + * The pages are already the same. We could return some * kind of error here, but no matter how you look at it, * the pages are already 'shared'. It possibly represents * a big problem somewhere else, but as far as sharing is - * concerned: great success! */ + * concerned: great success! 
+ */ ret = 0; goto err_out; } @@ -1010,11 +1024,15 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh, rmap_seed_iterator(cpage, &ri); while ( (gfn = rmap_iterate(cpage, &ri)) != NULL) { - /* Get the source page and type, this should never fail: - * we are under shr lock, and got a successful lookup */ + /* + * Get the source page and type, this should never fail: + * we are under shr lock, and got a successful lookup. + */ BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page)); - /* Move the gfn_info from client list to source list. - * Don't change the type of rmap for the client page. */ + /* + * Move the gfn_info from client list to source list. + * Don't change the type of rmap for the client page. + */ rmap_del(gfn, cpage, 0); rmap_add(gfn, spage); put_count++; @@ -1043,14 +1061,14 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh, atomic_dec(&nr_shared_mfns); atomic_inc(&nr_saved_mfns); ret = 0; - + err_out: put_two_gfns(&tg); return ret; } int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh, - struct domain *cd, unsigned long cgfn) + struct domain *cd, unsigned long cgfn) { struct page_info *spage; int ret = -EINVAL; @@ -1069,15 +1087,18 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle spage = __grab_shared_page(smfn); if ( spage == NULL ) goto err_out; + ASSERT(smfn_type == p2m_ram_shared); /* Check that the handles match */ if ( spage->sharing->handle != sh ) goto err_unlock; - /* Make sure the target page is a hole in the physmap. These are typically + /* + * Make sure the target page is a hole in the physmap. These are typically * p2m_mmio_dm, but also accept p2m_invalid and paged out pages. See the - * definition of p2m_is_hole in p2m.h. */ + * definition of p2m_is_hole in p2m.h. 
+ */ if ( !p2m_is_hole(cmfn_type) ) { ret = XENMEM_SHARING_OP_C_HANDLE_INVALID; @@ -1086,7 +1107,7 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle /* This is simpler than regular sharing */ BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page)); - if ( (gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) == NULL ) + if ( !(gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) ) { put_page_and_type(spage); ret = -ENOMEM; @@ -1102,11 +1123,17 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle mem_sharing_gfn_destroy(spage, cd, gfn_info); put_page_and_type(spage); } else { - /* There is a chance we're plugging a hole where a paged out page was */ + /* + * There is a chance we're plugging a hole where a paged out + * page was. + */ if ( p2m_is_paging(cmfn_type) && (cmfn_type != p2m_ram_paging_out) ) { atomic_dec(&cd->paged_pages); - /* Further, there is a chance this was a valid page. Don't leak it. */ + /* + * Further, there is a chance this was a valid page. + * Don't leak it. + */ if ( mfn_valid(cmfn) ) { struct page_info *cpage = mfn_to_page(cmfn); @@ -1133,13 +1160,14 @@ err_out: } -/* A note on the rationale for unshare error handling: +/* + * A note on the rationale for unshare error handling: * 1. Unshare can only fail with ENOMEM. Any other error conditions BUG_ON()'s * 2. We notify a potential dom0 helper through a vm_event ring. But we - * allow the notification to not go to sleep. If the event ring is full + * allow the notification to not go to sleep. If the event ring is full * of ENOMEM warnings, then it's on the ball. * 3. We cannot go to sleep until the unshare is resolved, because we might - * be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy) + * be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy) * 4. So, we make sure we: * 4.1. return an error * 4.2. do not corrupt shared memory @@ -1147,19 +1175,20 @@ err_out: * 4.4. 
let the guest deal with it if the error propagation will reach it */ int __mem_sharing_unshare_page(struct domain *d, - unsigned long gfn, - uint16_t flags) + unsigned long gfn, + uint16_t flags) { p2m_type_t p2mt; mfn_t mfn; struct page_info *page, *old_page; int last_gfn; gfn_info_t *gfn_info = NULL; - + mfn = get_gfn(d, gfn, &p2mt); - + /* Has someone already unshared it? */ - if ( !p2m_is_shared(p2mt) ) { + if ( !p2m_is_shared(p2mt) ) + { put_gfn(d, gfn); return 0; } @@ -1167,26 +1196,30 @@ int __mem_sharing_unshare_page(struct domain *d, page = __grab_shared_page(mfn); if ( page == NULL ) { - gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: " - "%lx\n", gfn); + gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: %lx\n", + gfn); BUG(); } gfn_info = rmap_retrieve(d->domain_id, gfn, page); if ( unlikely(gfn_info == NULL) ) { - gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: " - "%lx\n", gfn); + gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: %lx\n", + gfn); BUG(); } - /* Do the accounting first. If anything fails below, we have bigger - * bigger fish to fry. First, remove the gfn from the list. */ + /* + * Do the accounting first. If anything fails below, we have bigger + * bigger fish to fry. First, remove the gfn from the list. + */ last_gfn = rmap_has_one_entry(page); if ( last_gfn ) { - /* Clean up shared state. Get rid of the tuple - * before destroying the rmap. */ + /* + * Clean up shared state. Get rid of the tuple + * before destroying the rmap. + */ mem_sharing_gfn_destroy(page, d, gfn_info); page_sharing_dispose(page); page->sharing = NULL; @@ -1195,8 +1228,10 @@ int __mem_sharing_unshare_page(struct domain *d, else atomic_dec(&nr_saved_mfns); - /* If the GFN is getting destroyed drop the references to MFN - * (possibly freeing the page), and exit early */ + /* + * If the GFN is getting destroyed drop the references to MFN + * (possibly freeing the page), and exit early. 
+ */ if ( flags & MEM_SHARING_DESTROY_GFN ) { if ( !last_gfn ) @@ -1212,7 +1247,7 @@ int __mem_sharing_unshare_page(struct domain *d, return 0; } - + if ( last_gfn ) { /* Making a page private atomically unlocks it */ @@ -1222,14 +1257,16 @@ int __mem_sharing_unshare_page(struct domain *d, old_page = page; page = alloc_domheap_page(d, 0); - if ( !page ) + if ( !page ) { /* Undo dec of nr_saved_mfns, as the retry will decrease again. */ atomic_inc(&nr_saved_mfns); mem_sharing_page_unlock(old_page); put_gfn(d, gfn); - /* Caller is responsible for placing an event - * in the ring */ + /* + * Caller is responsible for placing an event + * in the ring. + */ return -ENOMEM; } @@ -1240,11 +1277,11 @@ int __mem_sharing_unshare_page(struct domain *d, mem_sharing_page_unlock(old_page); put_page_and_type(old_page); -private_page_found: +private_page_found: if ( p2m_change_type_one(d, gfn, p2m_ram_shared, p2m_ram_rw) ) { - gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n", - d->domain_id, gfn); + gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n", + d->domain_id, gfn); BUG(); } @@ -1277,20 +1314,23 @@ int relinquish_shared_pages(struct domain *d) mfn_t mfn; int set_rc; - if ( atomic_read(&d->shr_pages) == 0 ) + if ( !atomic_read(&d->shr_pages) ) break; + mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, NULL, NULL); - if ( mfn_valid(mfn) && (t == p2m_ram_shared) ) + if ( mfn_valid(mfn) && t == p2m_ram_shared ) { /* Does not fail with ENOMEM given the DESTROY flag */ - BUG_ON(__mem_sharing_unshare_page(d, gfn, - MEM_SHARING_DESTROY_GFN)); - /* Clear out the p2m entry so no one else may try to + BUG_ON(__mem_sharing_unshare_page(d, gfn, + MEM_SHARING_DESTROY_GFN)); + /* + * Clear out the p2m entry so no one else may try to * unshare. Must succeed: we just read the old entry and - * we hold the p2m lock. */ + * we hold the p2m lock. 
+ */ set_rc = p2m->set_entry(p2m, _gfn(gfn), _mfn(0), PAGE_ORDER_4K, p2m_invalid, p2m_access_rwx, -1); - ASSERT(set_rc == 0); + ASSERT(!set_rc); count += 0x10; } else @@ -1454,7 +1494,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) ) { - grant_ref_t gref = (grant_ref_t) + grant_ref_t gref = (grant_ref_t) (XENMEM_SHARING_OP_FIELD_GET_GREF( mso.u.share.source_gfn)); rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &sgfn, @@ -1470,7 +1510,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) ) { - grant_ref_t gref = (grant_ref_t) + grant_ref_t gref = (grant_ref_t) (XENMEM_SHARING_OP_FIELD_GET_GREF( mso.u.share.client_gfn)); rc = mem_sharing_gref_to_gfn(cd->grant_table, gref, &cgfn, @@ -1534,7 +1574,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) sh = mso.u.share.source_handle; cgfn = mso.u.share.client_gfn; - rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn); + rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn); rcu_unlock_domain(cd); } diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index 8a5229ee21..714158d2a6 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -506,8 +506,10 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l, if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) ) { ASSERT(p2m_is_hostp2m(p2m)); - /* Try to unshare. If we fail, communicate ENOMEM without - * sleeping. */ + /* + * Try to unshare. If we fail, communicate ENOMEM without + * sleeping. 
+ */ if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 ) mem_sharing_notify_enomem(p2m->domain, gfn_l, false); mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL); @@ -887,15 +889,15 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, &a, 0, NULL, NULL); if ( p2m_is_shared(ot) ) { - /* Do an unshare to cleanly take care of all corner - * cases. */ + /* Do an unshare to cleanly take care of all corner cases. */ int rc; rc = mem_sharing_unshare_page(p2m->domain, gfn_x(gfn_add(gfn, i)), 0); if ( rc ) { p2m_unlock(p2m); - /* NOTE: Should a guest domain bring this upon itself, + /* + * NOTE: Should a guest domain bring this upon itself, * there is not a whole lot we can do. We are buried * deep in locks from most code paths by now. So, fail * the call and don't try to sleep on a wait queue @@ -904,8 +906,9 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, * However, all current (changeset 3432abcf9380) code * paths avoid this unsavoury situation. For now. * - * Foreign domains are okay to place an event as they - * won't go to sleep. */ + * Foreign domains are okay to place an event as they + * won't go to sleep. + */ (void)mem_sharing_notify_enomem(p2m->domain, gfn_x(gfn_add(gfn, i)), false); return rc; diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h index db22468744..1280830a85 100644 --- a/xen/include/asm-x86/mem_sharing.h +++ b/xen/include/asm-x86/mem_sharing.h @@ -33,12 +33,14 @@ #define MEM_SHARING_AUDIT 0 #endif -typedef uint64_t shr_handle_t; +typedef uint64_t shr_handle_t; typedef struct rmap_hashtab { struct list_head *bucket; - /* Overlaps with prev pointer of list_head in union below. - * Unlike the prev pointer, this can be NULL. */ + /* + * Overlaps with prev pointer of list_head in union below. + * Unlike the prev pointer, this can be NULL. 
+ */ void *flag; } rmap_hashtab_t; @@ -57,34 +59,34 @@ struct page_sharing_info }; }; -#define sharing_supported(_d) \ - (is_hvm_domain(_d) && paging_mode_hap(_d)) - unsigned int mem_sharing_get_nr_saved_mfns(void); unsigned int mem_sharing_get_nr_shared_mfns(void); #define MEM_SHARING_DESTROY_GFN (1<<1) /* Only fails with -ENOMEM. Enforce it with a BUG_ON wrapper. */ int __mem_sharing_unshare_page(struct domain *d, - unsigned long gfn, - uint16_t flags); -static inline int mem_sharing_unshare_page(struct domain *d, - unsigned long gfn, - uint16_t flags) + unsigned long gfn, + uint16_t flags); + +static inline +int mem_sharing_unshare_page(struct domain *d, + unsigned long gfn, + uint16_t flags) { int rc = __mem_sharing_unshare_page(d, gfn, flags); BUG_ON( rc && (rc != -ENOMEM) ); return rc; } -/* If called by a foreign domain, possible errors are +/* + * If called by a foreign domain, possible errors are * -EBUSY -> ring full * -ENOSYS -> no ring to begin with * and the foreign mapper is responsible for retrying. * - * If called by the guest vcpu itself and allow_sleep is set, may - * sleep on a wait queue, so the caller is responsible for not - * holding locks on entry. It may only fail with ENOSYS + * If called by the guest vcpu itself and allow_sleep is set, may + * sleep on a wait queue, so the caller is responsible for not + * holding locks on entry. It may only fail with ENOSYS * * If called by the guest vcpu itself and allow_sleep is not set, * then it's the same as a foreign domain. 
@@ -92,10 +94,11 @@ static inline int mem_sharing_unshare_page(struct domain *d, int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, bool allow_sleep); int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg); -int mem_sharing_domctl(struct domain *d, +int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec); -/* Scans the p2m and relinquishes any shared pages, destroying +/* + * Scans the p2m and relinquishes any shared pages, destroying * those for which this domain holds the final reference. * Preemptible. */ @@ -107,18 +110,22 @@ static inline unsigned int mem_sharing_get_nr_saved_mfns(void) { return 0; } + static inline unsigned int mem_sharing_get_nr_shared_mfns(void) { return 0; } -static inline int mem_sharing_unshare_page(struct domain *d, - unsigned long gfn, - uint16_t flags) + +static inline +int mem_sharing_unshare_page(struct domain *d, unsigned long gfn, + uint16_t flags) { ASSERT_UNREACHABLE(); return -EOPNOTSUPP; } -static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, + +static inline +int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, bool allow_sleep) { ASSERT_UNREACHABLE(); From patchwork Wed Sep 25 15:48:43 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160985 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 1884A17D4 for ; Wed, 25 Sep 2019 15:51:11 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id F275221D7B for ; Wed, 25 Sep 2019 15:51:10 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org F275221D7B Authentication-Results: 
mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Y3-0000uz-OC; Wed, 25 Sep 2019 15:49:47 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Y2-0000uM-N4 for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:46 +0000 X-Inumbo-ID: 114983be-dfac-11e9-8628-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 114983be-dfac-11e9-8628-bc764e2007e4; Wed, 25 Sep 2019 15:49:33 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:30 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812645" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:30 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:43 -0700 Message-Id: <6dcafecd884999e288eeab0347c7814808b137e2.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 05/18] x86/mem_sharing: make get_two_gfns take locks conditionally X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: 
"Xen-devel" During VM forking the client lock will already be taken. Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/mem_sharing.c | 11 ++++++----- xen/include/asm-x86/p2m.h | 10 +++++----- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index 8ad6cf3850..df308a75cd 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -954,7 +954,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh, unsigned long put_count = 0; get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn, - cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg); + cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg, true); /* * This tricky business is to avoid two callers deadlocking if @@ -1068,7 +1068,7 @@ err_out: } int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh, - struct domain *cd, unsigned long cgfn) + struct domain *cd, unsigned long cgfn, bool lock) { struct page_info *spage; int ret = -EINVAL; @@ -1080,7 +1080,7 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle struct two_gfns tg; get_two_gfns(sd, _gfn(sgfn), &smfn_type, NULL, &smfn, - cd, _gfn(cgfn), &cmfn_type, &a, &cmfn, 0, &tg); + cd, _gfn(cgfn), &cmfn_type, &a, &cmfn, 0, &tg, lock); /* Get the source shared page, check and lock */ ret = XENMEM_SHARING_OP_S_HANDLE_INVALID; @@ -1155,7 +1155,8 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle err_unlock: mem_sharing_page_unlock(spage); err_out: - put_two_gfns(&tg); + if ( lock ) + put_two_gfns(&tg); return ret; } @@ -1574,7 +1575,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) sh = mso.u.share.source_handle; cgfn = mso.u.share.client_gfn; - rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn); + rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn, true); rcu_unlock_domain(cd); } diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index 
94285db1b4..7399c4a897 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -539,7 +539,7 @@ struct two_gfns { static inline void get_two_gfns(struct domain *rd, gfn_t rgfn, p2m_type_t *rt, p2m_access_t *ra, mfn_t *rmfn, struct domain *ld, gfn_t lgfn, p2m_type_t *lt, p2m_access_t *la, mfn_t *lmfn, - p2m_query_t q, struct two_gfns *rval) + p2m_query_t q, struct two_gfns *rval, bool lock) { mfn_t *first_mfn, *second_mfn, scratch_mfn; p2m_access_t *first_a, *second_a, scratch_a; @@ -569,10 +569,10 @@ do { \ #undef assign_pointers /* Now do the gets */ - *first_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->first_domain), - gfn_x(rval->first_gfn), first_t, first_a, q, NULL); - *second_mfn = get_gfn_type_access(p2m_get_hostp2m(rval->second_domain), - gfn_x(rval->second_gfn), second_t, second_a, q, NULL); + *first_mfn = __get_gfn_type_access(p2m_get_hostp2m(rval->first_domain), + gfn_x(rval->first_gfn), first_t, first_a, q, NULL, lock); + *second_mfn = __get_gfn_type_access(p2m_get_hostp2m(rval->second_domain), + gfn_x(rval->second_gfn), second_t, second_a, q, NULL, lock); } static inline void put_two_gfns(struct two_gfns *arg) From patchwork Wed Sep 25 15:48:44 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160987 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 8D85A1747 for ; Wed, 25 Sep 2019 15:51:14 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 7369A21D7A for ; Wed, 25 Sep 2019 15:51:14 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 7369A21D7A Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) 
header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Y9-0000zg-CS; Wed, 25 Sep 2019 15:49:53 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Y7-0000yP-NH for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:51 +0000 X-Inumbo-ID: 11b601b0-dfac-11e9-bf31-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 11b601b0-dfac-11e9-bf31-bc764e2007e4; Wed, 25 Sep 2019 15:49:33 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:31 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812652" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:30 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:44 -0700 Message-Id: X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 06/18] x86/mem_sharing: drop flags from mem_sharing_unshare_page X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" All callers pass 0 in. 
Signed-off-by: Tamas K Lengyel Reviewed-by: Wei Liu --- xen/arch/x86/hvm/hvm.c | 2 +- xen/arch/x86/mm/p2m.c | 5 ++--- xen/include/asm-x86/mem_sharing.h | 8 +++----- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index d71d2ad5d7..7c255728c2 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -1879,7 +1879,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, if ( npfec.write_access && (p2mt == p2m_ram_shared) ) { ASSERT(p2m_is_hostp2m(p2m)); - sharing_enomem = mem_sharing_unshare_page(currd, gfn, 0); + sharing_enomem = mem_sharing_unshare_page(currd, gfn); rc = 1; goto out_put_gfn; } diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index 714158d2a6..3d27c6c91a 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -510,7 +510,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l, * Try to unshare. If we fail, communicate ENOMEM without * sleeping. */ - if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 ) + if ( mem_sharing_unshare_page(p2m->domain, gfn_l) < 0 ) mem_sharing_notify_enomem(p2m->domain, gfn_l, false); mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL); } @@ -891,8 +891,7 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn, { /* Do an unshare to cleanly take care of all corner cases. 
*/ int rc; - rc = mem_sharing_unshare_page(p2m->domain, - gfn_x(gfn_add(gfn, i)), 0); + rc = mem_sharing_unshare_page(p2m->domain, gfn_x(gfn_add(gfn, i))); if ( rc ) { p2m_unlock(p2m); diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h index 1280830a85..8deb0ceee5 100644 --- a/xen/include/asm-x86/mem_sharing.h +++ b/xen/include/asm-x86/mem_sharing.h @@ -70,10 +70,9 @@ int __mem_sharing_unshare_page(struct domain *d, static inline int mem_sharing_unshare_page(struct domain *d, - unsigned long gfn, - uint16_t flags) + unsigned long gfn) { - int rc = __mem_sharing_unshare_page(d, gfn, flags); + int rc = __mem_sharing_unshare_page(d, gfn, 0); BUG_ON( rc && (rc != -ENOMEM) ); return rc; } @@ -117,8 +116,7 @@ static inline unsigned int mem_sharing_get_nr_shared_mfns(void) } static inline -int mem_sharing_unshare_page(struct domain *d, unsigned long gfn, - uint16_t flags) +int mem_sharing_unshare_page(struct domain *d, unsigned long gfn) { ASSERT_UNREACHABLE(); return -EOPNOTSUPP; From patchwork Wed Sep 25 15:48:45 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160981 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 0B742924 for ; Wed, 25 Sep 2019 15:51:09 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id E5CDE21D7B for ; Wed, 25 Sep 2019 15:51:08 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org E5CDE21D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from 
localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YE-00014a-DL; Wed, 25 Sep 2019 15:49:58 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YC-000139-Nx for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:56 +0000 X-Inumbo-ID: 11e6bb5c-dfac-11e9-8628-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 11e6bb5c-dfac-11e9-8628-bc764e2007e4; Wed, 25 Sep 2019 15:49:34 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:32 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812656" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:31 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:45 -0700 Message-Id: <8be4685eb151ec323f822fdee0d55cb2be7ab1b2.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 07/18] x86/mem_sharing: don't try to unshare twice during page fault X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Andrew Cooper , Tamas K Lengyel , Wei Liu , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" The page was already tried to be unshared in get_gfn_type_access. If that didn't work, then trying again is pointless. Don't try to send vm_event again either, simply check if there is a ring or not. 
Signed-off-by: Tamas K Lengyel --- xen/arch/x86/hvm/hvm.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 7c255728c2..2af2f936a5 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -1701,11 +1702,14 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, struct domain *currd = curr->domain; struct p2m_domain *p2m, *hostp2m; int rc, fall_through = 0, paged = 0; - int sharing_enomem = 0; vm_event_request_t *req_ptr = NULL; bool sync = false; unsigned int page_order; +#ifdef CONFIG_MEM_SHARING + bool sharing_enomem = false; +#endif + /* On Nested Virtualization, walk the guest page table. * If this succeeds, all is fine. * If this fails, inject a nested page fault into the guest. @@ -1875,14 +1879,16 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) ) paged = 1; - /* Mem sharing: unshare the page and try again */ - if ( npfec.write_access && (p2mt == p2m_ram_shared) ) +#ifdef CONFIG_MEM_SHARING + /* Mem sharing: if still shared on write access then its enomem */ + if ( npfec.write_access && p2m_is_shared(p2mt) ) { ASSERT(p2m_is_hostp2m(p2m)); - sharing_enomem = mem_sharing_unshare_page(currd, gfn); + sharing_enomem = true; rc = 1; goto out_put_gfn; } +#endif /* Spurious fault? PoD and log-dirty also take this path. 
*/ if ( p2m_is_ram(p2mt) ) @@ -1936,19 +1942,21 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, */ if ( paged ) p2m_mem_paging_populate(currd, gfn); + +#ifdef CONFIG_MEM_SHARING if ( sharing_enomem ) { - int rv; - - if ( (rv = mem_sharing_notify_enomem(currd, gfn, true)) < 0 ) + if ( !vm_event_check_ring(currd->vm_event_share) ) { gdprintk(XENLOG_ERR, "Domain %hu attempt to unshare " - "gfn %lx, ENOMEM and no helper (rc %d)\n", - currd->domain_id, gfn, rv); + "gfn %lx, ENOMEM and no helper\n", + currd->domain_id, gfn); /* Crash the domain */ rc = 0; } } +#endif + if ( req_ptr ) { if ( monitor_traps(curr, sync, req_ptr) < 0 ) From patchwork Wed Sep 25 15:48:46 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160997 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 780FA1747 for ; Wed, 25 Sep 2019 15:51:34 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 5C2F821D7A for ; Wed, 25 Sep 2019 15:51:34 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 5C2F821D7A Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YN-0001h5-TG; Wed, 25 Sep 2019 15:50:07 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YM-0001Yq-NH for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:50:06 +0000 
X-Inumbo-ID: 12999768-dfac-11e9-bf31-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 12999768-dfac-11e9-bf31-bc764e2007e4; Wed, 25 Sep 2019 15:49:35 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:33 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812663" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:32 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:46 -0700 Message-Id: X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 08/18] x86/mem_sharing: define mem_sharing_domain to hold some scattered variables X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" Create struct mem_sharing_domain under hvm_domain and move mem sharing variables into it from p2m_domain and hvm_domain. Expose the mem_sharing_enabled macro to be used consistently across Xen. 
Remove some duplicate calls to mem_sharing_enabled in mem_sharing.c Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/mem_sharing.c | 30 +++++------------------------- xen/drivers/passthrough/pci.c | 2 +- xen/include/asm-x86/hvm/domain.h | 7 ++++++- xen/include/asm-x86/mem_sharing.h | 16 ++++++++++++++++ xen/include/asm-x86/p2m.h | 4 ---- 5 files changed, 28 insertions(+), 31 deletions(-) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index df308a75cd..d5ecb77679 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -197,9 +197,6 @@ static inline shr_handle_t get_next_handle(void) return x + 1; } -#define mem_sharing_enabled(d) \ - (is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled) - static atomic_t nr_saved_mfns = ATOMIC_INIT(0); static atomic_t nr_shared_mfns = ATOMIC_INIT(0); @@ -1300,6 +1297,7 @@ private_page_found: int relinquish_shared_pages(struct domain *d) { int rc = 0; + struct mem_sharing_domain *msd = &d->arch.hvm.mem_sharing; struct p2m_domain *p2m = p2m_get_hostp2m(d); unsigned long gfn, count = 0; @@ -1307,7 +1305,7 @@ int relinquish_shared_pages(struct domain *d) return 0; p2m_lock(p2m); - for ( gfn = p2m->next_shared_gfn_to_relinquish; + for ( gfn = msd->next_shared_gfn_to_relinquish; gfn <= p2m->max_mapped_pfn; gfn++ ) { p2m_access_t a; @@ -1342,7 +1340,7 @@ int relinquish_shared_pages(struct domain *d) { if ( hypercall_preempt_check() ) { - p2m->next_shared_gfn_to_relinquish = gfn + 1; + msd->next_shared_gfn_to_relinquish = gfn + 1; rc = -ERESTART; break; } @@ -1428,7 +1426,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) /* Only HAP is supported */ rc = -ENODEV; - if ( !hap_enabled(d) || !d->arch.hvm.mem_sharing_enabled ) + if ( !mem_sharing_enabled(d) ) goto out; switch ( mso.op ) @@ -1437,10 +1435,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) { shr_handle_t handle; - rc = -EINVAL; - if ( !mem_sharing_enabled(d) ) - goto out; 
- rc = nominate_page(d, _gfn(mso.u.nominate.u.gfn), 0, &handle); mso.u.nominate.handle = handle; } @@ -1452,9 +1446,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) gfn_t gfn; shr_handle_t handle; - rc = -EINVAL; - if ( !mem_sharing_enabled(d) ) - goto out; rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &gfn, NULL); if ( rc < 0 ) goto out; @@ -1470,10 +1461,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) struct domain *cd; shr_handle_t sh, ch; - rc = -EINVAL; - if ( !mem_sharing_enabled(d) ) - goto out; - rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain, &cd); if ( rc ) @@ -1540,10 +1527,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) struct domain *cd; shr_handle_t sh; - rc = -EINVAL; - if ( !mem_sharing_enabled(d) ) - goto out; - rc = rcu_lock_live_remote_domain_by_id(mso.u.share.client_domain, &cd); if ( rc ) @@ -1602,9 +1585,6 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) mso.u.range.opaque > mso.u.range.last_gfn) ) goto out; - if ( !mem_sharing_enabled(d) ) - goto out; - rc = rcu_lock_live_remote_domain_by_id(mso.u.range.client_domain, &cd); if ( rc ) @@ -1708,7 +1688,7 @@ int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec) if ( unlikely(has_iommu_pt(d) && mec->u.enable) ) rc = -EXDEV; else - d->arch.hvm.mem_sharing_enabled = mec->u.enable; + d->arch.hvm.mem_sharing.enabled = mec->u.enable; } break; diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c index d28f17af75..12e24b58fd 100644 --- a/xen/drivers/passthrough/pci.c +++ b/xen/drivers/passthrough/pci.c @@ -1482,7 +1482,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag) /* Prevent device assign if mem paging or mem sharing have been * enabled for this domain */ - if ( unlikely(d->arch.hvm.mem_sharing_enabled || + if ( unlikely(mem_sharing_enabled(d) || vm_event_check_ring(d->vm_event_paging) 
|| p2m_get_hostp2m(d)->global_logdirty) ) return -EXDEV; diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h index bcc5621797..d54663b386 100644 --- a/xen/include/asm-x86/hvm/domain.h +++ b/xen/include/asm-x86/hvm/domain.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -156,7 +157,7 @@ struct hvm_domain { struct viridian_domain *viridian; - bool_t mem_sharing_enabled; + bool_t hap_enabled; bool_t qemu_mapcache_invalidate; bool_t is_s3_suspended; @@ -192,6 +193,10 @@ struct hvm_domain { struct vmx_domain vmx; struct svm_domain svm; }; + +#ifdef CONFIG_MEM_SHARING + struct mem_sharing_domain mem_sharing; +#endif }; #endif /* __ASM_X86_HVM_DOMAIN_H__ */ diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h index 8deb0ceee5..57db72376b 100644 --- a/xen/include/asm-x86/mem_sharing.h +++ b/xen/include/asm-x86/mem_sharing.h @@ -26,6 +26,20 @@ #ifdef CONFIG_MEM_SHARING +struct mem_sharing_domain +{ + bool enabled; + + /* + * When releasing shared gfn's in a preemptible manner, recall where + * to resume the search. + */ + unsigned long next_shared_gfn_to_relinquish; +}; + +#define mem_sharing_enabled(d) \ + (hap_enabled(d) && (d)->arch.hvm.mem_sharing.enabled) + /* Auditing of memory sharing code? 
*/ #ifndef NDEBUG #define MEM_SHARING_AUDIT 1 @@ -105,6 +119,8 @@ int relinquish_shared_pages(struct domain *d); #else +#define mem_sharing_enabled(d) false + static inline unsigned int mem_sharing_get_nr_saved_mfns(void) { return 0; diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index 7399c4a897..8defa90306 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -305,10 +305,6 @@ struct p2m_domain { unsigned long min_remapped_gfn; unsigned long max_remapped_gfn; - /* When releasing shared gfn's in a preemptible manner, recall where - * to resume the search */ - unsigned long next_shared_gfn_to_relinquish; - #ifdef CONFIG_HVM /* Populate-on-demand variables * All variables are protected with the pod lock. We cannot rely on From patchwork Wed Sep 25 15:48:47 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160973 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id D475017D4 for ; Wed, 25 Sep 2019 15:51:00 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id BA52421D7B for ; Wed, 25 Sep 2019 15:51:00 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org BA52421D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Y0-0000sV-CD; Wed, 25 Sep 2019 15:49:44 +0000 Received: from all-amaz-eas1.inumbo.com ([34.197.232.57] helo=us1-amaz-eas2.inumbo.com) by 
lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xy-0000rO-TX for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:42 +0000 X-Inumbo-ID: 12ae8a4c-dfac-11e9-9637-12813bfff9fa Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 12ae8a4c-dfac-11e9-9637-12813bfff9fa; Wed, 25 Sep 2019 15:49:35 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:33 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812668" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:33 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:47 -0700 Message-Id: <8e1eedcf984b7948683fe2a2cf5cb1d2b6c5a674.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 09/18] x86/mem_sharing: Use INVALID_MFN and p2m_is_shared in relinquish_shared_pages X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" While using _mfn(0) is of no consequence during teardown, INVALID_MFN is the correct value that should be used. 
Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/mem_sharing.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index d5ecb77679..60057fd3ed 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -1317,7 +1317,7 @@ int relinquish_shared_pages(struct domain *d) break; mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, NULL, NULL); - if ( mfn_valid(mfn) && t == p2m_ram_shared ) + if ( mfn_valid(mfn) && p2m_is_shared(t) ) { /* Does not fail with ENOMEM given the DESTROY flag */ BUG_ON(__mem_sharing_unshare_page(d, gfn, @@ -1327,7 +1327,7 @@ int relinquish_shared_pages(struct domain *d) * unshare. Must succeed: we just read the old entry and * we hold the p2m lock. */ - set_rc = p2m->set_entry(p2m, _gfn(gfn), _mfn(0), PAGE_ORDER_4K, + set_rc = p2m->set_entry(p2m, _gfn(gfn), INVALID_MFN, PAGE_ORDER_4K, p2m_invalid, p2m_access_rwx, -1); ASSERT(!set_rc); count += 0x10; From patchwork Wed Sep 25 15:48:48 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11161001 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id CE4B8924 for ; Wed, 25 Sep 2019 15:51:36 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id B497921D7A for ; Wed, 25 Sep 2019 15:51:36 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org B497921D7A Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] 
helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YS-0001qR-AB; Wed, 25 Sep 2019 15:50:12 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YR-0001oQ-OA for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:50:11 +0000 X-Inumbo-ID: 12b7d296-dfac-11e9-8628-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 12b7d296-dfac-11e9-8628-bc764e2007e4; Wed, 25 Sep 2019 15:49:35 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:34 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812674" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:33 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:48 -0700 Message-Id: <03a38e7b047b7a66c63935b7a7f2a9c697198c09.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 10/18] x86/mem_sharing: Make add_to_physmap static and shorten name X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" It's not being called from outside mem_sharing.c Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/mem_sharing.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git 
a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index 60057fd3ed..bb6a44413b 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -1064,8 +1064,9 @@ err_out: return ret; } -int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh, - struct domain *cd, unsigned long cgfn, bool lock) +static +int add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh, + struct domain *cd, unsigned long cgfn, bool lock) { struct page_info *spage; int ret = -EINVAL; @@ -1558,7 +1559,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) sh = mso.u.share.source_handle; cgfn = mso.u.share.client_gfn; - rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn, true); + rc = add_to_physmap(d, sgfn, sh, cd, cgfn, true); rcu_unlock_domain(cd); } From patchwork Wed Sep 25 15:48:49 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160977 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id D501F924 for ; Wed, 25 Sep 2019 15:51:06 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id BACB821D7B for ; Wed, 25 Sep 2019 15:51:06 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org BACB821D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Y5-0000wK-2N; Wed, 25 Sep 2019 15:49:49 +0000 Received: from 
all-amaz-eas1.inumbo.com ([34.197.232.57] helo=us1-amaz-eas2.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Y3-0000v7-W6 for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:48 +0000 X-Inumbo-ID: 12ae8a4e-dfac-11e9-9637-12813bfff9fa Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 12ae8a4e-dfac-11e9-9637-12813bfff9fa; Wed, 25 Sep 2019 15:49:36 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:35 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812679" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:34 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:49 -0700 Message-Id: <4a503c85061b8fc76736047c1818e2efdd9f6042.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 11/18] x86/mem_sharing: Convert MEM_SHARING_DESTROY_GFN to a bool X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , Konrad Rzeszutek Wilk , George Dunlap , Andrew Cooper , Ian Jackson , Tim Deegan , Julien Grall , Stefano Stabellini , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" MEM_SHARING_DESTROY_GFN is used on the 'flags' bitfield during unsharing. However, the bitfield is not used for anything else, so just convert it to a bool instead. 
Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/mem_sharing.c | 7 +++---- xen/arch/x86/mm/p2m.c | 1 + xen/common/memory.c | 2 +- xen/include/asm-x86/mem_sharing.h | 5 ++--- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index bb6a44413b..157a3a1303 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -1175,7 +1175,7 @@ err_out: */ int __mem_sharing_unshare_page(struct domain *d, unsigned long gfn, - uint16_t flags) + bool destroy) { p2m_type_t p2mt; mfn_t mfn; @@ -1231,7 +1231,7 @@ int __mem_sharing_unshare_page(struct domain *d, * If the GFN is getting destroyed drop the references to MFN * (possibly freeing the page), and exit early. */ - if ( flags & MEM_SHARING_DESTROY_GFN ) + if ( destroy ) { if ( !last_gfn ) mem_sharing_gfn_destroy(page, d, gfn_info); @@ -1321,8 +1321,7 @@ int relinquish_shared_pages(struct domain *d) if ( mfn_valid(mfn) && p2m_is_shared(t) ) { /* Does not fail with ENOMEM given the DESTROY flag */ - BUG_ON(__mem_sharing_unshare_page(d, gfn, - MEM_SHARING_DESTROY_GFN)); + BUG_ON(__mem_sharing_unshare_page(d, gfn, true)); /* * Clear out the p2m entry so no one else may try to * unshare. 
Must succeed: we just read the old entry and diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index 3d27c6c91a..aee0347785 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -512,6 +512,7 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l, */ if ( mem_sharing_unshare_page(p2m->domain, gfn_l) < 0 ) mem_sharing_notify_enomem(p2m->domain, gfn_l, false); + mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL); } diff --git a/xen/common/memory.c b/xen/common/memory.c index d5aff83f2d..04bcd84a15 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -352,7 +352,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn) * might be the only one using this shared page, and we need to * trigger proper cleanup. Once done, this is like any other page. */ - rc = mem_sharing_unshare_page(d, gmfn, 0); + rc = mem_sharing_unshare_page(d, gmfn); if ( rc ) { mem_sharing_notify_enomem(d, gmfn, false); diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h index 57db72376b..18302adbfa 100644 --- a/xen/include/asm-x86/mem_sharing.h +++ b/xen/include/asm-x86/mem_sharing.h @@ -76,17 +76,16 @@ struct page_sharing_info unsigned int mem_sharing_get_nr_saved_mfns(void); unsigned int mem_sharing_get_nr_shared_mfns(void); -#define MEM_SHARING_DESTROY_GFN (1<<1) /* Only fails with -ENOMEM. Enforce it with a BUG_ON wrapper. 
*/ int __mem_sharing_unshare_page(struct domain *d, unsigned long gfn, - uint16_t flags); + bool destroy); static inline int mem_sharing_unshare_page(struct domain *d, unsigned long gfn) { - int rc = __mem_sharing_unshare_page(d, gfn, 0); + int rc = __mem_sharing_unshare_page(d, gfn, false); BUG_ON( rc && (rc != -ENOMEM) ); return rc; } From patchwork Wed Sep 25 15:48:50 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160979 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 0CBBA17D4 for ; Wed, 25 Sep 2019 15:51:07 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id E713721D7B for ; Wed, 25 Sep 2019 15:51:06 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org E713721D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YX-0001xS-Lq; Wed, 25 Sep 2019 15:50:17 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YW-0001wd-Oa for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:50:16 +0000 X-Inumbo-ID: 13500822-dfac-11e9-bf31-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 13500822-dfac-11e9-bf31-bc764e2007e4; Wed, 25 Sep 2019 15:49:36 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from 
orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:35 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812682" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:35 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:50 -0700 Message-Id: <0312d1c6313d753e5e72050c49fb4aa5c4e05353.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 12/18] x86/mem_sharing: Replace MEM_SHARING_DEBUG with gdprintk X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" Using XENLOG_ERR level since this is only used in debug paths (ie. it's expected the user already has loglvl=all set). Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/mem_sharing.c | 81 ++++++++++++++++++----------------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index 157a3a1303..a7b9665c8b 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -49,9 +49,6 @@ typedef struct pg_lock_data { static DEFINE_PER_CPU(pg_lock_data_t, __pld); -#define MEM_SHARING_DEBUG(_f, _a...) 
\ - debugtrace_printk("mem_sharing_debug: %s(): " _f, __func__, ##_a) - /* Reverse map defines */ #define RMAP_HASHTAB_ORDER 0 #define RMAP_HASHTAB_SIZE \ @@ -491,8 +488,9 @@ static int audit(void) /* If we can't lock it, it's definitely not a shared page */ if ( !mem_sharing_page_lock(pg) ) { - MEM_SHARING_DEBUG("mfn %lx in audit list, but cannot be locked (%lx)!\n", - mfn_x(mfn), pg->u.inuse.type_info); + gdprintk(XENLOG_ERR, + "mfn %lx in audit list, but cannot be locked (%lx)!\n", + mfn_x(mfn), pg->u.inuse.type_info); errors++; continue; } @@ -500,8 +498,9 @@ static int audit(void) /* Check if the MFN has correct type, owner and handle. */ if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_shared_page ) { - MEM_SHARING_DEBUG("mfn %lx in audit list, but not PGT_shared_page (%lx)!\n", - mfn_x(mfn), pg->u.inuse.type_info & PGT_type_mask); + gdprintk(XENLOG_ERR, + "mfn %lx in audit list, but not PGT_shared_page (%lx)!\n", + mfn_x(mfn), pg->u.inuse.type_info & PGT_type_mask); errors++; continue; } @@ -509,24 +508,24 @@ static int audit(void) /* Check the page owner. 
*/ if ( page_get_owner(pg) != dom_cow ) { - MEM_SHARING_DEBUG("mfn %lx shared, but wrong owner (%hu)!\n", - mfn_x(mfn), page_get_owner(pg)->domain_id); + gdprintk(XENLOG_ERR, "mfn %lx shared, but wrong owner (%hu)!\n", + mfn_x(mfn), page_get_owner(pg)->domain_id); errors++; } /* Check the m2p entry */ if ( !SHARED_M2P(get_gpfn_from_mfn(mfn_x(mfn))) ) { - MEM_SHARING_DEBUG("mfn %lx shared, but wrong m2p entry (%lx)!\n", - mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn))); + gdprintk(XENLOG_ERR, "mfn %lx shared, but wrong m2p entry (%lx)!\n", + mfn_x(mfn), get_gpfn_from_mfn(mfn_x(mfn))); errors++; } /* Check we have a list */ if ( (!pg->sharing) || !rmap_has_entries(pg) ) { - MEM_SHARING_DEBUG("mfn %lx shared, but empty gfn list!\n", - mfn_x(mfn)); + gdprintk(XENLOG_ERR, "mfn %lx shared, but empty gfn list!\n", + mfn_x(mfn)); errors++; continue; } @@ -545,24 +544,26 @@ static int audit(void) d = get_domain_by_id(g->domain); if ( d == NULL ) { - MEM_SHARING_DEBUG("Unknown dom: %hu, for PFN=%lx, MFN=%lx\n", - g->domain, g->gfn, mfn_x(mfn)); + gdprintk(XENLOG_ERR, + "Unknown dom: %hu, for PFN=%lx, MFN=%lx\n", + g->domain, g->gfn, mfn_x(mfn)); errors++; continue; } o_mfn = get_gfn_query_unlocked(d, g->gfn, &t); if ( !mfn_eq(o_mfn, mfn) ) { - MEM_SHARING_DEBUG("Incorrect P2M for d=%hu, PFN=%lx." - "Expecting MFN=%lx, got %lx\n", - g->domain, g->gfn, mfn_x(mfn), mfn_x(o_mfn)); + gdprintk(XENLOG_ERR, "Incorrect P2M for d=%hu, PFN=%lx." + "Expecting MFN=%lx, got %lx\n", + g->domain, g->gfn, mfn_x(mfn), mfn_x(o_mfn)); errors++; } if ( t != p2m_ram_shared ) { - MEM_SHARING_DEBUG("Incorrect P2M type for d=%hu, PFN=%lx MFN=%lx." - "Expecting t=%d, got %d\n", - g->domain, g->gfn, mfn_x(mfn), p2m_ram_shared, t); + gdprintk(XENLOG_ERR, + "Incorrect P2M type for d=%hu, PFN=%lx MFN=%lx." 
+ "Expecting t=%d, got %d\n", + g->domain, g->gfn, mfn_x(mfn), p2m_ram_shared, t); errors++; } put_domain(d); @@ -571,10 +572,10 @@ static int audit(void) /* The type count has an extra ref because we have locked the page */ if ( (nr_gfns + 1) != (pg->u.inuse.type_info & PGT_count_mask) ) { - MEM_SHARING_DEBUG("Mismatched counts for MFN=%lx." - "nr_gfns in list %lu, in type_info %lx\n", - mfn_x(mfn), nr_gfns, - (pg->u.inuse.type_info & PGT_count_mask)); + gdprintk(XENLOG_ERR, "Mismatched counts for MFN=%lx." + "nr_gfns in list %lu, in type_info %lx\n", + mfn_x(mfn), nr_gfns, + (pg->u.inuse.type_info & PGT_count_mask)); errors++; } @@ -585,8 +586,8 @@ static int audit(void) if ( count_found != count_expected ) { - MEM_SHARING_DEBUG("Expected %ld shared mfns, found %ld.", - count_expected, count_found); + gdprintk(XENLOG_ERR, "Expected %ld shared mfns, found %ld.", + count_expected, count_found); errors++; } @@ -765,12 +766,12 @@ static int debug_mfn(mfn_t mfn) return -EINVAL; } - MEM_SHARING_DEBUG( - "Debug page: MFN=%lx is ci=%lx, ti=%lx, owner_id=%d\n", - mfn_x(page_to_mfn(page)), - page->count_info, - page->u.inuse.type_info, - page_get_owner(page)->domain_id); + gdprintk(XENLOG_ERR, + "Debug page: MFN=%lx is ci=%lx, ti=%lx, owner_id=%d\n", + mfn_x(page_to_mfn(page)), + page->count_info, + page->u.inuse.type_info, + page_get_owner(page)->domain_id); /* -1 because the page is locked and that's an additional type ref */ num_refs = ((int) (page->u.inuse.type_info & PGT_count_mask)) - 1; @@ -786,8 +787,9 @@ static int debug_gfn(struct domain *d, gfn_t gfn) mfn = get_gfn_query(d, gfn_x(gfn), &p2mt); - MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n", - d->domain_id, gfn_x(gfn)); + gdprintk(XENLOG_ERR, "Debug for dom%d, gfn=%" PRI_gfn "\n", + d->domain_id, gfn_x(gfn)); + num_refs = debug_mfn(mfn); put_gfn(d, gfn_x(gfn)); @@ -803,14 +805,13 @@ static int debug_gref(struct domain *d, grant_ref_t ref) rc = mem_sharing_gref_to_gfn(d->grant_table, ref, &gfn, 
&status); if ( rc ) { - MEM_SHARING_DEBUG("Asked to debug [dom=%d,gref=%u]: error %d.\n", - d->domain_id, ref, rc); + gdprintk(XENLOG_ERR, "Asked to debug [dom=%d,gref=%u]: error %d.\n", + d->domain_id, ref, rc); return rc; } - MEM_SHARING_DEBUG( - "==> Grant [dom=%d,ref=%d], status=%x. ", - d->domain_id, ref, status); + gdprintk(XENLOG_ERR, " ==> Grant [dom=%d,ref=%d], status=%x. ", + d->domain_id, ref, status); return debug_gfn(d, gfn); } From patchwork Wed Sep 25 15:48:51 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160989 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id C765717D4 for ; Wed, 25 Sep 2019 15:51:14 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id ADA5121D7A for ; Wed, 25 Sep 2019 15:51:14 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org ADA5121D7A Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Yd-00023J-6M; Wed, 25 Sep 2019 15:50:23 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Yb-000220-Op for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:50:21 +0000 X-Inumbo-ID: 13934be6-dfac-11e9-8628-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 13934be6-dfac-11e9-8628-bc764e2007e4; Wed, 25 Sep 2019 15:49:36 
+0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:36 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812688" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:35 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:51 -0700 Message-Id: <92649106a0d6651ebc8cf42a381418a9c1c8890c.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 13/18] x86/mem_sharing: ASSERT that p2m_set_entry succeeds X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/mem_sharing.c | 46 +++++++++++++++++------------------ 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index a7b9665c8b..99f24fcf6c 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -1113,39 +1113,37 @@ int add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh, goto err_unlock; } + /* + * Must succeed, we just read the entry and hold the p2m lock + * via get_two_gfns. 
+ */ ret = p2m_set_entry(p2m, _gfn(cgfn), smfn, PAGE_ORDER_4K, p2m_ram_shared, a); + ASSERT(!ret); - /* Tempted to turn this into an assert */ - if ( ret ) + /* + * There is a chance we're plugging a hole where a paged out + * page was. + */ + if ( p2m_is_paging(cmfn_type) && (cmfn_type != p2m_ram_paging_out) ) { - mem_sharing_gfn_destroy(spage, cd, gfn_info); - put_page_and_type(spage); - } else { + atomic_dec(&cd->paged_pages); /* - * There is a chance we're plugging a hole where a paged out - * page was. + * Further, there is a chance this was a valid page. + * Don't leak it. */ - if ( p2m_is_paging(cmfn_type) && (cmfn_type != p2m_ram_paging_out) ) + if ( mfn_valid(cmfn) ) { - atomic_dec(&cd->paged_pages); - /* - * Further, there is a chance this was a valid page. - * Don't leak it. - */ - if ( mfn_valid(cmfn) ) - { - struct page_info *cpage = mfn_to_page(cmfn); + struct page_info *cpage = mfn_to_page(cmfn); - if ( !get_page(cpage, cd) ) - { - domain_crash(cd); - ret = -EOVERFLOW; - goto err_unlock; - } - put_page_alloc_ref(cpage); - put_page(cpage); + if ( !get_page(cpage, cd) ) + { + domain_crash(cd); + ret = -EOVERFLOW; + goto err_unlock; } + put_page_alloc_ref(cpage); + put_page(cpage); } } From patchwork Wed Sep 25 15:48:52 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160999 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id D3957924 for ; Wed, 25 Sep 2019 15:51:34 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id B93BB21D7A for ; Wed, 25 Sep 2019 15:51:34 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org B93BB21D7A 
Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Yh-00027m-Il; Wed, 25 Sep 2019 15:50:27 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Yg-00026y-Nt for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:50:26 +0000 X-Inumbo-ID: 13f17c02-dfac-11e9-bf31-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 13f17c02-dfac-11e9-bf31-bc764e2007e4; Wed, 25 Sep 2019 15:49:37 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:37 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812693" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:36 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:52 -0700 Message-Id: <31f87866d0f24657ce12e6ca32c8a552639cb34d.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 14/18] x86/mem_sharing: Enable mem_sharing on first memop X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: 
xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" It is wasteful to require separate hypercalls to enable sharing on both the parent and the client domain during VM forking. To speed things up we enable sharing on the first memop in case it wasn't already enabled. Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/mem_sharing.c | 39 +++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index 99f24fcf6c..65aa64be99 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -1402,6 +1402,24 @@ static int range_share(struct domain *d, struct domain *cd, return rc; } +static inline int mem_sharing_control(struct domain *d, bool enable) +{ + if ( enable ) + { + if ( unlikely(!is_hvm_domain(d)) ) + return -ENOSYS; + + if ( unlikely(!hap_enabled(d)) ) + return -ENODEV; + + if ( unlikely(has_iommu_pt(d)) ) + return -EXDEV; + } + + d->arch.hvm.mem_sharing.enabled = enable; + return 0; +} + int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) { int rc; @@ -1423,10 +1441,8 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) if ( rc ) goto out; - /* Only HAP is supported */ - rc = -ENODEV; - if ( !mem_sharing_enabled(d) ) - goto out; + if ( !mem_sharing_enabled(d) && (rc = mem_sharing_control(d, true)) ) + return rc; switch ( mso.op ) { @@ -1675,24 +1691,15 @@ int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec) { int rc; - /* Only HAP is supported */ - if ( !hap_enabled(d) ) - return -ENODEV; - switch(mec->op) { case XEN_DOMCTL_MEM_SHARING_CONTROL: - { - rc = 0; - if ( unlikely(has_iommu_pt(d) && mec->u.enable) ) - rc = -EXDEV; - else - d->arch.hvm.mem_sharing.enabled = mec->u.enable; - } - break; + rc = mem_sharing_control(d, mec->u.enable); + break; default: rc = -ENOSYS; + break; } return rc; From patchwork Wed Sep 25 15:48:53 2019 Content-Type: text/plain; 
charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160991 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 984D3924 for ; Wed, 25 Sep 2019 15:51:15 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 7E55321D7A for ; Wed, 25 Sep 2019 15:51:15 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 7E55321D7A Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YA-00010p-PI; Wed, 25 Sep 2019 15:49:54 +0000 Received: from all-amaz-eas1.inumbo.com ([34.197.232.57] helo=us1-amaz-eas2.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Y8-0000zI-W7 for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:53 +0000 X-Inumbo-ID: 13cf570a-dfac-11e9-9637-12813bfff9fa Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 13cf570a-dfac-11e9-9637-12813bfff9fa; Wed, 25 Sep 2019 15:49:37 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:37 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812705" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:37 
-0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:53 -0700 Message-Id: <17cdac1caa0c29c4d6a32c5d9167d223550ac0e6.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 15/18] x86/mem_sharing: Skip xen heap pages in memshr nominate X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" Trying to share these would fail anyway, better to skip them early. Signed-off-by: Tamas K Lengyel --- xen/arch/x86/mm/mem_sharing.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index 65aa64be99..d35b7eb138 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -852,6 +852,11 @@ static int nominate_page(struct domain *d, gfn_t gfn, if ( !p2m_is_sharable(p2mt) ) goto out; + /* Skip xen heap pages */ + page = mfn_to_page(mfn); + if ( !page || is_xen_heap_page(page) ) + goto out; + /* Check if there are mem_access/remapped altp2m entries for this page */ if ( altp2m_active(d) ) { @@ -882,7 +887,6 @@ static int nominate_page(struct domain *d, gfn_t gfn, } /* Try to convert the mfn to the sharable type */ - page = mfn_to_page(mfn); ret = page_make_sharable(d, page, expected_refcnt); if ( ret ) goto out; From patchwork Wed Sep 25 15:48:54 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160993 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by 
pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id C5D4C1747 for ; Wed, 25 Sep 2019 15:51:27 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id AA78D21D7A for ; Wed, 25 Sep 2019 15:51:27 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org AA78D21D7A Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Yn-0002DT-0L; Wed, 25 Sep 2019 15:50:33 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Yl-0002CM-Of for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:50:31 +0000 X-Inumbo-ID: 14936df0-dfac-11e9-8628-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 14936df0-dfac-11e9-8628-bc764e2007e4; Wed, 25 Sep 2019 15:49:38 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:38 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812709" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:37 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:54 -0700 Message-Id: X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 16/18] x86/mem_sharing: check 
page type count earlier X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Tamas K Lengyel , Tamas K Lengyel , Wei Liu , George Dunlap , Andrew Cooper , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" --- xen/arch/x86/mm/mem_sharing.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index d35b7eb138..f54969bcad 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -649,19 +649,18 @@ static int page_make_sharable(struct domain *d, return -EBUSY; } - /* Change page type and count atomically */ - if ( !get_page_and_type(page, d, PGT_shared_page) ) + /* Check if page is already typed and bail early if it is */ + if ( (page->u.inuse.type_info & PGT_count_mask) != 1 ) { spin_unlock(&d->page_alloc_lock); - return -EINVAL; + return -EEXIST; } - /* Check it wasn't already sharable and undo if it was */ - if ( (page->u.inuse.type_info & PGT_count_mask) != 1 ) + /* Change page type and count atomically */ + if ( !get_page_and_type(page, d, PGT_shared_page) ) { spin_unlock(&d->page_alloc_lock); - put_page_and_type(page); - return -EEXIST; + return -EINVAL; } /* From patchwork Wed Sep 25 15:48:55 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11161003 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id AD8B31747 for ; Wed, 25 Sep 2019 15:52:13 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by 
mail.kernel.org (Postfix) with ESMTPS id 87C0321D7A for ; Wed, 25 Sep 2019 15:52:13 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 87C0321D7A Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Ys-0002Ij-Cs; Wed, 25 Sep 2019 15:50:38 +0000 Received: from us1-rack-iad1.inumbo.com ([172.99.69.81]) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Yq-0002HV-Pa for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:50:36 +0000 X-Inumbo-ID: 15334a78-dfac-11e9-8628-bc764e2007e4 Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 15334a78-dfac-11e9-8628-bc764e2007e4; Wed, 25 Sep 2019 15:49:39 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:39 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812716" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:38 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:55 -0700 Message-Id: X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 17/18] xen/mem_sharing: VM forking X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Stefano Stabellini , Tamas K Lengyel , Wei Liu , Konrad Rzeszutek Wilk , George Dunlap , Andrew Cooper , Ian 
Jackson , Tim Deegan , Julien Grall , Tamas K Lengyel , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" VM forking is the process of creating a domain with an empty memory space and a parent domain specified from which to populate the memory when necessary. For the new domain to be functional the VM state is copied over as part of the fork operation (HVM params, hap allocation, etc). Signed-off-by: Tamas K Lengyel --- xen/arch/x86/hvm/hvm.c | 2 +- xen/arch/x86/mm/mem_sharing.c | 235 ++++++++++++++++++++++++++++++ xen/arch/x86/mm/p2m.c | 11 +- xen/include/asm-x86/mem_sharing.h | 20 ++- xen/include/public/memory.h | 5 + xen/include/xen/sched.h | 1 + 6 files changed, 270 insertions(+), 4 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 2af2f936a5..872bd112ba 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -1890,7 +1890,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla, } #endif - /* Spurious fault? PoD and log-dirty also take this path. */ + /* Spurious fault? PoD, log-dirty and VM forking also take this path. */ if ( p2m_is_ram(p2mt) ) { rc = 1; diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c index f54969bcad..64b9723f8c 100644 --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -22,11 +22,13 @@ #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -36,6 +38,9 @@ #include #include #include +#include +#include +#include #include #include "mm-locks.h" @@ -1423,6 +1428,207 @@ static inline int mem_sharing_control(struct domain *d, bool enable) return 0; } +/* + * Forking a page only gets called when the VM faults due to no entry being + * in the EPT for the access. Depending on the type of access we either + * populate the physmap with a shared entry for read-only access or + * fork the page if its a write access. 
+ * + * The client p2m is already locked so we only need to lock + * the parent's here. + */ +int mem_sharing_fork_page(struct domain *d, gfn_t gfn, bool unsharing) +{ + int rc = -ENOENT; + shr_handle_t handle; + struct domain *parent; + struct p2m_domain *p2m; + unsigned long gfn_l = gfn_x(gfn); + mfn_t mfn, new_mfn; + p2m_type_t p2mt; + struct page_info *page; + + if ( !mem_sharing_is_fork(d) ) + return -ENOENT; + + parent = d->parent; + + if ( !unsharing ) + { + /* For read-only accesses we just add a shared entry to the physmap */ + while ( parent ) + { + if ( !(rc = nominate_page(parent, gfn, 0, &handle)) ) + break; + + parent = parent->parent; + } + + if ( !rc ) + { + /* The client's p2m is already locked */ + struct p2m_domain *pp2m = p2m_get_hostp2m(parent); + + p2m_lock(pp2m); + rc = add_to_physmap(parent, gfn_l, handle, d, gfn_l, false); + p2m_unlock(pp2m); + + if ( !rc ) + return 0; + } + } + + /* + * If it's a write access (ie. unsharing) or if adding a shared entry to + * the physmap failed we'll fork the page directly. 
+ */ + p2m = p2m_get_hostp2m(d); + parent = d->parent; + + while ( parent ) + { + mfn = get_gfn_query(parent, gfn_l, &p2mt); + + if ( mfn_valid(mfn) && p2m_is_any_ram(p2mt) ) + break; + + put_gfn(parent, gfn_l); + parent = parent->parent; + } + + if ( !parent ) + return -ENOENT; + + if ( !(page = alloc_domheap_page(d, 0)) ) + { + put_gfn(parent, gfn_l); + return -ENOMEM; + } + + new_mfn = page_to_mfn(page); + copy_domain_page(new_mfn, mfn); + set_gpfn_from_mfn(mfn_x(new_mfn), gfn_l); + + put_gfn(parent, gfn_l); + + return p2m->set_entry(p2m, gfn, new_mfn, PAGE_ORDER_4K, p2m_ram_rw, + p2m->default_access, -1); +} + +static int bring_up_vcpus(struct domain *cd, struct cpupool *cpupool) +{ + int ret; + unsigned int i, cpu; + cpumask_t *online; + + if ( (ret = cpupool_move_domain(cd, cpupool)) ) + return ret; + + for ( i = 0; i < cd->max_vcpus; i++ ) + { + if ( cd->vcpu[i] ) + continue; + + online = cpupool_domain_cpumask(cd); + + cpu = (i == 0) ? + cpumask_any(online) : + cpumask_cycle(cd->vcpu[i-1]->processor, online); + + if ( !vcpu_create(cd, i, cpu) ) + return -EINVAL; + } + + domain_update_node_affinity(cd); + return 0; +} + +static int fork_hap_allocation(struct domain *d, struct domain *cd) +{ + int rc; + bool preempted; + unsigned long mb = hap_get_allocation(d); + + if ( mb == hap_get_allocation(cd) ) + return 0; + + paging_lock(cd); + rc = hap_set_allocation(cd, mb << (20 - PAGE_SHIFT), &preempted); + paging_unlock(cd); + + if ( rc ) + return rc; + + if ( preempted ) + return -ERESTART; + + return 0; +} + +static int fork_hvm(struct domain *d, struct domain *cd) +{ + int rc, i; + struct hvm_domain_context c = { 0 }; + uint32_t tsc_mode; + uint32_t gtsc_khz; + uint32_t incarnation; + uint64_t elapsed_nsec; + + c.size = hvm_save_size(d); + if ( (c.data = xmalloc_bytes(c.size)) == NULL ) + return -ENOMEM; + + for ( i = 0; i < HVM_NR_PARAMS; i++ ) + { + uint64_t value = 0; + + if ( hvm_get_param(d, i, &value) || !value ) + continue; + + if ( (rc = 
hvm_set_param(cd, i, value)) ) + goto out; + } + + tsc_get_info(d, &tsc_mode, &elapsed_nsec, &gtsc_khz, &incarnation); + tsc_set_info(cd, tsc_mode, elapsed_nsec, gtsc_khz, incarnation); + + if ( (rc = hvm_save(d, &c)) ) + goto out; + + c.cur = 0; + rc = hvm_load(cd, &c); + +out: + xfree(c.data); + return rc; +} + +static int mem_sharing_fork(struct domain *d, struct domain *cd) +{ + int rc; + + if ( !d->controller_pause_count && + (rc = domain_pause_by_systemcontroller(d)) ) + return rc; + + cd->max_pages = d->max_pages; + cd->max_vcpus = d->max_vcpus; + + /* this is preemptible so it's the first to get done */ + if ( (rc = fork_hap_allocation(d, cd)) ) + return rc; + + if ( (rc = bring_up_vcpus(cd, d->cpupool)) ) + return rc; + + if ( (rc = fork_hvm(d, cd)) ) + return rc; + + cd->parent = d; + + return 0; +} + int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) { int rc; @@ -1677,6 +1883,35 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg) rc = debug_gref(d, mso.u.debug.u.gref); break; + case XENMEM_sharing_op_fork: + { + struct domain *pd; + + rc = -EINVAL; + if ( mso.u.fork._pad[0] || mso.u.fork._pad[1] || + mso.u.fork._pad[2] ) + goto out; + + rc = rcu_lock_live_remote_domain_by_id(mso.u.fork.parent_domain, + &pd); + if ( rc ) + break; + + if ( !mem_sharing_enabled(pd) ) + { + if ( (rc = mem_sharing_control(pd, true)) ) + return rc; + } + + rc = mem_sharing_fork(pd, d); + + if ( rc == -ERESTART ) + rc = hypercall_create_continuation(__HYPERVISOR_memory_op, + "lh", XENMEM_sharing_op, + arg); + rcu_unlock_domain(pd); + break; + } default: rc = -ENOSYS; break; diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index aee0347785..97872a7cc4 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -503,6 +503,14 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l, mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL); + /* Check if we need to fork the page */ + if ( (q & P2M_ALLOC) 
&& p2m_is_hole(*t) && + !mem_sharing_fork_page(p2m->domain, gfn, !!(q & P2M_UNSHARE)) ) + { + mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL); + } + + /* Check if we need to unshare the page */ if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) ) { ASSERT(p2m_is_hostp2m(p2m)); @@ -581,7 +589,8 @@ struct page_info *p2m_get_page_from_gfn( return page; /* Error path: not a suitable GFN at all */ - if ( !p2m_is_ram(*t) && !p2m_is_paging(*t) && !p2m_is_pod(*t) ) + if ( !p2m_is_ram(*t) && !p2m_is_paging(*t) && !p2m_is_pod(*t) && + !mem_sharing_is_fork(p2m->domain) ) return NULL; } diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h index 18302adbfa..a5617c87dd 100644 --- a/xen/include/asm-x86/mem_sharing.h +++ b/xen/include/asm-x86/mem_sharing.h @@ -26,8 +26,7 @@ #ifdef CONFIG_MEM_SHARING -struct mem_sharing_domain -{ +struct mem_sharing_domain { bool enabled; /* @@ -40,6 +39,9 @@ struct mem_sharing_domain #define mem_sharing_enabled(d) \ (hap_enabled(d) && (d)->arch.hvm.mem_sharing.enabled) +#define mem_sharing_is_fork(d) \ + (mem_sharing_enabled(d) && !!((d)->parent)) + /* Auditing of memory sharing code? 
*/ #ifndef NDEBUG #define MEM_SHARING_AUDIT 1 @@ -90,6 +92,9 @@ int mem_sharing_unshare_page(struct domain *d, return rc; } +int mem_sharing_fork_page(struct domain *d, gfn_t gfn, + bool unsharing); + /* * If called by a foreign domain, possible errors are * -EBUSY -> ring full @@ -119,6 +124,7 @@ int relinquish_shared_pages(struct domain *d); #else #define mem_sharing_enabled(d) false +#define mem_sharing_is_fork(p2m) false static inline unsigned int mem_sharing_get_nr_saved_mfns(void) { @@ -145,6 +151,16 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn, return -EOPNOTSUPP; } +static inline int mem_sharing_fork(struct domain *d, struct domain *cd, bool vcpu) +{ + return -EOPNOTSUPP; +} + +static inline int mem_sharing_fork_page(struct domain *d, gfn_t gfn, bool lock) +{ + return -EOPNOTSUPP; +} + #endif #endif /* __MEM_SHARING_H__ */ diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h index cfdda6e2a8..90a3f4498e 100644 --- a/xen/include/public/memory.h +++ b/xen/include/public/memory.h @@ -482,6 +482,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t); #define XENMEM_sharing_op_add_physmap 6 #define XENMEM_sharing_op_audit 7 #define XENMEM_sharing_op_range_share 8 +#define XENMEM_sharing_op_fork 9 #define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10) #define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9) @@ -532,6 +533,10 @@ struct xen_mem_sharing_op { uint32_t gref; /* IN: gref to debug */ } u; } debug; + struct mem_sharing_op_fork { + domid_t parent_domain; + uint16_t _pad[3]; /* Must be set to 0 */ + } fork; } u; }; typedef struct xen_mem_sharing_op xen_mem_sharing_op_t; diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h index 2d17c84915..dad6715d14 100644 --- a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -455,6 +455,7 @@ struct domain /* Memory sharing support */ #ifdef CONFIG_MEM_SHARING struct vm_event_domain *vm_event_share; + struct domain *parent; /* VM fork parent */ #endif /* Memory paging support */ 
#ifdef CONFIG_HAS_MEM_PAGING From patchwork Wed Sep 25 15:48:56 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160995 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 3CE451747 for ; Wed, 25 Sep 2019 15:51:29 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 1736F21D7B for ; Wed, 25 Sep 2019 15:51:29 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 1736F21D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YF-00015x-SH; Wed, 25 Sep 2019 15:49:59 +0000 Received: from all-amaz-eas1.inumbo.com ([34.197.232.57] helo=us1-amaz-eas2.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9YD-000147-UH for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:57 +0000 X-Inumbo-ID: 15103845-dfac-11e9-9637-12813bfff9fa Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 15103845-dfac-11e9-9637-12813bfff9fa; Wed, 25 Sep 2019 15:49:40 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:39 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812722" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO 
localhost.localdomain) ([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:39 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:56 -0700 Message-Id: X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 18/18] xen/tools: VM forking toolstack side X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Anthony PERARD , Ian Jackson , Tamas K Lengyel , Wei Liu Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" Add necessary bits to implement "xl fork-vm" and "xl fork-launch-dm" commands. The process is split in two to allow tools needing access to the new VM as fast as possible after it was forked. It is expected that under certain use-cases the second command that launches QEMU will be skipped entirely. Signed-off-by: Tamas K Lengyel --- tools/libxc/include/xenctrl.h | 4 + tools/libxc/xc_memshr.c | 12 ++ tools/libxl/libxl.h | 6 + tools/libxl/libxl_create.c | 212 +++++++++++++++++++++++----------- tools/libxl/libxl_dm.c | 2 +- tools/libxl/libxl_dom.c | 83 ++++++++----- tools/libxl/libxl_internal.h | 1 + tools/libxl/libxl_types.idl | 1 + tools/xl/xl.h | 4 + tools/xl/xl_cmdtable.c | 15 +++ tools/xl/xl_saverestore.c | 69 +++++++++++ tools/xl/xl_vmcontrol.c | 8 ++ 12 files changed, 323 insertions(+), 94 deletions(-) diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h index b7c990aafd..8719893f1b 100644 --- a/tools/libxc/include/xenctrl.h +++ b/tools/libxc/include/xenctrl.h @@ -2221,6 +2221,10 @@ int xc_memshr_range_share(xc_interface *xch, uint64_t first_gfn, uint64_t last_gfn); +int xc_memshr_fork(xc_interface *xch, + uint32_t source_domain, + uint32_t client_domain); + /* Debug calls: return the number of pages referencing the shared frame backing * the input argument. 
Should be one or greater. * diff --git a/tools/libxc/xc_memshr.c b/tools/libxc/xc_memshr.c index 5ef56a6933..1e01140e1d 100644 --- a/tools/libxc/xc_memshr.c +++ b/tools/libxc/xc_memshr.c @@ -237,6 +237,18 @@ int xc_memshr_debug_gref(xc_interface *xch, return xc_memshr_memop(xch, domid, &mso); } +int xc_memshr_fork(xc_interface *xch, uint32_t pdomid, uint32_t domid) +{ + xen_mem_sharing_op_t mso; + + memset(&mso, 0, sizeof(mso)); + + mso.op = XENMEM_sharing_op_fork; + mso.u.fork.parent_domain = pdomid; + + return xc_memshr_memop(xch, domid, &mso); +} + int xc_memshr_audit(xc_interface *xch) { xen_mem_sharing_op_t mso; diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h index 3421e5aa98..1ceda6f7de 100644 --- a/tools/libxl/libxl.h +++ b/tools/libxl/libxl.h @@ -1494,6 +1494,12 @@ int libxl_domain_create_new(libxl_ctx *ctx, libxl_domain_config *d_config, const libxl_asyncop_how *ao_how, const libxl_asyncprogress_how *aop_console_how) LIBXL_EXTERNAL_CALLERS_ONLY; +int libxl_domain_fork_vm(libxl_ctx *ctx, uint32_t pdomid, uint32_t *domid) + LIBXL_EXTERNAL_CALLERS_ONLY; +int libxl_domain_fork_launch_dm(libxl_ctx *ctx, libxl_domain_config *d_config, + uint32_t domid, + const libxl_asyncprogress_how *aop_console_how) + LIBXL_EXTERNAL_CALLERS_ONLY; int libxl_domain_create_restore(libxl_ctx *ctx, libxl_domain_config *d_config, uint32_t *domid, int restore_fd, int send_back_fd, diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c index 93fbe1d740..c38af0a214 100644 --- a/tools/libxl/libxl_create.c +++ b/tools/libxl/libxl_create.c @@ -535,12 +535,12 @@ out: return ret; } -int libxl__domain_make(libxl__gc *gc, libxl_domain_config *d_config, - libxl__domain_build_state *state, - uint32_t *domid) +static int libxl__domain_make_xs_entries(libxl__gc *gc, libxl_domain_config *d_config, + libxl__domain_build_state *state, + uint32_t domid) { libxl_ctx *ctx = libxl__gc_owner(gc); - int ret, rc, nb_vm; + int rc, nb_vm; const char *dom_type; char *uuid_string; char 
*dom_path, *vm_path, *libxl_path; @@ -552,7 +552,6 @@ int libxl__domain_make(libxl__gc *gc, libxl_domain_config *d_config, /* convenience aliases */ libxl_domain_create_info *info = &d_config->c_info; - libxl_domain_build_info *b_info = &d_config->b_info; uuid_string = libxl__uuid2string(gc, info->uuid); if (!uuid_string) { @@ -560,54 +559,7 @@ int libxl__domain_make(libxl__gc *gc, libxl_domain_config *d_config, goto out; } - /* Valid domid here means we're soft resetting. */ - if (!libxl_domid_valid_guest(*domid)) { - struct xen_domctl_createdomain create = { - .ssidref = info->ssidref, - .max_vcpus = b_info->max_vcpus, - .max_evtchn_port = b_info->event_channels, - .max_grant_frames = b_info->max_grant_frames, - .max_maptrack_frames = b_info->max_maptrack_frames, - }; - - if (info->type != LIBXL_DOMAIN_TYPE_PV) { - create.flags |= XEN_DOMCTL_CDF_hvm; - create.flags |= - libxl_defbool_val(info->hap) ? XEN_DOMCTL_CDF_hap : 0; - create.flags |= - libxl_defbool_val(info->oos) ? 0 : XEN_DOMCTL_CDF_oos_off; - } - - /* Ultimately, handle is an array of 16 uint8_t, same as uuid */ - libxl_uuid_copy(ctx, (libxl_uuid *)&create.handle, &info->uuid); - - ret = libxl__arch_domain_prepare_config(gc, d_config, &create); - if (ret < 0) { - LOGED(ERROR, *domid, "fail to get domain config"); - rc = ERROR_FAIL; - goto out; - } - - ret = xc_domain_create(ctx->xch, domid, &create); - if (ret < 0) { - LOGED(ERROR, *domid, "domain creation fail"); - rc = ERROR_FAIL; - goto out; - } - - rc = libxl__arch_domain_save_config(gc, d_config, state, &create); - if (rc < 0) - goto out; - } - - ret = xc_cpupool_movedomain(ctx->xch, info->poolid, *domid); - if (ret < 0) { - LOGED(ERROR, *domid, "domain move fail"); - rc = ERROR_FAIL; - goto out; - } - - dom_path = libxl__xs_get_dompath(gc, *domid); + dom_path = libxl__xs_get_dompath(gc, domid); if (!dom_path) { rc = ERROR_FAIL; goto out; @@ -615,12 +567,12 @@ int libxl__domain_make(libxl__gc *gc, libxl_domain_config *d_config, vm_path = 
GCSPRINTF("/vm/%s", uuid_string); if (!vm_path) { - LOGD(ERROR, *domid, "cannot allocate create paths"); + LOGD(ERROR, domid, "cannot allocate create paths"); rc = ERROR_FAIL; goto out; } - libxl_path = libxl__xs_libxl_path(gc, *domid); + libxl_path = libxl__xs_libxl_path(gc, domid); if (!libxl_path) { rc = ERROR_FAIL; goto out; @@ -631,10 +583,10 @@ int libxl__domain_make(libxl__gc *gc, libxl_domain_config *d_config, roperm[0].id = 0; roperm[0].perms = XS_PERM_NONE; - roperm[1].id = *domid; + roperm[1].id = domid; roperm[1].perms = XS_PERM_READ; - rwperm[0].id = *domid; + rwperm[0].id = domid; rwperm[0].perms = XS_PERM_NONE; retry_transaction: @@ -652,7 +604,7 @@ retry_transaction: noperm, ARRAY_SIZE(noperm)); xs_write(ctx->xsh, t, GCSPRINTF("%s/vm", dom_path), vm_path, strlen(vm_path)); - rc = libxl__domain_rename(gc, *domid, 0, info->name, t); + rc = libxl__domain_rename(gc, domid, 0, info->name, t); if (rc) goto out; @@ -729,7 +681,7 @@ retry_transaction: vm_list = libxl_list_vm(ctx, &nb_vm); if (!vm_list) { - LOGD(ERROR, *domid, "cannot get number of running guests"); + LOGD(ERROR, domid, "cannot get number of running guests"); rc = ERROR_FAIL; goto out; } @@ -753,7 +705,7 @@ retry_transaction: t = 0; goto retry_transaction; } - LOGED(ERROR, *domid, "domain creation ""xenstore transaction commit failed"); + LOGED(ERROR, domid, "domain creation ""xenstore transaction commit failed"); rc = ERROR_FAIL; goto out; } @@ -765,6 +717,70 @@ retry_transaction: return rc; } +int libxl__domain_make(libxl__gc *gc, libxl_domain_config *d_config, + libxl__domain_build_state *state, + uint32_t *domid) +{ + libxl_ctx *ctx = libxl__gc_owner(gc); + int ret, rc; + + /* convenience aliases */ + libxl_domain_create_info *info = &d_config->c_info; + libxl_domain_build_info *b_info = &d_config->b_info; + + /* Valid domid here means we're soft resetting. 
*/ + if (!libxl_domid_valid_guest(*domid)) { + struct xen_domctl_createdomain create = { + .ssidref = info->ssidref, + .max_vcpus = b_info->max_vcpus, + .max_evtchn_port = b_info->event_channels, + .max_grant_frames = b_info->max_grant_frames, + .max_maptrack_frames = b_info->max_maptrack_frames, + }; + + if (info->type != LIBXL_DOMAIN_TYPE_PV) { + create.flags |= XEN_DOMCTL_CDF_hvm; + create.flags |= + libxl_defbool_val(info->hap) ? XEN_DOMCTL_CDF_hap : 0; + create.flags |= + libxl_defbool_val(info->oos) ? 0 : XEN_DOMCTL_CDF_oos_off; + } + + /* Ultimately, handle is an array of 16 uint8_t, same as uuid */ + libxl_uuid_copy(ctx, (libxl_uuid *)&create.handle, &info->uuid); + + ret = libxl__arch_domain_prepare_config(gc, d_config, &create); + if (ret < 0) { + LOGED(ERROR, *domid, "fail to get domain config"); + rc = ERROR_FAIL; + goto out; + } + + ret = xc_domain_create(ctx->xch, domid, &create); + if (ret < 0) { + LOGED(ERROR, *domid, "domain creation fail"); + rc = ERROR_FAIL; + goto out; + } + + rc = libxl__arch_domain_save_config(gc, d_config, state, &create); + if (rc < 0) + goto out; + } + + ret = xc_cpupool_movedomain(ctx->xch, info->poolid, *domid); + if (ret < 0) { + LOGED(ERROR, *domid, "domain move fail"); + rc = ERROR_FAIL; + goto out; + } + + rc = libxl__domain_make_xs_entries(gc, d_config, state, *domid); + +out: + return rc; +} + static int store_libxl_entry(libxl__gc *gc, uint32_t domid, libxl_domain_build_info *b_info) { @@ -985,15 +1001,31 @@ static void initiate_domain_create(libxl__egc *egc, goto error_out; } - ret = libxl__domain_make(gc, d_config, &dcs->build_state, &domid); - if (ret) { - LOGD(ERROR, domid, "cannot make domain: %d", ret); + if ( !d_config->dm_restore_file ) + { + ret = libxl__domain_make(gc, d_config, &dcs->build_state, &domid); dcs->guest_domid = domid; + + if (ret) { + LOGD(ERROR, domid, "cannot make domain: %d", ret); + ret = ERROR_FAIL; + goto error_out; + } + } else if ( dcs->guest_domid != INVALID_DOMID ) { + domid = 
dcs->guest_domid; + + ret = libxl__domain_make_xs_entries(gc, d_config, &dcs->build_state, domid); + if (ret) { + LOGD(ERROR, domid, "cannot make domain: %d", ret); + ret = ERROR_FAIL; + goto error_out; + } + } else { + LOGD(ERROR, domid, "cannot make domain"); ret = ERROR_FAIL; goto error_out; } - dcs->guest_domid = domid; dcs->sdss.dm.guest_domid = 0; /* means we haven't spawned */ /* @@ -1026,7 +1058,7 @@ static void initiate_domain_create(libxl__egc *egc, if (ret) goto error_out; - if (restore_fd >= 0 || dcs->domid_soft_reset != INVALID_DOMID) { + if (restore_fd >= 0 || dcs->domid_soft_reset != INVALID_DOMID || d_config->dm_restore_file) { LOGD(DEBUG, domid, "restoring, not running bootloader"); domcreate_bootloader_done(egc, &dcs->bl, 0); } else { @@ -1102,7 +1134,16 @@ static void domcreate_bootloader_done(libxl__egc *egc, dcs->sdss.dm.callback = domcreate_devmodel_started; dcs->sdss.callback = domcreate_devmodel_started; - if (restore_fd < 0 && dcs->domid_soft_reset == INVALID_DOMID) { + if (restore_fd < 0 && dcs->domid_soft_reset == INVALID_DOMID && !d_config->dm_restore_file) { + rc = libxl__domain_build(gc, d_config, domid, state); + domcreate_rebuild_done(egc, dcs, rc); + return; + } + + if ( d_config->dm_restore_file ) { + dcs->srs.dcs = dcs; + dcs->srs.ao = ao; + state->forked_vm = true; rc = libxl__domain_build(gc, d_config, domid, state); domcreate_rebuild_done(egc, dcs, rc); return; @@ -1300,6 +1341,7 @@ static void domcreate_rebuild_done(libxl__egc *egc, /* convenience aliases */ const uint32_t domid = dcs->guest_domid; libxl_domain_config *const d_config = dcs->guest_config; + libxl__domain_build_state *const state = &dcs->build_state; if (ret) { LOGD(ERROR, domid, "cannot (re-)build domain: %d", ret); @@ -1307,6 +1349,9 @@ static void domcreate_rebuild_done(libxl__egc *egc, goto error_out; } + if ( d_config->dm_restore_file ) + state->saved_state = GCSPRINTF("%s", d_config->dm_restore_file); + store_libxl_entry(gc, domid, &d_config->b_info); 
libxl__multidev_begin(ao, &dcs->multidev); @@ -1708,10 +1753,13 @@ static int do_domain_create(libxl_ctx *ctx, libxl_domain_config *d_config, GCNEW(cdcs); cdcs->dcs.ao = ao; cdcs->dcs.guest_config = d_config; + cdcs->dcs.guest_domid = *domid; + libxl_domain_config_init(&cdcs->dcs.guest_config_saved); libxl_domain_config_copy(ctx, &cdcs->dcs.guest_config_saved, d_config); cdcs->dcs.restore_fd = cdcs->dcs.libxc_fd = restore_fd; cdcs->dcs.send_back_fd = send_back_fd; + if (restore_fd > -1) { cdcs->dcs.restore_params = *params; rc = libxl__fd_flags_modify_save(gc, cdcs->dcs.restore_fd, @@ -1954,6 +2002,38 @@ int libxl_domain_create_new(libxl_ctx *ctx, libxl_domain_config *d_config, ao_how, aop_console_how); } +int libxl_domain_fork_vm(libxl_ctx *ctx, uint32_t pdomid, uint32_t *domid) +{ + int rc; + struct xen_domctl_createdomain create = {0}; + create.flags |= XEN_DOMCTL_CDF_hvm; + create.flags |= XEN_DOMCTL_CDF_hap; + create.flags |= XEN_DOMCTL_CDF_oos_off; + create.arch.emulation_flags = (XEN_X86_EMU_ALL & ~XEN_X86_EMU_VPCI); + + create.ssidref = SECINITSID_DOMU; + create.max_vcpus = 1; // placeholder, will be cloned from pdomid + create.max_evtchn_port = 1023; + create.max_grant_frames = LIBXL_MAX_GRANT_FRAMES_DEFAULT; + create.max_maptrack_frames = LIBXL_MAX_MAPTRACK_FRAMES_DEFAULT; + + if ( (rc = xc_domain_create(ctx->xch, domid, &create)) ) + return rc; + + if ( (rc = xc_memshr_fork(ctx->xch, pdomid, *domid)) ) + xc_domain_destroy(ctx->xch, *domid); + + return rc; +} + +int libxl_domain_fork_launch_dm(libxl_ctx *ctx, libxl_domain_config *d_config, + uint32_t domid, + const libxl_asyncprogress_how *aop_console_how) +{ + unset_disk_colo_restore(d_config); + return do_domain_create(ctx, d_config, &domid, -1, -1, 0, 0, aop_console_how); +} + int libxl_domain_create_restore(libxl_ctx *ctx, libxl_domain_config *d_config, uint32_t *domid, int restore_fd, int send_back_fd, diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c index c00356a2f1..72c6fb7541 100644 
--- a/tools/libxl/libxl_dm.c +++ b/tools/libxl/libxl_dm.c @@ -2785,7 +2785,7 @@ static void device_model_spawn_outcome(libxl__egc *egc, libxl__domain_build_state *state = dmss->build_state; - if (state->saved_state) { + if (state->saved_state && !state->forked_vm) { ret2 = unlink(state->saved_state); if (ret2) { LOGED(ERROR, dmss->guest_domid, "%s: failed to remove device-model state %s", diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c index c5685b061c..6e07893e3e 100644 --- a/tools/libxl/libxl_dom.c +++ b/tools/libxl/libxl_dom.c @@ -391,9 +391,12 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid, libxl_domain_build_info *const info = &d_config->b_info; libxl_ctx *ctx = libxl__gc_owner(gc); char *xs_domid, *con_domid; - int rc; + int rc = 0; uint64_t size; + if ( state->forked_vm ) + goto skip_fork; + if (xc_domain_max_vcpus(ctx->xch, domid, info->max_vcpus) != 0) { LOG(ERROR, "Couldn't set max vcpu count"); return ERROR_FAIL; @@ -498,29 +501,6 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid, } } - - rc = libxl__arch_extra_memory(gc, info, &size); - if (rc < 0) { - LOGE(ERROR, "Couldn't get arch extra constant memory size"); - return ERROR_FAIL; - } - - if (xc_domain_setmaxmem(ctx->xch, domid, info->target_memkb + size) < 0) { - LOGE(ERROR, "Couldn't set max memory"); - return ERROR_FAIL; - } - - xs_domid = xs_read(ctx->xsh, XBT_NULL, "/tool/xenstored/domid", NULL); - state->store_domid = xs_domid ? atoi(xs_domid) : 0; - free(xs_domid); - - con_domid = xs_read(ctx->xsh, XBT_NULL, "/tool/xenconsoled/domid", NULL); - state->console_domid = con_domid ? 
atoi(con_domid) : 0; - free(con_domid); - - state->store_port = xc_evtchn_alloc_unbound(ctx->xch, domid, state->store_domid); - state->console_port = xc_evtchn_alloc_unbound(ctx->xch, domid, state->console_domid); - if (info->type != LIBXL_DOMAIN_TYPE_PV) hvm_set_conf_params(ctx->xch, domid, info); @@ -555,8 +535,34 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid, info->altp2m); } + rc = libxl__arch_extra_memory(gc, info, &size); + if (rc < 0) { + LOGE(ERROR, "Couldn't get arch extra constant memory size"); + return ERROR_FAIL; + } + + if (xc_domain_setmaxmem(ctx->xch, domid, info->target_memkb + size) < 0) { + LOGE(ERROR, "Couldn't set max memory"); + return ERROR_FAIL; + } + rc = libxl__arch_domain_create(gc, d_config, domid); + if ( rc ) + goto out; +skip_fork: + xs_domid = xs_read(ctx->xsh, XBT_NULL, "/tool/xenstored/domid", NULL); + state->store_domid = xs_domid ? atoi(xs_domid) : 0; + free(xs_domid); + + con_domid = xs_read(ctx->xsh, XBT_NULL, "/tool/xenconsoled/domid", NULL); + state->console_domid = con_domid ? 
atoi(con_domid) : 0; + free(con_domid); + + state->store_port = xc_evtchn_alloc_unbound(ctx->xch, domid, state->store_domid); + state->console_port = xc_evtchn_alloc_unbound(ctx->xch, domid, state->console_domid); + +out: return rc; } @@ -614,6 +620,9 @@ int libxl__build_post(libxl__gc *gc, uint32_t domid, char **ents; int i, rc; + if ( state->forked_vm ) + goto skip_fork; + if (info->num_vnuma_nodes && !info->num_vcpu_soft_affinity) { rc = set_vnuma_affinity(gc, domid, info); if (rc) @@ -638,6 +647,7 @@ int libxl__build_post(libxl__gc *gc, uint32_t domid, } } +skip_fork: ents = libxl__calloc(gc, 12 + (info->max_vcpus * 2) + 2, sizeof(char *)); ents[0] = "memory/static-max"; ents[1] = GCSPRINTF("%"PRId64, info->max_memkb); @@ -900,14 +910,16 @@ static int hvm_build_set_params(xc_interface *handle, uint32_t domid, libxl_domain_build_info *info, int store_evtchn, unsigned long *store_mfn, int console_evtchn, unsigned long *console_mfn, - domid_t store_domid, domid_t console_domid) + domid_t store_domid, domid_t console_domid, + bool forked_vm) { struct hvm_info_table *va_hvm; uint8_t *va_map, sum; uint64_t str_mfn, cons_mfn; int i; - if (info->type == LIBXL_DOMAIN_TYPE_HVM) { + if ( info->type == LIBXL_DOMAIN_TYPE_HVM && !forked_vm ) + { va_map = xc_map_foreign_range(handle, domid, XC_PAGE_SIZE, PROT_READ | PROT_WRITE, HVM_INFO_PFN); @@ -1223,6 +1235,23 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid, struct xc_dom_image *dom = NULL; bool device_model = info->type == LIBXL_DOMAIN_TYPE_HVM ? 
true : false; + if ( state->forked_vm ) + { + rc = hvm_build_set_params(ctx->xch, domid, info, state->store_port, + &state->store_mfn, state->console_port, + &state->console_mfn, state->store_domid, + state->console_domid, state->forked_vm); + + if ( rc ) + return rc; + + return xc_dom_gnttab_seed(ctx->xch, domid, true, + state->console_mfn, + state->store_mfn, + state->console_domid, + state->store_domid); + } + xc_dom_loginit(ctx->xch); /* @@ -1347,7 +1376,7 @@ int libxl__build_hvm(libxl__gc *gc, uint32_t domid, rc = hvm_build_set_params(ctx->xch, domid, info, state->store_port, &state->store_mfn, state->console_port, &state->console_mfn, state->store_domid, - state->console_domid); + state->console_domid, false); if (rc != 0) { LOG(ERROR, "hvm build set params failed"); goto out; diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h index bfeb38e0ed..0becfc3e31 100644 --- a/tools/libxl/libxl_internal.h +++ b/tools/libxl/libxl_internal.h @@ -1271,6 +1271,7 @@ typedef struct { char *saved_state; int dm_monitor_fd; + bool forked_vm; libxl__file_reference pv_kernel; libxl__file_reference pv_ramdisk; diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl index 04c9762af1..f12c1882dc 100644 --- a/tools/libxl/libxl_types.idl +++ b/tools/libxl/libxl_types.idl @@ -946,6 +946,7 @@ libxl_domain_config = Struct("domain_config", [ ("on_watchdog", libxl_action_on_shutdown), ("on_crash", libxl_action_on_shutdown), ("on_soft_reset", libxl_action_on_shutdown), + ("dm_restore_file", string, {'const': True}), ], dir=DIR_IN) libxl_diskinfo = Struct("diskinfo", [ diff --git a/tools/xl/xl.h b/tools/xl/xl.h index 60bdad8ffb..993a63d268 100644 --- a/tools/xl/xl.h +++ b/tools/xl/xl.h @@ -31,6 +31,7 @@ struct cmd_spec { }; struct domain_create { + uint32_t ddomid; /* fork launch dm for this domid */ int debug; int daemonize; int monitor; /* handle guest reboots etc */ @@ -45,6 +46,7 @@ struct domain_create { const char *config_file; char *extra_config; 
/* extra config string */ const char *restore_file; + const char *dm_restore_file; char *colo_proxy_script; bool userspace_colo_proxy; int migrate_fd; /* -1 means none */ @@ -127,6 +129,8 @@ int main_pciassignable_remove(int argc, char **argv); int main_pciassignable_list(int argc, char **argv); #ifndef LIBXL_HAVE_NO_SUSPEND_RESUME int main_restore(int argc, char **argv); +int main_fork_vm(int argc, char **argv); +int main_fork_launch_dm(int argc, char **argv); int main_migrate_receive(int argc, char **argv); int main_save(int argc, char **argv); int main_migrate(int argc, char **argv); diff --git a/tools/xl/xl_cmdtable.c b/tools/xl/xl_cmdtable.c index 5baa6023aa..902b3d7b01 100644 --- a/tools/xl/xl_cmdtable.c +++ b/tools/xl/xl_cmdtable.c @@ -180,6 +180,21 @@ struct cmd_spec cmd_table[] = { "-V, --vncviewer Connect to the VNC display after the domain is created.\n" "-A, --vncviewer-autopass Pass VNC password to viewer via stdin." }, + { "fork-vm", + &main_fork_vm, 0, 1, + "Fork a domain from the running parent domid", + "[options] ", + "-h Print this help.\n" + "-d Enable debug messages.\n" + }, + { "fork-launch-dm", + &main_fork_launch_dm, 0, 1, + "Launch the device model for a forked VM", + "[options] ", + "-h Print this help.\n" + "-p Do not unpause domain after restoring it.\n" + "-d Enable debug messages.\n" + }, { "migrate-receive", &main_migrate_receive, 0, 1, "Restore a domain from a saved state", diff --git a/tools/xl/xl_saverestore.c b/tools/xl/xl_saverestore.c index 9be033fe65..ba0e300f2c 100644 --- a/tools/xl/xl_saverestore.c +++ b/tools/xl/xl_saverestore.c @@ -229,6 +229,75 @@ int main_restore(int argc, char **argv) return EXIT_SUCCESS; } +int main_fork_vm(int argc, char **argv) +{ + int debug = 0; + uint32_t pdomid = 0, domid = INVALID_DOMID; + int opt; + + SWITCH_FOREACH_OPT(opt, "d", NULL, "fork-vm", 1) { + case 'd': + debug = 1; + break; + } + + if (argc-optind == 1) { + pdomid = atoi(argv[optind]); + } else { + help("fork-vm"); + return 
EXIT_FAILURE; + } + + if (libxl_domain_fork_vm(ctx, pdomid, &domid) || domid == INVALID_DOMID) + return EXIT_FAILURE; + + fprintf(stderr, "VM fork created with domid: %u\n", domid); + return EXIT_SUCCESS; +} + +int main_fork_launch_dm(int argc, char **argv) +{ + const char *config_file = NULL; + const char *dm_restore_file = NULL; + struct domain_create dom_info; + int paused = 0, debug = 0; + uint32_t ddomid = 0; + int opt, rc; + + SWITCH_FOREACH_OPT(opt, "pd", NULL, "fork-launch-dm", 1) { + case 'p': + paused = 1; + break; + case 'd': + debug = 1; + break; + } + + if (argc-optind == 3) { + config_file = argv[optind]; + dm_restore_file = argv[optind + 1]; + ddomid = atoi(argv[optind + 2]); + } else { + help("fork-launch-dm"); + return EXIT_FAILURE; + } + + memset(&dom_info, 0, sizeof(dom_info)); + dom_info.ddomid = ddomid; + dom_info.dm_restore_file = dm_restore_file; + dom_info.debug = debug; + dom_info.paused = paused; + dom_info.config_file = config_file; + dom_info.migrate_fd = -1; + dom_info.send_back_fd = -1; + + rc = create_domain(&dom_info); + if (rc < 0) + return EXIT_FAILURE; + + return EXIT_SUCCESS; +} + int main_save(int argc, char **argv) { uint32_t domid; diff --git a/tools/xl/xl_vmcontrol.c b/tools/xl/xl_vmcontrol.c index eb6779a561..f32a4714ee 100644 --- a/tools/xl/xl_vmcontrol.c +++ b/tools/xl/xl_vmcontrol.c @@ -645,6 +645,7 @@ int create_domain(struct domain_create *dom_info) libxl_domain_config d_config; + uint32_t ddomid = dom_info->ddomid; // launch dm for this domain iff set int debug = dom_info->debug; int daemonize = dom_info->daemonize; int monitor = dom_info->monitor; @@ -655,6 +656,7 @@ int create_domain(struct domain_create *dom_info) const char *restore_file = dom_info->restore_file; const char *config_source = NULL; const char *restore_source = NULL; + const char *dm_restore_file = dom_info->dm_restore_file; int migrate_fd = dom_info->migrate_fd; bool config_in_json; @@ -923,6 +925,12 @@ start: * restore/migrate-receive it again. 
*/ restoring = 0; + } else if ( ddomid ) { + d_config.dm_restore_file = dm_restore_file; + ret = libxl_domain_fork_launch_dm(ctx, &d_config, ddomid, + autoconnect_console_how); + domid = ddomid; + ddomid = INVALID_DOMID; } else if (domid_soft_reset != INVALID_DOMID) { /* Do soft reset. */ ret = libxl_domain_soft_reset(ctx, &d_config, domid_soft_reset,