From patchwork Wed Sep 25 15:48:39 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tamas K Lengyel X-Patchwork-Id: 11160971 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 84B6E924 for ; Wed, 25 Sep 2019 15:51:00 +0000 (UTC) Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 5F64221D7B for ; Wed, 25 Sep 2019 15:51:00 +0000 (UTC) DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 5F64221D7B Authentication-Results: mail.kernel.org; dmarc=fail (p=none dis=none) header.from=intel.com Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=xen-devel-bounces@lists.xenproject.org Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xp-0000mg-Oc; Wed, 25 Sep 2019 15:49:33 +0000 Received: from all-amaz-eas1.inumbo.com ([34.197.232.57] helo=us1-amaz-eas2.inumbo.com) by lists.xenproject.org with esmtp (Exim 4.89) (envelope-from ) id 1iD9Xo-0000mI-TW for xen-devel@lists.xenproject.org; Wed, 25 Sep 2019 15:49:32 +0000 X-Inumbo-ID: 0f5c2020-dfac-11e9-9637-12813bfff9fa Received: from mga12.intel.com (unknown [192.55.52.136]) by localhost (Halon) with ESMTPS id 0f5c2020-dfac-11e9-9637-12813bfff9fa; Wed, 25 Sep 2019 15:49:30 +0000 (UTC) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 25 Sep 2019 08:49:28 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,548,1559545200"; d="scan'208";a="193812624" Received: from tlengyel-mobl2.amr.corp.intel.com (HELO localhost.localdomain) 
([10.252.129.153]) by orsmga006.jf.intel.com with ESMTP; 25 Sep 2019 08:49:27 -0700 From: Tamas K Lengyel To: xen-devel@lists.xenproject.org Date: Wed, 25 Sep 2019 08:48:39 -0700 Message-Id: <07b30029c26f90d292d05d2e28944d9034f27fa7.1569425745.git.tamas.lengyel@intel.com> X-Mailer: git-send-email 2.20.1 In-Reply-To: References: MIME-Version: 1.0 Subject: [Xen-devel] [RFC PATCH for-next 01/18] x86: make hvm_{get/set}_param accessible X-BeenThere: xen-devel@lists.xenproject.org X-Mailman-Version: 2.1.23 Precedence: list List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Cc: Andrew Cooper , Tamas K Lengyel , Wei Liu , Jan Beulich , =?utf-8?q?Roger_Pau_Monn=C3=A9?= Errors-To: xen-devel-bounces@lists.xenproject.org Sender: "Xen-devel" Currently the hvm parameters are only accessible via the HVMOP hypercalls. By exposing hvm_{get/set}_param it will be possible for VM forking to copy the parameters directly into the clone domain. Signed-off-by: Tamas K Lengyel --- xen/arch/x86/hvm/hvm.c | 169 ++++++++++++++++++++-------------- xen/include/asm-x86/hvm/hvm.h | 4 + 2 files changed, 106 insertions(+), 67 deletions(-) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index fdb1e17f59..667c830db5 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -4054,16 +4054,17 @@ static int hvmop_set_evtchn_upcall_vector( } static int hvm_allow_set_param(struct domain *d, - const struct xen_hvm_param *a) + uint32_t index, + uint64_t new_value) { - uint64_t value = d->arch.hvm.params[a->index]; + uint64_t value = d->arch.hvm.params[index]; int rc; rc = xsm_hvm_param(XSM_TARGET, d, HVMOP_set_param); if ( rc ) return rc; - switch ( a->index ) + switch ( index ) { /* The following parameters can be set by the guest. 
*/ case HVM_PARAM_CALLBACK_IRQ: @@ -4096,7 +4097,7 @@ static int hvm_allow_set_param(struct domain *d, if ( rc ) return rc; - switch ( a->index ) + switch ( index ) { /* The following parameters should only be changed once. */ case HVM_PARAM_VIRIDIAN: @@ -4106,7 +4107,7 @@ static int hvm_allow_set_param(struct domain *d, case HVM_PARAM_NR_IOREQ_SERVER_PAGES: case HVM_PARAM_ALTP2M: case HVM_PARAM_MCA_CAP: - if ( value != 0 && a->value != value ) + if ( value != 0 && new_value != value ) rc = -EEXIST; break; default: @@ -4116,13 +4117,11 @@ static int hvm_allow_set_param(struct domain *d, return rc; } -static int hvmop_set_param( +int hvmop_set_param( XEN_GUEST_HANDLE_PARAM(xen_hvm_param_t) arg) { - struct domain *curr_d = current->domain; struct xen_hvm_param a; struct domain *d; - struct vcpu *v; int rc; if ( copy_from_guest(&a, arg, 1) ) @@ -4142,23 +4141,42 @@ static int hvmop_set_param( if ( !is_hvm_domain(d) ) goto out; - rc = hvm_allow_set_param(d, &a); + rc = hvm_set_param(d, a.index, a.value); + + out: + rcu_unlock_domain(d); + return rc; +} + +int hvm_set_param( + struct domain *d, + uint32_t index, + uint64_t value) +{ + struct domain *curr_d = current->domain; + int rc; + struct vcpu *v; + + if ( index >= HVM_NR_PARAMS ) + return -EINVAL; + + rc = hvm_allow_set_param(d, index, value); if ( rc ) goto out; - switch ( a.index ) + switch ( index ) { case HVM_PARAM_CALLBACK_IRQ: - hvm_set_callback_via(d, a.value); + hvm_set_callback_via(d, value); hvm_latch_shinfo_size(d); break; case HVM_PARAM_TIMER_MODE: - if ( a.value > HVMPTM_one_missed_tick_pending ) + if ( value > HVMPTM_one_missed_tick_pending ) rc = -EINVAL; break; case HVM_PARAM_VIRIDIAN: - if ( (a.value & ~HVMPV_feature_mask) || - !(a.value & HVMPV_base_freq) ) + if ( (value & ~HVMPV_feature_mask) || + !(value & HVMPV_base_freq) ) rc = -EINVAL; break; case HVM_PARAM_IDENT_PT: @@ -4168,7 +4186,7 @@ static int hvmop_set_param( */ if ( !paging_mode_hap(d) || !cpu_has_vmx ) { - 
d->arch.hvm.params[a.index] = a.value; + d->arch.hvm.params[index] = value; break; } @@ -4183,7 +4201,7 @@ static int hvmop_set_param( rc = 0; domain_pause(d); - d->arch.hvm.params[a.index] = a.value; + d->arch.hvm.params[index] = value; for_each_vcpu ( d, v ) paging_update_cr3(v, false); domain_unpause(d); @@ -4192,23 +4210,23 @@ static int hvmop_set_param( break; case HVM_PARAM_DM_DOMAIN: /* The only value this should ever be set to is DOMID_SELF */ - if ( a.value != DOMID_SELF ) + if ( value != DOMID_SELF ) rc = -EINVAL; - a.value = curr_d->domain_id; + value = curr_d->domain_id; break; case HVM_PARAM_ACPI_S_STATE: rc = 0; - if ( a.value == 3 ) + if ( value == 3 ) hvm_s3_suspend(d); - else if ( a.value == 0 ) + else if ( value == 0 ) hvm_s3_resume(d); else rc = -EINVAL; break; case HVM_PARAM_ACPI_IOPORTS_LOCATION: - rc = pmtimer_change_ioport(d, a.value); + rc = pmtimer_change_ioport(d, value); break; case HVM_PARAM_MEMORY_EVENT_CR0: case HVM_PARAM_MEMORY_EVENT_CR3: @@ -4223,24 +4241,24 @@ static int hvmop_set_param( rc = xsm_hvm_param_nested(XSM_PRIV, d); if ( rc ) break; - if ( a.value > 1 ) + if ( value > 1 ) rc = -EINVAL; /* * Remove the check below once we have * shadow-on-shadow. */ - if ( !paging_mode_hap(d) && a.value ) + if ( !paging_mode_hap(d) && value ) rc = -EINVAL; - if ( a.value && + if ( value && d->arch.hvm.params[HVM_PARAM_ALTP2M] ) rc = -EINVAL; /* Set up NHVM state for any vcpus that are already up. 
*/ - if ( a.value && + if ( value && !d->arch.hvm.params[HVM_PARAM_NESTEDHVM] ) for_each_vcpu(d, v) if ( rc == 0 ) rc = nestedhvm_vcpu_initialise(v); - if ( !a.value || rc ) + if ( !value || rc ) for_each_vcpu(d, v) nestedhvm_vcpu_destroy(v); break; @@ -4248,30 +4266,30 @@ static int hvmop_set_param( rc = xsm_hvm_param_altp2mhvm(XSM_PRIV, d); if ( rc ) break; - if ( a.value > XEN_ALTP2M_limited ) + if ( value > XEN_ALTP2M_limited ) rc = -EINVAL; - if ( a.value && + if ( value && d->arch.hvm.params[HVM_PARAM_NESTEDHVM] ) rc = -EINVAL; break; case HVM_PARAM_TRIPLE_FAULT_REASON: - if ( a.value > SHUTDOWN_MAX ) + if ( value > SHUTDOWN_MAX ) rc = -EINVAL; break; case HVM_PARAM_IOREQ_SERVER_PFN: - d->arch.hvm.ioreq_gfn.base = a.value; + d->arch.hvm.ioreq_gfn.base = value; break; case HVM_PARAM_NR_IOREQ_SERVER_PAGES: { unsigned int i; - if ( a.value == 0 || - a.value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 ) + if ( value == 0 || + value > sizeof(d->arch.hvm.ioreq_gfn.mask) * 8 ) { rc = -EINVAL; break; } - for ( i = 0; i < a.value; i++ ) + for ( i = 0; i < value; i++ ) set_bit(i, &d->arch.hvm.ioreq_gfn.mask); break; @@ -4283,35 +4301,35 @@ static int hvmop_set_param( sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8); BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN > sizeof(d->arch.hvm.ioreq_gfn.legacy_mask) * 8); - if ( a.value ) - set_bit(a.index, &d->arch.hvm.ioreq_gfn.legacy_mask); + if ( value ) + set_bit(index, &d->arch.hvm.ioreq_gfn.legacy_mask); break; case HVM_PARAM_X87_FIP_WIDTH: - if ( a.value != 0 && a.value != 4 && a.value != 8 ) + if ( value != 0 && value != 4 && value != 8 ) { rc = -EINVAL; break; } - d->arch.x87_fip_width = a.value; + d->arch.x87_fip_width = value; break; case HVM_PARAM_VM86_TSS: /* Hardware would silently truncate high bits. */ - if ( a.value != (uint32_t)a.value ) + if ( value != (uint32_t)value ) { if ( d == curr_d ) domain_crash(d); rc = -EINVAL; } /* Old hvmloader binaries hardcode the size to 128 bytes. 
*/ - if ( a.value ) - a.value |= (128ULL << 32) | VM86_TSS_UPDATED; - a.index = HVM_PARAM_VM86_TSS_SIZED; + if ( value ) + value |= (128ULL << 32) | VM86_TSS_UPDATED; + index = HVM_PARAM_VM86_TSS_SIZED; break; case HVM_PARAM_VM86_TSS_SIZED: - if ( (a.value >> 32) < sizeof(struct tss32) ) + if ( (value >> 32) < sizeof(struct tss32) ) { if ( d == curr_d ) domain_crash(d); @@ -4322,34 +4340,33 @@ static int hvmop_set_param( * 256 bits interrupt redirection bitmap + 64k bits I/O bitmap * plus one padding byte). */ - if ( (a.value >> 32) > sizeof(struct tss32) + + if ( (value >> 32) > sizeof(struct tss32) + (0x100 / 8) + (0x10000 / 8) + 1 ) - a.value = (uint32_t)a.value | + value = (uint32_t)value | ((sizeof(struct tss32) + (0x100 / 8) + (0x10000 / 8) + 1) << 32); - a.value |= VM86_TSS_UPDATED; + value |= VM86_TSS_UPDATED; break; case HVM_PARAM_MCA_CAP: - rc = vmce_enable_mca_cap(d, a.value); + rc = vmce_enable_mca_cap(d, value); break; } if ( rc != 0 ) goto out; - d->arch.hvm.params[a.index] = a.value; + d->arch.hvm.params[index] = value; HVM_DBG_LOG(DBG_LEVEL_HCALL, "set param %u = %"PRIx64, - a.index, a.value); + index, value); out: - rcu_unlock_domain(d); return rc; } static int hvm_allow_get_param(struct domain *d, - const struct xen_hvm_param *a) + uint32_t index) { int rc; @@ -4357,7 +4374,7 @@ static int hvm_allow_get_param(struct domain *d, if ( rc ) return rc; - switch ( a->index ) + switch ( index ) { /* The following parameters can be read by the guest. */ case HVM_PARAM_CALLBACK_IRQ: @@ -4411,42 +4428,60 @@ static int hvmop_get_param( if ( !is_hvm_domain(d) ) goto out; - rc = hvm_allow_get_param(d, &a); + rc = hvm_get_param(d, a.index, &a.value); if ( rc ) goto out; - switch ( a.index ) + rc = __copy_to_guest(arg, &a, 1) ? 
-EFAULT : 0; + + HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64, + a.index, a.value); + + out: + rcu_unlock_domain(d); + return rc; +} + +int hvm_get_param( + struct domain *d, + uint32_t index, + uint64_t *value) +{ + int rc; + + if ( index >= HVM_NR_PARAMS || !value ) + return -EINVAL; + + rc = hvm_allow_get_param(d, index); + if ( rc ) + return rc; + + switch ( index ) { case HVM_PARAM_ACPI_S_STATE: - a.value = d->arch.hvm.is_s3_suspended ? 3 : 0; + *value = d->arch.hvm.is_s3_suspended ? 3 : 0; break; case HVM_PARAM_VM86_TSS: - a.value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED]; + *value = (uint32_t)d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED]; break; case HVM_PARAM_VM86_TSS_SIZED: - a.value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] & - ~VM86_TSS_UPDATED; + *value = d->arch.hvm.params[HVM_PARAM_VM86_TSS_SIZED] & + ~VM86_TSS_UPDATED; break; case HVM_PARAM_X87_FIP_WIDTH: - a.value = d->arch.x87_fip_width; + *value = d->arch.x87_fip_width; break; default: - a.value = d->arch.hvm.params[a.index]; + *value = d->arch.hvm.params[index]; break; } - rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0; - - HVM_DBG_LOG(DBG_LEVEL_HCALL, "get param %u = %"PRIx64, - a.index, a.value); + return 0; +} - out: - rcu_unlock_domain(d); - return rc; -} /* * altp2m operations are envisioned as being used in several different diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h index 4e72d0732e..7f000fa6b1 100644 --- a/xen/include/asm-x86/hvm/hvm.h +++ b/xen/include/asm-x86/hvm/hvm.h @@ -336,6 +336,10 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d, bool restore); bool hvm_flush_vcpu_tlb(bool (*flush_vcpu)(void *ctxt, struct vcpu *v), void *ctxt); +/* Caller must hold domain locks */ +int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value); +int hvm_set_param(struct domain *d, uint32_t index, uint64_t value); + #ifdef CONFIG_HVM #define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)