From patchwork Wed Sep 11 15:23:20 2019
X-Patchwork-Submitter: Jan Beulich
X-Patchwork-Id: 11141357
From: Jan Beulich
To: "xen-devel@lists.xenproject.org"
Cc: George Dunlap, Andrew Cooper, Wei Liu, Roger Pau Monné
Date: Wed, 11 Sep 2019 17:23:20 +0200
Message-ID: <443ba725-01b7-9174-3298-66f44ba3f1ec@suse.com>
Subject: [Xen-devel] [PATCH 4/9] x86/HVM: move NOFLUSH handling out of hvm_set_cr3()

The NOFLUSH bit is meaningful only for MOV-to-CR3 insns, not anywhere
else, and in particular not when loading nested guest state.
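
For illustration, the caller-side pattern this patch introduces, as a
minimal sketch restating the hvmemul_write_cr() / hvm_mov_to_cr() hunks
below (curr and val stand for the local variables of those functions;
this is not additional code on top of the patch):

    /*
     * Only the MOV-to-CR3 emulation paths extract and strip the NOFLUSH
     * request; every other caller of hvm_set_cr3() now simply passes false.
     */
    bool noflush = hvm_pcid_enabled(curr) && (val & X86_CR3_NOFLUSH);

    if ( noflush )
        val &= ~X86_CR3_NOFLUSH;

    rc = hvm_set_cr3(val, noflush, true);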
Signed-off-by: Jan Beulich

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2080,6 +2080,8 @@ static int hvmemul_write_cr(
     HVMTRACE_LONG_2D(CR_WRITE, reg, TRC_PAR_LONG(val));
     switch ( reg )
     {
+        bool noflush;
+
     case 0:
         rc = hvm_set_cr0(val, true);
         break;
@@ -2090,7 +2092,10 @@ static int hvmemul_write_cr(
         break;

     case 3:
-        rc = hvm_set_cr3(val, true);
+        noflush = hvm_pcid_enabled(current) && (val & X86_CR3_NOFLUSH);
+        if ( noflush )
+            val &= ~X86_CR3_NOFLUSH;
+        rc = hvm_set_cr3(val, noflush, true);
         break;

     case 4:
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2059,12 +2059,17 @@ int hvm_mov_to_cr(unsigned int cr, unsig

     switch ( cr )
     {
+        bool noflush;
+
     case 0:
         rc = hvm_set_cr0(val, true);
         break;

     case 3:
-        rc = hvm_set_cr3(val, true);
+        noflush = hvm_pcid_enabled(curr) && (val & X86_CR3_NOFLUSH);
+        if ( noflush )
+            val &= ~X86_CR3_NOFLUSH;
+        rc = hvm_set_cr3(val, noflush, true);
         break;

     case 4:
@@ -2282,12 +2287,11 @@ int hvm_set_cr0(unsigned long value, boo
     return X86EMUL_OKAY;
 }

-int hvm_set_cr3(unsigned long value, bool may_defer)
+int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer)
 {
     struct vcpu *v = current;
     struct page_info *page;
     unsigned long old = v->arch.hvm.guest_cr[3];
-    bool noflush = false;

     if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
                                monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) )
@@ -2299,17 +2303,12 @@ int hvm_set_cr3(unsigned long value, boo
             /* The actual write will occur in hvm_do_resume(), if permitted. */
             v->arch.vm_event->write_data.do_write.cr3 = 1;
             v->arch.vm_event->write_data.cr3 = value;
+            v->arch.vm_event->write_data.cr3_noflush = noflush;

             return X86EMUL_OKAY;
         }
     }

-    if ( hvm_pcid_enabled(v) ) /* Clear the noflush bit. */
-    {
-        noflush = value & X86_CR3_NOFLUSH;
-        value &= ~X86_CR3_NOFLUSH;
-    }
-
     if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
          (value != v->arch.hvm.guest_cr[3]) )
     {
@@ -3004,7 +3003,7 @@ void hvm_task_switch(
     if ( task_switch_load_seg(x86_seg_ldtr, tss.ldt, new_cpl, 0) )
         goto out;

-    rc = hvm_set_cr3(tss.cr3, true);
+    rc = hvm_set_cr3(tss.cr3, false, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     if ( rc != X86EMUL_OKAY )
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -324,7 +324,7 @@ static int nsvm_vcpu_hostrestore(struct
         v->arch.guest_table = pagetable_null();
         /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
     }
-    rc = hvm_set_cr3(n1vmcb->_cr3, true);
+    rc = hvm_set_cr3(n1vmcb->_cr3, false, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);
     if (rc != X86EMUL_OKAY)
@@ -584,7 +584,7 @@ static int nsvm_vmcb_prepare4vmrun(struc
         nestedsvm_vmcb_set_nestedp2m(v, ns_vmcb, n2vmcb);

         /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
-        rc = hvm_set_cr3(ns_vmcb->_cr3, true);
+        rc = hvm_set_cr3(ns_vmcb->_cr3, false, true);
         if ( rc == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         if (rc != X86EMUL_OKAY)
@@ -598,7 +598,7 @@ static int nsvm_vmcb_prepare4vmrun(struc
          * we assume it intercepts page faults.
          */
         /* hvm_set_cr3() below sets v->arch.hvm.guest_cr[3] for us. */
-        rc = hvm_set_cr3(ns_vmcb->_cr3, true);
+        rc = hvm_set_cr3(ns_vmcb->_cr3, false, true);
         if ( rc == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         if (rc != X86EMUL_OKAY)
--- a/xen/arch/x86/hvm/vm_event.c
+++ b/xen/arch/x86/hvm/vm_event.c
@@ -110,7 +110,7 @@ void hvm_vm_event_do_resume(struct vcpu

     if ( unlikely(w->do_write.cr3) )
     {
-        if ( hvm_set_cr3(w->cr3, false) == X86EMUL_EXCEPTION )
+        if ( hvm_set_cr3(w->cr3, w->cr3_noflush, false) == X86EMUL_EXCEPTION )
             hvm_inject_hw_exception(TRAP_gp_fault, 0);

         w->do_write.cr3 = 0;
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1032,7 +1032,7 @@ static void load_shadow_guest_state(stru
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);

-    rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), true);
+    rc = hvm_set_cr3(get_vvmcs(v, GUEST_CR3), false, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);

@@ -1246,7 +1246,7 @@ static void load_vvmcs_host_state(struct
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);

-    rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), true);
+    rc = hvm_set_cr3(get_vvmcs(v, HOST_CR3), false, true);
     if ( rc == X86EMUL_EXCEPTION )
         hvm_inject_hw_exception(TRAP_gp_fault, 0);

--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -274,6 +274,8 @@ struct monitor_write_data {
         unsigned int cr4 : 1;
     } do_write;

+    bool cr3_noflush;
+
     uint32_t msr;
     uint64_t value;
     uint64_t cr0;
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -135,7 +135,7 @@ void hvm_shadow_handle_cd(struct vcpu *v
  */
 int hvm_set_efer(uint64_t value);
 int hvm_set_cr0(unsigned long value, bool may_defer);
-int hvm_set_cr3(unsigned long value, bool may_defer);
+int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer);
 int hvm_set_cr4(unsigned long value, bool may_defer);
 int hvm_descriptor_access_intercept(uint64_t exit_info,
                                     uint64_t vmx_exit_qualification,
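
Background on the bit itself, for completeness (not part of the patch; the
constant below paraphrases the existing X86_CR3_NOFLUSH definition rather
than quoting it): with CR4.PCIDE set, bit 63 of the value moved to CR3
tells the processor not to flush cached translations for the addressed
PCID. The bit is not part of the page table base address, which is why it
has to be stripped before the value is stored or passed further.

    /* Bit 63 of a MOV-to-CR3 source operand: "no flush" request (PCID only). */
    #define X86_CR3_NOFLUSH (1ULL << 63)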