Message ID | 20211110101805.16343-3-suravee.suthikulpanit@amd.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | svm: avic: Allow AVIC support on system w/ physical APIC ID > 255 | expand |
On Wed, Nov 10, 2021, Suravee Suthikulpanit wrote: > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c > index 989685098b3e..0b066bb5149d 100644 > --- a/arch/x86/kvm/svm/svm.c > +++ b/arch/x86/kvm/svm/svm.c > @@ -1031,6 +1031,12 @@ static __init int svm_hardware_setup(void) > nrips = false; > } > > + if (avic) { > + r = avic_init_host_physical_apicid_mask(); > + if (r) > + avic = false; > + } Haven't yet dedicated any brain cells to the rest of the patch, but this can be written as if (avic && avic_init_host_physical_apicid_mask()) avic = false; or avic = avic && !avic_init_host_physical_apicid_mask(); But looking at the context below, combining everything would be preferable. I would say split out the enable_apicv part to make it more obvious that enable_apicv is merely a reflection of avic. avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC) && !avic_init_host_physical_apicid_mask(); enable_apicv = avic; > + > enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC); > > if (enable_apicv) {
On 11/10/2021 11:48 AM, Sean Christopherson wrote: > On Wed, Nov 10, 2021, Suravee Suthikulpanit wrote: >> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c >> index 989685098b3e..0b066bb5149d 100644 >> --- a/arch/x86/kvm/svm/svm.c >> +++ b/arch/x86/kvm/svm/svm.c >> @@ -1031,6 +1031,12 @@ static __init int svm_hardware_setup(void) >> nrips = false; >> } >> >> + if (avic) { >> + r = avic_init_host_physical_apicid_mask(); >> + if (r) >> + avic = false; >> + } > Haven't yet dedicated any brain cells to the rest of the patch, but this can be > written as > > if (avic && avic_init_host_physical_apicid_mask()) > avic = false; > > or > > avic = avic && !avic_init_host_physical_apicid_mask(); > > But looking at the context below, combining everything would be preferable. I > would say split out the enable_apicv part to make it more obvious that enable_apicv > is merely a reflection of avic. > > avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC) && > !avic_init_host_physical_apicid_mask(); > enable_apicv = avic; > Yes, we can do that. I'll update the logic in V2. Thanks, Suravee
On 11/10/21 11:18, Suravee Suthikulpanit wrote: > + if (level_type != 2 || !x2apic_mode) { > + avic_host_physical_id_mask = 0xffULL; > + goto out; > + } > + > + core_mask_width = eax & 0xF; > + > + max_phys_mask_width = get_count_order(apic_get_max_phys_apicid()); > + > + /* > + * Sanity check to ensure core_mask_width for a processor does not > + * exceed the calculated mask. > + */ > + if (WARN_ON(core_mask_width > max_phys_mask_width)) > + return -EINVAL; Can it just use apic_get_max_phys_apicid() in x2apic mode, and 0xff in !x2apic mode? I'm not sure why you need to check CPUID[0xb,0x1]. Paolo
Paolo, On 11/17/2021 8:06 PM, Paolo Bonzini wrote: > On 11/10/21 11:18, Suravee Suthikulpanit wrote: >> + if (level_type != 2 || !x2apic_mode) { >> + avic_host_physical_id_mask = 0xffULL; >> + goto out; >> + } >> + >> + core_mask_width = eax & 0xF; >> + >> + max_phys_mask_width = get_count_order(apic_get_max_phys_apicid()); >> + >> + /* >> + * Sanity check to ensure core_mask_width for a processor does not >> + * exceed the calculated mask. >> + */ >> + if (WARN_ON(core_mask_width > max_phys_mask_width)) >> + return -EINVAL; > > Can it just use apic_get_max_phys_apicid() in x2apic mode, and 0xff in !x2apic mode? I'm not sure why you need to check CPUID[0xb,0x1]. This is mainly a sanity check of the max_physical_apicid (derived from information in ACPI or MP table), which can be removed if you think it is not necessary. Suravee
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 8052d92069e0..0b073f63dabd 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -19,6 +19,7 @@ #include <linux/amd-iommu.h> #include <linux/kvm_host.h> +#include <asm/apic.h> #include <asm/irq_remapping.h> #include "trace.h" @@ -63,6 +64,7 @@ static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS); static u32 next_vm_id = 0; static bool next_vm_id_wrapped = 0; +static u64 avic_host_physical_id_mask; static DEFINE_SPINLOCK(svm_vm_data_hash_lock); /* @@ -133,6 +135,46 @@ void avic_vm_destroy(struct kvm *kvm) spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags); } +int avic_init_host_physical_apicid_mask(void) +{ + unsigned int eax, ebx, ecx, edx; + u32 level_type, core_mask_width, max_phys_mask_width; + + /* + * Calculate minimum number of bits required to represent + * host physical APIC ID for each processor (level type 2) + * using CPUID leaf 0xb sub-leaf 0x1. + */ + cpuid_count(0xb, 0x1, &eax, &ebx, &ecx, &edx); + level_type = (ecx >> 8) & 0xff; + + /* + * If level-type 2 (i.e. processor type) not available, + * or host is in xAPIC mode, default to only 8-bit mask. + */ + if (level_type != 2 || !x2apic_mode) { + avic_host_physical_id_mask = 0xffULL; + goto out; + } + + core_mask_width = eax & 0xF; + + max_phys_mask_width = get_count_order(apic_get_max_phys_apicid()); + + /* + * Sanity check to ensure core_mask_width for a processor does not + * exceed the calculated mask. 
+ */ + if (WARN_ON(core_mask_width > max_phys_mask_width)) + return -EINVAL; + + avic_host_physical_id_mask = BIT(max_phys_mask_width) - 1; +out: + pr_debug("Using AVIC host physical APIC ID mask %#0llx\n", + avic_host_physical_id_mask); + return 0; +} + int avic_vm_init(struct kvm *kvm) { unsigned long flags; @@ -943,22 +985,17 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r) void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { u64 entry; - /* ID = 0xff (broadcast), ID > 0xff (reserved) */ int h_physical_id = kvm_cpu_get_apicid(cpu); struct vcpu_svm *svm = to_svm(vcpu); - /* - * Since the host physical APIC id is 8 bits, - * we can support host APIC ID upto 255. - */ - if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK)) + if (WARN_ON(h_physical_id > avic_host_physical_id_mask)) return; entry = READ_ONCE(*(svm->avic_physical_id_cache)); WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK); - entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK; - entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK); + entry &= ~avic_host_physical_id_mask; + entry |= h_physical_id; entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK; if (svm->avic_is_running) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 989685098b3e..0b066bb5149d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1031,6 +1031,12 @@ static __init int svm_hardware_setup(void) nrips = false; } + if (avic) { + r = avic_init_host_physical_apicid_mask(); + if (r) + avic = false; + } + enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC); if (enable_apicv) { diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 5d30db599e10..e215092d0411 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -497,7 +497,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops; #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31) -#define 
AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL) #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12) #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62) #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63) @@ -525,6 +524,7 @@ int avic_init_vcpu(struct vcpu_svm *svm); void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void avic_vcpu_put(struct kvm_vcpu *vcpu); void avic_post_state_restore(struct kvm_vcpu *vcpu); +int avic_init_host_physical_apicid_mask(void); void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu); void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu); bool svm_check_apicv_inhibit_reasons(ulong bit);