@@ -1116,11 +1116,11 @@ static void disable_nmi_singlestep(struct vcpu_svm *svm)
* when handling AMD IOMMU GALOG notification to schedule in
* a particular vCPU.
*/
-#define SVM_VM_DATA_HASH_BITS 8
-static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
+#define KVM_SVM_HASH_BITS 8
+static DEFINE_HASHTABLE(kvm_svm_hash, KVM_SVM_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
-static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
+static DEFINE_SPINLOCK(kvm_svm_hash_lock);
/* Note:
* This function is called from IOMMU driver to notify
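[For reference, not part of the patch: the declarations above use the generic Linux hashtable API from <linux/hashtable.h>. The sketch below, with hypothetical example_* names standing in for the SVM ones, shows the idiom the code relies on: DEFINE_HASHTABLE() creates a 2^bits array of bucket heads, entries embed an hlist_node, and hash_for_each_possible() only narrows the search to one bucket, so callers must still compare the key themselves.]

/*
 * Minimal sketch of the hashtable idiom used above; all example_*
 * names are hypothetical and not part of the patch.
 */
#include <linux/hashtable.h>
#include <linux/spinlock.h>

struct example_entry {
	u32 key;			/* plays the role of avic_vm_id */
	struct hlist_node hnode;	/* links the entry into a bucket */
};

#define EXAMPLE_HASH_BITS 8
static DEFINE_HASHTABLE(example_hash, EXAMPLE_HASH_BITS);
static DEFINE_SPINLOCK(example_hash_lock);

static struct example_entry *example_find(u32 key)
{
	struct example_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&example_hash_lock, flags);
	hash_for_each_possible(example_hash, e, hnode, key) {
		if (e->key == key)
			goto found;
	}
	e = NULL;	/* fell off the bucket without a match */
found:
	spin_unlock_irqrestore(&example_hash_lock, flags);
	return e;
}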
@@ -1136,14 +1136,14 @@ static int avic_ga_log_notifier(u32 ga_tag)
pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
- spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
- hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
+ spin_lock_irqsave(&kvm_svm_hash_lock, flags);
+ hash_for_each_possible(kvm_svm_hash, kvm_svm, hnode, vm_id) {
if (kvm_svm->avic_vm_id != vm_id)
continue;
vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
break;
}
- spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+ spin_unlock_irqrestore(&kvm_svm_hash_lock, flags);
/* Note:
* At this point, the IOMMU should have already set the pending
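[The vm_id/vcpu_id pair driving the lookup above is unpacked from the IOMMU's GA tag. Reproduced from memory, svm.c in this era packs the tag as 24 bits of VM ID above 8 bits of vCPU ID; the exact widths should be checked against the tree.]

#define AVIC_VCPU_ID_BITS	8
#define AVIC_VCPU_ID_MASK	((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS		24
#define AVIC_VM_ID_MASK		((1 << AVIC_VM_ID_BITS) - 1)

/* Pack/unpack a GA tag: vm_id in bits 31:8, vcpu_id in bits 7:0. */
#define AVIC_GATAG(x, y)	(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
				 (y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)	((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)	(x & AVIC_VCPU_ID_MASK)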
@@ -1791,9 +1791,9 @@ static void avic_vm_destroy(struct kvm *kvm)
if (kvm_svm->avic_physical_id_table_page)
__free_page(kvm_svm->avic_physical_id_table_page);
- spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+ spin_lock_irqsave(&kvm_svm_hash_lock, flags);
hash_del(&kvm_svm->hnode);
- spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+ spin_unlock_irqrestore(&kvm_svm_hash_lock, flags);
}
static void svm_vm_destroy(struct kvm *kvm)
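[Continuing the hypothetical example_* sketch from above: teardown is the mirror image of insertion. Removal takes the same lock as lookup, so a GA-log walker can never observe a half-removed entry.]

/* Companion to the example_find() sketch above; names are hypothetical. */
static void example_remove(struct example_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(&example_hash_lock, flags);
	hash_del(&e->hnode);
	spin_unlock_irqrestore(&example_hash_lock, flags);
}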
@@ -1831,7 +1831,7 @@ static int avic_vm_init(struct kvm *kvm)
kvm_svm->avic_logical_id_table_page = l_page;
clear_page(page_address(l_page));
- spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+ spin_lock_irqsave(&kvm_svm_hash_lock, flags);
again:
vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
if (vm_id == 0) { /* id is 1-based, zero is not okay */
@@ -1840,14 +1840,14 @@ static int avic_vm_init(struct kvm *kvm)
}
/* Is it still in use? Only possible if wrapped at least once */
if (next_vm_id_wrapped) {
- hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
+ hash_for_each_possible(kvm_svm_hash, k2, hnode, vm_id) {
if (k2->avic_vm_id == vm_id)
goto again;
}
}
kvm_svm->avic_vm_id = vm_id;
- hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
- spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+ hash_add(kvm_svm_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
+ spin_unlock_irqrestore(&kvm_svm_hash_lock, flags);
return 0;
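[The allocation scheme above, restated as a standalone helper in the same hypothetical example_* terms; the real code inlines this in avic_vm_init() and holds the hash lock across both the search and the subsequent hash_add(). IDs are 1-based, and once the counter wraps, every candidate must be checked against live entries before it can be reused.]

static u32 example_next_id;
static bool example_next_id_wrapped;

/* Must be called with example_hash_lock held. */
static u32 example_alloc_id(void)
{
	struct example_entry *e;
	u32 id;

again:
	id = example_next_id = (example_next_id + 1) & AVIC_VM_ID_MASK;
	if (id == 0) {		/* IDs are 1-based; hitting 0 marks a wrap */
		example_next_id_wrapped = true;
		goto again;
	}
	/* After a wrap, a candidate may still belong to a live entry. */
	if (example_next_id_wrapped) {
		hash_for_each_possible(example_hash, e, hnode, id) {
			if (e->key == id)
				goto again;
		}
	}
	return id;	/* caller hash_add()s the new entry before unlocking */
}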
The information that is ultimately consumed when walking through the hash
is now stored in struct kvm_svm, e.g. avic_vm_id.  Rename the hash
variables so that their nomenclature is consistent with the new struct
and its usage.

Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/svm.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
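[For context on the rename: the hash entries are whole struct kvm_svm objects, linked through their embedded hnode and keyed by avic_vm_id, which is why a name tied to the old per-VM data blob no longer fit. Reproduced from memory, with field order possibly differing slightly from the actual tree, the struct at this point in the series looks roughly like:]

struct kvm_svm {
	struct kvm kvm;

	/* AVIC state, consumed when walking kvm_svm_hash */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};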