@@ -39,7 +39,7 @@
/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD (1 << 15)
-extern struct static_key paravirt_ticketlocks_enabled;
+extern struct static_key paravirt_spinlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);
#ifdef CONFIG_QUEUE_SPINLOCK
@@ -150,7 +150,7 @@ static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
if (TICKET_SLOWPATH_FLAG &&
- static_key_false(&paravirt_ticketlocks_enabled)) {
+ static_key_false(&paravirt_spinlocks_enabled)) {
arch_spinlock_t prev;
prev = *lock;
@@ -819,7 +819,7 @@ static __init int kvm_spinlock_init_jump(void)
if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
return 0;
- static_key_slow_inc(&paravirt_ticketlocks_enabled);
+ static_key_slow_inc(&paravirt_spinlocks_enabled);
printk(KERN_INFO "KVM setup paravirtual spinlock\n");
return 0;
@@ -16,5 +16,5 @@ struct pv_lock_ops pv_lock_ops = {
};
EXPORT_SYMBOL(pv_lock_ops);
-struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
-EXPORT_SYMBOL(paravirt_ticketlocks_enabled);
+struct static_key paravirt_spinlocks_enabled = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(paravirt_spinlocks_enabled);
@@ -293,7 +293,7 @@ static __init int xen_init_spinlocks_jump(void)
if (!xen_domain())
return 0;
- static_key_slow_inc(&paravirt_ticketlocks_enabled);
+ static_key_slow_inc(&paravirt_spinlocks_enabled);
return 0;
}
early_initcall(xen_init_spinlocks_jump);
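
For reference, the idiom this patch renames is the kernel's static-key (jump label) pattern: the key starts out false so the guarded check in the unlock fast path costs only a patched-in NOP, and the KVM/Xen init code flips it on at boot only when paravirt unlock support is actually present. The following is a minimal sketch of that pattern, not part of this patch; it assumes the same pre-static_branch static_key API used above, and the names my_pv_enabled, my_hot_path, my_slowpath and my_feature_init are hypothetical.

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/export.h>

/* Starts disabled: static_key_false() below is patched to a NOP. */
struct static_key my_pv_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL(my_pv_enabled);

static void my_slowpath(void)
{
	/* hypothetical slow path, e.g. kicking a blocked vCPU */
}

static __always_inline void my_hot_path(void)
{
	/* Rewritten from a NOP to a jump once the key is enabled. */
	if (static_key_false(&my_pv_enabled))
		my_slowpath();
}

static __init int my_feature_init(void)
{
	/* Enable only when the feature is really available, cf. the
	 * KVM_FEATURE_PV_UNHALT and xen_domain() checks in the patch. */
	static_key_slow_inc(&my_pv_enabled);
	return 0;
}
early_initcall(my_feature_init);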