@@ -748,6 +748,7 @@ struct kvm_vcpu_arch {
bool skey_enabled;
struct kvm_s390_pv_vcpu pv;
union diag318_info diag318_info;
+ atomic_t busy;
};
struct kvm_vm_stat {
@@ -941,6 +942,7 @@ struct kvm_arch{
int user_sigp;
int user_stsi;
int user_instr0;
+ int user_busy;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
@@ -564,6 +564,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_VCPU_RESETS:
case KVM_CAP_SET_GUEST_DEBUG:
case KVM_CAP_S390_DIAG318:
+ case KVM_CAP_S390_USER_BUSY:
r = 1;
break;
case KVM_CAP_SET_GUEST_DEBUG2:
@@ -706,6 +707,11 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
kvm->arch.user_sigp = 1;
r = 0;
break;
+ case KVM_CAP_S390_USER_BUSY:
+ VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_BUSY");
+ kvm->arch.user_busy = 1;
+ r = 0;
+ break;
case KVM_CAP_S390_VECTOR_REGISTERS:
mutex_lock(&kvm->lock);
if (kvm->created_vcpus) {
@@ -4825,6 +4831,40 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
return -EINVAL;
return kvm_s390_inject_vcpu(vcpu, &s390irq);
}
+ case KVM_S390_USER_BUSY: {
+ struct kvm_s390_user_busy_info busy;
+
+ if (!vcpu->kvm->arch.user_busy)
+ return -EINVAL;
+
+ if (copy_from_user(&busy, argp, sizeof(busy)))
+ return -EFAULT;
+
+ switch (busy.reason) {
+ case KVM_S390_USER_BUSY_REASON_SIGP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (busy.function) {
+ case KVM_S390_USER_BUSY_FUNCTION_RESET:
+ kvm_s390_vcpu_clear_busy(vcpu);
+ break;
+ case KVM_S390_USER_BUSY_FUNCTION_SET:
+ if (!kvm_s390_vcpu_set_busy(vcpu))
+ return -EBUSY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ VCPU_EVENT(vcpu, 3, "BUSY: CPU %x %x reason %x payload %x",
+ vcpu->vcpu_id,
+ busy.function, busy.reason, busy.payload);
+
+ return 0;
+ }
}
return -ENOIOCTLCMD;
}
@@ -82,6 +82,21 @@ static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}
+static inline bool kvm_s390_vcpu_is_busy(struct kvm_vcpu *vcpu)
+{
+ return atomic_read(&vcpu->arch.busy) == 1;
+}
+
+static inline bool kvm_s390_vcpu_set_busy(struct kvm_vcpu *vcpu)
+{
+ return atomic_cmpxchg(&vcpu->arch.busy, 0, 1) == 0;
+}
+
+static inline void kvm_s390_vcpu_clear_busy(struct kvm_vcpu *vcpu)
+{
+ atomic_set(&vcpu->arch.busy, 0);
+}
+
static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
@@ -276,6 +276,9 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
+ if (kvm_s390_vcpu_is_busy(dst_vcpu))
+ return SIGP_CC_BUSY;
+
switch (order_code) {
case SIGP_SENSE:
vcpu->stat.instruction_sigp_sense++;
With commit 2444b352c3ac ("KVM: s390: forward most SIGP orders to user space") we have a capability that allows the "fast" SIGP orders (as defined by the Programming Notes for the SIGNAL PROCESSOR instruction in the Principles of Operation) to be handled in-kernel, while all others are sent to userspace for processing. This works fine but it creates a situation where, for example, a SIGP SENSE might return CC1 (STATUS STORED, and status bits indicating the vcpu is stopped), when in actuality userspace is still processing a SIGP STOP AND STORE STATUS order, and the vcpu is not yet actually stopped. Thus, the SIGP SENSE should actually be returning CC2 (busy) instead of CC1. To fix this, add another VM capability and an associated vcpu IOCTL. The IOCTL can be used by userspace to mark a vcpu "busy" processing a SIGP order, and cause concurrent orders handled in-kernel to be returned with CC2 (busy). Another invocation of the IOCTL with a different function code can be used to mark the SIGP completed, and thus the vcpu is free to process additional orders. Signed-off-by: Eric Farman <farman@linux.ibm.com> --- arch/s390/include/asm/kvm_host.h | 2 ++ arch/s390/kvm/kvm-s390.c | 40 ++++++++++++++++++++++++++++++++ arch/s390/kvm/kvm-s390.h | 15 ++++++++++++ arch/s390/kvm/sigp.c | 3 +++ 4 files changed, 60 insertions(+)