@@ -4326,6 +4326,9 @@ Errors:
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
+ /* Available with KVM_CAP_NESTED_STATE_PREEMPTION_TIMER */
+ #define KVM_STATE_NESTED_PREEMPTION_TIMER 0x00000010
+
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
@@ -4346,6 +4349,7 @@ Errors:
struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+	__u32 preemption_timer_remaining;
};
This ioctl copies the vcpu's nested virtualization state from the kernel to
userspace.
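For context, a minimal userspace sketch of consuming the new field (the
wrapper struct, fd name, and error handling here are illustrative, not part
of this series):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* kvm_nested_state is a fixed 128-byte header followed by a variable-sized
 * data blob; reserve room for vmcs12, shadow_vmcs12 and the new __u32. */
struct nested_state_buf {
	struct kvm_nested_state state;
	__u8 data[2 * KVM_STATE_NESTED_VMX_VMCS_SIZE + sizeof(__u32)];
};

static int get_remaining_timer(int vcpu_fd, __u32 *remaining)
{
	struct nested_state_buf buf;

	memset(&buf, 0, sizeof(buf));
	buf.state.size = sizeof(buf);
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &buf.state) < 0)
		return -1;
	if (!(buf.state.flags & KVM_STATE_NESTED_PREEMPTION_TIMER))
		return -1;	/* timer not armed, or kernel lacks support */
	*remaining = buf.state.data.vmx[0].preemption_timer_remaining;
	return 0;
}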
@@ -391,6 +391,8 @@ struct kvm_sync_regs {
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
+/* Available with KVM_CAP_NESTED_STATE_PREEMPTION_TIMER */
+#define KVM_STATE_NESTED_PREEMPTION_TIMER	0x00000010
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
@@ -400,6 +401,7 @@ struct kvm_sync_regs {
struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+	__u32 preemption_timer_remaining;
};
struct kvm_vmx_nested_state_hdr {
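Note the new member sits at a fixed offset after the two 4 KiB VMCS images,
which is what the size checks later in this patch rely on. A compile-time
restatement of that assumption (hypothetical, not part of the patch):

#include <linux/kvm.h>
#include <stddef.h>

_Static_assert(offsetof(struct kvm_vmx_nested_state_data,
			preemption_timer_remaining) ==
	       2 * KVM_STATE_NESTED_VMX_VMCS_SIZE,
	       "preemption_timer_remaining must directly follow shadow_vmcs12");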
@@ -2020,9 +2020,9 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
+static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
+				       u64 preemption_timeout)
{
-	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
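The body below the cut is unchanged: it still converts the timeout from VMX
preemption-timer units to an hrtimer deadline. Roughly, as a sketch (the
helper name is made up; it assumes KVM's emulated rate constant of 2^5 TSC
cycles per timer unit):

#include <linux/math64.h>

/* timer units -> TSC cycles -> nanoseconds, mirroring what
 * vmx_start_preemption_timer() does with the value passed in. */
static inline u64 preemption_timer_units_to_ns(u64 units, u32 virtual_tsc_khz)
{
	u64 tsc_cycles = units << VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;

	return div_u64(tsc_cycles * 1000000ULL, virtual_tsc_khz);
}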
@@ -3293,8 +3293,15 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
* the timer.
*/
vmx->nested.preemption_timer_expired = false;
-	if (nested_cpu_has_preemption_timer(vmcs12))
-		vmx_start_preemption_timer(vcpu);
+	if (nested_cpu_has_preemption_timer(vmcs12)) {
+		u64 timer_value;
+
+		if (from_vmentry)
+			timer_value = vmcs12->vmx_preemption_timer_value;
+		else
+			timer_value = vmx->nested.preemption_timer_remaining;
+		vmx_start_preemption_timer(vcpu, timer_value);
+	}
/*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
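With from_vmentry threaded through, the two ways into this function now pick
different timer sources (call graph summarized from nested.c, abbreviated):

/*
 * nested_vmx_run()       -> nested_vmx_enter_non_root_mode(vcpu, true)
 *                           arm from vmcs12->vmx_preemption_timer_value
 * vmx_set_nested_state() -> nested_vmx_enter_non_root_mode(vcpu, false)
 *                           resume from vmx->nested.preemption_timer_remaining,
 *                           so a migrated L2 does not get its full timeout back
 */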
@@ -3889,9 +3896,13 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
	if (nested_cpu_has_preemption_timer(vmcs12) &&
-	    vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
-		vmcs12->vmx_preemption_timer_value =
-			vmx_get_preemption_timer_value(vcpu);
+	    !vmx->nested.nested_run_pending) {
+		vmx->nested.preemption_timer_remaining =
+			vmx_get_preemption_timer_value(vcpu);
+		if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
+			vmcs12->vmx_preemption_timer_value =
+				vmx->nested.preemption_timer_remaining;
+	}
/*
* In some cases (usually, nested EPT), L2 is allowed to change its
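For reference, the pre-existing helper called here converts the hrtimer's
remaining time back into timer units; its current shape is roughly the
following (reproduced from KVM, lightly abridged):

static u64 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
	ktime_t remaining =
		hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
	u64 value;

	if (ktime_to_ns(remaining) <= 0)
		return 0;

	/* ns -> TSC cycles -> timer units: the inverse of the arming path */
	value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
	do_div(value, 1000000);
	return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}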
@@ -5759,6 +5770,13 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (vmx->nested.mtf_pending)
kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
+
+		if (nested_cpu_has_preemption_timer(vmcs12)) {
+			kvm_state.flags |= KVM_STATE_NESTED_PREEMPTION_TIMER;
+			kvm_state.size = sizeof(kvm_state) +
+				offsetofend(struct kvm_vmx_nested_state_data,
+					    preemption_timer_remaining);
+		}
}
}
@@ -5790,6 +5808,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
+	BUILD_BUG_ON(sizeof(user_vmx_nested_state->preemption_timer_remaining)
+		     != sizeof(vmx->nested.preemption_timer_remaining));
+
/*
* Copy over the full allocated size of vmcs12 rather than just the size
@@ -5805,6 +5826,13 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
return -EFAULT;
}
+	if (kvm_state.flags & KVM_STATE_NESTED_PREEMPTION_TIMER) {
+		if (copy_to_user(&user_vmx_nested_state->preemption_timer_remaining,
+				 &vmx->nested.preemption_timer_remaining,
+				 sizeof(vmx->nested.preemption_timer_remaining)))
+			return -EFAULT;
+	}
+
out:
return kvm_state.size;
}
@@ -5876,7 +5904,8 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
*/
if (is_smm(vcpu) ?
(kvm_state->flags &
-	     (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
+	     (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING |
+	      KVM_STATE_NESTED_PREEMPTION_TIMER))
: kvm_state->hdr.vmx.smm.flags)
return -EINVAL;
@@ -5966,6 +5995,21 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
goto error_guest_mode;
}
+	if (kvm_state->flags & KVM_STATE_NESTED_PREEMPTION_TIMER) {
+		if (kvm_state->size <
+		    offsetof(struct kvm_nested_state, data.vmx) +
+		    offsetofend(struct kvm_vmx_nested_state_data,
+				preemption_timer_remaining))
+			goto error_guest_mode;
+
+		if (copy_from_user(&vmx->nested.preemption_timer_remaining,
+				   &user_vmx_nested_state->preemption_timer_remaining,
+				   sizeof(user_vmx_nested_state->preemption_timer_remaining))) {
+			ret = -EFAULT;
+			goto error_guest_mode;
+		}
+	}
+
if (nested_vmx_check_controls(vcpu, vmcs12) ||
nested_vmx_check_host_state(vcpu, vmcs12) ||
nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
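On the destination side the flow is symmetric; reusing the illustrative
nested_state_buf from the documentation sketch above, restore is a single
ioctl and the kernel re-arms the timer from the transferred value on the
next entry to L2:

static int restore_nested_state(int vcpu_fd, struct nested_state_buf *buf)
{
	/* If KVM_STATE_NESTED_PREEMPTION_TIMER is set in buf->state.flags,
	 * preemption_timer_remaining is validated and stashed here, then
	 * used by nested_vmx_enter_non_root_mode(vcpu, false). */
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, &buf->state);
}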
@@ -167,6 +167,7 @@ struct nested_vmx {
u16 posted_intr_nv;
struct hrtimer preemption_timer;
+ u32 preemption_timer_remaining;
bool preemption_timer_expired;
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
@@ -3374,6 +3374,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_GET_MSR_FEATURES:
case KVM_CAP_MSR_PLATFORM_INFO:
case KVM_CAP_EXCEPTION_PAYLOAD:
+ case KVM_CAP_NESTED_STATE_PREEMPTION_TIMER:
r = 1;
break;
case KVM_CAP_SYNC_REGS:
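Userspace should gate all of the above on the new capability; a minimal
probe (illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns nonzero if this kernel can save/restore the nested VMX
 * preemption timer across migration. */
static int has_nested_preemption_timer_state(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int r;

	if (kvm_fd < 0)
		return 0;
	r = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
		  KVM_CAP_NESTED_STATE_PREEMPTION_TIMER);
	close(kvm_fd);
	return r > 0;
}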
@@ -1017,6 +1017,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_VCPU_RESETS 179
#define KVM_CAP_S390_PROTECTED 180
#define KVM_CAP_PPC_SECURE_GUEST 181
+#define KVM_CAP_NESTED_STATE_PREEMPTION_TIMER 182
#ifdef KVM_CAP_IRQ_ROUTING