
[8/9] KVM: x86, SVM: do not clobber guest DR6 on KVM_EXIT_DEBUG

Message ID 20200506111034.11756-9-pbonzini@redhat.com (mailing list archive)
State New, archived
Series KVM_SET_GUEST_DEBUG tests and fixes, DR accessors cleanups

Commit Message

Paolo Bonzini May 6, 2020, 11:10 a.m. UTC
On Intel, #DB exceptions transmit the DR6 value via the exit qualification
field of the VMCS, and the exit qualification only contains the description
of the precise event that caused a vmexit.

On AMD, instead the DR6 field of the VMCB is filled in as if the #DB exception
was to be injected into the guest.  This has two effects when guest debugging
is in use:

* the guest DR6 is clobbered

* the kvm_run->debug.arch.dr6 field can accumulate more debug events, rather
than just the last one that happened.

Fortunately, if guest debugging is in use, debug register reads and writes
are always intercepted.  Now that the guest DR6 is always synchronized with
vcpu->arch.dr6, we can just run the guest with an all-zero DR6 while guest
debugging is enabled, and restore the guest value when it is disabled.  This
fixes both problems.

A testcase for the second issue is added in the next patch.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/svm.c |  2 ++
 arch/x86/kvm/x86.c     | 12 ++++++++----
 arch/x86/kvm/x86.h     |  2 ++
 3 files changed, 12 insertions(+), 4 deletions(-)
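
For contrast with the SVM path below: on VMX the #DB handler picks the
reported bits up from the exit qualification and forwards them to userspace
without going through the guest's DR6.  Roughly, as a paraphrased sketch from
memory rather than verbatim kernel code (structure and surrounding logic in
the real handle_exception path differ slightly):

	case DB_VECTOR:
		dr6 = vmcs_readl(EXIT_QUALIFICATION);
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
			/* Report only this event; the guest's DR6 is untouched. */
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
			kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
		}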

Comments

Peter Xu May 6, 2020, 6:15 p.m. UTC | #1
On Wed, May 06, 2020 at 07:10:33AM -0400, Paolo Bonzini wrote:
> On Intel, #DB exceptions transmit the DR6 value via the exit qualification
> field of the VMCS, and the exit qualification only contains the description
> of the precise event that caused a vmexit.
> 
> On AMD, instead the DR6 field of the VMCB is filled in as if the #DB exception
> was to be injected into the guest.  This has two effects when guest debugging
> is in use:
> 
> * the guest DR6 is clobbered
> 
> * the kvm_run->debug.arch.dr6 field can accumulate more debug events, rather
> than just the last one that happened.
> 
> Fortunately, if guest debugging is in use we debug register reads and writes
> are always intercepted.  Now that the guest DR6 is always synchronized with
> vcpu->arch.dr6, we can just run the guest with an all-zero DR6 while guest
> debugging is enabled, and restore the guest value when it is disabled.  This
> fixes both problems.
> 
> A testcase for the second issue is added in the next patch.

Is there supposed to be another test after this one, or the GD test?

> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/kvm/svm/svm.c |  2 ++
>  arch/x86/kvm/x86.c     | 12 ++++++++----
>  arch/x86/kvm/x86.h     |  2 ++
>  3 files changed, 12 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index f03bffafd9e6..29dc7311dbb1 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -1750,6 +1750,8 @@ static int db_interception(struct vcpu_svm *svm)
>  		kvm_run->exit_reason = KVM_EXIT_DEBUG;
>  		kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
>  		kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;

[1]

> +		/* This restores DR6 to all zeros.  */
> +		kvm_update_dr6(vcpu);

I feel like it won't work as expected for KVM_GUESTDBG_SINGLESTEP, because at
[2] below it'll take the "else" branch instead, so it seems DR6 won't be
cleared in that case.

Another concern I have is that I mostly read kvm_update_dr6() as "apply the
dr6 memory cache --> VMCB".  I'm worried this call might confuse people (it
took me quite a few minutes to digest...) because the latest data should
already be in the VMCB.

Also, IMHO it would be fine to have invalid dr6 values during
KVM_SET_GUEST_DEBUG.  I'm not sure whether my understanding is correct, but my
reading is that KVM_SET_GUEST_DEBUG needs to override the in-guest debug
completely.  If we are worried about dr6 being incorrect after
KVM_SET_GUEST_DEBUG is disabled, IMHO we can reset dr6 properly in
kvm_arch_vcpu_ioctl_set_guest_debug() before we return the debug registers to
the guest.

PS. I cannot see the lines above marked [1] in my local tree (they look like a
real bugfix...).  I tried kvm/queue just in case I had missed some patches, but
I still didn't see them.  Am I reading the wrong tree here?

Thanks,

>  		kvm_run->debug.arch.pc =
>  			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
>  		kvm_run->debug.arch.exception = DB_VECTOR;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index f4254d716b10..1b5e0fc346bb 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -104,7 +104,6 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
>                                      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
>  
>  static void update_cr8_intercept(struct kvm_vcpu *vcpu);
> -static void kvm_update_dr6(struct kvm_vcpu *vcpu);
>  static void process_nmi(struct kvm_vcpu *vcpu);
>  static void enter_smm(struct kvm_vcpu *vcpu);
>  static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
> @@ -1048,10 +1047,14 @@ static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
>  	}
>  }
>  
> -static void kvm_update_dr6(struct kvm_vcpu *vcpu)
> +void kvm_update_dr6(struct kvm_vcpu *vcpu)
>  {
> -	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
> -	    kvm_x86_ops.set_dr6)
> +	if (!kvm_x86_ops.set_dr6)
> +		return;
> +
> +	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)

[2]

> +		kvm_x86_ops.set_dr6(vcpu, DR6_FIXED_1 | DR6_RTM);
> +	else
>  		kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
>  }
>  
> @@ -9154,6 +9157,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
>  		for (i = 0; i < KVM_NR_DB_REGS; i++)
>  			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
>  	}
> +	kvm_update_dr6(vcpu);
>  	kvm_update_dr7(vcpu);
>  
>  	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
> diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
> index b968acc0516f..a4c950ad4d60 100644
> --- a/arch/x86/kvm/x86.h
> +++ b/arch/x86/kvm/x86.h
> @@ -240,6 +240,8 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
>  	return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
>  }
>  
> +void kvm_update_dr6(struct kvm_vcpu *vcpu);
> +
>  void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
>  void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
>  
> -- 
> 2.18.2
> 
>
Paolo Bonzini May 6, 2020, 8:07 p.m. UTC | #2
On 06/05/20 20:15, Peter Xu wrote:
> On Wed, May 06, 2020 at 07:10:33AM -0400, Paolo Bonzini wrote:
>> On Intel, #DB exceptions transmit the DR6 value via the exit qualification
>> field of the VMCS, and the exit qualification only contains the description
>> of the precise event that caused a vmexit.
>>
>> On AMD, instead the DR6 field of the VMCB is filled in as if the #DB exception
>> was to be injected into the guest.  This has two effects when guest debugging
>> is in use:
>>
>> * the guest DR6 is clobbered
>>
>> * the kvm_run->debug.arch.dr6 field can accumulate more debug events, rather
>> than just the last one that happened.
>>
>> Fortunately, if guest debugging is in use we debug register reads and writes
>> are always intercepted.  Now that the guest DR6 is always synchronized with
>> vcpu->arch.dr6, we can just run the guest with an all-zero DR6 while guest
>> debugging is enabled, and restore the guest value when it is disabled.  This
>> fixes both problems.
>>
>> A testcase for the second issue is added in the next patch.
> 
> Is there supposed to be another test after this one, or the GD test?

It's the GD test.

>> +		/* This restores DR6 to all zeros.  */
>> +		kvm_update_dr6(vcpu);
> 
> I feel like it won't work as expected for KVM_GUESTDBG_SINGLESTEP, because at
> [2] below it'll go to the "else" instead so dr6 seems won't be cleared in that
> case.

You're right, I need to cover both cases that trigger #DB.
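
For illustration, one way to cover both would be to load a clean DR6 whenever
userspace owns the #DB-triggering features, i.e. single-step as well as
hardware breakpoints.  A sketch only, not necessarily what the reworked patch
will do:

void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
	if (!kvm_x86_ops.set_dr6)
		return;

	/*
	 * While userspace debugging is active (hardware breakpoints or
	 * single-step), run with a clean DR6 so that #DB vmexits cannot
	 * clobber the guest value cached in vcpu->arch.dr6.
	 */
	if (vcpu->guest_debug &
	    (KVM_GUESTDBG_USE_HW_BP | KVM_GUESTDBG_SINGLESTEP))
		kvm_x86_ops.set_dr6(vcpu, DR6_FIXED_1 | DR6_RTM);
	else
		kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
}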

> Another concern I have is that, I mostly read kvm_update_dr6() as "apply the
> dr6 memory cache --> VMCB".  I'm worried this might confuse people (at least I
> used quite a few minutes to digest...) here because latest data should already
> be in the VMCB.

No, the latest guest register is always in vcpu->arch.dr6.  It's only
because of KVM_DEBUGREG_WONT_EXIT that kvm_update_dr6() needs to pass
vcpu->arch.dr6 to kvm_x86_ops.set_dr6.  Actually this patch could even
check KVM_DEBUGREG_WONT_EXIT instead of vcpu->guest_debug.  I'll take a
look tomorrow.
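
A sketch of that alternative, purely for illustration and assuming the flag is
tracked in vcpu->arch.switch_db_regs; the actual rework may end up looking
different:

void kvm_update_dr6(struct kvm_vcpu *vcpu)
{
	if (!kvm_x86_ops.set_dr6)
		return;

	/*
	 * With KVM_DEBUGREG_WONT_EXIT the guest accesses the debug registers
	 * directly, so the hardware DR6 must hold vcpu->arch.dr6.  Otherwise
	 * every DR access is intercepted and served from vcpu->arch.dr6, and
	 * a clean DR6 can be loaded for the guest.
	 */
	if (vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
		kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
	else
		kvm_x86_ops.set_dr6(vcpu, DR6_FIXED_1 | DR6_RTM);
}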

> Also, IMHO it would be fine to have invalid dr6 values during
> KVM_SET_GUEST_DEBUG.  I'm not sure whether my understanding is correct, but I
> see KVM_SET_GUEST_DEBUG needs to override the in-guest debug completely.

Sort of: userspace can try to juggle host and guest debugging (this is
why you have KVM_GUESTDBG_INJECT_DB and KVM_GUESTDBG_INJECT_BP).

> If we worry about dr6 being incorrect after KVM_SET_GUEST_DEBUG is disabled,
> IMHO we can reset dr6 in kvm_arch_vcpu_ioctl_set_guest_debug() properly before
> we return the debug registers to the guest.
> 
> PS. I cannot see above lines [1] in my local tree (which seems to be really a
> bugfix...).  I tried to use kvm/queue just in case I missed some patches, but I
> still didn't see them.  So am I reading the wrong tree here?

The patch is based on kvm/master, and indeed that line is from a bugfix
that I posted yesterday ("KVM: SVM: fill in
kvm_run->debug.arch.dr[67]").  I had pushed that one right away, because
it was quite obviously suitable for 5.7.

Paolo
Peter Xu May 6, 2020, 9:13 p.m. UTC | #3
On Wed, May 06, 2020 at 10:07:15PM +0200, Paolo Bonzini wrote:
> On 06/05/20 20:15, Peter Xu wrote:
> > On Wed, May 06, 2020 at 07:10:33AM -0400, Paolo Bonzini wrote:
> >> On Intel, #DB exceptions transmit the DR6 value via the exit qualification
> >> field of the VMCS, and the exit qualification only contains the description
> >> of the precise event that caused a vmexit.
> >>
> >> On AMD, instead the DR6 field of the VMCB is filled in as if the #DB exception
> >> was to be injected into the guest.  This has two effects when guest debugging
> >> is in use:
> >>
> >> * the guest DR6 is clobbered
> >>
> >> * the kvm_run->debug.arch.dr6 field can accumulate more debug events, rather
> >> than just the last one that happened.
> >>
> >> Fortunately, if guest debugging is in use we debug register reads and writes
> >> are always intercepted.  Now that the guest DR6 is always synchronized with
> >> vcpu->arch.dr6, we can just run the guest with an all-zero DR6 while guest
> >> debugging is enabled, and restore the guest value when it is disabled.  This
> >> fixes both problems.
> >>
> >> A testcase for the second issue is added in the next patch.
> > 
> > Is there supposed to be another test after this one, or the GD test?
> 
> It's the GD test.

Oh... so is dr6 going to have some leftover bits set in the GD test without
this patch on AMD?  Btw, I noticed a small difference between the Intel and AMD
specs for this case, e.g., the B[0-3] definitions regarding such leftover
bits...

Intel says:

        B0 through B3 (breakpoint condition detected) flags (bits 0 through 3)
        — Indicates (when set) that its associated breakpoint condition was met
        when a debug exception was generated. These flags are set if the
        condition described for each breakpoint by the LENn, and R/Wn flags in
        debug control register DR7 is true. They may or may not be set if the
        breakpoint is not enabled by the Ln or the Gn flags in register
        DR7. Therefore on a #DB, a debug handler should check only those B0-B3
        bits which correspond to an enabled breakpoint.

AMD says:

        Breakpoint-Condition Detected (B3–B0)—Bits 3:0. The processor updates
        these four bits on every debug breakpoint or general-detect
        condition. A bit is set to 1 if the corresponding address- breakpoint
        register detects an enabled breakpoint condition, as specified by the
        DR7 Ln, Gn, R/Wn and LENn controls, and is cleared to 0 otherwise. For
        example, B1 (bit 1) is set to 1 if an address- breakpoint condition is
        detected by DR1.

I'm not sure whether it means the AMD B[0-3] bits are stricter than the Intel
ones (if so, then the selftest could be a bit too strict for VMX).

> >> +		/* This restores DR6 to all zeros.  */
> >> +		kvm_update_dr6(vcpu);
> > 
> > I feel like it won't work as expected for KVM_GUESTDBG_SINGLESTEP, because at
> > [2] below it'll go to the "else" instead so dr6 seems won't be cleared in that
> > case.
> 
> You're right, I need to cover both cases that trigger #DB.
> 
> > Another concern I have is that, I mostly read kvm_update_dr6() as "apply the
> > dr6 memory cache --> VMCB".  I'm worried this might confuse people (at least I
> > used quite a few minutes to digest...) here because latest data should already
> > be in the VMCB.
> 
> No, the latest guest register is always in vcpu->arch.dr6.  It's only
> because of KVM_DEBUGREG_WONT_EXIT that kvm_update_dr6() needs to pass
> vcpu->arch.dr6 to kvm_x86_ops.set_dr6.  Actually this patch could even
> check KVM_DEBUGREG_WONT_EXIT instead of vcpu->guest_debug.  I'll take a
> look tomorrow.

OK.

> 
> > Also, IMHO it would be fine to have invalid dr6 values during
> > KVM_SET_GUEST_DEBUG.  I'm not sure whether my understanding is correct, but I
> > see KVM_SET_GUEST_DEBUG needs to override the in-guest debug completely.
> 
> Sort of, userspace can try to juggle host and guest debugging (this is
> why you have KVM_GUESTDBG_INJECT_DB and KVM_GUESTDBG_INJECT_BP).

I see!

> 
> > If we worry about dr6 being incorrect after KVM_SET_GUEST_DEBUG is disabled,
> > IMHO we can reset dr6 in kvm_arch_vcpu_ioctl_set_guest_debug() properly before
> > we return the debug registers to the guest.
> > 
> > PS. I cannot see above lines [1] in my local tree (which seems to be really a
> > bugfix...).  I tried to use kvm/queue just in case I missed some patches, but I
> > still didn't see them.  So am I reading the wrong tree here?
> 
> The patch is based on kvm/master, and indeed that line is from a bugfix
> that I've posted yesterday ("KVM: SVM: fill in
> kvm_run->debug.arch.dr[67]"). I had pushed that one right away, because
> it was quite obviously suitable for 5.7.

Oh, that's why it looks so familiar (because I read that patch.. :).  It
makes sense now.  Thanks!
Sean Christopherson May 6, 2020, 9:20 p.m. UTC | #4
On Wed, May 06, 2020 at 05:13:56PM -0400, Peter Xu wrote:
> Oh... so is dr6 going to have some leftover bit set in the GD test if without
> this patch for AMD?  Btw, I noticed a small difference on Intel/AMD spec for
> this case, e.g., B[0-3] definitions on such leftover bits...
> 
> Intel says:
> 
>         B0 through B3 (breakpoint condition detected) flags (bits 0 through 3)
>         — Indicates (when set) that its associated breakpoint condition was met
>         when a debug exception was generated. These flags are set if the
>         condition described for each breakpoint by the LENn, and R/Wn flags in
>         debug control register DR7 is true. They may or may not be set if the
>         breakpoint is not enabled by the Ln or the Gn flags in register
>         DR7. Therefore on a #DB, a debug handler should check only those B0-B3
>         bits which correspond to an enabled breakpoint.
> 
> AMD says:
> 
>         Breakpoint-Condition Detected (B3–B0)—Bits 3:0. The processor updates
>         these four bits on every debug breakpoint or general-detect
>         condition. A bit is set to 1 if the corresponding address- breakpoint
>         register detects an enabled breakpoint condition, as specified by the
>         DR7 Ln, Gn, R/Wn and LENn controls, and is cleared to 0 otherwise. For
>         example, B1 (bit 1) is set to 1 if an address- breakpoint condition is
>         detected by DR1.
> 
> I'm not sure whether it means AMD B[0-3] bits are more strict on the Intel ones
> (if so, then the selftest could be a bit too strict to VMX).

If the question is "can DR6 bits 3:0 be set on Intel CPUs even if the
associated breakpoint is disabled?", then the answer is yes.  I haven't
looked at the selftest, but if it's checking DR6 then it should ignore
bits corresponding to disabled breakpoints.
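
For illustration, such a check could mask the B0-B3 bits with the Ln/Gn
enables from DR7 before comparing.  A framework-free sketch (the helper name
and use of assert() are made up; this is not the actual selftest code):

#include <assert.h>
#include <stdint.h>

/* Compare only the B0-B3 bits whose breakpoints are enabled in DR7. */
static void check_db_bits(uint64_t dr6, uint64_t dr7, uint64_t expected)
{
	uint64_t enabled = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (dr7 & (3ULL << (i * 2)))	/* Ln or Gn set for DRi */
			enabled |= 1ULL << i;

	/* B0-B3 for disabled breakpoints may be stale on Intel; ignore them. */
	assert((dr6 & enabled) == (expected & enabled));
}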
Peter Xu May 6, 2020, 11:33 p.m. UTC | #5
On Wed, May 06, 2020 at 02:20:47PM -0700, Sean Christopherson wrote:
> On Wed, May 06, 2020 at 05:13:56PM -0400, Peter Xu wrote:
> > Oh... so is dr6 going to have some leftover bit set in the GD test if without
> > this patch for AMD?  Btw, I noticed a small difference on Intel/AMD spec for
> > this case, e.g., B[0-3] definitions on such leftover bits...
> > 
> > Intel says:
> > 
> >         B0 through B3 (breakpoint condition detected) flags (bits 0 through 3)
> >         — Indicates (when set) that its associated breakpoint condition was met
> >         when a debug exception was generated. These flags are set if the
> >         condition described for each breakpoint by the LENn, and R/Wn flags in
> >         debug control register DR7 is true. They may or may not be set if the
> >         breakpoint is not enabled by the Ln or the Gn flags in register
> >         DR7. Therefore on a #DB, a debug handler should check only those B0-B3
> >         bits which correspond to an enabled breakpoint.
> > 
> > AMD says:
> > 
> >         Breakpoint-Condition Detected (B3–B0)—Bits 3:0. The processor updates
> >         these four bits on every debug breakpoint or general-detect
> >         condition. A bit is set to 1 if the corresponding address- breakpoint
> >         register detects an enabled breakpoint condition, as specified by the
> >         DR7 Ln, Gn, R/Wn and LENn controls, and is cleared to 0 otherwise. For
> >         example, B1 (bit 1) is set to 1 if an address- breakpoint condition is
> >         detected by DR1.
> > 
> > I'm not sure whether it means AMD B[0-3] bits are more strict on the Intel ones
> > (if so, then the selftest could be a bit too strict to VMX).
> 
> If the question is "can DR6 bits 3:0 be set on Intel CPUs even if the
> associated breakpoint is disabled?", then the answer is yes.  I haven't
> looked at the selftest, but if it's checking DR6 then it should ignore
> bits corresponding to disabled breakpoints.

Currently the selftest also checks that B[0-3] are cleared when the
corresponding breakpoint is disabled.  We can loosen that.  The thing is, this
same test can also run on AMD hosts, so logically we should still check that
those bits are cleared to follow the AMD spec (and it has never failed for me,
even on Intel..).

Thanks,
Sean Christopherson May 6, 2020, 11:47 p.m. UTC | #6
On Wed, May 06, 2020 at 07:33:06PM -0400, Peter Xu wrote:
> On Wed, May 06, 2020 at 02:20:47PM -0700, Sean Christopherson wrote:
> > On Wed, May 06, 2020 at 05:13:56PM -0400, Peter Xu wrote:
> > > Oh... so is dr6 going to have some leftover bit set in the GD test if without
> > > this patch for AMD?  Btw, I noticed a small difference on Intel/AMD spec for
> > > this case, e.g., B[0-3] definitions on such leftover bits...
> > > 
> > > Intel says:
> > > 
> > >         B0 through B3 (breakpoint condition detected) flags (bits 0 through 3)
> > >         — Indicates (when set) that its associated breakpoint condition was met
> > >         when a debug exception was generated. These flags are set if the
> > >         condition described for each breakpoint by the LENn, and R/Wn flags in
> > >         debug control register DR7 is true. They may or may not be set if the
> > >         breakpoint is not enabled by the Ln or the Gn flags in register
> > >         DR7. Therefore on a #DB, a debug handler should check only those B0-B3
> > >         bits which correspond to an enabled breakpoint.
> > > 
> > > AMD says:
> > > 
> > >         Breakpoint-Condition Detected (B3–B0)—Bits 3:0. The processor updates
> > >         these four bits on every debug breakpoint or general-detect
> > >         condition. A bit is set to 1 if the corresponding address- breakpoint
> > >         register detects an enabled breakpoint condition, as specified by the
> > >         DR7 Ln, Gn, R/Wn and LENn controls, and is cleared to 0 otherwise. For
> > >         example, B1 (bit 1) is set to 1 if an address- breakpoint condition is
> > >         detected by DR1.
> > > 
> > > I'm not sure whether it means AMD B[0-3] bits are more strict on the Intel ones
> > > (if so, then the selftest could be a bit too strict to VMX).
> > 
> > If the question is "can DR6 bits 3:0 be set on Intel CPUs even if the
> > associated breakpoint is disabled?", then the answer is yes.  I haven't
> > looked at the selftest, but if it's checking DR6 then it should ignore
> > bits corresponding to disabled breakpoints.
> 
> Currently the selftest will also check that the B[0-3] is cleared when specific
> BP is disabled.  We can loose that.  The thing is this same test can also run
> on AMD hosts, so logically on the other hand we should still check those bits
> as cleared to follow the AMD spec (and it never failed for me even on Intel..).

There still has to be an address and type match, e.g. if the DRs are
completely bogus in the selftest then the "cleared to zero" check is still
valid.  And if I'm reading the selftest code correctly, this is indeed the
case, as only the DRn being tested is configured.

Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f03bffafd9e6..29dc7311dbb1 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1750,6 +1750,8 @@  static int db_interception(struct vcpu_svm *svm)
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
 		kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
 		kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
+		/* This restores DR6 to all zeros.  */
+		kvm_update_dr6(vcpu);
 		kvm_run->debug.arch.pc =
 			svm->vmcb->save.cs.base + svm->vmcb->save.rip;
 		kvm_run->debug.arch.exception = DB_VECTOR;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f4254d716b10..1b5e0fc346bb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -104,7 +104,6 @@  static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
-static void kvm_update_dr6(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
@@ -1048,10 +1047,14 @@  static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
 	}
 }
 
-static void kvm_update_dr6(struct kvm_vcpu *vcpu)
+void kvm_update_dr6(struct kvm_vcpu *vcpu)
 {
-	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
-	    kvm_x86_ops.set_dr6)
+	if (!kvm_x86_ops.set_dr6)
+		return;
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+		kvm_x86_ops.set_dr6(vcpu, DR6_FIXED_1 | DR6_RTM);
+	else
 		kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
 }
 
@@ -9154,6 +9157,7 @@  int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 		for (i = 0; i < KVM_NR_DB_REGS; i++)
 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
 	}
+	kvm_update_dr6(vcpu);
 	kvm_update_dr7(vcpu);
 
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index b968acc0516f..a4c950ad4d60 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -240,6 +240,8 @@  static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
 	return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
 }
 
+void kvm_update_dr6(struct kvm_vcpu *vcpu);
+
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);