
[XEN,v3] x86/vm_event: add short-circuit for breakpoints (aka "fast single step")

Message ID e6853b44-681a-7423-ede0-43b551b554af@list.ru (mailing list archive)
State New, archived

Commit Message

Sergey Kovalev Dec. 18, 2019, 12:19 p.m. UTC
When using DRAKVUF (or another system using altp2m with shadow pages similar
to what is described in
https://xenproject.org/2016/04/13/stealthy-monitoring-with-xen-altp2m),
after a breakpoint is hit the system switches to the default
unrestricted altp2m view with singlestep enabled. When the singlestep
traps to Xen another vm_event is sent to the monitor agent, which then
normally disables singlestepping and switches the altp2m view back to
the restricted view.

This patch short-circuits that last part: Xen no longer sends a vm_event
out for the singlestep event, and instead switches back to the restricted
view automatically.

This optimization yields a speed-up of about 35%.
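
For illustration, a minimal sketch of how a monitor agent's breakpoint
handler might use the new response flag. The surrounding vm_event ring
loop, the handler name and the restricted altp2m view index
restricted_idx are assumptions here; the flags and fields come from the
public vm_event interface:

#include <xenctrl.h>
#include <xen/vm_event.h>

/*
 * Fill in the response for a synchronous software-breakpoint event:
 * switch the vCPU to the unrestricted default view so the breakpointed
 * instruction can execute, and arm fast singlestep so that on the first
 * singlestep trap Xen itself switches back to restricted_idx and
 * disables singlestepping, without sending a second vm_event.
 */
static void handle_breakpoint(const vm_event_request_t *req,
                              vm_event_response_t *rsp,
                              uint16_t restricted_idx)
{
    rsp->version = VM_EVENT_INTERFACE_VERSION;
    rsp->vcpu_id = req->vcpu_id;
    rsp->reason  = req->reason;

    rsp->flags = VM_EVENT_FLAG_VCPU_PAUSED |
                 VM_EVENT_FLAG_ALTERNATE_P2M |
                 VM_EVENT_FLAG_FAST_SINGLESTEP;
    rsp->altp2m_idx = 0;                        /* unrestricted view */
    rsp->u.fast_singlestep.p2midx = restricted_idx;
}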

Tested on the Debian branch of Xen 4.12:
https://github.com/skvl/xen/tree/debian/knorrie/4.12/fast-singlestep

Rebased on master:
https://github.com/skvl/xen/tree/fast-singlestep

Signed-off-by: Sergey Kovalev <valor@list.ru>
---
 xen/arch/x86/hvm/hvm.c         | 15 +++++++++++++++
 xen/arch/x86/hvm/monitor.c     |  9 +++++++++
 xen/arch/x86/vm_event.c        |  8 ++++++--
 xen/include/asm-x86/hvm/hvm.h  |  1 +
 xen/include/asm-x86/hvm/vcpu.h |  4 ++++
 xen/include/public/vm_event.h  | 10 ++++++++++
 6 files changed, 45 insertions(+), 2 deletions(-)

Comments

Jan Beulich Dec. 18, 2019, 12:48 p.m. UTC | #1
On 18.12.2019 13:19, Sergey Kovalev wrote:
> --- a/xen/arch/x86/vm_event.c
> +++ b/xen/arch/x86/vm_event.c
> @@ -61,7 +61,8 @@ void vm_event_cleanup_domain(struct domain *d)
>  void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
>                                  vm_event_response_t *rsp)
>  {
> -    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
> +    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP ||
> +           rsp->flags & VM_EVENT_FLAG_FAST_SINGLESTEP) )
>          return;

This is still the same as in v2 (could be taken care of while
committing if no other need for a v4 arises). Also please allow
a little more time for reviews between sending versions. And
also please have a brief revision log after a --- separator
after the commit message.
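
(The form presumably being asked for folds the two flag tests into a
single mask check, the usual idiom for such tests in Xen:

    if ( !(rsp->flags & (VM_EVENT_FLAG_TOGGLE_SINGLESTEP |
                         VM_EVENT_FLAG_FAST_SINGLESTEP)) )
        return;
)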

Anyway - applicable bits
Acked-by: Jan Beulich <jbeulich@suse.com>

Jan

> @@ -69,7 +70,10 @@ void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
> 
>      ASSERT(atomic_read(&v->vm_event_pause_count));
> 
> -    hvm_toggle_singlestep(v);
> +    if ( rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
> +        hvm_toggle_singlestep(v);
> +    else
> +        hvm_fast_singlestep(v, rsp->u.fast_singlestep.p2midx);
>  }
> 
>  void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 1d7b66f927..09793c12e9 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -323,6 +323,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op);
> 
>  /* Caller should pause vcpu before calling this function */
>  void hvm_toggle_singlestep(struct vcpu *v);
> +void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);
> 
>  int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>                                struct npfec npfec);
> diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
> index 38f5c2bb9b..8b84941111 100644
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -172,6 +172,10 @@ struct hvm_vcpu {
>      bool                flag_dr_dirty;
>      bool                debug_state_latch;
>      bool                single_step;
> +    struct {
> +        bool     enabled;
> +        uint16_t p2midx;
> +    } fast_single_step;
> 
>      struct hvm_vcpu_asid n1asid;
> 
> diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
> index aa54c86325..cb577a7ba9 100644
> --- a/xen/include/public/vm_event.h
> +++ b/xen/include/public/vm_event.h
> @@ -110,6 +110,11 @@
>   * interrupt pending after resuming the VCPU.
>   */
>  #define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
> +/*
> + * Execute fast singlestepping on vm_event response.
> + * Requires the vCPU to be paused already (synchronous events only).
> + */
> +#define VM_EVENT_FLAG_FAST_SINGLESTEP  (1 << 11)
> 
>  /*
>   * Reasons for the vm event request
> @@ -276,6 +281,10 @@ struct vm_event_singlestep {
>      uint64_t gfn;
>  };
> 
> +struct vm_event_fast_singlestep {
> +    uint16_t p2midx;
> +};
> +
>  struct vm_event_debug {
>      uint64_t gfn;
>      uint32_t insn_length;
> @@ -363,6 +372,7 @@ typedef struct vm_event_st {
>          struct vm_event_mov_to_msr            mov_to_msr;
>          struct vm_event_desc_access           desc_access;
>          struct vm_event_singlestep            singlestep;
> +        struct vm_event_fast_singlestep       fast_singlestep;
>          struct vm_event_debug                 software_breakpoint;
>          struct vm_event_debug                 debug_exception;
>          struct vm_event_cpuid                 cpuid;
>
Sergey Kovalev Dec. 18, 2019, 12:51 p.m. UTC | #2
On 18.12.2019 15:48, Jan Beulich wrote:
> On 18.12.2019 13:19, Sergey Kovalev wrote:
>> --- a/xen/arch/x86/vm_event.c
>> +++ b/xen/arch/x86/vm_event.c
>> @@ -61,7 +61,8 @@ void vm_event_cleanup_domain(struct domain *d)
>>  void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
>>                                  vm_event_response_t *rsp)
>>  {
>> -    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
>> +    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP ||
>> +           rsp->flags & VM_EVENT_FLAG_FAST_SINGLESTEP) )
>>          return;
> 
> This is still the same as in v2 (could be taken care of while
> committing if no other need for a v4 arises).
Sorry for that. I didn't stage it...
Tamas K Lengyel Dec. 18, 2019, 1:20 p.m. UTC | #3
> diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
> index aa54c86325..cb577a7ba9 100644
> --- a/xen/include/public/vm_event.h
> +++ b/xen/include/public/vm_event.h
> @@ -110,6 +110,11 @@
>   * interrupt pending after resuming the VCPU.
>   */
>  #define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
> +/*
> + * Execute fast singlestepping on vm_event response.
> + * Requires the vCPU to be paused already (synchronous events only).
> + */

Please expand this description to note that it also requires setting the
p2midx field of fast_singlestep; Xen will switch the vCPU to that view
on the occurrence of the first singlestep, after which singlestep gets
automatically disabled.
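
Something along these lines, perhaps (a sketch, wording to taste):

/*
 * Execute fast singlestepping on vm_event response.
 * Requires the vCPU to be paused already (synchronous events only).
 *
 * Requires the p2midx field of fast_singlestep to be set as well: on
 * the first singlestep Xen switches the vCPU to that altp2m view and
 * automatically disables singlestepping, with no vm_event sent out.
 */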

> +#define VM_EVENT_FLAG_FAST_SINGLESTEP  (1 << 11)
>
>  /*
>   * Reasons for the vm event request
> @@ -276,6 +281,10 @@ struct vm_event_singlestep {
>      uint64_t gfn;
>  };
>
> +struct vm_event_fast_singlestep {
> +    uint16_t p2midx;
> +};
> +
>  struct vm_event_debug {
>      uint64_t gfn;
>      uint32_t insn_length;
> @@ -363,6 +372,7 @@ typedef struct vm_event_st {
>          struct vm_event_mov_to_msr            mov_to_msr;
>          struct vm_event_desc_access           desc_access;
>          struct vm_event_singlestep            singlestep;
> +        struct vm_event_fast_singlestep       fast_singlestep;
>          struct vm_event_debug                 software_breakpoint;
>          struct vm_event_debug                 debug_exception;
>          struct vm_event_cpuid                 cpuid;
> --
> 2.20.1
Tamas K Lengyel Dec. 18, 2019, 2:13 p.m. UTC | #4
> diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
> index aa54c86325..cb577a7ba9 100644
> --- a/xen/include/public/vm_event.h
> +++ b/xen/include/public/vm_event.h
> @@ -110,6 +110,11 @@
>   * interrupt pending after resuming the VCPU.
>   */
>  #define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
> +/*
> + * Execute fast singlestepping on vm_event response.
> + * Requires the vCPU to be paused already (synchronous events only).
> + */
> +#define VM_EVENT_FLAG_FAST_SINGLESTEP  (1 << 11)

Just another minor style nitpick: the alignment of (1 << 11) is off
compared to all of the previous declarations above.
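
I.e. keeping the value column lined up with the neighbouring
definitions:

#define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
#define VM_EVENT_FLAG_FAST_SINGLESTEP    (1 << 11)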

Tamas

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 47573f71b8..cb3aa06fd2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5126,6 +5126,21 @@  void hvm_toggle_singlestep(struct vcpu *v)
     v->arch.hvm.single_step = !v->arch.hvm.single_step;
 }

+void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
+{
+    ASSERT(atomic_read(&v->pause_count));
+
+    if ( !hvm_is_singlestep_supported() )
+        return;
+
+    if ( p2midx >= MAX_ALTP2M )
+        return;
+
+    v->arch.hvm.single_step = true;
+    v->arch.hvm.fast_single_step.enabled = true;
+    v->arch.hvm.fast_single_step.p2midx = p2midx;
+}
+
 /*
  * Segment caches in VMCB/VMCS are inconsistent about which bits are checked,
  * important, and preserved across vmentry/exit.  Cook the values to make them
diff --git a/xen/arch/x86/hvm/monitor.c b/xen/arch/x86/hvm/monitor.c
index 1f23fe25e8..85996a3edd 100644
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -28,6 +28,7 @@ 
 #include <asm/hvm/monitor.h>
 #include <asm/altp2m.h>
 #include <asm/monitor.h>
+#include <asm/p2m.h>
 #include <asm/paging.h>
 #include <asm/vm_event.h>
 #include <public/vm_event.h>
@@ -159,6 +160,14 @@  int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
     case HVM_MONITOR_SINGLESTEP_BREAKPOINT:
         if ( !ad->monitor.singlestep_enabled )
             return 0;
+        if ( curr->arch.hvm.fast_single_step.enabled )
+        {
+            p2m_altp2m_check(curr, curr->arch.hvm.fast_single_step.p2midx);
+            curr->arch.hvm.single_step = false;
+            curr->arch.hvm.fast_single_step.enabled = false;
+            curr->arch.hvm.fast_single_step.p2midx = 0;
+            return 0;
+        }
         req.reason = VM_EVENT_REASON_SINGLESTEP;
         req.u.singlestep.gfn = gfn_of_rip(rip);
         sync = true;
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 52c2a71fa0..3788d103f9 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -61,7 +61,8 @@  void vm_event_cleanup_domain(struct domain *d)
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
                                 vm_event_response_t *rsp)
 {
-    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
+    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP ||
+           rsp->flags & VM_EVENT_FLAG_FAST_SINGLESTEP) )
         return;

     if ( !is_hvm_domain(d) )
@@ -69,7 +70,10 @@  void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,

     ASSERT(atomic_read(&v->vm_event_pause_count));

-    hvm_toggle_singlestep(v);
+    if ( rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
+        hvm_toggle_singlestep(v);
+    else
+        hvm_fast_singlestep(v, rsp->u.fast_singlestep.p2midx);
 }

 void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 1d7b66f927..09793c12e9 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -323,6 +323,7 @@  int hvm_debug_op(struct vcpu *v, int32_t op);

 /* Caller should pause vcpu before calling this function */
 void hvm_toggle_singlestep(struct vcpu *v);
+void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);

 int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                               struct npfec npfec);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 38f5c2bb9b..8b84941111 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -172,6 +172,10 @@  struct hvm_vcpu {
     bool                flag_dr_dirty;
     bool                debug_state_latch;
     bool                single_step;
+    struct {
+        bool     enabled;
+        uint16_t p2midx;
+    } fast_single_step;

     struct hvm_vcpu_asid n1asid;

diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index aa54c86325..cb577a7ba9 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -110,6 +110,11 @@ 
  * interrupt pending after resuming the VCPU.
  */
 #define VM_EVENT_FLAG_GET_NEXT_INTERRUPT (1 << 10)
+/*
+ * Execute fast singlestepping on vm_event response.
+ * Requires the vCPU to be paused already (synchronous events only).
+ */
+#define VM_EVENT_FLAG_FAST_SINGLESTEP  (1 << 11)

 /*
  * Reasons for the vm event request
@@ -276,6 +281,10 @@  struct vm_event_singlestep {
     uint64_t gfn;
 };

+struct vm_event_fast_singlestep {
+    uint16_t p2midx;
+};
+
 struct vm_event_debug {
     uint64_t gfn;
     uint32_t insn_length;
@@ -363,6 +372,7 @@  typedef struct vm_event_st {
         struct vm_event_mov_to_msr            mov_to_msr;
         struct vm_event_desc_access           desc_access;
         struct vm_event_singlestep            singlestep;
+        struct vm_event_fast_singlestep       fast_singlestep;
         struct vm_event_debug                 software_breakpoint;
         struct vm_event_debug                 debug_exception;
         struct vm_event_cpuid                 cpuid;