
[v3,5/9] x86/PVH: actually show Dom0's register state from debug key '0'

Message ID 4ae1a7ef-d6e8-75db-ddf5-987f10175196@suse.com (mailing list archive)
State Superseded
Series x86/PVH: Dom0 building adjustments

Commit Message

Jan Beulich Sept. 21, 2021, 7:19 a.m. UTC
vcpu_show_registers() didn't do anything for HVM so far. Note though
that some extra hackery is needed for VMX - see the code comment.

Note further that the show_guest_stack() invocation is left alone here:
While strictly speaking guest_kernel_mode() should be predicated by a
PV / !HVM check, show_guest_stack() itself will bail immediately for
HVM.

While there and despite not being PVH-specific, take the opportunity and
filter offline vCPU-s: There's not really any register state associated
with them, so avoid spamming the log with useless information while
still leaving an indication of the fact.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
I was pondering whether to also have the VMCS/VMCB dumped for every
vCPU, to present full state. The downside is that for larger systems
this would be a lot of output.
---
v2: New.
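
For reference, the crs[8] array these dump routines share maps its
indices onto the control register numbers, with the segment bases in
the remaining slots. A minimal standalone C model of the convention
(the register values are made-up samples):

#include <stdio.h>

/*
 * Model of the crs[8] layout used by read_registers(),
 * get_hvm_registers() and the PV branch of vcpu_show_registers():
 *   crs[0] = CR0, crs[2] = CR2, crs[3] = CR3, crs[4] = CR4
 *   crs[1] is unused (x86 has no CR1)
 *   crs[5] = FS base, crs[6] = active GS base,
 *   crs[7] = inactive (shadow) GS base
 */
int main(void)
{
    unsigned long crs[8] = {
        [0] = 0x80050033,         /* CR0: PG|AM|WP|NE|ET|MP|PE */
        [2] = 0,                  /* CR2: last faulting address */
        [3] = 0x1000,             /* CR3: page table root */
        [4] = 0x3506e0,           /* CR4 */
        [5] = 0x00007f0000000000, /* FS base */
        [6] = 0xffff880000000000, /* active GS base */
        [7] = 0,                  /* shadow GS base */
    };

    printf("cr0=%lx cr3=%lx fs=%lx gs=%lx gs-shadow=%lx\n",
           crs[0], crs[3], crs[5], crs[6], crs[7]);
    return 0;
}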

Comments

Roger Pau Monne Sept. 22, 2021, 3:48 p.m. UTC | #1
On Tue, Sep 21, 2021 at 09:19:06AM +0200, Jan Beulich wrote:
> vcpu_show_registers() didn't do anything for HVM so far. Note though
> that some extra hackery is needed for VMX - see the code comment.
> 
> Note further that the show_guest_stack() invocation is left alone here:
> While strictly speaking guest_kernel_mode() should be predicated by a
> PV / !HVM check, show_guest_stack() itself will bail immediately for
> HVM.
> 
> While there and despite not being PVH-specific, take the opportunity and
> filter offline vCPU-s: There's not really any register state associated
> with them, so avoid spamming the log with useless information while
> still leaving an indication of the fact.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> I was pondering whether to also have the VMCS/VMCB dumped for every
> vCPU, to present full state. The downside is that for larger systems
> this would be a lot of output.

At least for Intel there's already a debug key to dump VMCS, so I'm
unsure it's worth dumping it here also, as a user can get the
information elsewhere (that's what I've always used to debug PVH
TBH).

> ---
> v2: New.
> 
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -631,6 +631,12 @@ void vcpu_show_execution_state(struct vc
>  {
>      unsigned long flags;
>  
> +    if ( test_bit(_VPF_down, &v->pause_flags) )
> +    {
> +        printk("*** %pv is offline ***\n", v);
> +        return;
> +    }
> +
>      printk("*** Dumping Dom%d vcpu#%d state: ***\n",
>             v->domain->domain_id, v->vcpu_id);
>  
> @@ -642,6 +648,21 @@ void vcpu_show_execution_state(struct vc
>  
>      vcpu_pause(v); /* acceptably dangerous */
>  
> +#ifdef CONFIG_HVM
> +    /*
> +     * For VMX special care is needed: Reading some of the register state will
> +     * require VMCS accesses. Engaging foreign VMCSes involves acquiring a
> +     * lock, which check_lock() would object to when done from an IRQs-disabled
> +     * region. Despite this being a layering violation, engage the VMCS right
> +     * here. This then also avoids doing so several times in close succession.
> +     */
> +    if ( cpu_has_vmx && is_hvm_vcpu(v) )
> +    {
> +        ASSERT(!in_irq());
> +        vmx_vmcs_enter(v);
> +    }
> +#endif
> +
>      /* Prevent interleaving of output. */
>      flags = console_lock_recursive_irqsave();
>  
> @@ -651,6 +672,11 @@ void vcpu_show_execution_state(struct vc
>  
>      console_unlock_recursive_irqrestore(flags);
>  
> +#ifdef CONFIG_HVM
> +    if ( cpu_has_vmx && is_hvm_vcpu(v) )
> +        vmx_vmcs_exit(v);
> +#endif
> +
>      vcpu_unpause(v);
>  }
>  
> --- a/xen/arch/x86/x86_64/traps.c
> +++ b/xen/arch/x86/x86_64/traps.c
> @@ -49,6 +49,39 @@ static void read_registers(struct cpu_us
>      crs[7] = read_gs_shadow();
>  }
>  
> +static void get_hvm_registers(struct vcpu *v, struct cpu_user_regs *regs,
> +                              unsigned long crs[8])

Would this better be placed in hvm.c now that it's an HVM-only
function?

> +{
> +    struct segment_register sreg;
> +
> +    crs[0] = v->arch.hvm.guest_cr[0];
> +    crs[2] = v->arch.hvm.guest_cr[2];
> +    crs[3] = v->arch.hvm.guest_cr[3];
> +    crs[4] = v->arch.hvm.guest_cr[4];
> +
> +    hvm_get_segment_register(v, x86_seg_cs, &sreg);
> +    regs->cs = sreg.sel;
> +
> +    hvm_get_segment_register(v, x86_seg_ds, &sreg);
> +    regs->ds = sreg.sel;
> +
> +    hvm_get_segment_register(v, x86_seg_es, &sreg);
> +    regs->es = sreg.sel;
> +
> +    hvm_get_segment_register(v, x86_seg_fs, &sreg);
> +    regs->fs = sreg.sel;
> +    crs[5] = sreg.base;
> +
> +    hvm_get_segment_register(v, x86_seg_gs, &sreg);
> +    regs->gs = sreg.sel;
> +    crs[6] = sreg.base;
> +
> +    hvm_get_segment_register(v, x86_seg_ss, &sreg);
> +    regs->ss = sreg.sel;
> +
> +    crs[7] = hvm_get_shadow_gs_base(v);
> +}
> +
>  static void _show_registers(
>      const struct cpu_user_regs *regs, unsigned long crs[8],
>      enum context context, const struct vcpu *v)
> @@ -99,27 +132,8 @@ void show_registers(const struct cpu_use
>  
>      if ( guest_mode(regs) && is_hvm_vcpu(v) )
>      {
> -        struct segment_register sreg;
> +        get_hvm_registers(v, &fault_regs, fault_crs);
>          context = CTXT_hvm_guest;
> -        fault_crs[0] = v->arch.hvm.guest_cr[0];
> -        fault_crs[2] = v->arch.hvm.guest_cr[2];
> -        fault_crs[3] = v->arch.hvm.guest_cr[3];
> -        fault_crs[4] = v->arch.hvm.guest_cr[4];
> -        hvm_get_segment_register(v, x86_seg_cs, &sreg);
> -        fault_regs.cs = sreg.sel;
> -        hvm_get_segment_register(v, x86_seg_ds, &sreg);
> -        fault_regs.ds = sreg.sel;
> -        hvm_get_segment_register(v, x86_seg_es, &sreg);
> -        fault_regs.es = sreg.sel;
> -        hvm_get_segment_register(v, x86_seg_fs, &sreg);
> -        fault_regs.fs = sreg.sel;
> -        fault_crs[5] = sreg.base;
> -        hvm_get_segment_register(v, x86_seg_gs, &sreg);
> -        fault_regs.gs = sreg.sel;
> -        fault_crs[6] = sreg.base;
> -        hvm_get_segment_register(v, x86_seg_ss, &sreg);
> -        fault_regs.ss = sreg.sel;
> -        fault_crs[7] = hvm_get_shadow_gs_base(v);
>      }
>      else
>      {
> @@ -159,24 +173,35 @@ void show_registers(const struct cpu_use
>  void vcpu_show_registers(const struct vcpu *v)
>  {
>      const struct cpu_user_regs *regs = &v->arch.user_regs;
> -    bool kernel = guest_kernel_mode(v, regs);
> +    struct cpu_user_regs aux_regs;
> +    enum context context;
>      unsigned long crs[8];
>  
> -    /* Only handle PV guests for now */
> -    if ( !is_pv_vcpu(v) )
> -        return;
> -
> -    crs[0] = v->arch.pv.ctrlreg[0];
> -    crs[2] = arch_get_cr2(v);
> -    crs[3] = pagetable_get_paddr(kernel ?
> -                                 v->arch.guest_table :
> -                                 v->arch.guest_table_user);
> -    crs[4] = v->arch.pv.ctrlreg[4];
> -    crs[5] = v->arch.pv.fs_base;
> -    crs[6 + !kernel] = v->arch.pv.gs_base_kernel;
> -    crs[7 - !kernel] = v->arch.pv.gs_base_user;
> +    if ( is_hvm_vcpu(v) )
> +    {
> +        aux_regs = *regs;
> +        get_hvm_registers(v->domain->vcpu[v->vcpu_id], &aux_regs, crs);

I wonder if you could load the values directly into v->arch.user_regs,
but maybe that would taint some other info already there. I certainly
haven't looked closely.

Thanks, Roger.
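
The ordering constraint described in the code comment above can be
made concrete with a toy model: engaging a foreign VMCS acquires a
lock, and check_lock() objects to ordinary lock acquisition in an
IRQs-disabled region, so the VMCS has to be entered before the console
section disables IRQs. The model below only simulates this (it is not
Xen's actual check_lock() logic):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_disabled;
static bool vmcs_lock_held;

/* Simplified stand-in for check_lock(): flag ordinary lock
 * acquisition while interrupts are disabled. */
static void check_lock(void)
{
    assert(!irqs_disabled);
}

static void vmcs_enter(void) { check_lock(); vmcs_lock_held = true; }
static void vmcs_exit(void)  { assert(vmcs_lock_held); vmcs_lock_held = false; }

int main(void)
{
    vmcs_enter();          /* OK: IRQs still enabled, as in the patch */

    irqs_disabled = true;  /* console_lock_recursive_irqsave() section */
    printf("<register dump goes here>\n");
    irqs_disabled = false; /* console_unlock_recursive_irqrestore() */

    vmcs_exit();
    return 0;
}

Swapping the vmcs_enter() call into the IRQs-off section trips the
assertion, which is the situation the comment's "layering violation"
avoids.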
Jan Beulich Sept. 23, 2021, 10:21 a.m. UTC | #2
On 22.09.2021 17:48, Roger Pau Monné wrote:
> On Tue, Sep 21, 2021 at 09:19:06AM +0200, Jan Beulich wrote:
>> vcpu_show_registers() didn't do anything for HVM so far. Note though
>> that some extra hackery is needed for VMX - see the code comment.
>>
>> Note further that the show_guest_stack() invocation is left alone here:
>> While strictly speaking guest_kernel_mode() should be predicated by a
>> PV / !HVM check, show_guest_stack() itself will bail immediately for
>> HVM.
>>
>> While there and despite not being PVH-specific, take the opportunity and
>> filter offline vCPU-s: There's not really any register state associated
>> with them, so avoid spamming the log with useless information while
>> still leaving an indication of the fact.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> I was pondering whether to also have the VMCS/VMCB dumped for every
>> vCPU, to present full state. The downside is that for larger systems
>> this would be a lot of output.
> 
> At least for Intel there's already a debug key to dump VMCS, so I'm
> unsure it's worth dumping it here also, as a user can get the
> information elsewhere (that's what I've always used to debug PVH
> TBH).

I know there is a respective debug key. That dumps _all_ VMCSes, though,
so it might be quite verbose on a big system (where Dom0's output alone
may already be quite verbose).

>> --- a/xen/arch/x86/x86_64/traps.c
>> +++ b/xen/arch/x86/x86_64/traps.c
>> @@ -49,6 +49,39 @@ static void read_registers(struct cpu_us
>>      crs[7] = read_gs_shadow();
>>  }
>>  
>> +static void get_hvm_registers(struct vcpu *v, struct cpu_user_regs *regs,
>> +                              unsigned long crs[8])
> 
> > Would this better be placed in hvm.c now that it's an HVM-only
> function?

I was asking myself this question, but decided that the placement here
is perhaps at least no bigger of a problem than putting it there.
Factors played into this:
- the specifics of the usage of the crs[8] array,
- the fact that the PV function also lives here, not under pv/,
- the desire to keep the function static.

I can certainly be talked into moving the code, but I will want to see
convincing arguments that none of the three items above (and possible
other ones I may have missed) are really a problem then.

>> @@ -159,24 +173,35 @@ void show_registers(const struct cpu_use
>>  void vcpu_show_registers(const struct vcpu *v)
>>  {
>>      const struct cpu_user_regs *regs = &v->arch.user_regs;

Please note this in addition to my response below.

>> -    bool kernel = guest_kernel_mode(v, regs);
>> +    struct cpu_user_regs aux_regs;
>> +    enum context context;
>>      unsigned long crs[8];
>>  
>> -    /* Only handle PV guests for now */
>> -    if ( !is_pv_vcpu(v) )
>> -        return;
>> -
>> -    crs[0] = v->arch.pv.ctrlreg[0];
>> -    crs[2] = arch_get_cr2(v);
>> -    crs[3] = pagetable_get_paddr(kernel ?
>> -                                 v->arch.guest_table :
>> -                                 v->arch.guest_table_user);
>> -    crs[4] = v->arch.pv.ctrlreg[4];
>> -    crs[5] = v->arch.pv.fs_base;
>> -    crs[6 + !kernel] = v->arch.pv.gs_base_kernel;
>> -    crs[7 - !kernel] = v->arch.pv.gs_base_user;
>> +    if ( is_hvm_vcpu(v) )
>> +    {
>> +        aux_regs = *regs;
>> +        get_hvm_registers(v->domain->vcpu[v->vcpu_id], &aux_regs, crs);
> 
> I wonder if you could load the values directly into v->arch.user_regs,
> but maybe that would taint some other info already there. I certainly
> haven't looked closely.

I had it that other way first, wondering whether altering the structure
there might be safe. It felt wrong to fiddle with the live registers,
and the "const" above then was the final bit that convinced me I should
go the chosen route. Yet again - I can be talked into going the route
you outline via convincing arguments. Don't forget that we e.g.
deliberately poison the selector values in debug builds (see
hvm_invalidate_regs_fields()) - that poisoning would get undermined if
we wrote directly into the structure.

Jan
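
The poisoning argument can be illustrated with a small standalone
model. The field set and the 0xbeef pattern are stand-ins rather than
Xen's actual hvm_invalidate_regs_fields(); what matters is that the
patch's local-copy approach keeps the poison in the live structure
intact:

#include <assert.h>
#include <stdint.h>

struct user_regs {
    uint16_t cs, ds, es, fs, gs, ss;
};

/* Debug-build poisoning of fields that are not kept up to date, so
 * that any stale read stands out (illustrative pattern). */
static void invalidate_regs_fields(struct user_regs *regs)
{
#ifndef NDEBUG
    regs->cs = regs->ds = regs->es = 0xbeef;
    regs->fs = regs->gs = regs->ss = 0xbeef;
#endif
}

/* Mirrors the patch: fetch fresh values into a local copy. */
static void get_registers(const struct user_regs *live,
                          struct user_regs *out)
{
    *out = *live;
    out->cs = 0x10;    /* sample freshly-read selector values */
    out->ss = 0x18;
}

int main(void)
{
    struct user_regs live = { 0 }, aux;

    invalidate_regs_fields(&live);
    get_registers(&live, &aux);

    assert(aux.cs == 0x10);      /* the dump sees fresh values ... */
#ifndef NDEBUG
    assert(live.cs == 0xbeef);   /* ... while the poison survives */
#endif
    return 0;
}

Writing the fresh values straight into the live structure would make
the second assertion fail - the undermining described above.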
Roger Pau Monne Sept. 23, 2021, 2:27 p.m. UTC | #3
On Thu, Sep 23, 2021 at 12:21:42PM +0200, Jan Beulich wrote:
> On 22.09.2021 17:48, Roger Pau Monné wrote:
> > On Tue, Sep 21, 2021 at 09:19:06AM +0200, Jan Beulich wrote:
> >> vcpu_show_registers() didn't do anything for HVM so far. Note though
> >> that some extra hackery is needed for VMX - see the code comment.
> >>
> >> Note further that the show_guest_stack() invocation is left alone here:
> >> While strictly speaking guest_kernel_mode() should be predicated by a
> >> PV / !HVM check, show_guest_stack() itself will bail immediately for
> >> HVM.
> >>
> >> While there and despite not being PVH-specific, take the opportunity and
> >> filter offline vCPU-s: There's not really any register state associated
> >> with them, so avoid spamming the log with useless information while
> >> still leaving an indication of the fact.
> >>
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> ---
> >> I was pondering whether to also have the VMCS/VMCB dumped for every
> >> vCPU, to present full state. The downside is that for larger systems
> >> this would be a lot of output.
> > 
> > At least for Intel there's already a debug key to dump VMCS, so I'm
> > unsure it's worth dumping it here also, as a user can get the
> > information elsewhere (that's what I've always used to debug PVH
> > TBH).
> 
> I know there is a respective debug key. That dumps _all_ VMCSes, though,
> so it might be quite verbose on a big system (where Dom0's output alone
> may already be quite verbose).
> 
> >> --- a/xen/arch/x86/x86_64/traps.c
> >> +++ b/xen/arch/x86/x86_64/traps.c
> >> @@ -49,6 +49,39 @@ static void read_registers(struct cpu_us
> >>      crs[7] = read_gs_shadow();
> >>  }
> >>  
> >> +static void get_hvm_registers(struct vcpu *v, struct cpu_user_regs *regs,
> >> +                              unsigned long crs[8])
> > 
> > > Would this better be placed in hvm.c now that it's an HVM-only
> > function?
> 
> I was asking myself this question, but decided that the placement here
> is perhaps at least no bigger of a problem than putting it there.
> Factors played into this:
> - the specifics of the usage of the crs[8] array,
> - the fact that the PV function also lives here, not under pv/,

I think both functions should live under hvm/ and pv/ respectively.
There's nothing x86_64-specific about them that would warrant placing
them here.

> - the desire to keep the function static.

Well, that's obviously not possible if it's moved to a different file.

> I can certainly be talked into moving the code, but I will want to see
> convincing arguments that none of the three items above (and possible
> other ones I may have missed) are really a problem then.

As said above, my preference would be to move those to pv/ and hvm/,
but I can also live with the current arrangement.

> >> @@ -159,24 +173,35 @@ void show_registers(const struct cpu_use
> >>  void vcpu_show_registers(const struct vcpu *v)
> >>  {
> >>      const struct cpu_user_regs *regs = &v->arch.user_regs;
> 
> Please note this in addition to my response below.
> 
> >> -    bool kernel = guest_kernel_mode(v, regs);
> >> +    struct cpu_user_regs aux_regs;
> >> +    enum context context;
> >>      unsigned long crs[8];
> >>  
> >> -    /* Only handle PV guests for now */
> >> -    if ( !is_pv_vcpu(v) )
> >> -        return;
> >> -
> >> -    crs[0] = v->arch.pv.ctrlreg[0];
> >> -    crs[2] = arch_get_cr2(v);
> >> -    crs[3] = pagetable_get_paddr(kernel ?
> >> -                                 v->arch.guest_table :
> >> -                                 v->arch.guest_table_user);
> >> -    crs[4] = v->arch.pv.ctrlreg[4];
> >> -    crs[5] = v->arch.pv.fs_base;
> >> -    crs[6 + !kernel] = v->arch.pv.gs_base_kernel;
> >> -    crs[7 - !kernel] = v->arch.pv.gs_base_user;
> >> +    if ( is_hvm_vcpu(v) )
> >> +    {
> >> +        aux_regs = *regs;
> >> +        get_hvm_registers(v->domain->vcpu[v->vcpu_id], &aux_regs, crs);
> > 
> > I wonder if you could load the values directly into v->arch.user_regs,
> > but maybe that would taint some other info already there. I certainly
> > haven't looked closely.
> 
> I had it that other way first, wondering whether altering the structure
> there might be safe. It felt wrong to fiddle with the live registers,
> and the "const" above then was the final bit that convinced me I should
> go the chosen route. Yet again - I can be talked into going the route
> you outline via convincing arguments. Don't forget that we e.g.
> deliberately poison the selector values in debug builds (see
> hvm_invalidate_regs_fields()) - that poisoning would get undermined if
> we wrote directly into the structure.

The vCPU is paused at this point, but I agree we should not undermine
the poisoning. I assume those fields don't get filled because it's an
expensive operation and the data doesn't get used that often.

Long term it might be helpful to have something akin to
guest_cpu_user_regs that could be used on paused remote vCPUs.

Anyway, I don't really have any other comments, so:

Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks, Roger.
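
Roger's closing idea - an accessor in the spirit of
guest_cpu_user_regs() usable on paused remote vCPUs - might look
roughly like the sketch below. The name remote_cpu_user_regs() and the
pause-count check are purely hypothetical; no such helper exists at
this point:

#include <assert.h>
#include <stdint.h>

struct cpu_user_regs { unsigned long rip, rsp; uint16_t cs, ss; };

struct vcpu {
    int pause_count;             /* stand-in for Xen's pause counting */
    struct cpu_user_regs user_regs;
};

/* Hypothetical accessor: only meaningful while the remote vCPU is
 * held paused, which is what makes the snapshot coherent to read. */
static const struct cpu_user_regs *
remote_cpu_user_regs(const struct vcpu *v)
{
    assert(v->pause_count > 0);  /* caller must keep the vCPU paused */
    return &v->user_regs;
}

int main(void)
{
    struct vcpu v = {
        .pause_count = 1,
        .user_regs = { .rip = 0x1000, .cs = 0x10 },
    };

    assert(remote_cpu_user_regs(&v)->cs == 0x10);
    return 0;
}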

Patch

--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -631,6 +631,12 @@  void vcpu_show_execution_state(struct vc
 {
     unsigned long flags;
 
+    if ( test_bit(_VPF_down, &v->pause_flags) )
+    {
+        printk("*** %pv is offline ***\n", v);
+        return;
+    }
+
     printk("*** Dumping Dom%d vcpu#%d state: ***\n",
            v->domain->domain_id, v->vcpu_id);
 
@@ -642,6 +648,21 @@  void vcpu_show_execution_state(struct vc
 
     vcpu_pause(v); /* acceptably dangerous */
 
+#ifdef CONFIG_HVM
+    /*
+     * For VMX special care is needed: Reading some of the register state will
+     * require VMCS accesses. Engaging foreign VMCSes involves acquiring a
+     * lock, which check_lock() would object to when done from an IRQs-disabled
+     * region. Despite this being a layering violation, engage the VMCS right
+     * here. This then also avoids doing so several times in close succession.
+     */
+    if ( cpu_has_vmx && is_hvm_vcpu(v) )
+    {
+        ASSERT(!in_irq());
+        vmx_vmcs_enter(v);
+    }
+#endif
+
     /* Prevent interleaving of output. */
     flags = console_lock_recursive_irqsave();
 
@@ -651,6 +672,11 @@  void vcpu_show_execution_state(struct vc
 
     console_unlock_recursive_irqrestore(flags);
 
+#ifdef CONFIG_HVM
+    if ( cpu_has_vmx && is_hvm_vcpu(v) )
+        vmx_vmcs_exit(v);
+#endif
+
     vcpu_unpause(v);
 }
 
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -49,6 +49,39 @@  static void read_registers(struct cpu_us
     crs[7] = read_gs_shadow();
 }
 
+static void get_hvm_registers(struct vcpu *v, struct cpu_user_regs *regs,
+                              unsigned long crs[8])
+{
+    struct segment_register sreg;
+
+    crs[0] = v->arch.hvm.guest_cr[0];
+    crs[2] = v->arch.hvm.guest_cr[2];
+    crs[3] = v->arch.hvm.guest_cr[3];
+    crs[4] = v->arch.hvm.guest_cr[4];
+
+    hvm_get_segment_register(v, x86_seg_cs, &sreg);
+    regs->cs = sreg.sel;
+
+    hvm_get_segment_register(v, x86_seg_ds, &sreg);
+    regs->ds = sreg.sel;
+
+    hvm_get_segment_register(v, x86_seg_es, &sreg);
+    regs->es = sreg.sel;
+
+    hvm_get_segment_register(v, x86_seg_fs, &sreg);
+    regs->fs = sreg.sel;
+    crs[5] = sreg.base;
+
+    hvm_get_segment_register(v, x86_seg_gs, &sreg);
+    regs->gs = sreg.sel;
+    crs[6] = sreg.base;
+
+    hvm_get_segment_register(v, x86_seg_ss, &sreg);
+    regs->ss = sreg.sel;
+
+    crs[7] = hvm_get_shadow_gs_base(v);
+}
+
 static void _show_registers(
     const struct cpu_user_regs *regs, unsigned long crs[8],
     enum context context, const struct vcpu *v)
@@ -99,27 +132,8 @@  void show_registers(const struct cpu_use
 
     if ( guest_mode(regs) && is_hvm_vcpu(v) )
     {
-        struct segment_register sreg;
+        get_hvm_registers(v, &fault_regs, fault_crs);
         context = CTXT_hvm_guest;
-        fault_crs[0] = v->arch.hvm.guest_cr[0];
-        fault_crs[2] = v->arch.hvm.guest_cr[2];
-        fault_crs[3] = v->arch.hvm.guest_cr[3];
-        fault_crs[4] = v->arch.hvm.guest_cr[4];
-        hvm_get_segment_register(v, x86_seg_cs, &sreg);
-        fault_regs.cs = sreg.sel;
-        hvm_get_segment_register(v, x86_seg_ds, &sreg);
-        fault_regs.ds = sreg.sel;
-        hvm_get_segment_register(v, x86_seg_es, &sreg);
-        fault_regs.es = sreg.sel;
-        hvm_get_segment_register(v, x86_seg_fs, &sreg);
-        fault_regs.fs = sreg.sel;
-        fault_crs[5] = sreg.base;
-        hvm_get_segment_register(v, x86_seg_gs, &sreg);
-        fault_regs.gs = sreg.sel;
-        fault_crs[6] = sreg.base;
-        hvm_get_segment_register(v, x86_seg_ss, &sreg);
-        fault_regs.ss = sreg.sel;
-        fault_crs[7] = hvm_get_shadow_gs_base(v);
     }
     else
     {
@@ -159,24 +173,35 @@  void show_registers(const struct cpu_use
 void vcpu_show_registers(const struct vcpu *v)
 {
     const struct cpu_user_regs *regs = &v->arch.user_regs;
-    bool kernel = guest_kernel_mode(v, regs);
+    struct cpu_user_regs aux_regs;
+    enum context context;
     unsigned long crs[8];
 
-    /* Only handle PV guests for now */
-    if ( !is_pv_vcpu(v) )
-        return;
-
-    crs[0] = v->arch.pv.ctrlreg[0];
-    crs[2] = arch_get_cr2(v);
-    crs[3] = pagetable_get_paddr(kernel ?
-                                 v->arch.guest_table :
-                                 v->arch.guest_table_user);
-    crs[4] = v->arch.pv.ctrlreg[4];
-    crs[5] = v->arch.pv.fs_base;
-    crs[6 + !kernel] = v->arch.pv.gs_base_kernel;
-    crs[7 - !kernel] = v->arch.pv.gs_base_user;
+    if ( is_hvm_vcpu(v) )
+    {
+        aux_regs = *regs;
+        get_hvm_registers(v->domain->vcpu[v->vcpu_id], &aux_regs, crs);
+        regs = &aux_regs;
+        context = CTXT_hvm_guest;
+    }
+    else
+    {
+        bool kernel = guest_kernel_mode(v, regs);
+
+        crs[0] = v->arch.pv.ctrlreg[0];
+        crs[2] = arch_get_cr2(v);
+        crs[3] = pagetable_get_paddr(kernel ?
+                                     v->arch.guest_table :
+                                     v->arch.guest_table_user);
+        crs[4] = v->arch.pv.ctrlreg[4];
+        crs[5] = v->arch.pv.fs_base;
+        crs[6 + !kernel] = v->arch.pv.gs_base_kernel;
+        crs[7 - !kernel] = v->arch.pv.gs_base_user;
+
+        context = CTXT_pv_guest;
+    }
 
-    _show_registers(regs, crs, CTXT_pv_guest, v);
+    _show_registers(regs, crs, context, v);
 }
 
 void show_page_walk(unsigned long addr)
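
One subtlety in the PV branch above merits a worked example: the
"6 + !kernel" / "7 - !kernel" indexing keeps crs[6] holding whichever
GS base is currently active and crs[7] the inactive (shadow) one,
matching what read_registers() and get_hvm_registers() put in those
slots. A standalone check with sample base values:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
    const unsigned long gs_base_kernel = 0x1111, gs_base_user = 0x2222;
    unsigned long crs[8];

    for ( int i = 0; i < 2; i++ )
    {
        bool kernel = i == 0;

        /* The indexing from the PV branch of vcpu_show_registers(). */
        crs[6 + !kernel] = gs_base_kernel;
        crs[7 - !kernel] = gs_base_user;

        /* crs[6] is always the active base, crs[7] the shadow one. */
        assert(crs[6] == (kernel ? gs_base_kernel : gs_base_user));
        assert(crs[7] == (kernel ? gs_base_user : gs_base_kernel));
    }
    return 0;
}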