
[RFC,4/9] arm64: utilize time accounting

Message ID 1568197942-15374-5-git-send-email-andrii.anisov@gmail.com (mailing list archive)
State New, archived
Series: Changes to time accounting

Commit Message

Andrii Anisov Sept. 11, 2019, 10:32 a.m. UTC
From: Andrii Anisov <andrii_anisov@epam.com>

Call time accounting hooks from appropriate transition points
of the ARM64 code.

Signed-off-by: Andrii Anisov <andrii_anisov@epam.com>
---
 xen/arch/arm/arm64/entry.S | 39 ++++++++++++++++++++++++++++++++++++---
 xen/arch/arm/domain.c      |  2 ++
 2 files changed, 38 insertions(+), 3 deletions(-)

Comments

Volodymyr Babchuk Sept. 11, 2019, 5:48 p.m. UTC | #1
Hi Andrii,

Andrii Anisov writes:

> From: Andrii Anisov <andrii_anisov@epam.com>
>
> Call time accounting hooks from appropriate transition points
> of the ARM64 code.
I'd prefer a more elaborate commit message. For example, what are the
appropriate transition points? I mean, how did you choose them?

> Signed-off-by: Andrii Anisov <andrii_anisov@epam.com>
> ---
>  xen/arch/arm/arm64/entry.S | 39 ++++++++++++++++++++++++++++++++++++---
>  xen/arch/arm/domain.c      |  2 ++
>  2 files changed, 38 insertions(+), 3 deletions(-)
>
> diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
> index 2d9a271..6fb2fa9 100644
> --- a/xen/arch/arm/arm64/entry.S
> +++ b/xen/arch/arm/arm64/entry.S
> @@ -143,12 +143,21 @@
>
>          .endm
>
> -        .macro  exit, hyp, compat
> +        .macro  exit, hyp, compat, tacc=1
>
>          .if \hyp == 0         /* Guest mode */
>
> +	.if \tacc == 1
There is a hard tab instead of 8 spaces.

Also, while it is easy to guess what 'hyp' and 'compat' mean, it is hard
to tell what 'tacc' stands for. I think you need either a better name for
it or a comment which explains all the parameters.
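
For instance, something like this (just a sketch; that 'tacc' stands for
"time accounting" is my guess from the series title):

        /*
         * exit - return from the hypervisor
         *  hyp:    1 when returning to hypervisor context, 0 for guest
         *  compat: 1 when the guest is AArch32, 0 when it is AArch64
         *  tacc:   1 to call the time accounting ("tacc") hooks on the
         *          way out, 0 to skip them (e.g. for a new vCPU)
         */
        .macro  exit, hyp, compat, tacc=1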

> +
> +        mov     x0, #1
> +        bl      tacc_hyp
> +
> +	.endif
The same applies here about hard tabs. There are probably more of them in this patch.

> +
>          bl      leave_hypervisor_tail /* Disables interrupts on return */
>
> +	mov     x0, #1
> +	bl      tacc_guest
>          exit_guest \compat
>
>          .endif
> @@ -205,9 +214,15 @@ hyp_sync:
>
>  hyp_irq:
>          entry   hyp=1
> +        mov     x0,#5
Space is missing before #5

> +        bl      tacc_irq_enter
>          msr     daifclr, #4
>          mov     x0, sp
>          bl      do_trap_irq
> +
> +        mov     x0,#5
Space is missing before #5

> +        bl      tacc_irq_exit
> +
>          exit    hyp=1
>
>  guest_sync:
> @@ -291,6 +306,9 @@ guest_sync_slowpath:
>           * to save them.
>           */
>          entry   hyp=0, compat=0, save_x0_x1=0
> +
There is trailing whitespace. I am sure that 'git diff' highlights
such mistakes...

> +        mov     x0,#1
Space is missing before #1

> +        bl      tacc_gsync
>          /*
>           * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>           * is not set. If a vSError took place, the initial exception will be
> @@ -307,6 +325,10 @@ guest_sync_slowpath:
>
>  guest_irq:
>          entry   hyp=0, compat=0
> +
> +        mov     x0,#6
Space is missing before #6

> +        bl      tacc_irq_enter
> +
>          /*
>           * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>           * is not set. If a vSError took place, the initial exception will be
> @@ -319,6 +341,8 @@ guest_irq:
>          mov     x0, sp
>          bl      do_trap_irq
>  1:
> +	mov	x0,#6
Space is missing before #6, also looks like there is a hard tab character.

> +        bl      tacc_irq_exit
>          exit    hyp=0, compat=0
>
>  guest_fiq_invalid:
> @@ -334,6 +358,9 @@ guest_error:
>
>  guest_sync_compat:
>          entry   hyp=0, compat=1
> +
> +        mov     x0,#2
Space is missing before #2.

> +        bl      tacc_gsync
>          /*
>           * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>           * is not set. If a vSError took place, the initial exception will be
> @@ -350,6 +377,10 @@ guest_sync_compat:
>
>  guest_irq_compat:
>          entry   hyp=0, compat=1
> +
> +        mov     x0,#7
Space is missing before #7.

> +        bl      tacc_irq_enter
> +
>          /*
>           * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>           * is not set. If a vSError took place, the initial exception will be
> @@ -362,6 +393,8 @@ guest_irq_compat:
>          mov     x0, sp
>          bl      do_trap_irq
>  1:
> +        mov     x0,#7
Space is missing before #7...

> +        bl      tacc_irq_exit
>          exit    hyp=0, compat=1
>
>  guest_fiq_invalid_compat:
> @@ -376,9 +409,9 @@ guest_error_compat:
>          exit    hyp=0, compat=1
>
>  ENTRY(return_to_new_vcpu32)
> -        exit    hyp=0, compat=1
> +        exit    hyp=0, compat=1, tacc=0
>  ENTRY(return_to_new_vcpu64)
> -        exit    hyp=0, compat=0
> +        exit    hyp=0, compat=0, tacc=0
>
>  return_from_trap:
>          msr     daifset, #2 /* Mask interrupts */


> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index a9c4113..53ef630 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -51,11 +51,13 @@ static void do_idle(void)
>      process_pending_softirqs();
>
>      local_irq_disable();
> +    tacc_idle(1);
1 and 2 (below) look like magic values to me.
I believe you need to define an enumeration.
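
Something along these lines (a sketch only; the names and the meaning I
ascribe to each value are my assumptions from the call sites in this
patch, not something the series defines):

    /* Identifiers passed to the time accounting hooks per call site. */
    enum tacc_caller
    {
        TACC_GUEST_SYNC_64 = 1,  /* guest_sync / exit paths, do_idle() entry */
        TACC_GUEST_SYNC_32 = 2,  /* guest_sync_compat, do_idle() wake-up */
        TACC_HYP_IRQ       = 5,  /* hyp_irq */
        TACC_GUEST_IRQ_64  = 6,  /* guest_irq */
        TACC_GUEST_IRQ_32  = 7,  /* guest_irq_compat */
    };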

>      if ( cpu_is_haltable(cpu) )
>      {
>          dsb(sy);
>          wfi();
>      }
> +    tacc_hyp(2);
>      local_irq_enable();
>
>      sched_tick_resume();


--
Volodymyr Babchuk at EPAM
Andrii Anisov Sept. 12, 2019, 12:09 p.m. UTC | #2
Hello Volodymyr,

On 11.09.19 20:48, Volodymyr Babchuk wrote:
> 
> Hi Andrii,
> 

As we agreed, I'll wipe out the debugging leftovers, clean up the coding style nits, and resend the series.
Julien Grall Sept. 12, 2019, 12:17 p.m. UTC | #3
On Thu, 12 Sep 2019, 13:10 Andrii Anisov, <andrii.anisov@gmail.com> wrote:

> Hello Volodymyr,
>
> On 11.09.19 20:48, Volodymyr Babchuk wrote:
> >
> > Hi Andrii,
> >
>
> As we agreed, I'll wipe out debugging remains as well as cleanup coding
> style nits and resend the series.


This is an RFC and I am sure the current state is enough to spark a
discussion. There is no need to waste time resending it and filling up
inboxes.

Please wait some more time.

Cheers,


> --
> Sincerely,
> Andrii Anisov.
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xenproject.org
> https://lists.xenproject.org/mailman/listinfo/xen-devel
Andrii Anisov Sept. 12, 2019, 12:29 p.m. UTC | #4
On 12.09.19 15:17, Julien Grall wrote:
> This is an RFC and I am sure the current state is enough to spark a discussion. There is no need to waste time resending it and filling up inboxes.
> 
> Please wait some more time.

Gotcha!
Julien Grall Oct. 28, 2019, 2:47 p.m. UTC | #5
(+ George and Dario)

Hi,

On 11/09/2019 11:32, Andrii Anisov wrote:
> From: Andrii Anisov <andrii_anisov@epam.com>
> 
> Call time accounting hooks from appropriate transition points
> of the ARM64 code.
> 
> Signed-off-by: Andrii Anisov <andrii_anisov@epam.com>
> ---
>   xen/arch/arm/arm64/entry.S | 39 ++++++++++++++++++++++++++++++++++++---
>   xen/arch/arm/domain.c      |  2 ++
>   2 files changed, 38 insertions(+), 3 deletions(-)
> 
> diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
> index 2d9a271..6fb2fa9 100644
> --- a/xen/arch/arm/arm64/entry.S
> +++ b/xen/arch/arm/arm64/entry.S
> @@ -143,12 +143,21 @@
>   
>           .endm
>   
> -        .macro  exit, hyp, compat
> +        .macro  exit, hyp, compat, tacc=1
>   
>           .if \hyp == 0         /* Guest mode */
>   
> +	.if \tacc == 1

This is here because you may already be in the HYP state, right?

I noticed that in the previous patch you mention you only handle "re-entry"
for the IRQ state.

As you don't have an "exit" for states other than IRQ, I would not consider
this as re-entry. It is more like a transition from one state to another
(it just happens to be the same state).

The problem of re-entry would arise if you take an exception that is going
to switch the state. But the concern would be exactly the same for any
exception that switches the state (such as a synchronous hypervisor
exception).

This raises the question: how do you account SError interrupts/synchronous exceptions?
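
One way to make that safe (purely illustrative, not something this series
implements; all names below are made up) would be a small per-CPU stack of
accounting states, so whatever state an exception interrupts gets restored
on exit:

    #include <xen/percpu.h>

    enum tacc_state { TACC_GUEST, TACC_HYP, TACC_GSYNC, TACC_IRQ, TACC_IDLE };

    struct tacc_ctx {
        enum tacc_state stack[8];   /* interrupted states, innermost last */
        unsigned int sp;
    };
    static DEFINE_PER_CPU(struct tacc_ctx, tacc_ctx);

    /* On exception entry: remember which state we interrupted.
     * (Bounds checking omitted for brevity.) */
    static void tacc_push(enum tacc_state interrupted)
    {
        struct tacc_ctx *ctx = &this_cpu(tacc_ctx);

        ctx->stack[ctx->sp++] = interrupted;
    }

    /* On exception exit: resume accounting for the interrupted state. */
    static enum tacc_state tacc_pop(void)
    {
        struct tacc_ctx *ctx = &this_cpu(tacc_ctx);

        return ctx->stack[--ctx->sp];
    }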

> +
> +        mov     x0, #1
> +        bl      tacc_hyp
> +
> +	.endif
> +
>           bl      leave_hypervisor_tail /* Disables interrupts on return */

As mentioned in the previous patch, leave_hypervisor_tail() may do some
I/O emulation that needs to be preemptible. So I don't think it is correct
to have that time accounted to the hypervisor.

>   
> +	mov     x0, #1
> +	bl      tacc_guest
>           exit_guest \compat
>   
>           .endif
> @@ -205,9 +214,15 @@ hyp_sync:
>   
>   hyp_irq:
>           entry   hyp=1
> +        mov     x0,#5
> +        bl      tacc_irq_enter
>           msr     daifclr, #4
>           mov     x0, sp
>           bl      do_trap_irq
> +
> +        mov     x0,#5
> +        bl      tacc_irq_exit
> +
>           exit    hyp=1
>   
>   guest_sync:
> @@ -291,6 +306,9 @@ guest_sync_slowpath:
>            * to save them.
>            */
>           entry   hyp=0, compat=0, save_x0_x1=0
> +
> +        mov     x0,#1
> +        bl      tacc_gsync
>           /*
>            * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>            * is not set. If a vSError took place, the initial exception will be
> @@ -307,6 +325,10 @@ guest_sync_slowpath:
>   
>   guest_irq:
>           entry   hyp=0, compat=0
> +
> +        mov     x0,#6
> +        bl      tacc_irq_enter
> +
>           /*
>            * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>            * is not set. If a vSError took place, the initial exception will be
> @@ -319,6 +341,8 @@ guest_irq:
>           mov     x0, sp
>           bl      do_trap_irq
>   1:
> +	mov	x0,#6
> +        bl      tacc_irq_exit
>           exit    hyp=0, compat=0
>   
>   guest_fiq_invalid:
> @@ -334,6 +358,9 @@ guest_error:
>   
>   guest_sync_compat:
>           entry   hyp=0, compat=1
> +
> +        mov     x0,#2
> +        bl      tacc_gsync
>           /*
>            * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>            * is not set. If a vSError took place, the initial exception will be
> @@ -350,6 +377,10 @@ guest_sync_compat:
>   
>   guest_irq_compat:
>           entry   hyp=0, compat=1
> +
> +        mov     x0,#7
> +        bl      tacc_irq_enter
> +
>           /*
>            * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>            * is not set. If a vSError took place, the initial exception will be
> @@ -362,6 +393,8 @@ guest_irq_compat:
>           mov     x0, sp
>           bl      do_trap_irq
>   1:
> +        mov     x0,#7
> +        bl      tacc_irq_exit
>           exit    hyp=0, compat=1
>   
>   guest_fiq_invalid_compat:
> @@ -376,9 +409,9 @@ guest_error_compat:
>           exit    hyp=0, compat=1
>   
>   ENTRY(return_to_new_vcpu32)
> -        exit    hyp=0, compat=1
> +        exit    hyp=0, compat=1, tacc=0
>   ENTRY(return_to_new_vcpu64)
> -        exit    hyp=0, compat=0
> +        exit    hyp=0, compat=0, tacc=0
>   
>   return_from_trap:
>           msr     daifset, #2 /* Mask interrupts */
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index a9c4113..53ef630 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -51,11 +51,13 @@ static void do_idle(void)
>       process_pending_softirqs();
>   
>       local_irq_disable();
> +    tacc_idle(1);

Any reason to call this before the if and not inside?
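
Something like this is what I have in mind (purely illustrative):

    local_irq_disable();
    if ( cpu_is_haltable(cpu) )
    {
        tacc_idle(1);   /* only start accounting idle time when we will
                           actually halt */
        dsb(sy);
        wfi();
    }
    tacc_hyp(2);
    local_irq_enable();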

>       if ( cpu_is_haltable(cpu) )
>       {
>           dsb(sy);
>           wfi();
>       }
> +    tacc_hyp(2);
>       local_irq_enable();
>   
>       sched_tick_resume();
> 

Cheers,
Andrii Anisov Nov. 6, 2019, 11:31 a.m. UTC | #6
On 28.10.19 16:47, Julien Grall wrote:
> (+ George and Dario)
> 
> Hi,
> 
> On 11/09/2019 11:32, Andrii Anisov wrote:
>> From: Andrii Anisov <andrii_anisov@epam.com>
>>
>> Call time accounting hooks from appropriate transition points
>> of the ARM64 code.
>>
>> Signed-off-by: Andrii Anisov <andrii_anisov@epam.com>
>> ---
>>   xen/arch/arm/arm64/entry.S | 39 ++++++++++++++++++++++++++++++++++++---
>>   xen/arch/arm/domain.c      |  2 ++
>>   2 files changed, 38 insertions(+), 3 deletions(-)
>>
>> diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
>> index 2d9a271..6fb2fa9 100644
>> --- a/xen/arch/arm/arm64/entry.S
>> +++ b/xen/arch/arm/arm64/entry.S
>> @@ -143,12 +143,21 @@
>>           .endm
>> -        .macro  exit, hyp, compat
>> +        .macro  exit, hyp, compat, tacc=1
>>           .if \hyp == 0         /* Guest mode */
>> +    .if \tacc == 1
> 
> This is here because you may already be in the HYP state, right?
> 
> I noticed that in the previous patch you mention you only handle "re-entry" for the IRQ state.
> 
> As you don't have an "exit" for states other than IRQ, I would not consider this as re-entry. It is more like a transition from one state to another (it just happens to be the same state).
> 
> The problem of re-entry would arise if you take an exception that is going to switch the state. But the concern would be exactly the same for any exception that switches the state (such as a synchronous hypervisor exception).
>
> This raises the question: how do you account SError interrupts/synchronous exceptions?

Well, I guess I have to think more about it. Maybe I do not completely understand that problem yet.

> 
>> +
>> +        mov     x0, #1
>> +        bl      tacc_hyp
>> +
>> +    .endif
>> +
>>           bl      leave_hypervisor_tail /* Disables interrupts on return */
> 
> As mentioned in the previous patch, leave_hypervisor_tail() may do some I/O emulation that needs to be preemptible. So I don't think it is correct to have that time accounted to the hypervisor.

Could you please point me to that line of code? Are you talking about the GIC?

> 
>> +    mov     x0, #1
>> +    bl      tacc_guest
>>           exit_guest \compat
>>           .endif
>> @@ -205,9 +214,15 @@ hyp_sync:
>>   hyp_irq:
>>           entry   hyp=1
>> +        mov     x0,#5
>> +        bl      tacc_irq_enter
>>           msr     daifclr, #4
>>           mov     x0, sp
>>           bl      do_trap_irq
>> +
>> +        mov     x0,#5
>> +        bl      tacc_irq_exit
>> +
>>           exit    hyp=1
>>   guest_sync:
>> @@ -291,6 +306,9 @@ guest_sync_slowpath:
>>            * to save them.
>>            */
>>           entry   hyp=0, compat=0, save_x0_x1=0
>> +
>> +        mov     x0,#1
>> +        bl      tacc_gsync
>>           /*
>>            * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>>            * is not set. If a vSError took place, the initial exception will be
>> @@ -307,6 +325,10 @@ guest_sync_slowpath:
>>   guest_irq:
>>           entry   hyp=0, compat=0
>> +
>> +        mov     x0,#6
>> +        bl      tacc_irq_enter
>> +
>>           /*
>>            * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>>            * is not set. If a vSError took place, the initial exception will be
>> @@ -319,6 +341,8 @@ guest_irq:
>>           mov     x0, sp
>>           bl      do_trap_irq
>>   1:
>> +    mov    x0,#6
>> +        bl      tacc_irq_exit
>>           exit    hyp=0, compat=0
>>   guest_fiq_invalid:
>> @@ -334,6 +358,9 @@ guest_error:
>>   guest_sync_compat:
>>           entry   hyp=0, compat=1
>> +
>> +        mov     x0,#2
>> +        bl      tacc_gsync
>>           /*
>>            * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>>            * is not set. If a vSError took place, the initial exception will be
>> @@ -350,6 +377,10 @@ guest_sync_compat:
>>   guest_irq_compat:
>>           entry   hyp=0, compat=1
>> +
>> +        mov     x0,#7
>> +        bl      tacc_irq_enter
>> +
>>           /*
>>            * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
>>            * is not set. If a vSError took place, the initial exception will be
>> @@ -362,6 +393,8 @@ guest_irq_compat:
>>           mov     x0, sp
>>           bl      do_trap_irq
>>   1:
>> +        mov     x0,#7
>> +        bl      tacc_irq_exit
>>           exit    hyp=0, compat=1
>>   guest_fiq_invalid_compat:
>> @@ -376,9 +409,9 @@ guest_error_compat:
>>           exit    hyp=0, compat=1
>>   ENTRY(return_to_new_vcpu32)
>> -        exit    hyp=0, compat=1
>> +        exit    hyp=0, compat=1, tacc=0
>>   ENTRY(return_to_new_vcpu64)
>> -        exit    hyp=0, compat=0
>> +        exit    hyp=0, compat=0, tacc=0
>>   return_from_trap:
>>           msr     daifset, #2 /* Mask interrupts */
>> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
>> index a9c4113..53ef630 100644
>> --- a/xen/arch/arm/domain.c
>> +++ b/xen/arch/arm/domain.c
>> @@ -51,11 +51,13 @@ static void do_idle(void)
>>       process_pending_softirqs();
>>       local_irq_disable();
>> +    tacc_idle(1);
> 
> Any reason to call this before the if and not inside?

Maybe to account at least a bit of idle time for a non-haltable CPU. But on second thought, it would not work.

> 
>>       if ( cpu_is_haltable(cpu) )
>>       {
>>           dsb(sy);
>>           wfi();
>>       }
>> +    tacc_hyp(2);
>>       local_irq_enable();
>>       sched_tick_resume();
>>
> 
> Cheers,
>

Patch

diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S
index 2d9a271..6fb2fa9 100644
--- a/xen/arch/arm/arm64/entry.S
+++ b/xen/arch/arm/arm64/entry.S
@@ -143,12 +143,21 @@ 
 
         .endm
 
-        .macro  exit, hyp, compat
+        .macro  exit, hyp, compat, tacc=1
 
         .if \hyp == 0         /* Guest mode */
 
+	.if \tacc == 1
+
+        mov     x0, #1
+        bl      tacc_hyp
+
+	.endif
+
         bl      leave_hypervisor_tail /* Disables interrupts on return */
 
+	mov     x0, #1
+	bl      tacc_guest
         exit_guest \compat
 
         .endif
@@ -205,9 +214,15 @@  hyp_sync:
 
 hyp_irq:
         entry   hyp=1
+        mov     x0,#5
+        bl      tacc_irq_enter
         msr     daifclr, #4
         mov     x0, sp
         bl      do_trap_irq
+
+        mov     x0,#5
+        bl      tacc_irq_exit
+
         exit    hyp=1
 
 guest_sync:
@@ -291,6 +306,9 @@  guest_sync_slowpath:
          * to save them.
          */
         entry   hyp=0, compat=0, save_x0_x1=0
+        
+        mov     x0,#1
+        bl      tacc_gsync
         /*
          * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
          * is not set. If a vSError took place, the initial exception will be
@@ -307,6 +325,10 @@  guest_sync_slowpath:
 
 guest_irq:
         entry   hyp=0, compat=0
+
+        mov     x0,#6
+        bl      tacc_irq_enter
+
         /*
          * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
          * is not set. If a vSError took place, the initial exception will be
@@ -319,6 +341,8 @@  guest_irq:
         mov     x0, sp
         bl      do_trap_irq
 1:
+	mov	x0,#6
+        bl      tacc_irq_exit
         exit    hyp=0, compat=0
 
 guest_fiq_invalid:
@@ -334,6 +358,9 @@  guest_error:
 
 guest_sync_compat:
         entry   hyp=0, compat=1
+
+        mov     x0,#2
+        bl      tacc_gsync
         /*
          * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
          * is not set. If a vSError took place, the initial exception will be
@@ -350,6 +377,10 @@  guest_sync_compat:
 
 guest_irq_compat:
         entry   hyp=0, compat=1
+
+        mov     x0,#7
+        bl      tacc_irq_enter
+
         /*
          * The vSError will be checked while SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
          * is not set. If a vSError took place, the initial exception will be
@@ -362,6 +393,8 @@  guest_irq_compat:
         mov     x0, sp
         bl      do_trap_irq
 1:
+        mov     x0,#7
+        bl      tacc_irq_exit
         exit    hyp=0, compat=1
 
 guest_fiq_invalid_compat:
@@ -376,9 +409,9 @@  guest_error_compat:
         exit    hyp=0, compat=1
 
 ENTRY(return_to_new_vcpu32)
-        exit    hyp=0, compat=1
+        exit    hyp=0, compat=1, tacc=0
 ENTRY(return_to_new_vcpu64)
-        exit    hyp=0, compat=0
+        exit    hyp=0, compat=0, tacc=0
 
 return_from_trap:
         msr     daifset, #2 /* Mask interrupts */
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index a9c4113..53ef630 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -51,11 +51,13 @@  static void do_idle(void)
     process_pending_softirqs();
 
     local_irq_disable();
+    tacc_idle(1);
     if ( cpu_is_haltable(cpu) )
     {
         dsb(sy);
         wfi();
     }
+    tacc_hyp(2);
     local_irq_enable();
 
     sched_tick_resume();