[08/11] tcg/aarch64: Make direct jump patching thread-safe

Message ID 1460044433-19282-9-git-send-email-sergey.fedorov@linaro.org (mailing list archive)
State New, archived

Commit Message

sergey.fedorov@linaro.org April 7, 2016, 3:53 p.m. UTC
From: Sergey Fedorov <serge.fdrv@gmail.com>

Ensure direct jump patching in AArch64 is atomic by using
atomic_read()/atomic_set() for code patching.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
---
 tcg/aarch64/tcg-target.inc.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

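
The patched direct-jump site is a single aligned 32-bit B instruction, so the point of switching to atomic_read()/atomic_set() is that the instruction word is read and rewritten with accesses the compiler is not allowed to split or tear: a vCPU thread racing with the patching thread observes either the old or the new branch encoding, never a partially written one. Roughly (the exact definitions live in include/qemu/atomic.h and have varied between QEMU versions, and the helper names below are placeholders), the two accessors behave like relaxed atomics, as in this standalone sketch:

    #include <stdint.h>

    /* Standalone sketch, not QEMU code: what atomic_read()/atomic_set()
     * amount to for this use case, a single relaxed 32-bit load and store
     * of the instruction word at the jump site. */
    static inline uint32_t insn_read(uint32_t *code_ptr)
    {
        return __atomic_load_n(code_ptr, __ATOMIC_RELAXED);
    }

    static inline void insn_write(uint32_t *code_ptr, uint32_t insn)
    {
        __atomic_store_n(code_ptr, insn, __ATOMIC_RELAXED);
    }
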
Comments

Alex Bennée April 20, 2016, 2:01 p.m. UTC | #1
Sergey Fedorov <sergey.fedorov@linaro.org> writes:

> From: Sergey Fedorov <serge.fdrv@gmail.com>
>
> Ensure direct jump patching in AArch64 is atomic by using
> atomic_read()/atomic_set() for code patching.
>
> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
> ---
>  tcg/aarch64/tcg-target.inc.c | 14 +++++++++++++-
>  1 file changed, 13 insertions(+), 1 deletion(-)
>
> diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
> index 0ed10a974121..15fdebec921f 100644
> --- a/tcg/aarch64/tcg-target.inc.c
> +++ b/tcg/aarch64/tcg-target.inc.c
> @@ -73,6 +73,18 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
>      *code_ptr = deposit32(*code_ptr, 0, 26, offset);
>  }
>
> +static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
> +                                     tcg_insn_unit *target)
> +{
> +    ptrdiff_t offset = target - code_ptr;
> +    tcg_insn_unit insn;
> +    assert(offset == sextract64(offset, 0, 26));
> +    /* read instruction, mask away previous PC_REL26 parameter contents,
> +       set the proper offset, then write back the instruction. */

This comment could be moved from here and reloc_pc26 and made common for
the two following functions.

> +    insn = atomic_read(code_ptr);
> +    atomic_set(code_ptr, deposit32(insn, 0, 26, offset));
> +}
> +
>  static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
>  {
>      ptrdiff_t offset = target - code_ptr;
> @@ -835,7 +847,7 @@ void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
>      tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
>      tcg_insn_unit *target = (tcg_insn_unit *)addr;
>
> -    reloc_pc26(code_ptr, target);
> +    reloc_pc26_atomic(code_ptr, target);
>      flush_icache_range(jmp_addr, jmp_addr + 4);
>  }

Otherwise:

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>



--
Alex Bennée
Richard Henderson April 20, 2016, 3:08 p.m. UTC | #2
On 04/20/2016 07:01 AM, Alex Bennée wrote:
>
> Sergey Fedorov <sergey.fedorov@linaro.org> writes:
>
>> From: Sergey Fedorov <serge.fdrv@gmail.com>
>>
>> Ensure direct jump patching in AArch64 is atomic by using
>> atomic_read()/atomic_set() for code patching.
>>
>> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
>> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
>> ---
>>   tcg/aarch64/tcg-target.inc.c | 14 +++++++++++++-
>>   1 file changed, 13 insertions(+), 1 deletion(-)
>>
>> diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
>> index 0ed10a974121..15fdebec921f 100644
>> --- a/tcg/aarch64/tcg-target.inc.c
>> +++ b/tcg/aarch64/tcg-target.inc.c
>> @@ -73,6 +73,18 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
>>       *code_ptr = deposit32(*code_ptr, 0, 26, offset);
>>   }
>>
>> +static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
>> +                                     tcg_insn_unit *target)
>> +{
>> +    ptrdiff_t offset = target - code_ptr;
>> +    tcg_insn_unit insn;
>> +    assert(offset == sextract64(offset, 0, 26));
>> +    /* read instruction, mask away previous PC_REL26 parameter contents,
>> +       set the proper offset, then write back the instruction. */
>
> This comment could be moved from here and reloc_pc26 and made common for
> the two following functions.

There's a significant amount of cleanup that ought to happen here, now that 
we're not re-translating TBs.  I don't know if Sergey should be gated on that.


r~
Alex Bennée April 20, 2016, 6:22 p.m. UTC | #3
Richard Henderson <rth@twiddle.net> writes:

> On 04/20/2016 07:01 AM, Alex Bennée wrote:
>>
>> Sergey Fedorov <sergey.fedorov@linaro.org> writes:
>>
>>> From: Sergey Fedorov <serge.fdrv@gmail.com>
>>>
>>> Ensure direct jump patching in AArch64 is atomic by using
>>> atomic_read()/atomic_set() for code patching.
>>>
>>> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
>>> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
>>> ---
>>>   tcg/aarch64/tcg-target.inc.c | 14 +++++++++++++-
>>>   1 file changed, 13 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
>>> index 0ed10a974121..15fdebec921f 100644
>>> --- a/tcg/aarch64/tcg-target.inc.c
>>> +++ b/tcg/aarch64/tcg-target.inc.c
>>> @@ -73,6 +73,18 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
>>>       *code_ptr = deposit32(*code_ptr, 0, 26, offset);
>>>   }
>>>
>>> +static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
>>> +                                     tcg_insn_unit *target)
>>> +{
>>> +    ptrdiff_t offset = target - code_ptr;
>>> +    tcg_insn_unit insn;
>>> +    assert(offset == sextract64(offset, 0, 26));
>>> +    /* read instruction, mask away previous PC_REL26 parameter contents,
>>> +       set the proper offset, then write back the instruction. */
>>
>> This comment could be moved from here and reloc_pc26 and made common for
>> the two following functions.
>
> There's a significant amount of cleanup that ought to happen here, now that
> we're not re-translating TBs.  I don't know if Sergey should be gated
> on that.

Is this stuff already in the works? Otherwise we are trying to get
precursors to MTTCG into the code (once the tree re-opens) to keep the
main diff down. This is also beneficial for linux-user stuff.

>
>
> r~


--
Alex Bennée
Sergey Fedorov April 20, 2016, 6:44 p.m. UTC | #4
On 20/04/16 18:08, Richard Henderson wrote:
> On 04/20/2016 07:01 AM, Alex Bennée wrote:
>>
>> Sergey Fedorov <sergey.fedorov@linaro.org> writes:
>>
>>> From: Sergey Fedorov <serge.fdrv@gmail.com>
>>>
>>> Ensure direct jump patching in AArch64 is atomic by using
>>> atomic_read()/atomic_set() for code patching.
>>>
>>> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
>>> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
>>> ---
>>>   tcg/aarch64/tcg-target.inc.c | 14 +++++++++++++-
>>>   1 file changed, 13 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
>>> index 0ed10a974121..15fdebec921f 100644
>>> --- a/tcg/aarch64/tcg-target.inc.c
>>> +++ b/tcg/aarch64/tcg-target.inc.c
>>> @@ -73,6 +73,18 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
>>>       *code_ptr = deposit32(*code_ptr, 0, 26, offset);
>>>   }
>>>
>>> +static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
>>> +                                     tcg_insn_unit *target)
>>> +{
>>> +    ptrdiff_t offset = target - code_ptr;
>>> +    tcg_insn_unit insn;
>>> +    assert(offset == sextract64(offset, 0, 26));
>>> +    /* read instruction, mask away previous PC_REL26 parameter contents,
>>> +       set the proper offset, then write back the instruction. */
>>
>> This comment could be moved from here and reloc_pc26 and made common for
>> the two following functions.
>
> There's a significant amount of cleanup that ought to happen here, now
> that we're not re-translating TBs.  I don't know if Sergey should be
> gated on that.

Do you mean I'd better avoid using stuff like reloc_pc26()?

Kind regards,
Sergey
Richard Henderson April 20, 2016, 6:57 p.m. UTC | #5
On 04/20/2016 11:22 AM, Alex Bennée wrote:
>
> Richard Henderson <rth@twiddle.net> writes:
>
>> On 04/20/2016 07:01 AM, Alex Bennée wrote:
>>>
>>> Sergey Fedorov <sergey.fedorov@linaro.org> writes:
>>>
>>>> From: Sergey Fedorov <serge.fdrv@gmail.com>
>>>>
>>>> Ensure direct jump patching in AArch64 is atomic by using
>>>> atomic_read()/atomic_set() for code patching.
>>>>
>>>> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
>>>> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
>>>> ---
>>>>    tcg/aarch64/tcg-target.inc.c | 14 +++++++++++++-
>>>>    1 file changed, 13 insertions(+), 1 deletion(-)
>>>>
>>>> diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
>>>> index 0ed10a974121..15fdebec921f 100644
>>>> --- a/tcg/aarch64/tcg-target.inc.c
>>>> +++ b/tcg/aarch64/tcg-target.inc.c
>>>> @@ -73,6 +73,18 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
>>>>        *code_ptr = deposit32(*code_ptr, 0, 26, offset);
>>>>    }
>>>>
>>>> +static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
>>>> +                                     tcg_insn_unit *target)
>>>> +{
>>>> +    ptrdiff_t offset = target - code_ptr;
>>>> +    tcg_insn_unit insn;
>>>> +    assert(offset == sextract64(offset, 0, 26));
>>>> +    /* read instruction, mask away previous PC_REL26 parameter contents,
>>>> +       set the proper offset, then write back the instruction. */
>>>
>>> This comment could be moved from here and reloc_pc26 and made common for
>>> the two following functions.
>>
>> There's a significant amount of cleanup that ought to happen here, now that
>> we're not re-translating TBs.  I don't know if Sergey should be gated
>> on that.
>
> Is this stuff already in the works? Otherwise we are trying to get
> pre-cursors to MTTCG into the code (once the tree re-opens) to keep the
> main diff down. This also is beneficial for linux-user stuff.

We're talking past one another.

No, it's not yet in the works.  I'm saying that this patch set should not wait 
for it.  Thus I don't care if what he adds here is a little messy; we'll clean 
it all up at once later.  Thus don't bother refactoring reloc_pc26 and 
reloc_pc26_atomic.


r~
Alex Bennée April 20, 2016, 7:51 p.m. UTC | #6
Richard Henderson <rth@twiddle.net> writes:

> On 04/20/2016 11:22 AM, Alex Bennée wrote:
>>
>> Richard Henderson <rth@twiddle.net> writes:
>>
>>> On 04/20/2016 07:01 AM, Alex Bennée wrote:
>>>>
>>>> Sergey Fedorov <sergey.fedorov@linaro.org> writes:
>>>>
>>>>> From: Sergey Fedorov <serge.fdrv@gmail.com>
>>>>>
>>>>> Ensure direct jump patching in AArch64 is atomic by using
>>>>> atomic_read()/atomic_set() for code patching.
>>>>>
>>>>> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
>>>>> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
>>>>> ---
>>>>>    tcg/aarch64/tcg-target.inc.c | 14 +++++++++++++-
>>>>>    1 file changed, 13 insertions(+), 1 deletion(-)
>>>>>
>>>>> diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
>>>>> index 0ed10a974121..15fdebec921f 100644
>>>>> --- a/tcg/aarch64/tcg-target.inc.c
>>>>> +++ b/tcg/aarch64/tcg-target.inc.c
>>>>> @@ -73,6 +73,18 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
>>>>>        *code_ptr = deposit32(*code_ptr, 0, 26, offset);
>>>>>    }
>>>>>
>>>>> +static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
>>>>> +                                     tcg_insn_unit *target)
>>>>> +{
>>>>> +    ptrdiff_t offset = target - code_ptr;
>>>>> +    tcg_insn_unit insn;
>>>>> +    assert(offset == sextract64(offset, 0, 26));
>>>>> +    /* read instruction, mask away previous PC_REL26 parameter contents,
>>>>> +       set the proper offset, then write back the instruction. */
>>>>
>>>> This comment could be moved from here and reloc_pc26 and made common for
>>>> the two following functions.
>>>
>>> There's a significant amount of cleanup that ought to happen here, now that
>>> we're not re-translating TBs.  I don't know if Sergey should be gated
>>> on that.
>>
>> Is this stuff already in the works? Otherwise we are trying to get
>> precursors to MTTCG into the code (once the tree re-opens) to keep the
>> main diff down. This is also beneficial for linux-user stuff.
>
> We're talking past one another.
>
> No, it's not yet in the works.  I'm saying that this patch set should not wait
> for it.  Thus I don't care if what he adds here is a little messy; we'll clean
> it all up at once later.  Thus don't bother refactoring reloc_pc26 and
> reloc_pc26_atomic.

Ahh OK, cool ;-)

>
>
> r~


--
Alex Bennée
Sergey Fedorov April 21, 2016, 3:47 p.m. UTC | #7
On 07/04/16 18:53, Sergey Fedorov wrote:
> From: Sergey Fedorov <serge.fdrv@gmail.com>
>
> Ensure direct jump patching in AArch64 is atomic by using
> atomic_read()/atomic_set() for code patching.
>
> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
> ---
>  tcg/aarch64/tcg-target.inc.c | 14 +++++++++++++-
>  1 file changed, 13 insertions(+), 1 deletion(-)
>
> diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
> index 0ed10a974121..15fdebec921f 100644
> --- a/tcg/aarch64/tcg-target.inc.c
> +++ b/tcg/aarch64/tcg-target.inc.c
> @@ -73,6 +73,18 @@ static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
>      *code_ptr = deposit32(*code_ptr, 0, 26, offset);
>  }
>  
> +static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
> +                                     tcg_insn_unit *target)
> +{
> +    ptrdiff_t offset = target - code_ptr;
> +    tcg_insn_unit insn;
> +    assert(offset == sextract64(offset, 0, 26));

I'd better use tcg_debug_assert() here as in this patch:

http://patchwork.ozlabs.org/patch/613020/
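
That is, just swapping the assertion for TCG's debug-only variant, along these lines (a sketch, the linked patch is the authoritative version):

    tcg_debug_assert(offset == sextract64(offset, 0, 26));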

Kind regards,
Sergey

> +    /* read instruction, mask away previous PC_REL26 parameter contents,
> +       set the proper offset, then write back the instruction. */
> +    insn = atomic_read(code_ptr);
> +    atomic_set(code_ptr, deposit32(insn, 0, 26, offset));
> +}
> +
>  static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
>  {
>      ptrdiff_t offset = target - code_ptr;
> @@ -835,7 +847,7 @@ void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
>      tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
>      tcg_insn_unit *target = (tcg_insn_unit *)addr;
>  
> -    reloc_pc26(code_ptr, target);
> +    reloc_pc26_atomic(code_ptr, target);
>      flush_icache_range(jmp_addr, jmp_addr + 4);
>  }
>

Patch

diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 0ed10a974121..15fdebec921f 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -73,6 +73,18 @@  static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
     *code_ptr = deposit32(*code_ptr, 0, 26, offset);
 }
 
+static inline void reloc_pc26_atomic(tcg_insn_unit *code_ptr,
+                                     tcg_insn_unit *target)
+{
+    ptrdiff_t offset = target - code_ptr;
+    tcg_insn_unit insn;
+    assert(offset == sextract64(offset, 0, 26));
+    /* read instruction, mask away previous PC_REL26 parameter contents,
+       set the proper offset, then write back the instruction. */
+    insn = atomic_read(code_ptr);
+    atomic_set(code_ptr, deposit32(insn, 0, 26, offset));
+}
+
 static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
 {
     ptrdiff_t offset = target - code_ptr;
@@ -835,7 +847,7 @@  void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr)
     tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr;
     tcg_insn_unit *target = (tcg_insn_unit *)addr;
 
-    reloc_pc26(code_ptr, target);
+    reloc_pc26_atomic(code_ptr, target);
     flush_icache_range(jmp_addr, jmp_addr + 4);
 }
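
For reference, here is a minimal standalone sketch of the offset check and 26-bit field update that reloc_pc26_atomic performs. It is not QEMU code: deposit32() and sextract64() are local re-implementations assumed to match the behaviour of the helpers in include/qemu/bitops.h for the 26-bit case used here, and the atomic accessors themselves are left out.

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Insert fieldval into bits [start, start+length) of value. */
    static uint32_t deposit32(uint32_t value, int start, int length, uint32_t fieldval)
    {
        uint32_t mask = (~0u >> (32 - length)) << start;
        return (value & ~mask) | ((fieldval << start) & mask);
    }

    /* Sign-extend the length-bit field of value starting at bit 'start'. */
    static int64_t sextract64(uint64_t value, int start, int length)
    {
        return (int64_t)(value << (64 - length - start)) >> (64 - length);
    }

    int main(void)
    {
        uint32_t insn = 0x14000000;   /* AArch64 "B" with a zero imm26 field */
        int64_t offset = 0x1234;      /* branch distance in 32-bit insn units */

        /* imm26 is a signed 26-bit offset, so patching is only legal when the
         * distance survives a round trip through sign extension. */
        assert(offset == sextract64(offset, 0, 26));

        /* Rewrite only the low 26 bits; the opcode bits are preserved. */
        insn = deposit32(insn, 0, 26, (uint32_t)offset);
        printf("patched insn: 0x%08" PRIx32 "\n", insn);   /* 0x14001234 */
        return 0;
    }

In the real aarch64_tb_set_jmp_target() the rewritten word is stored with atomic_set() and then pushed out to the instruction stream with flush_icache_range(jmp_addr, jmp_addr + 4), since the data-side store alone does not guarantee that an executing core fetches the new encoding.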