[RFC,v7,09/16] softmmu: Include MMIO/invalid exclusive accesses

Message ID 1454059965-23402-10-git-send-email-a.rigo@virtualopensystems.com (mailing list archive)
State New, archived

Commit Message

alvise rigo Jan. 29, 2016, 9:32 a.m. UTC
Enable exclusive accesses when the MMIO/invalid flag is set in the TLB
entry.

In case an LL access is done to MMIO memory, we treat it differently from
a RAM access in that we do not rely on the EXCL bitmap to flag the page
as exclusive. In fact, we don't even need the TLB_EXCL flag to force the
slow path, since it is always forced anyway.

This commit does not take care of invalidating an MMIO exclusive range from
other non-exclusive accesses, e.g. CPU1 does a LoadLink to MMIO address X and
CPU2 writes to X. This will be addressed in the following commit.

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 cputlb.c           |  7 +++----
 softmmu_template.h | 26 ++++++++++++++++++++------
 2 files changed, 23 insertions(+), 10 deletions(-)
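
For context on the mechanism being extended here: the softmmu TLB stores status
flags in the low bits of the page-aligned address kept in a TLB entry, so any
set flag makes the inline fast-path tag compare fail and diverts the access to
the store helper. Below is a minimal standalone sketch of that scheme;
TLB_INVALID_MASK, TLB_NOTDIRTY and TLB_MMIO match the upstream values of this
period, while the TLB_EXCL value is only an assumption (the real one is defined
by an earlier patch in this series).

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t target_ulong;      /* stand-in for QEMU's target type */
#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK ((target_ulong)-1 << TARGET_PAGE_BITS)

/* Flag bits stored in the low bits of a TLB entry's addr_write. */
#define TLB_INVALID_MASK (1 << 3)
#define TLB_NOTDIRTY     (1 << 4)
#define TLB_MMIO         (1 << 5)
#define TLB_EXCL         (1 << 6)   /* assumed value, defined by this series */

/* Any flag bit set in the entry makes the fast-path tag compare fail,
 * so the generated code falls back to the slow-path store helper. */
static inline bool tlb_forces_slow_path(target_ulong tlb_addr)
{
    return (tlb_addr & ~TARGET_PAGE_MASK) != 0;
}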

Comments

Alex Bennée Feb. 16, 2016, 5:49 p.m. UTC | #1
Alvise Rigo <a.rigo@virtualopensystems.com> writes:

> Enable exclusive accesses when the MMIO/invalid flag is set in the TLB
> entry.
>
> In case an LL access is done to MMIO memory, we treat it differently from
> a RAM access in that we do not rely on the EXCL bitmap to flag the page
> as exclusive. In fact, we don't even need the TLB_EXCL flag to force the
> slow path, since it is always forced anyway.
>
> This commit does not take care of invalidating an MMIO exclusive range from
> other non-exclusive accesses, e.g. CPU1 does a LoadLink to MMIO address X and
> CPU2 writes to X. This will be addressed in the following commit.
>
> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
> ---
>  cputlb.c           |  7 +++----
>  softmmu_template.h | 26 ++++++++++++++++++++------
>  2 files changed, 23 insertions(+), 10 deletions(-)
>
> diff --git a/cputlb.c b/cputlb.c
> index aa9cc17..87d09c8 100644
> --- a/cputlb.c
> +++ b/cputlb.c
> @@ -424,7 +424,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>          if ((memory_region_is_ram(section->mr) && section->readonly)
>              || memory_region_is_romd(section->mr)) {
>              /* Write access calls the I/O callback.  */
> -            te->addr_write = address | TLB_MMIO;
> +            address |= TLB_MMIO;
>          } else if (memory_region_is_ram(section->mr)
>                     && cpu_physical_memory_is_clean(section->mr->ram_addr
>                                                     + xlat)) {
> @@ -437,11 +437,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>              if (cpu_physical_memory_is_excl(section->mr->ram_addr + xlat)) {
>                  /* There is at least one vCPU that has flagged the address as
>                   * exclusive. */
> -                te->addr_write = address | TLB_EXCL;
> -            } else {
> -                te->addr_write = address;
> +                address |= TLB_EXCL;
>              }
>          }
> +        te->addr_write = address;

As mentioned before I think this bit belongs in the earlier patch.

>      } else {
>          te->addr_write = -1;
>      }
> diff --git a/softmmu_template.h b/softmmu_template.h
> index 267c52a..c54bdc9 100644
> --- a/softmmu_template.h
> +++ b/softmmu_template.h
> @@ -476,7 +476,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>
>      /* Handle an IO access or exclusive access.  */
>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
> -        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
> +        if (tlb_addr & TLB_EXCL) {
>              CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
>              CPUState *cpu = ENV_GET_CPU(env);
>              CPUClass *cc = CPU_GET_CLASS(cpu);
> @@ -500,8 +500,15 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>                  }
>              }
>
> -            glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
> -                                                    mmu_idx, index, retaddr);
> +            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */

What about the other flags? Shouldn't this be tlb_addr & TLB_MMIO?

> +                glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
> +                                                         mmu_idx, index,
> +                                                         retaddr);
> +            } else {
> +                glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
> +                                                        mmu_idx, index,
> +                                                        retaddr);
> +            }
>
>              lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
>
> @@ -620,7 +627,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>
>      /* Handle an IO access or exclusive access.  */
>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
> -        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
> +        if (tlb_addr & TLB_EXCL) {
>              CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
>              CPUState *cpu = ENV_GET_CPU(env);
>              CPUClass *cc = CPU_GET_CLASS(cpu);
> @@ -644,8 +651,15 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>                  }
>              }
>
> -            glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
> -                                                    mmu_idx, index, retaddr);
> +            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
> +                glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
> +                                                         mmu_idx, index,
> +                                                         retaddr);
> +            } else {
> +                glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
> +                                                        mmu_idx, index,
> +                                                        retaddr);
> +            }
>
>              lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);


--
Alex Bennée
alvise rigo Feb. 18, 2016, 2:18 p.m. UTC | #2
On Tue, Feb 16, 2016 at 6:49 PM, Alex Bennée <alex.bennee@linaro.org> wrote:
>
> Alvise Rigo <a.rigo@virtualopensystems.com> writes:
>
>> Enable exclusive accesses when the MMIO/invalid flag is set in the TLB
>> entry.
>>
>> In case an LL access is done to MMIO memory, we treat it differently from
>> a RAM access in that we do not rely on the EXCL bitmap to flag the page
>> as exclusive. In fact, we don't even need the TLB_EXCL flag to force the
>> slow path, since it is always forced anyway.
>>
>> This commit does not take care of invalidating an MMIO exclusive range from
>> other non-exclusive accesses, e.g. CPU1 does a LoadLink to MMIO address X and
>> CPU2 writes to X. This will be addressed in the following commit.
>>
>> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
>> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
>> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
>> ---
>>  cputlb.c           |  7 +++----
>>  softmmu_template.h | 26 ++++++++++++++++++++------
>>  2 files changed, 23 insertions(+), 10 deletions(-)
>>
>> diff --git a/cputlb.c b/cputlb.c
>> index aa9cc17..87d09c8 100644
>> --- a/cputlb.c
>> +++ b/cputlb.c
>> @@ -424,7 +424,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>>          if ((memory_region_is_ram(section->mr) && section->readonly)
>>              || memory_region_is_romd(section->mr)) {
>>              /* Write access calls the I/O callback.  */
>> -            te->addr_write = address | TLB_MMIO;
>> +            address |= TLB_MMIO;
>>          } else if (memory_region_is_ram(section->mr)
>>                     && cpu_physical_memory_is_clean(section->mr->ram_addr
>>                                                     + xlat)) {
>> @@ -437,11 +437,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>>              if (cpu_physical_memory_is_excl(section->mr->ram_addr + xlat)) {
>>                  /* There is at least one vCPU that has flagged the address as
>>                   * exclusive. */
>> -                te->addr_write = address | TLB_EXCL;
>> -            } else {
>> -                te->addr_write = address;
>> +                address |= TLB_EXCL;
>>              }
>>          }
>> +        te->addr_write = address;
>
> As mentioned before I think this bit belongs in the earlier patch.
>
>>      } else {
>>          te->addr_write = -1;
>>      }
>> diff --git a/softmmu_template.h b/softmmu_template.h
>> index 267c52a..c54bdc9 100644
>> --- a/softmmu_template.h
>> +++ b/softmmu_template.h
>> @@ -476,7 +476,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>
>>      /* Handle an IO access or exclusive access.  */
>>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
>> -        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
>> +        if (tlb_addr & TLB_EXCL) {
>>              CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
>>              CPUState *cpu = ENV_GET_CPU(env);
>>              CPUClass *cc = CPU_GET_CLASS(cpu);
>> @@ -500,8 +500,15 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>                  }
>>              }
>>
>> -            glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>> -                                                    mmu_idx, index, retaddr);
>> +            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
>
> What about the other flags? Shouldn't this be tlb_addr & TLB_MMIO?

Upstream QEMU's condition to take the IO access path is:
if (unlikely(tlb_addr & ~TARGET_PAGE_MASK))
Now we split this into:
if (tlb_addr & TLB_EXCL)
for RAM exclusive accesses, and
if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL))
for IO accesses. This last case also handles the IO exclusive accesses.
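
To make the split concrete, here is a small check (reusing the typedef and the
TLB_* and TARGET_PAGE_MASK definitions from the sketch near the top of this
page, including the assumed TLB_EXCL value) showing how the two tests classify
a RAM-exclusive, a plain MMIO and an MMIO-exclusive entry:

#include <assert.h>
/* assumes the typedef, TLB_* and TARGET_PAGE_MASK definitions above */

int main(void)
{
    target_ulong page      = (target_ulong)0x40000000;      /* page-aligned */
    target_ulong ram_excl  = page | TLB_EXCL;               /* RAM, exclusive  */
    target_ulong mmio      = page | TLB_MMIO;               /* plain MMIO      */
    target_ulong mmio_excl = page | TLB_MMIO | TLB_EXCL;    /* MMIO, exclusive */

    /* First test: does the access need exclusive handling at all? */
    assert(ram_excl & TLB_EXCL);
    assert(mmio_excl & TLB_EXCL);
    assert(!(mmio & TLB_EXCL));     /* plain MMIO skips the EXCL branch */

    /* Second test, inside the EXCL branch: take the MMIO path only when
     * some flag other than TLB_EXCL is set. */
    assert(!(ram_excl & ~(TARGET_PAGE_MASK | TLB_EXCL)));   /* -> RAM path  */
    assert(mmio_excl & ~(TARGET_PAGE_MASK | TLB_EXCL));     /* -> MMIO path */

    return 0;
}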

>
>> +                glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
>> +                                                         mmu_idx, index,
>> +                                                         retaddr);
>> +            } else {
>> +                glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>> +                                                        mmu_idx, index,
>> +                                                        retaddr);
>> +            }
>>
>>              lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
>>
>> @@ -620,7 +627,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>
>>      /* Handle an IO access or exclusive access.  */
>>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
>> -        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
>> +        if (tlb_addr & TLB_EXCL) {
>>              CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
>>              CPUState *cpu = ENV_GET_CPU(env);
>>              CPUClass *cc = CPU_GET_CLASS(cpu);
>> @@ -644,8 +651,15 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>                  }
>>              }
>>
>> -            glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>> -                                                    mmu_idx, index, retaddr);
>> +            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
>> +                glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
>> +                                                         mmu_idx, index,
>> +                                                         retaddr);
>> +            } else {
>> +                glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>> +                                                        mmu_idx, index,
>> +                                                        retaddr);
>> +            }
>>
>>              lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
>
>
> --
> Alex Bennée
Alex Bennée Feb. 18, 2016, 4:26 p.m. UTC | #3
alvise rigo <a.rigo@virtualopensystems.com> writes:

> On Tue, Feb 16, 2016 at 6:49 PM, Alex Bennée <alex.bennee@linaro.org> wrote:
>>
>> Alvise Rigo <a.rigo@virtualopensystems.com> writes:
>>
>>> Enable exclusive accesses when the MMIO/invalid flag is set in the TLB
>>> entry.
>>>
>>> In case an LL access is done to MMIO memory, we treat it differently from
>>> a RAM access in that we do not rely on the EXCL bitmap to flag the page
>>> as exclusive. In fact, we don't even need the TLB_EXCL flag to force the
>>> slow path, since it is always forced anyway.
>>>
>>> This commit does not take care of invalidating an MMIO exclusive range from
>>> other non-exclusive accesses, e.g. CPU1 does a LoadLink to MMIO address X and
>>> CPU2 writes to X. This will be addressed in the following commit.
>>>
>>> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
>>> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
>>> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
>>> ---
>>>  cputlb.c           |  7 +++----
>>>  softmmu_template.h | 26 ++++++++++++++++++++------
>>>  2 files changed, 23 insertions(+), 10 deletions(-)
>>>
>>> diff --git a/cputlb.c b/cputlb.c
>>> index aa9cc17..87d09c8 100644
>>> --- a/cputlb.c
>>> +++ b/cputlb.c
>>> @@ -424,7 +424,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>>>          if ((memory_region_is_ram(section->mr) && section->readonly)
>>>              || memory_region_is_romd(section->mr)) {
>>>              /* Write access calls the I/O callback.  */
>>> -            te->addr_write = address | TLB_MMIO;
>>> +            address |= TLB_MMIO;
>>>          } else if (memory_region_is_ram(section->mr)
>>>                     && cpu_physical_memory_is_clean(section->mr->ram_addr
>>>                                                     + xlat)) {
>>> @@ -437,11 +437,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
>>>              if (cpu_physical_memory_is_excl(section->mr->ram_addr + xlat)) {
>>>                  /* There is at least one vCPU that has flagged the address as
>>>                   * exclusive. */
>>> -                te->addr_write = address | TLB_EXCL;
>>> -            } else {
>>> -                te->addr_write = address;
>>> +                address |= TLB_EXCL;
>>>              }
>>>          }
>>> +        te->addr_write = address;
>>
>> As mentioned before I think this bit belongs in the earlier patch.
>>
>>>      } else {
>>>          te->addr_write = -1;
>>>      }
>>> diff --git a/softmmu_template.h b/softmmu_template.h
>>> index 267c52a..c54bdc9 100644
>>> --- a/softmmu_template.h
>>> +++ b/softmmu_template.h
>>> @@ -476,7 +476,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>>
>>>      /* Handle an IO access or exclusive access.  */
>>>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
>>> -        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
>>> +        if (tlb_addr & TLB_EXCL) {
>>>              CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
>>>              CPUState *cpu = ENV_GET_CPU(env);
>>>              CPUClass *cc = CPU_GET_CLASS(cpu);
>>> @@ -500,8 +500,15 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>>                  }
>>>              }
>>>
>>> -            glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>>> -                                                    mmu_idx, index, retaddr);
>>> +            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
>>
>> What about the other flags? Shouldn't this be tlb_addr & TLB_MMIO?
>
> Upstream QEMU's condition to take the IO access path is:
> if (unlikely(tlb_addr & ~TARGET_PAGE_MASK))
> Now we split this into:
> if (tlb_addr & TLB_EXCL)
> for RAM exclusive accesses, and
> if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL))
> for IO accesses. This last case also handles the IO exclusive accesses.

OK I see that now. Thanks.

>
>>
>>> +                glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
>>> +                                                         mmu_idx, index,
>>> +                                                         retaddr);
>>> +            } else {
>>> +                glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>>> +                                                        mmu_idx, index,
>>> +                                                        retaddr);
>>> +            }
>>>
>>>              lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
>>>
>>> @@ -620,7 +627,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>>
>>>      /* Handle an IO access or exclusive access.  */
>>>      if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
>>> -        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
>>> +        if (tlb_addr & TLB_EXCL) {
>>>              CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
>>>              CPUState *cpu = ENV_GET_CPU(env);
>>>              CPUClass *cc = CPU_GET_CLASS(cpu);
>>> @@ -644,8 +651,15 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>>                  }
>>>              }
>>>
>>> -            glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>>> -                                                    mmu_idx, index, retaddr);
>>> +            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
>>> +                glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
>>> +                                                         mmu_idx, index,
>>> +                                                         retaddr);
>>> +            } else {
>>> +                glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>>> +                                                        mmu_idx, index,
>>> +                                                        retaddr);
>>> +            }
>>>
>>>              lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
>>
>>
>> --
>> Alex Bennée


--
Alex Bennée

Patch

diff --git a/cputlb.c b/cputlb.c
index aa9cc17..87d09c8 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -424,7 +424,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
         if ((memory_region_is_ram(section->mr) && section->readonly)
             || memory_region_is_romd(section->mr)) {
             /* Write access calls the I/O callback.  */
-            te->addr_write = address | TLB_MMIO;
+            address |= TLB_MMIO;
         } else if (memory_region_is_ram(section->mr)
                    && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                    + xlat)) {
@@ -437,11 +437,10 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
             if (cpu_physical_memory_is_excl(section->mr->ram_addr + xlat)) {
                 /* There is at least one vCPU that has flagged the address as
                  * exclusive. */
-                te->addr_write = address | TLB_EXCL;
-            } else {
-                te->addr_write = address;
+                address |= TLB_EXCL;
             }
         }
+        te->addr_write = address;
     } else {
         te->addr_write = -1;
     }
diff --git a/softmmu_template.h b/softmmu_template.h
index 267c52a..c54bdc9 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -476,7 +476,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access or exclusive access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
+        if (tlb_addr & TLB_EXCL) {
             CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
             CPUState *cpu = ENV_GET_CPU(env);
             CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -500,8 +500,15 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 }
             }
 
-            glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
-                                                    mmu_idx, index, retaddr);
+            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
+                glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
+                                                         mmu_idx, index,
+                                                         retaddr);
+            } else {
+                glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
+                                                        mmu_idx, index,
+                                                        retaddr);
+            }
 
             lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
 
@@ -620,7 +627,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle an IO access or exclusive access.  */
     if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
-        if ((tlb_addr & ~TARGET_PAGE_MASK) == TLB_EXCL) {
+        if (tlb_addr & TLB_EXCL) {
             CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
             CPUState *cpu = ENV_GET_CPU(env);
             CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -644,8 +651,15 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 }
             }
 
-            glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
-                                                    mmu_idx, index, retaddr);
+            if (tlb_addr & ~(TARGET_PAGE_MASK | TLB_EXCL)) { /* MMIO access */
+                glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
+                                                         mmu_idx, index,
+                                                         retaddr);
+            } else {
+                glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
+                                                        mmu_idx, index,
+                                                        retaddr);
+            }
 
             lookup_and_reset_cpus_ll_addr(hw_addr, DATA_SIZE);
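
Both store helpers end by calling lookup_and_reset_cpus_ll_addr() so that a
store landing on an address another vCPU has load-linked breaks that vCPU's
reservation. Its real implementation belongs to an earlier patch of this
series; the following is only a sketch of the assumed semantics. CPU_FOREACH
and ranges_overlap are real QEMU helpers, while the excl_protected_range field
and the EXCLUSIVE_RESET_ADDR constant are names taken from this series and
assumed here.

/* Sketch: break every vCPU reservation overlapping the written range.
 * Requires QEMU-internal headers (qom/cpu.h, qemu/range.h). */
static void lookup_and_reset_cpus_ll_addr_sketch(hwaddr addr, hwaddr size)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (ranges_overlap(cpu->excl_protected_range.begin,
                           cpu->excl_protected_range.end -
                           cpu->excl_protected_range.begin,
                           addr, size)) {
            /* The next StoreConditional on this vCPU must now fail. */
            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
        }
    }
}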