
[RFC,v7,10/16] softmmu: Protect MMIO exclusive range

Message ID 1454059965-23402-11-git-send-email-a.rigo@virtualopensystems.com (mailing list archive)
State New, archived

Commit Message

alvise rigo Jan. 29, 2016, 9:32 a.m. UTC
As in the RAM case, MMIO exclusive ranges also have to be protected
from other CPUs' accesses. To do that, we flag the accessed
MemoryRegion to mark that an exclusive access has been performed and
has not yet concluded.

This flag will force the other CPUs to invalidate the exclusive range in
case of collision.

Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
---
 cputlb.c                | 20 +++++++++++++-------
 include/exec/memory.h   |  1 +
 softmmu_llsc_template.h | 11 +++++++----
 softmmu_template.h      | 22 ++++++++++++++++++++++
 4 files changed, 43 insertions(+), 11 deletions(-)
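
The scheme in miniature, before reading the diff: a load-link (LL) on an
MMIO page records a per-vCPU reservation and flags the MemoryRegion; any
MMIO write then breaks every other vCPU's colliding reservation, so that
vCPU's store-conditional (SC) fails. The following standalone toy model
(plain C, not QEMU code; only the names EXCLUSIVE_RESET_ADDR and
pending_excl_access are taken from the patch, everything else is
illustrative) compiles and prints "SC failed":

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXCLUSIVE_RESET_ADDR UINT64_MAX
#define NR_CPUS 2

/* One exclusive (LL/SC) reservation per vCPU, as in the series. */
static struct { uint64_t begin, end; } excl[NR_CPUS];
/* Stand-in for the per-MemoryRegion flag this patch introduces. */
static bool pending_excl_access;

static bool ranges_overlap(uint64_t b1, uint64_t s1, uint64_t b2, uint64_t s2)
{
    return b1 < b2 + s2 && b2 < b1 + s1;
}

/* LL on MMIO: record the reservation and flag the region. */
static void load_link(int cpu, uint64_t addr, uint64_t size)
{
    excl[cpu].begin = addr;
    excl[cpu].end = addr + size;
    pending_excl_access = true;
}

/* MMIO write: break every *other* vCPU's colliding reservation; clear
 * the region flag once a reservation has actually been broken. */
static void io_write(int writer, uint64_t addr, uint64_t size)
{
    bool reset = false;
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        if (cpu != writer && excl[cpu].begin != EXCLUSIVE_RESET_ADDR &&
            ranges_overlap(excl[cpu].begin,
                           excl[cpu].end - excl[cpu].begin, addr, size)) {
            excl[cpu].begin = EXCLUSIVE_RESET_ADDR;
            reset = true;
        }
    }
    if (pending_excl_access && reset) {
        pending_excl_access = false;
    }
}

/* SC: succeeds only if this vCPU's reservation survived. */
static bool store_conditional(int cpu)
{
    bool ok = excl[cpu].begin != EXCLUSIVE_RESET_ADDR;
    excl[cpu].begin = EXCLUSIVE_RESET_ADDR;
    return ok;
}

int main(void)
{
    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        excl[cpu].begin = EXCLUSIVE_RESET_ADDR;
    }
    load_link(0, 0x1000, 4);   /* vCPU 0 takes a reservation */
    io_write(1, 0x1000, 4);    /* vCPU 1 writes over it */
    printf("SC %s\n", store_conditional(0) ? "succeeded" : "failed");
    return 0;
}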

Comments

Alex Bennée Feb. 17, 2016, 6:55 p.m. UTC | #1
Alvise Rigo <a.rigo@virtualopensystems.com> writes:

> As in the RAM case, MMIO exclusive ranges also have to be protected
> from other CPUs' accesses. To do that, we flag the accessed
> MemoryRegion to mark that an exclusive access has been performed and
> has not yet concluded.
>
> This flag will force the other CPUs to invalidate the exclusive range in
> case of collision.
>
> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
> ---
>  cputlb.c                | 20 +++++++++++++-------
>  include/exec/memory.h   |  1 +
>  softmmu_llsc_template.h | 11 +++++++----
>  softmmu_template.h      | 22 ++++++++++++++++++++++
>  4 files changed, 43 insertions(+), 11 deletions(-)
>
> diff --git a/cputlb.c b/cputlb.c
> index 87d09c8..06ce2da 100644
> --- a/cputlb.c
> +++ b/cputlb.c
> @@ -496,19 +496,25 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
>  /* For every vCPU compare the exclusive address and reset it in case of a
>   * match. Since only one vCPU is running at once, no lock has to be held to
>   * guard this operation. */
> -static inline void lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
> +static inline bool lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
>  {
>      CPUState *cpu;
> +    bool ret = false;
>
>      CPU_FOREACH(cpu) {
> -        if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
> -            ranges_overlap(cpu->excl_protected_range.begin,
> -                           cpu->excl_protected_range.end -
> -                           cpu->excl_protected_range.begin,
> -                           addr, size)) {
> -            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
> +        if (current_cpu != cpu) {

I'm confused by this change. I don't see anywhere in the MMIO handling
why we would want to change skipping the CPU. Perhaps this belongs in
the previous patch? Maybe the function should really be
lookup_and_maybe_reset_other_cpu_ll_addr?

> +            if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
> +                ranges_overlap(cpu->excl_protected_range.begin,
> +                               cpu->excl_protected_range.end -
> +                               cpu->excl_protected_range.begin,
> +                               addr, size)) {
> +                cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
> +                ret = true;
> +            }
>          }
>      }
> +
> +    return ret;
>  }
>
>  #define MMUSUFFIX _mmu
> diff --git a/include/exec/memory.h b/include/exec/memory.h
> index 71e0480..bacb3ad 100644
> --- a/include/exec/memory.h
> +++ b/include/exec/memory.h
> @@ -171,6 +171,7 @@ struct MemoryRegion {
>      bool rom_device;
>      bool flush_coalesced_mmio;
>      bool global_locking;
> +    bool pending_excl_access; /* A vCPU issued an exclusive access */
>      uint8_t dirty_log_mask;
>      ram_addr_t ram_addr;
>      Object *owner;
> diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
> index 101f5e8..b4712ba 100644
> --- a/softmmu_llsc_template.h
> +++ b/softmmu_llsc_template.h
> @@ -81,15 +81,18 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
>                  }
>              }
>          }
> +        /* For this vCPU, just update the TLB entry, no need to flush. */
> +        env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
>      } else {
> -        hw_error("EXCL accesses to MMIO regions not supported yet.");
> +        /* Set a pending exclusive access in the MemoryRegion */
> +        MemoryRegion *mr = iotlb_to_region(this,
> +                                           env->iotlb[mmu_idx][index].addr,
> +                                           env->iotlb[mmu_idx][index].attrs);
> +        mr->pending_excl_access = true;
>      }
>
>      cc->cpu_set_excl_protected_range(this, hw_addr, DATA_SIZE);
>
> -    /* For this vCPU, just update the TLB entry, no need to flush. */
> -    env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
> -
>      /* From now on we are in LL/SC context */
>      this->ll_sc_context = true;
>
> diff --git a/softmmu_template.h b/softmmu_template.h
> index c54bdc9..71c5152 100644
> --- a/softmmu_template.h
> +++ b/softmmu_template.h
> @@ -360,6 +360,14 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
>      MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
>
>      physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
> +
> +    /* Invalidate the exclusive range that overlaps this access */
> +    if (mr->pending_excl_access) {
> +        if (lookup_and_reset_cpus_ll_addr(physaddr, 1 << SHIFT)) {
> +            mr->pending_excl_access = false;
> +        }
> +    }
> +
>      if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
>          cpu_io_recompile(cpu, retaddr);
>      }
> @@ -504,6 +512,13 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>                  glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
>                                                           mmu_idx, index,
>                                                           retaddr);
> +                /* N.B.: Here excl_succeeded == true means that this access
> +                 * comes from an exclusive instruction. */
> +                if (cpu->excl_succeeded) {
> +                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
> +                                                       iotlbentry->attrs);
> +                    mr->pending_excl_access = false;
> +                }
>              } else {
>                  glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>                                                          mmu_idx, index,
> @@ -655,6 +670,13 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>                  glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
>                                                           mmu_idx, index,
>                                                           retaddr);
> +                /* N.B.: Here excl_succeeded == true means that this access
> +                 * comes from an exclusive instruction. */
> +                if (cpu->excl_succeeded) {
> +                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
> +                                                       iotlbentry->attrs);
> +                    mr->pending_excl_access = false;
> +                }

My comments about duplication on previous patches still stand.

>              } else {
>                  glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>                                                          mmu_idx, index,


--
Alex Bennée
alvise rigo Feb. 18, 2016, 2:15 p.m. UTC | #2
On Wed, Feb 17, 2016 at 7:55 PM, Alex Bennée <alex.bennee@linaro.org> wrote:
>
> Alvise Rigo <a.rigo@virtualopensystems.com> writes:
>
>> As in the RAM case, MMIO exclusive ranges also have to be protected
>> from other CPUs' accesses. To do that, we flag the accessed
>> MemoryRegion to mark that an exclusive access has been performed and
>> has not yet concluded.
>>
>> This flag will force the other CPUs to invalidate the exclusive range in
>> case of collision.
>>
>> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
>> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
>> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
>> ---
>>  cputlb.c                | 20 +++++++++++++-------
>>  include/exec/memory.h   |  1 +
>>  softmmu_llsc_template.h | 11 +++++++----
>>  softmmu_template.h      | 22 ++++++++++++++++++++++
>>  4 files changed, 43 insertions(+), 11 deletions(-)
>>
>> diff --git a/cputlb.c b/cputlb.c
>> index 87d09c8..06ce2da 100644
>> --- a/cputlb.c
>> +++ b/cputlb.c
>> @@ -496,19 +496,25 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
>>  /* For every vCPU compare the exclusive address and reset it in case of a
>>   * match. Since only one vCPU is running at once, no lock has to be held to
>>   * guard this operation. */
>> -static inline void lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
>> +static inline bool lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
>>  {
>>      CPUState *cpu;
>> +    bool ret = false;
>>
>>      CPU_FOREACH(cpu) {
>> -        if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
>> -            ranges_overlap(cpu->excl_protected_range.begin,
>> -                           cpu->excl_protected_range.end -
>> -                           cpu->excl_protected_range.begin,
>> -                           addr, size)) {
>> -            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
>> +        if (current_cpu != cpu) {
>
> I'm confused by this change. I don't see anywhere in the MMIO handling
> why we would want to change skipping the CPU. Perhaps this belongs in
> the previous patch? Maybe the function should really be
> lookup_and_maybe_reset_other_cpu_ll_addr?

This is actually used later on in this patch.

>
>> +            if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
>> +                ranges_overlap(cpu->excl_protected_range.begin,
>> +                               cpu->excl_protected_range.end -
>> +                               cpu->excl_protected_range.begin,
>> +                               addr, size)) {
>> +                cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
>> +                ret = true;
>> +            }
>>          }
>>      }
>> +
>> +    return ret;
>>  }
>>
>>  #define MMUSUFFIX _mmu
>> diff --git a/include/exec/memory.h b/include/exec/memory.h
>> index 71e0480..bacb3ad 100644
>> --- a/include/exec/memory.h
>> +++ b/include/exec/memory.h
>> @@ -171,6 +171,7 @@ struct MemoryRegion {
>>      bool rom_device;
>>      bool flush_coalesced_mmio;
>>      bool global_locking;
>> +    bool pending_excl_access; /* A vCPU issued an exclusive access */
>>      uint8_t dirty_log_mask;
>>      ram_addr_t ram_addr;
>>      Object *owner;
>> diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
>> index 101f5e8..b4712ba 100644
>> --- a/softmmu_llsc_template.h
>> +++ b/softmmu_llsc_template.h
>> @@ -81,15 +81,18 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
>>                  }
>>              }
>>          }
>> +        /* For this vCPU, just update the TLB entry, no need to flush. */
>> +        env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
>>      } else {
>> -        hw_error("EXCL accesses to MMIO regions not supported yet.");
>> +        /* Set a pending exclusive access in the MemoryRegion */
>> +        MemoryRegion *mr = iotlb_to_region(this,
>> +                                           env->iotlb[mmu_idx][index].addr,
>> +                                           env->iotlb[mmu_idx][index].attrs);
>> +        mr->pending_excl_access = true;
>>      }
>>
>>      cc->cpu_set_excl_protected_range(this, hw_addr, DATA_SIZE);
>>
>> -    /* For this vCPU, just update the TLB entry, no need to flush. */
>> -    env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
>> -
>>      /* From now on we are in LL/SC context */
>>      this->ll_sc_context = true;
>>
>> diff --git a/softmmu_template.h b/softmmu_template.h
>> index c54bdc9..71c5152 100644
>> --- a/softmmu_template.h
>> +++ b/softmmu_template.h
>> @@ -360,6 +360,14 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
>>      MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
>>
>>      physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
>> +
>> +    /* Invalidate the exclusive range that overlaps this access */
>> +    if (mr->pending_excl_access) {
>> +        if (lookup_and_reset_cpus_ll_addr(physaddr, 1 << SHIFT)) {

Here precisely. As you wrote, we can rename it to
lookup_and_maybe_reset_other_cpu_ll_addr, though that name does not
convince me. What about other_cpus_reset_colliding_ll_addr?

Thank you,
alvise

>> +            mr->pending_excl_access = false;
>> +        }
>> +    }
>> +
>>      if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
>>          cpu_io_recompile(cpu, retaddr);
>>      }
>> @@ -504,6 +512,13 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>                  glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
>>                                                           mmu_idx, index,
>>                                                           retaddr);
>> +                /* N.B.: Here excl_succeeded == true means that this access
>> +                 * comes from an exclusive instruction. */
>> +                if (cpu->excl_succeeded) {
>> +                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
>> +                                                       iotlbentry->attrs);
>> +                    mr->pending_excl_access = false;
>> +                }
>>              } else {
>>                  glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>>                                                          mmu_idx, index,
>> @@ -655,6 +670,13 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>                  glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
>>                                                           mmu_idx, index,
>>                                                           retaddr);
>> +                /* N.B.: Here excl_succeeded == true means that this access
>> +                 * comes from an exclusive instruction. */
>> +                if (cpu->excl_succeeded) {
>> +                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
>> +                                                       iotlbentry->attrs);
>> +                    mr->pending_excl_access = false;
>> +                }
>
> My comments about duplication on previous patches still stand.

Indeed.

Thank you,
alvise

>
>>              } else {
>>                  glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>>                                                          mmu_idx, index,
>
>
> --
> Alex Bennée
Alex Bennée Feb. 18, 2016, 4:25 p.m. UTC | #3
alvise rigo <a.rigo@virtualopensystems.com> writes:

> On Wed, Feb 17, 2016 at 7:55 PM, Alex Bennée <alex.bennee@linaro.org> wrote:
>>
>> Alvise Rigo <a.rigo@virtualopensystems.com> writes:
>>
>>> As in the RAM case, MMIO exclusive ranges also have to be protected
>>> from other CPUs' accesses. To do that, we flag the accessed
>>> MemoryRegion to mark that an exclusive access has been performed and
>>> has not yet concluded.
>>>
>>> This flag will force the other CPUs to invalidate the exclusive range in
>>> case of collision.
>>>
>>> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
>>> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
>>> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
>>> ---
>>>  cputlb.c                | 20 +++++++++++++-------
>>>  include/exec/memory.h   |  1 +
>>>  softmmu_llsc_template.h | 11 +++++++----
>>>  softmmu_template.h      | 22 ++++++++++++++++++++++
>>>  4 files changed, 43 insertions(+), 11 deletions(-)
>>>
>>> diff --git a/cputlb.c b/cputlb.c
>>> index 87d09c8..06ce2da 100644
>>> --- a/cputlb.c
>>> +++ b/cputlb.c
>>> @@ -496,19 +496,25 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
>>>  /* For every vCPU compare the exclusive address and reset it in case of a
>>>   * match. Since only one vCPU is running at once, no lock has to be held to
>>>   * guard this operation. */
>>> -static inline void lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
>>> +static inline bool lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
>>>  {
>>>      CPUState *cpu;
>>> +    bool ret = false;
>>>
>>>      CPU_FOREACH(cpu) {
>>> -        if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
>>> -            ranges_overlap(cpu->excl_protected_range.begin,
>>> -                           cpu->excl_protected_range.end -
>>> -                           cpu->excl_protected_range.begin,
>>> -                           addr, size)) {
>>> -            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
>>> +        if (current_cpu != cpu) {
>>
>> I'm confused by this change. I don't see anywhere in the MMIO handling
>> why we would want to change skipping the CPU. Perhaps this belongs in
>> the previous patch? Maybe the function should really be
>> lookup_and_maybe_reset_other_cpu_ll_addr?
>
> This is actually used later on in this patch.

But aren't there other users before the functional change was made to
skip the current_cpu? Were their expectations wrong, or should we have
always skipped the current CPU?

The addition of the bool return, I agree, only needs to be brought in
now that there are functions that care.

>
>>
>>> +            if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
>>> +                ranges_overlap(cpu->excl_protected_range.begin,
>>> +                               cpu->excl_protected_range.end -
>>> +                               cpu->excl_protected_range.begin,
>>> +                               addr, size)) {
>>> +                cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
>>> +                ret = true;
>>> +            }
>>>          }
>>>      }
>>> +
>>> +    return ret;
>>>  }
>>>
>>>  #define MMUSUFFIX _mmu
>>> diff --git a/include/exec/memory.h b/include/exec/memory.h
>>> index 71e0480..bacb3ad 100644
>>> --- a/include/exec/memory.h
>>> +++ b/include/exec/memory.h
>>> @@ -171,6 +171,7 @@ struct MemoryRegion {
>>>      bool rom_device;
>>>      bool flush_coalesced_mmio;
>>>      bool global_locking;
>>> +    bool pending_excl_access; /* A vCPU issued an exclusive access */
>>>      uint8_t dirty_log_mask;
>>>      ram_addr_t ram_addr;
>>>      Object *owner;
>>> diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
>>> index 101f5e8..b4712ba 100644
>>> --- a/softmmu_llsc_template.h
>>> +++ b/softmmu_llsc_template.h
>>> @@ -81,15 +81,18 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
>>>                  }
>>>              }
>>>          }
>>> +        /* For this vCPU, just update the TLB entry, no need to flush. */
>>> +        env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
>>>      } else {
>>> -        hw_error("EXCL accesses to MMIO regions not supported yet.");
>>> +        /* Set a pending exclusive access in the MemoryRegion */
>>> +        MemoryRegion *mr = iotlb_to_region(this,
>>> +                                           env->iotlb[mmu_idx][index].addr,
>>> +                                           env->iotlb[mmu_idx][index].attrs);
>>> +        mr->pending_excl_access = true;
>>>      }
>>>
>>>      cc->cpu_set_excl_protected_range(this, hw_addr, DATA_SIZE);
>>>
>>> -    /* For this vCPU, just update the TLB entry, no need to flush. */
>>> -    env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
>>> -
>>>      /* From now on we are in LL/SC context */
>>>      this->ll_sc_context = true;
>>>
>>> diff --git a/softmmu_template.h b/softmmu_template.h
>>> index c54bdc9..71c5152 100644
>>> --- a/softmmu_template.h
>>> +++ b/softmmu_template.h
>>> @@ -360,6 +360,14 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
>>>      MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
>>>
>>>      physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
>>> +
>>> +    /* Invalidate the exclusive range that overlaps this access */
>>> +    if (mr->pending_excl_access) {
>>> +        if (lookup_and_reset_cpus_ll_addr(physaddr, 1 << SHIFT)) {
>
> Here precisely. As you wrote, we can rename it to
> lookup_and_maybe_reset_other_cpu_ll_addr, though that name does not
> convince me. What about other_cpus_reset_colliding_ll_addr?

We want it as short and semantically informative as possible. Naming things is hard ;-)

 - reset_other_cpus_colliding_ll_addr
 - reset_other_cpus_overlapping_ll_addr

Any other options?

>
> Thank you,
> alvise
>
>>> +            mr->pending_excl_access = false;
>>> +        }
>>> +    }
>>> +
>>>      if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
>>>          cpu_io_recompile(cpu, retaddr);
>>>      }
>>> @@ -504,6 +512,13 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>>                  glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
>>>                                                           mmu_idx, index,
>>>                                                           retaddr);
>>> +                /* N.B.: Here excl_succeeded == true means that this access
>>> +                 * comes from an exclusive instruction. */
>>> +                if (cpu->excl_succeeded) {
>>> +                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
>>> +                                                       iotlbentry->attrs);
>>> +                    mr->pending_excl_access = false;
>>> +                }
>>>              } else {
>>>                  glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>>>                                                          mmu_idx, index,
>>> @@ -655,6 +670,13 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>>                  glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
>>>                                                           mmu_idx, index,
>>>                                                           retaddr);
>>> +                /* N.B.: Here excl_succeeded == true means that this access
>>> +                 * comes from an exclusive instruction. */
>>> +                if (cpu->excl_succeeded) {
>>> +                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
>>> +                                                       iotlbentry->attrs);
>>> +                    mr->pending_excl_access = false;
>>> +                }
>>
>> My comments about duplication on previous patches still stand.
>
> Indeed.
>
> Thank you,
> alvise
>
>>
>>>              } else {
>>>                  glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>>>                                                          mmu_idx, index,
>>
>>
>> --
>> Alex Bennée


--
Alex Bennée
alvise rigo March 7, 2016, 6:13 p.m. UTC | #4
On Thu, Feb 18, 2016 at 5:25 PM, Alex Bennée <alex.bennee@linaro.org> wrote:
>
> alvise rigo <a.rigo@virtualopensystems.com> writes:
>
>> On Wed, Feb 17, 2016 at 7:55 PM, Alex Bennée <alex.bennee@linaro.org> wrote:
>>>
>>> Alvise Rigo <a.rigo@virtualopensystems.com> writes:
>>>
>>>> As in the RAM case, MMIO exclusive ranges also have to be protected
>>>> from other CPUs' accesses. To do that, we flag the accessed
>>>> MemoryRegion to mark that an exclusive access has been performed and
>>>> has not yet concluded.
>>>>
>>>> This flag will force the other CPUs to invalidate the exclusive range in
>>>> case of collision.
>>>>
>>>> Suggested-by: Jani Kokkonen <jani.kokkonen@huawei.com>
>>>> Suggested-by: Claudio Fontana <claudio.fontana@huawei.com>
>>>> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
>>>> ---
>>>>  cputlb.c                | 20 +++++++++++++-------
>>>>  include/exec/memory.h   |  1 +
>>>>  softmmu_llsc_template.h | 11 +++++++----
>>>>  softmmu_template.h      | 22 ++++++++++++++++++++++
>>>>  4 files changed, 43 insertions(+), 11 deletions(-)
>>>>
>>>> diff --git a/cputlb.c b/cputlb.c
>>>> index 87d09c8..06ce2da 100644
>>>> --- a/cputlb.c
>>>> +++ b/cputlb.c
>>>> @@ -496,19 +496,25 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
>>>>  /* For every vCPU compare the exclusive address and reset it in case of a
>>>>   * match. Since only one vCPU is running at once, no lock has to be held to
>>>>   * guard this operation. */
>>>> -static inline void lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
>>>> +static inline bool lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
>>>>  {
>>>>      CPUState *cpu;
>>>> +    bool ret = false;
>>>>
>>>>      CPU_FOREACH(cpu) {
>>>> -        if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
>>>> -            ranges_overlap(cpu->excl_protected_range.begin,
>>>> -                           cpu->excl_protected_range.end -
>>>> -                           cpu->excl_protected_range.begin,
>>>> -                           addr, size)) {
>>>> -            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
>>>> +        if (current_cpu != cpu) {
>>>
>>> I'm confused by this change. I don't see anywhere in the MMIO handling
>>> why we would want to change skipping the CPU. Perhaps this belongs in
>>> the previous patch? Maybe the function should really be
>>> lookup_and_maybe_reset_other_cpu_ll_addr?
>>
>> This is actually used later on in this patch.
>
> But aren't there other users before the functional change was made to
> skip the current_cpu? Were their expectations wrong, or should we have
> always skipped the current CPU?

I see your point now. Before current_cpu was skipped there was no need
for the line
cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
in helper_stcond_name() when we return from softmmu_template.h.

The error is that the line should have been added in this patch, not
in PATCH 07/16. Fixing it for the next version.
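
Concretely, the placement being described would be something like the
sketch below. This is illustrative only: helper_stcond_name()'s real
body lives in softmmu_llsc_template.h and is not part of this hunk, and
the return convention shown (0 = success, 1 = failure) is an assumption.

WORD_TYPE helper_stcond_name(CPUArchState *env, target_ulong addr,
                             DATA_TYPE val, TCGMemOpIdx oi,
                             uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);

    /* ... the store goes through the normal st helper, which leaves
     * cpu->excl_succeeded set iff the reservation was still intact ... */

    /* This is the line that belongs in this patch: now that
     * lookup_and_reset_cpus_ll_addr() skips current_cpu, the vCPU has
     * to drop its own reservation itself on the way out. */
    cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;

    /* Return convention assumed for the sketch: 0 = success. */
    return cpu->excl_succeeded ? 0 : 1;
}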

>
> The addition of the bool return, I agree, only needs to be brought in
> now that there are functions that care.
>
>>
>>>
>>>> +            if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
>>>> +                ranges_overlap(cpu->excl_protected_range.begin,
>>>> +                               cpu->excl_protected_range.end -
>>>> +                               cpu->excl_protected_range.begin,
>>>> +                               addr, size)) {
>>>> +                cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
>>>> +                ret = true;
>>>> +            }
>>>>          }
>>>>      }
>>>> +
>>>> +    return ret;
>>>>  }
>>>>
>>>>  #define MMUSUFFIX _mmu
>>>> diff --git a/include/exec/memory.h b/include/exec/memory.h
>>>> index 71e0480..bacb3ad 100644
>>>> --- a/include/exec/memory.h
>>>> +++ b/include/exec/memory.h
>>>> @@ -171,6 +171,7 @@ struct MemoryRegion {
>>>>      bool rom_device;
>>>>      bool flush_coalesced_mmio;
>>>>      bool global_locking;
>>>> +    bool pending_excl_access; /* A vCPU issued an exclusive access */
>>>>      uint8_t dirty_log_mask;
>>>>      ram_addr_t ram_addr;
>>>>      Object *owner;
>>>> diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
>>>> index 101f5e8..b4712ba 100644
>>>> --- a/softmmu_llsc_template.h
>>>> +++ b/softmmu_llsc_template.h
>>>> @@ -81,15 +81,18 @@ WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
>>>>                  }
>>>>              }
>>>>          }
>>>> +        /* For this vCPU, just update the TLB entry, no need to flush. */
>>>> +        env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
>>>>      } else {
>>>> -        hw_error("EXCL accesses to MMIO regions not supported yet.");
>>>> +        /* Set a pending exclusive access in the MemoryRegion */
>>>> +        MemoryRegion *mr = iotlb_to_region(this,
>>>> +                                           env->iotlb[mmu_idx][index].addr,
>>>> +                                           env->iotlb[mmu_idx][index].attrs);
>>>> +        mr->pending_excl_access = true;
>>>>      }
>>>>
>>>>      cc->cpu_set_excl_protected_range(this, hw_addr, DATA_SIZE);
>>>>
>>>> -    /* For this vCPU, just update the TLB entry, no need to flush. */
>>>> -    env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
>>>> -
>>>>      /* From now on we are in LL/SC context */
>>>>      this->ll_sc_context = true;
>>>>
>>>> diff --git a/softmmu_template.h b/softmmu_template.h
>>>> index c54bdc9..71c5152 100644
>>>> --- a/softmmu_template.h
>>>> +++ b/softmmu_template.h
>>>> @@ -360,6 +360,14 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
>>>>      MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
>>>>
>>>>      physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
>>>> +
>>>> +    /* Invalidate the exclusive range that overlaps this access */
>>>> +    if (mr->pending_excl_access) {
>>>> +        if (lookup_and_reset_cpus_ll_addr(physaddr, 1 << SHIFT)) {
>>
>> Here precisely. As you wrote, we can rename it to
>> lookup_and_maybe_reset_other_cpu_ll_addr, though that name does not
>> convince me. What about other_cpus_reset_colliding_ll_addr?
>
> We want it as short and semantically informative as possible. Naming things is hard ;-)
>
>  - reset_other_cpus_colliding_ll_addr
>  - reset_other_cpus_overlapping_ll_addr
>
> Any other options?

Umm, these sound fine to me. Probably the first one, since it's shorter.
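
For reference, under that name the helper would read as below — the
body is the same as in the hunk above, just renamed and with the CPU
check folded into the condition (a sketch, not the final version):

static inline bool reset_other_cpus_colliding_ll_addr(hwaddr addr, hwaddr size)
{
    CPUState *cpu;
    bool ret = false;

    CPU_FOREACH(cpu) {
        if (current_cpu != cpu &&
            cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
            ranges_overlap(cpu->excl_protected_range.begin,
                           cpu->excl_protected_range.end -
                           cpu->excl_protected_range.begin,
                           addr, size)) {
            /* Break the other vCPU's reservation and report it. */
            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
            ret = true;
        }
    }

    return ret;
}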

Thank you,
alvise

>
>>
>> Thank you,
>> alvise
>>
>>>> +            mr->pending_excl_access = false;
>>>> +        }
>>>> +    }
>>>> +
>>>>      if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
>>>>          cpu_io_recompile(cpu, retaddr);
>>>>      }
>>>> @@ -504,6 +512,13 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>>>                  glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
>>>>                                                           mmu_idx, index,
>>>>                                                           retaddr);
>>>> +                /* N.B.: Here excl_succeeded == true means that this access
>>>> +                 * comes from an exclusive instruction. */
>>>> +                if (cpu->excl_succeeded) {
>>>> +                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
>>>> +                                                       iotlbentry->attrs);
>>>> +                    mr->pending_excl_access = false;
>>>> +                }
>>>>              } else {
>>>>                  glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
>>>>                                                          mmu_idx, index,
>>>> @@ -655,6 +670,13 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
>>>>                  glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
>>>>                                                           mmu_idx, index,
>>>>                                                           retaddr);
>>>> +                /* N.B.: Here excl_succeeded == true means that this access
>>>> +                 * comes from an exclusive instruction. */
>>>> +                if (cpu->excl_succeeded) {
>>>> +                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
>>>> +                                                       iotlbentry->attrs);
>>>> +                    mr->pending_excl_access = false;
>>>> +                }
>>>
>>> My comments about duplication on previous patches still stand.
>>
>> Indeed.
>>
>> Thank you,
>> alvise
>>
>>>
>>>>              } else {
>>>>                  glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
>>>>                                                          mmu_idx, index,
>>>
>>>
>>> --
>>> Alex Bennée
>
>
> --
> Alex Bennée

Patch

diff --git a/cputlb.c b/cputlb.c
index 87d09c8..06ce2da 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -496,19 +496,25 @@  tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
 /* For every vCPU compare the exclusive address and reset it in case of a
  * match. Since only one vCPU is running at once, no lock has to be held to
  * guard this operation. */
-static inline void lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
+static inline bool lookup_and_reset_cpus_ll_addr(hwaddr addr, hwaddr size)
 {
     CPUState *cpu;
+    bool ret = false;
 
     CPU_FOREACH(cpu) {
-        if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
-            ranges_overlap(cpu->excl_protected_range.begin,
-                           cpu->excl_protected_range.end -
-                           cpu->excl_protected_range.begin,
-                           addr, size)) {
-            cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
+        if (current_cpu != cpu) {
+            if (cpu->excl_protected_range.begin != EXCLUSIVE_RESET_ADDR &&
+                ranges_overlap(cpu->excl_protected_range.begin,
+                               cpu->excl_protected_range.end -
+                               cpu->excl_protected_range.begin,
+                               addr, size)) {
+                cpu->excl_protected_range.begin = EXCLUSIVE_RESET_ADDR;
+                ret = true;
+            }
         }
     }
+
+    return ret;
 }
 
 #define MMUSUFFIX _mmu
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 71e0480..bacb3ad 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -171,6 +171,7 @@  struct MemoryRegion {
     bool rom_device;
     bool flush_coalesced_mmio;
     bool global_locking;
+    bool pending_excl_access; /* A vCPU issued an exclusive access */
     uint8_t dirty_log_mask;
     ram_addr_t ram_addr;
     Object *owner;
diff --git a/softmmu_llsc_template.h b/softmmu_llsc_template.h
index 101f5e8..b4712ba 100644
--- a/softmmu_llsc_template.h
+++ b/softmmu_llsc_template.h
@@ -81,15 +81,18 @@  WORD_TYPE helper_ldlink_name(CPUArchState *env, target_ulong addr,
                 }
             }
         }
+        /* For this vCPU, just update the TLB entry, no need to flush. */
+        env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
     } else {
-        hw_error("EXCL accesses to MMIO regions not supported yet.");
+        /* Set a pending exclusive access in the MemoryRegion */
+        MemoryRegion *mr = iotlb_to_region(this,
+                                           env->iotlb[mmu_idx][index].addr,
+                                           env->iotlb[mmu_idx][index].attrs);
+        mr->pending_excl_access = true;
     }
 
     cc->cpu_set_excl_protected_range(this, hw_addr, DATA_SIZE);
 
-    /* For this vCPU, just update the TLB entry, no need to flush. */
-    env->tlb_table[mmu_idx][index].addr_write |= TLB_EXCL;
-
     /* From now on we are in LL/SC context */
     this->ll_sc_context = true;
 
diff --git a/softmmu_template.h b/softmmu_template.h
index c54bdc9..71c5152 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -360,6 +360,14 @@  static inline void glue(io_write, SUFFIX)(CPUArchState *env,
     MemoryRegion *mr = iotlb_to_region(cpu, physaddr, iotlbentry->attrs);
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
+
+    /* Invalidate the exclusive range that overlaps this access */
+    if (mr->pending_excl_access) {
+        if (lookup_and_reset_cpus_ll_addr(physaddr, 1 << SHIFT)) {
+            mr->pending_excl_access = false;
+        }
+    }
+
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) {
         cpu_io_recompile(cpu, retaddr);
     }
@@ -504,6 +512,13 @@  void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 glue(helper_le_st_name, _do_mmio_access)(env, val, addr, oi,
                                                          mmu_idx, index,
                                                          retaddr);
+                /* N.B.: Here excl_succeeded == true means that this access
+                 * comes from an exclusive instruction. */
+                if (cpu->excl_succeeded) {
+                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
+                                                       iotlbentry->attrs);
+                    mr->pending_excl_access = false;
+                }
             } else {
                 glue(helper_le_st_name, _do_ram_access)(env, val, addr, oi,
                                                         mmu_idx, index,
@@ -655,6 +670,13 @@  void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                 glue(helper_be_st_name, _do_mmio_access)(env, val, addr, oi,
                                                          mmu_idx, index,
                                                          retaddr);
+                /* N.B.: Here excl_succeeded == true means that this access
+                 * comes from an exclusive instruction. */
+                if (cpu->excl_succeeded) {
+                    MemoryRegion *mr = iotlb_to_region(cpu, iotlbentry->addr,
+                                                       iotlbentry->attrs);
+                    mr->pending_excl_access = false;
+                }
             } else {
                 glue(helper_be_st_name, _do_ram_access)(env, val, addr, oi,
                                                         mmu_idx, index,