[v5,22/22] target/arm: Add allocation tag storage for system mode

Message ID 20191011134744.2477-23-richard.henderson@linaro.org (mailing list archive)
State New, archived
Series [v5,01/22] target/arm: Add MTE_ACTIVE to tb_flags

Commit Message

Richard Henderson Oct. 11, 2019, 1:47 p.m. UTC
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/mte_helper.c | 61 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

Comments

Peter Maydell Dec. 6, 2019, 1:02 p.m. UTC | #1
On Fri, 11 Oct 2019 at 14:50, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  target/arm/mte_helper.c | 61 +++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 61 insertions(+)
>
> diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
> index e8d8a6bedb..657383ba0e 100644
> --- a/target/arm/mte_helper.c
> +++ b/target/arm/mte_helper.c
> @@ -28,8 +28,69 @@
>  static uint8_t *allocation_tag_mem(CPUARMState *env, uint64_t ptr,
>                                     bool write, uintptr_t ra)
>  {
> +#ifdef CONFIG_USER_ONLY
>      /* Tag storage not implemented.  */
>      return NULL;
> +#else
> +    CPUState *cs = env_cpu(env);
> +    uintptr_t index;
> +    int mmu_idx;
> +    CPUTLBEntry *entry;
> +    CPUIOTLBEntry *iotlbentry;
> +    MemoryRegionSection *section;
> +    hwaddr physaddr, tag_physaddr;
> +
> +    /*
> +     * Find the TLB entry for this access.
> +     * As a side effect, this also raises an exception for invalid access.
> +     *
> +     * TODO: Perhaps there should be a cputlb helper that returns a
> +     * matching tlb entry + iotlb entry.  That would also be able to
> +     * make use of the victim tlb cache, which is currently private.
> +     */
> +    mmu_idx = cpu_mmu_index(env, false);
> +    index = tlb_index(env, mmu_idx, ptr);
> +    entry = tlb_entry(env, mmu_idx, ptr);
> +    if (!tlb_hit(write ? tlb_addr_write(entry) : entry->addr_read, ptr)) {
> +        bool ok = arm_cpu_tlb_fill(cs, ptr, 16,
> +                                   write ? MMU_DATA_STORE : MMU_DATA_LOAD,
> +                                   mmu_idx, false, ra);
> +        assert(ok);
> +        index = tlb_index(env, mmu_idx, ptr);
> +        entry = tlb_entry(env, mmu_idx, ptr);
> +    }
> +
> +    /* If the virtual page MemAttr != Tagged, nothing to do.  */
> +    iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
> +    if (!iotlbentry->attrs.target_tlb_bit1) {
> +        return NULL;
> +    }
> +
> +    /*
> +     * Find the physical address for the virtual access.
> +     *
> +     * TODO: It should be possible to have the tag mmu_idx map
> +     * from main memory ram_addr to tag memory host address.
> +     * That would allow this lookup step to be cached as well.
> +     */
> +    section = iotlb_to_section(cs, iotlbentry->addr, iotlbentry->attrs);
> +    physaddr = ((iotlbentry->addr & TARGET_PAGE_MASK) + ptr
> +                + section->offset_within_address_space
> +                - section->offset_within_region);

I'm surprised that going from vaddr to (physaddr, attrs) requires
this much effort; it seems like the kind of thing we would
already have a function to do.

> +
> +    /* Convert to the physical address in tag space.  */
> +    tag_physaddr = physaddr >> (LOG2_TAG_GRANULE + 1);
> +
> +    /* Choose the tlb index to use for the tag physical access.  */
> +    mmu_idx = iotlbentry->attrs.secure ? ARMMMUIdx_TagS : ARMMMUIdx_TagNS;
> +    mmu_idx = arm_to_core_mmu_idx(mmu_idx);
> +
> +    /*
> +     * FIXME: Get access length and type so that we can use
> +     * probe_access, so that pages are marked dirty for migration.
> +     */
> +    return tlb_vaddr_to_host(env, tag_physaddr, MMU_DATA_LOAD, mmu_idx);

Hmm, does that mean that a setup with MemTag is not migratable?
If so, we should at least install a migration-blocker for CPUs
in that configuration.

> +#endif
>  }
>
>  static int get_allocation_tag(CPUARMState *env, uint64_t ptr, uintptr_t ra)
> --
> 2.17.1
>


thanks
-- PMM
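
For reference, a migration blocker of the kind suggested here is
normally installed with migrate_add_blocker().  A minimal sketch,
assuming it runs from the CPU realize path (the hook placement, the
function name and the mte_migration_blocker variable are illustrative,
not part of this series):

    #include "qapi/error.h"
    #include "migration/blocker.h"

    static Error *mte_migration_blocker;

    /* Called once when a CPU with MTE tag storage is realized. */
    static void mte_block_migration(Error **errp)
    {
        error_setg(&mte_migration_blocker,
                   "MTE allocation tag storage does not yet support migration");
        if (migrate_add_blocker(mte_migration_blocker, errp) < 0) {
            error_free(mte_migration_blocker);
            mte_migration_blocker = NULL;
        }
    }
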
Richard Henderson Dec. 6, 2019, 2:14 p.m. UTC | #2
On 12/6/19 5:02 AM, Peter Maydell wrote:
>> +    /*
>> +     * Find the physical address for the virtual access.
>> +     *
>> +     * TODO: It should be possible to have the tag mmu_idx map
>> +     * from main memory ram_addr to tag memory host address.
>> +     * That would allow this lookup step to be cached as well.
>> +     */
>> +    section = iotlb_to_section(cs, iotlbentry->addr, iotlbentry->attrs);
>> +    physaddr = ((iotlbentry->addr & TARGET_PAGE_MASK) + ptr
>> +                + section->offset_within_address_space
>> +                - section->offset_within_region);
> 
> I'm surprised that going from vaddr to (physaddr, attrs) requires
> this much effort, it seems like the kind of thing we would
> already have a function to do.

There are very few places that need to talk about the actual physical address,
mostly because that doesn't mean much within QEMU -- physical address within
which address space?  Usually we want the ram_addr_t (which is a sort of
combination of pa + as), or the host address, or the device that exists at the
pa + as.
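
That said, the open-coded sequence in the patch could be factored into
a small local helper.  A sketch (tlb_vaddr_to_phys is a hypothetical
name; the body just mirrors the lookup quoted above):

    static hwaddr tlb_vaddr_to_phys(CPUARMState *env, int mmu_idx,
                                    uintptr_t index, uint64_t ptr,
                                    MemTxAttrs *attrs)
    {
        CPUState *cs = env_cpu(env);
        CPUIOTLBEntry *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
        MemoryRegionSection *sec =
            iotlb_to_section(cs, io->addr, io->attrs);

        *attrs = io->attrs;
        return ((io->addr & TARGET_PAGE_MASK) + ptr
                + sec->offset_within_address_space
                - sec->offset_within_region);
    }
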

>> +    /*
>> +     * FIXME: Get access length and type so that we can use
>> +     * probe_access, so that pages are marked dirty for migration.
>> +     */
>> +    return tlb_vaddr_to_host(env, tag_physaddr, MMU_DATA_LOAD, mmu_idx);
> 
> Hmm, does that mean that a setup with MemTag is not migratable?
> If so, we should at least install a migration-blocker for CPUs
> in that configuration.

It probably does as written.  I intend to fix this properly before final.


r~
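
For comparison, once the access length and type are plumbed through as
the FIXME describes, the final lookup could go through probe_access(),
which marks the page dirty for migration on stores.  A sketch against
the probe_access() signature of this era (the tag_size value and the
reuse of the write flag at this point are assumptions):

    /* Replaces the tlb_vaddr_to_host() call at the end of
     * allocation_tag_mem(). */
    return probe_access(env, tag_physaddr, tag_size,
                        write ? MMU_DATA_STORE : MMU_DATA_LOAD,
                        mmu_idx, ra);
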

Patch

diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c
index e8d8a6bedb..657383ba0e 100644
--- a/target/arm/mte_helper.c
+++ b/target/arm/mte_helper.c
@@ -28,8 +28,69 @@ 
 static uint8_t *allocation_tag_mem(CPUARMState *env, uint64_t ptr,
                                    bool write, uintptr_t ra)
 {
+#ifdef CONFIG_USER_ONLY
     /* Tag storage not implemented.  */
     return NULL;
+#else
+    CPUState *cs = env_cpu(env);
+    uintptr_t index;
+    int mmu_idx;
+    CPUTLBEntry *entry;
+    CPUIOTLBEntry *iotlbentry;
+    MemoryRegionSection *section;
+    hwaddr physaddr, tag_physaddr;
+
+    /*
+     * Find the TLB entry for this access.
+     * As a side effect, this also raises an exception for invalid access.
+     *
+     * TODO: Perhaps there should be a cputlb helper that returns a
+     * matching tlb entry + iotlb entry.  That would also be able to
+     * make use of the victim tlb cache, which is currently private.
+     */
+    mmu_idx = cpu_mmu_index(env, false);
+    index = tlb_index(env, mmu_idx, ptr);
+    entry = tlb_entry(env, mmu_idx, ptr);
+    if (!tlb_hit(write ? tlb_addr_write(entry) : entry->addr_read, ptr)) {
+        bool ok = arm_cpu_tlb_fill(cs, ptr, 16,
+                                   write ? MMU_DATA_STORE : MMU_DATA_LOAD,
+                                   mmu_idx, false, ra);
+        assert(ok);
+        index = tlb_index(env, mmu_idx, ptr);
+        entry = tlb_entry(env, mmu_idx, ptr);
+    }
+
+    /* If the virtual page MemAttr != Tagged, nothing to do.  */
+    iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
+    if (!iotlbentry->attrs.target_tlb_bit1) {
+        return NULL;
+    }
+
+    /*
+     * Find the physical address for the virtual access.
+     *
+     * TODO: It should be possible to have the tag mmu_idx map
+     * from main memory ram_addr to tag memory host address.
+     * That would allow this lookup step to be cached as well.
+     */
+    section = iotlb_to_section(cs, iotlbentry->addr, iotlbentry->attrs);
+    physaddr = ((iotlbentry->addr & TARGET_PAGE_MASK) + ptr
+                + section->offset_within_address_space
+                - section->offset_within_region);
+
+    /* Convert to the physical address in tag space.  */
+    tag_physaddr = physaddr >> (LOG2_TAG_GRANULE + 1);
+
+    /* Choose the tlb index to use for the tag physical access.  */
+    mmu_idx = iotlbentry->attrs.secure ? ARMMMUIdx_TagS : ARMMMUIdx_TagNS;
+    mmu_idx = arm_to_core_mmu_idx(mmu_idx);
+
+    /*
+     * FIXME: Get access length and type so that we can use
+     * probe_access, so that pages are marked dirty for migration.
+     */
+    return tlb_vaddr_to_host(env, tag_physaddr, MMU_DATA_LOAD, mmu_idx);
+#endif
 }
 
 static int get_allocation_tag(CPUARMState *env, uint64_t ptr, uintptr_t ra)
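
A note on the tag-address conversion in the patch: with
LOG2_TAG_GRANULE = 4 (the architectural 16-byte MTE granule) and 4-bit
tags packed two per byte, one byte of tag storage covers two granules,
i.e. 32 bytes of data, hence the shift by LOG2_TAG_GRANULE + 1.
A worked example, assuming LOG2_TAG_GRANULE == 4:

    /*
     * data physaddr 0x1040
     *   -> granule index:  0x1040 >> 4 == 0x104
     *   -> tag byte:       0x104  >> 1 == 0x82
     * i.e. 0x1040 >> (LOG2_TAG_GRANULE + 1) == 0x82, with the 4-bit
     * tag selected from the low or high nibble by bit 4 of the address.
     */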