diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1017,7 +1017,8 @@ static inline void tlb_set_compare(CPUTLBEntryFull *full, CPUTLBEntry *ent,
* critical section.
*/
void tlb_set_page_full(CPUState *cpu, int mmu_idx,
- vaddr addr, CPUTLBEntryFull *full)
+ vaddr addr, MMUAccessType access_type,
+ CPUTLBEntryFull *full)
{
CPUTLB *tlb = &cpu->neg.tlb;
CPUTLBDesc *desc = &tlb->d[mmu_idx];
@@ -1044,7 +1045,8 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
prot = full->prot;
asidx = cpu_asidx_from_attrs(cpu, full->attrs);
section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
- &xlat, &sz, full->attrs, &prot);
+ &xlat, &sz, full->attrs, &prot,
+ access_type);
assert(sz >= TARGET_PAGE_SIZE);
tlb_debug("vaddr=%016" VADDR_PRIx " paddr=0x" HWADDR_FMT_plx
@@ -1181,7 +1183,8 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
hwaddr paddr, MemTxAttrs attrs, int prot,
- int mmu_idx, vaddr size)
+ MMUAccessType access_type, int mmu_idx,
+ vaddr size)
{
CPUTLBEntryFull full = {
.phys_addr = paddr,
@@ -1191,15 +1194,15 @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
};
assert(is_power_of_2(size));
- tlb_set_page_full(cpu, mmu_idx, addr, &full);
+ tlb_set_page_full(cpu, mmu_idx, addr, access_type, &full);
}
void tlb_set_page(CPUState *cpu, vaddr addr,
- hwaddr paddr, int prot,
+ hwaddr paddr, int prot, MMUAccessType access_type,
int mmu_idx, vaddr size)
{
tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
- prot, mmu_idx, size);
+ prot, access_type, mmu_idx, size);
}
/**
@@ -1241,7 +1244,7 @@ static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
if (ops->tlb_fill_align) {
if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
memop, size, probe, ra)) {
- tlb_set_page_full(cpu, mmu_idx, addr, &full);
+ tlb_set_page_full(cpu, mmu_idx, addr, type, &full);
return true;
}
} else {
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -40,6 +40,7 @@ void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
* @cpu: CPU context
* @mmu_idx: mmu index of the tlb to modify
* @addr: virtual address of the entry to add
+ * @access_type: type of access (read, write or execute)
* @full: the details of the tlb entry
*
* Add an entry to @cpu tlb index @mmu_idx. All of the fields of
@@ -55,6 +56,7 @@ void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
* used by tlb_flush_page.
*/
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
+ MMUAccessType access_type,
CPUTLBEntryFull *full);
/**
@@ -64,6 +66,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
* @paddr: physical address of the page
* @attrs: memory transaction attributes
* @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
+ * @access_type: type of access (read, write or execute)
* @mmu_idx: MMU index to insert TLB entry for
* @size: size of the page in bytes
*
@@ -80,9 +83,9 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
* used by tlb_flush_page.
*/
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
- hwaddr paddr, MemTxAttrs attrs,
- int prot, int mmu_idx, vaddr size);
-
+ hwaddr paddr, MemTxAttrs attrs, int prot,
+ MMUAccessType access_type, int mmu_idx,
+ vaddr size);
/**
* tlb_set_page:
*
@@ -91,7 +94,7 @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
* as a convenience for CPUs which don't use memory transaction attributes.
*/
void tlb_set_page(CPUState *cpu, vaddr addr,
- hwaddr paddr, int prot,
+ hwaddr paddr, int prot, MMUAccessType access_type,
int mmu_idx, vaddr size);
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -232,7 +232,8 @@ static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
hwaddr *xlat, hwaddr *plen,
- MemTxAttrs attrs, int *prot);
+ MemTxAttrs attrs, int *prot,
+ MMUAccessType access_type);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section);
#endif
diff --git a/system/physmem.c b/system/physmem.c
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -681,12 +681,14 @@ void tcg_iommu_init_notifier_list(CPUState *cpu)
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
hwaddr *xlat, hwaddr *plen,
- MemTxAttrs attrs, int *prot)
+ MemTxAttrs attrs, int *prot,
+ MMUAccessType access_type)
{
MemoryRegionSection *section;
IOMMUMemoryRegion *iommu_mr;
IOMMUMemoryRegionClass *imrc;
IOMMUTLBEntry iotlb;
+ IOMMUAccessFlags iommu_flags;
int iommu_idx;
hwaddr addr = orig_addr;
AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
@@ -703,10 +705,18 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr,
iommu_idx = imrc->attrs_to_index(iommu_mr, attrs);
tcg_register_iommu_notifier(cpu, iommu_mr, iommu_idx);
- /* We need all the permissions, so pass IOMMU_NONE so the IOMMU
- * doesn't short-cut its translation table walk.
- */
- iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, iommu_idx);
+
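+    /*
+     * Request only the permission this access needs, so an IOMMU that
+     * distinguishes RO and WO mappings returns the matching section.
+     */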
+ if (access_type == MMU_DATA_STORE) {
+ iommu_flags = IOMMU_WO;
+ } else {
+ iommu_flags = IOMMU_RO;
+ }
+
+ iotlb = imrc->translate(iommu_mr, addr, iommu_flags, iommu_idx);
addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
| (addr & iotlb.addr_mask));
/* Update the caller's prot bits to remove permissions the IOMMU
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
--- a/target/alpha/helper.c
+++ b/target/alpha/helper.c
@@ -318,7 +318,7 @@ bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
}
tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
- prot, mmu_idx, TARGET_PAGE_SIZE);
+ prot, access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
diff --git a/target/avr/helper.c b/target/avr/helper.c
--- a/target/avr/helper.c
+++ b/target/avr/helper.c
@@ -149,7 +149,7 @@ bool avr_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
}
- tlb_set_page(cs, address, paddr, prot, mmu_idx, page_size);
+ tlb_set_page(cs, address, paddr, prot, access_type, mmu_idx, page_size);
return true;
}
diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c
--- a/target/hppa/mem_helper.c
+++ b/target/hppa/mem_helper.c
@@ -473,7 +473,6 @@ bool hppa_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
out->prot = prot;
out->attrs = MEMTXATTRS_UNSPECIFIED;
out->lg_page_size = TARGET_PAGE_BITS;
-
return true;
}
diff --git a/target/i386/tcg/system/excp_helper.c b/target/i386/tcg/system/excp_helper.c
--- a/target/i386/tcg/system/excp_helper.c
+++ b/target/i386/tcg/system/excp_helper.c
@@ -624,7 +624,8 @@ bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
out.paddr & TARGET_PAGE_MASK,
cpu_get_mem_attrs(env),
- out.prot, mmu_idx, out.page_size);
+ out.prot, access_type, mmu_idx,
+ out.page_size);
return true;
}
diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c
--- a/target/loongarch/tcg/tlb_helper.c
+++ b/target/loongarch/tcg/tlb_helper.c
@@ -522,7 +522,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
" prot %d\n", __func__, address, physical, prot);
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
--- a/target/m68k/helper.c
+++ b/target/m68k/helper.c
@@ -969,7 +969,7 @@ bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
tlb_set_page(cs, address & TARGET_PAGE_MASK,
address & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC,
- mmu_idx, TARGET_PAGE_SIZE);
+ qemu_access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
@@ -989,7 +989,8 @@ bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
address, access_type, &page_size);
if (likely(ret == 0)) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
- physical & TARGET_PAGE_MASK, prot, mmu_idx, page_size);
+ physical & TARGET_PAGE_MASK, prot, qemu_access_type,
+ mmu_idx, page_size);
return true;
}
@@ -1461,6 +1462,7 @@ void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read)
int prot;
int ret;
target_ulong page_size;
+    MMUAccessType qemu_access_type = MMU_DATA_LOAD;
access_type = ACCESS_PTEST;
if (env->dfc & 4) {
@@ -1468,9 +1470,11 @@ void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read)
}
if ((env->dfc & 3) == 2) {
access_type |= ACCESS_CODE;
+ qemu_access_type = MMU_INST_FETCH;
}
if (!is_read) {
access_type |= ACCESS_STORE;
+ qemu_access_type = MMU_DATA_STORE;
}
env->mmu.mmusr = 0;
@@ -1480,7 +1484,7 @@ void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read)
if (ret == 0) {
tlb_set_page(env_cpu(env), addr & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK,
- prot, access_type & ACCESS_SUPER ?
+ prot, qemu_access_type, access_type & ACCESS_SUPER ?
MMU_KERNEL_IDX : MMU_USER_IDX, page_size);
}
}
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -53,8 +53,8 @@ bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
/* MMU disabled or not available. */
address &= TARGET_PAGE_MASK;
prot = PAGE_RWX;
- tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
- TARGET_PAGE_SIZE);
+ tlb_set_page_with_attrs(cs, address, address, attrs, prot, access_type,
+ mmu_idx, TARGET_PAGE_SIZE);
return true;
}
@@ -65,8 +65,8 @@ bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
mmu_idx, vaddr, paddr, lu.prot);
- tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
- TARGET_PAGE_SIZE);
+ tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, access_type,
+ mmu_idx, TARGET_PAGE_SIZE);
return true;
}
diff --git a/target/mips/tcg/system/tlb_helper.c b/target/mips/tcg/system/tlb_helper.c
--- a/target/mips/tcg/system/tlb_helper.c
+++ b/target/mips/tcg/system/tlb_helper.c
@@ -933,7 +933,7 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
#if !defined(TARGET_MIPS64)
@@ -951,7 +951,7 @@ bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
}
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
--- a/target/openrisc/mmu.c
+++ b/target/openrisc/mmu.c
@@ -128,7 +128,7 @@ bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
if (likely(excp == 0)) {
tlb_set_page(cs, addr & TARGET_PAGE_MASK,
phys_addr & TARGET_PAGE_MASK, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
--- a/target/ppc/mmu_helper.c
+++ b/target/ppc/mmu_helper.c
@@ -1369,7 +1369,7 @@ bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
if (ppc_xlate(cpu, eaddr, access_type, &raddr,
&page_size, &prot, mmu_idx, !probe)) {
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
- prot, mmu_idx, 1UL << page_size);
+ prot, access_type, mmu_idx, 1UL << page_size);
return true;
}
if (probe) {
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -1967,7 +1967,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TRANSLATE_SUCCESS) {
tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
- prot, mmu_idx, tlb_size);
+ prot, access_type, mmu_idx, tlb_size);
return true;
} else if (probe) {
return false;
diff --git a/target/rx/cpu.c b/target/rx/cpu.c
--- a/target/rx/cpu.c
+++ b/target/rx/cpu.c
@@ -182,7 +182,8 @@ static bool rx_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
/* Linear mapping */
address = physical = addr & TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ tlb_set_page(cs, address, physical, prot, access_type,
+ mmu_idx, TARGET_PAGE_SIZE);
return true;
}
diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c
--- a/target/s390x/tcg/excp_helper.c
+++ b/target/s390x/tcg/excp_helper.c
@@ -179,7 +179,7 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
"%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
__func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
- mmu_idx, TARGET_PAGE_SIZE);
+ access_type, mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
--- a/target/sh4/helper.c
+++ b/target/sh4/helper.c
@@ -808,7 +808,8 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == MMU_OK) {
address &= TARGET_PAGE_MASK;
physical &= TARGET_PAGE_MASK;
- tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+        tlb_set_page(cs, address, physical, prot, access_type,
+                     mmu_idx, TARGET_PAGE_SIZE);
return true;
}
if (probe) {
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
--- a/target/sparc/mmu_helper.c
+++ b/target/sparc/mmu_helper.c
@@ -229,7 +229,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
"Translate at %" VADDR_PRIx " -> "
HWADDR_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
address, full.phys_addr, vaddr);
- tlb_set_page_full(cs, mmu_idx, vaddr, &full);
+ tlb_set_page_full(cs, mmu_idx, vaddr, access_type, &full);
return true;
}
@@ -245,7 +245,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
neverland. Fake/overridden mappings will be flushed when
switching to normal mode. */
full.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- tlb_set_page_full(cs, mmu_idx, vaddr, &full);
+ tlb_set_page_full(cs, mmu_idx, vaddr, access_type, &full);
return true;
} else {
if (access_type == MMU_INST_FETCH) {
@@ -769,7 +769,7 @@ bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
trace_mmu_helper_mmu_fault(address, full.phys_addr, mmu_idx, env->tl,
env->dmmu.mmu_primary_context,
env->dmmu.mmu_secondary_context);
- tlb_set_page_full(cs, mmu_idx, address, &full);
+ tlb_set_page_full(cs, mmu_idx, address, access_type, &full);
return true;
}
if (probe) {
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
--- a/target/tricore/helper.c
+++ b/target/tricore/helper.c
@@ -84,7 +84,7 @@ bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TLBRET_MATCH) {
tlb_set_page(cs, address & TARGET_PAGE_MASK,
physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
- mmu_idx, TARGET_PAGE_SIZE);
+ rw, mmu_idx, TARGET_PAGE_SIZE);
return true;
} else {
assert(ret < 0);
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
--- a/target/xtensa/helper.c
+++ b/target/xtensa/helper.c
@@ -281,7 +281,8 @@ bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
tlb_set_page(cs,
address & TARGET_PAGE_MASK,
paddr & TARGET_PAGE_MASK,
- access, mmu_idx, page_size);
+ access, access_type, mmu_idx,
+ page_size);
return true;
} else if (probe) {
return false;
This is a preparation patch for the upcoming RISC-V wgChecker device.
Since the wgChecker can permit read-only or write-only access, its
IOMMUMemoryRegion may return a different section for a read access
than for a write access. Memory accesses from the CPU must therefore
pass the access type down to the IOMMU translate function, so that
the IOMMU can return the section that matches the requested access
type.

Signed-off-by: Jim Shu <jim.shu@sifive.com>
---
 accel/tcg/cputlb.c                   | 17 ++++++++++-------
 include/exec/cputlb.h                | 11 +++++++----
 include/exec/exec-all.h              |  3 ++-
 system/physmem.c                     | 20 +++++++++++++-----
 target/alpha/helper.c                |  2 +-
 target/avr/helper.c                  |  2 +-
 target/hppa/mem_helper.c             |  1 -
 target/i386/tcg/system/excp_helper.c |  3 ++-
 target/loongarch/tcg/tlb_helper.c    |  2 +-
 target/m68k/helper.c                 | 10 +++++++---
 target/microblaze/helper.c           |  8 ++++----
 target/mips/tcg/system/tlb_helper.c  |  4 ++--
 target/openrisc/mmu.c                |  2 +-
 target/ppc/mmu_helper.c              |  2 +-
 target/riscv/cpu_helper.c            |  2 +-
 target/rx/cpu.c                      |  3 ++-
 target/s390x/tcg/excp_helper.c       |  2 +-
 target/sh4/helper.c                  |  3 ++-
 target/sparc/mmu_helper.c            |  6 +++---
 target/tricore/helper.c              |  2 +-
 target/xtensa/helper.c               |  3 ++-
 21 files changed, 66 insertions(+), 42 deletions(-)
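For reviewers: below is a minimal sketch of how an IOMMU translate hook
might consume the access flag that address_space_translate_for_iotlb()
now forwards. It is not part of this patch; the WgCheckerRegion type,
its ro_base/ro_size and wo_base/wo_size fields, and wgchecker_translate()
are hypothetical illustrations, not the actual wgChecker implementation.

#include "qemu/osdep.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

/* Hypothetical checker region with distinct read and write windows. */
typedef struct WgCheckerRegion {
    IOMMUMemoryRegion iommu;
    hwaddr ro_base, ro_size;    /* window this world may read */
    hwaddr wo_base, wo_size;    /* window this world may write */
} WgCheckerRegion;

static IOMMUTLBEntry wgchecker_translate(IOMMUMemoryRegion *iommu,
                                         hwaddr addr, IOMMUAccessFlags flag,
                                         int iommu_idx)
{
    WgCheckerRegion *s = container_of(iommu, WgCheckerRegion, iommu);
    hwaddr page = addr & ~(hwaddr)0xfff;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = page,
        .translated_addr = page,    /* identity mapping */
        .addr_mask = 0xfff,         /* 4 KiB granule, for illustration */
        .perm = IOMMU_NONE,         /* default: no access */
    };

    /*
     * The returned permission depends on which access type the caller
     * asked for; with the old IOMMU_NONE argument the two windows
     * below could not be told apart.
     */
    if (flag & IOMMU_WO) {
        if (page >= s->wo_base && page - s->wo_base < s->wo_size) {
            entry.perm = IOMMU_WO;
        }
    } else {
        if (page >= s->ro_base && page - s->ro_base < s->ro_size) {
            entry.perm = IOMMU_RO;
        }
    }
    return entry;
}

Under the previous behaviour, where address_space_translate_for_iotlb()
always passed IOMMU_NONE, a hook like this could describe only one
permission set per address, so a window that is readable but not
writable (or vice versa) could not be modelled for TCG CPU accesses.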