@@ -1942,8 +1942,13 @@ static uint64_t int_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
this_size = 1 << this_mop;
this_mop |= MO_BE;
- r = memory_region_dispatch_read(mr, mr_offset, &val,
- this_mop, full->attrs);
+ if (type == MMU_INST_FETCH) {
+ r = memory_region_dispatch_fetch(mr, mr_offset, &val,
+ this_mop, full->attrs);
+ } else {
+ r = memory_region_dispatch_read(mr, mr_offset, &val,
+ this_mop, full->attrs);
+ }
if (unlikely(r != MEMTX_OK)) {
io_failed(cpu, full, addr, this_size, type, mmu_idx, r, ra);
}
@@ -274,6 +274,13 @@ struct MemoryRegionOps {
uint64_t data,
unsigned size);
+ /* Fetch (instruction read) from the memory region. @addr is relative to
+ * @mr; @size is in bytes. If absent, the read callbacks are used. */
+ uint64_t (*fetch)(void *opaque,
+ hwaddr addr,
+ unsigned size);
+
MemTxResult (*read_with_attrs)(void *opaque,
hwaddr addr,
uint64_t *data,
@@ -284,6 +291,12 @@ struct MemoryRegionOps {
uint64_t data,
unsigned size,
MemTxAttrs attrs);
+ MemTxResult (*fetch_with_attrs)(void *opaque,
+ hwaddr addr,
+ uint64_t *data,
+ unsigned size,
+ MemTxAttrs attrs);
+
enum device_endian endianness;
/* Guest-visible constraints: */
@@ -2602,6 +2615,23 @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
MemOp op,
MemTxAttrs attrs);
+
+/**
+ * memory_region_dispatch_fetch: perform an instruction fetch directly from
+ * the specified MemoryRegion.
+ *
+ * @mr: #MemoryRegion to access
+ * @addr: address within that region
+ * @pval: pointer to uint64_t which the data is written to
+ * @op: size, sign, and endianness of the memory operation
+ * @attrs: memory transaction attributes to use for the access
+ */
+MemTxResult memory_region_dispatch_fetch(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t *pval,
+ MemOp op,
+ MemTxAttrs attrs);
+
/**
* address_space_init: initializes an address space
*
@@ -477,6 +477,51 @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
return r;
}
+static MemTxResult memory_region_fetch_accessor(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t *value,
+ unsigned size,
+ signed shift,
+ uint64_t mask,
+ MemTxAttrs attrs)
+{
+ uint64_t tmp;
+
+ tmp = mr->ops->fetch(mr->opaque, addr, size);
+ if (mr->subpage) {
+ trace_memory_region_subpage_fetch(get_cpu_index(), mr, addr, tmp, size);
+ } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_FETCH)) {
+ hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
+ trace_memory_region_ops_fetch(get_cpu_index(), mr, abs_addr, tmp, size,
+ memory_region_name(mr));
+ }
+ memory_region_shift_read_access(value, shift, mask, tmp);
+ return MEMTX_OK;
+}
+
+static MemTxResult memory_region_fetch_with_attrs_accessor(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t *value,
+ unsigned size,
+ signed shift,
+ uint64_t mask,
+ MemTxAttrs attrs)
+{
+ uint64_t tmp = 0;
+ MemTxResult r;
+
+ r = mr->ops->fetch_with_attrs(mr->opaque, addr, &tmp, size, attrs);
+ if (mr->subpage) {
+ trace_memory_region_subpage_fetch(get_cpu_index(), mr, addr, tmp, size);
+ } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_FETCH)) {
+ hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
+ trace_memory_region_ops_fetch(get_cpu_index(), mr, abs_addr, tmp, size,
+ memory_region_name(mr));
+ }
+ memory_region_shift_read_access(value, shift, mask, tmp);
+ return r;
+}
+
static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
hwaddr addr,
uint64_t *value,
@@ -1461,6 +1506,65 @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
return r;
}
+static MemTxResult memory_region_dispatch_fetch1(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t *pval,
+ unsigned size,
+ MemTxAttrs attrs)
+{
+ *pval = 0;
+
+ if (mr->ops->fetch) {
+ return access_with_adjusted_size(addr, pval, size,
+ mr->ops->impl.min_access_size,
+ mr->ops->impl.max_access_size,
+ memory_region_fetch_accessor,
+ mr, attrs);
+ } else if (mr->ops->fetch_with_attrs) {
+ return access_with_adjusted_size(addr, pval, size,
+ mr->ops->impl.min_access_size,
+ mr->ops->impl.max_access_size,
+ memory_region_fetch_with_attrs_accessor,
+ mr, attrs);
+ } else if (mr->ops->read) {
+ return access_with_adjusted_size(addr, pval, size,
+ mr->ops->impl.min_access_size,
+ mr->ops->impl.max_access_size,
+ memory_region_read_accessor,
+ mr, attrs);
+ } else {
+ return access_with_adjusted_size(addr, pval, size,
+ mr->ops->impl.min_access_size,
+ mr->ops->impl.max_access_size,
+ memory_region_read_with_attrs_accessor,
+ mr, attrs);
+ }
+}
+
+MemTxResult memory_region_dispatch_fetch(MemoryRegion *mr,
+ hwaddr addr,
+ uint64_t *pval,
+ MemOp op,
+ MemTxAttrs attrs)
+{
+ unsigned size = memop_size(op);
+ MemTxResult r;
+
+ if (mr->alias) {
+ return memory_region_dispatch_fetch(mr->alias,
+ mr->alias_offset + addr,
+ pval, op, attrs);
+ }
+ if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
+ *pval = unassigned_mem_read(mr, addr, size);
+ return MEMTX_DECODE_ERROR;
+ }
+
+ r = memory_region_dispatch_fetch1(mr, addr, pval, size, attrs);
+ adjust_endianness(mr, pval, op);
+ return r;
+}
+
/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
hwaddr addr,
@@ -11,8 +11,10 @@ cpu_out(unsigned int addr, char size, unsigned int val) "addr 0x%x(%c) value %u"
# memory.c
memory_region_ops_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size, const char *name) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u name '%s'"
memory_region_ops_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size, const char *name) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u name '%s'"
+memory_region_ops_fetch(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size, const char *name) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u name '%s'"
memory_region_subpage_read(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
memory_region_subpage_write(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
+memory_region_subpage_fetch(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u"
memory_region_ram_device_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u"
memory_region_sync_dirty(const char *mr, const char *listener, int global) "mr '%s' listener '%s' synced (global=%d)"
Allow memory regions to have different behaviors for read and fetch
operations. For example, the RISC-V IOPMP could raise an interrupt when
the CPU tries to fetch from a non-executable region. If a memory region
does not implement the fetch operation, the read operation is used for
fetch accesses instead.

Signed-off-by: Ethan Chen <ethan84@andestech.com>
---
 accel/tcg/cputlb.c    |   9 +++-
 include/exec/memory.h |  30 ++++++++++++
 system/memory.c       | 104 ++++++++++++++++++++++++++++++++++++++++++
 system/trace-events   |   2 +
 4 files changed, 143 insertions(+), 2 deletions(-)
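As an illustration of how a device model might use the new callback, here is
a minimal sketch of a MemoryRegionOps table that answers data reads normally
but returns a harmless pattern for instruction fetches. The device and handler
names and the returned values are hypothetical and not part of this patch:

#include "qemu/osdep.h"
#include "exec/memory.h"

/* Hypothetical data-side read handler: return device register contents. */
static uint64_t demo_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    return 0x12345678;
}

/* Hypothetical fetch handler: a device such as the IOPMP could raise an
 * interrupt here and return a trapping pattern instead of the register
 * contents. */
static uint64_t demo_mmio_fetch(void *opaque, hwaddr addr, unsigned size)
{
    return 0;
}

static const MemoryRegionOps demo_mmio_ops = {
    .read  = demo_mmio_read,
    .fetch = demo_mmio_fetch,     /* new callback introduced by this patch */
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

The table would be registered with memory_region_init_io() in the usual way.
If both .fetch and .fetch_with_attrs are left unset, memory_region_dispatch_fetch()
falls back to the existing .read/.read_with_attrs callbacks, so devices that do not
care about the distinction keep their current behaviour.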