diff mbox series

[RFC,06/12] exec/memory_ldst_cached: Set MemTxResult on success

Message ID 20210520110919.2483190-7-philmd@redhat.com (mailing list archive)
State New, archived
Headers show
Series exec/memory: Experimental API to catch unaligned accesses | expand

Commit Message

Philippe Mathieu-Daudé May 20, 2021, 11:09 a.m. UTC
If the caller passed a MemTxResult argument, we must fill
it with the transaction result. We already do so when no cache
is present; complete the other case, which always succeeds.

Fixes: 48564041a73 ("exec: reintroduce MemoryRegion caching")
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 include/exec/memory_ldst_cached.h.inc | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
diff mbox series

Patch

diff --git a/include/exec/memory_ldst_cached.h.inc b/include/exec/memory_ldst_cached.h.inc
index d7834f852c4..a8f146251d4 100644
--- a/include/exec/memory_ldst_cached.h.inc
+++ b/include/exec/memory_ldst_cached.h.inc
@@ -30,6 +30,9 @@  static inline uint16_t ADDRESS_SPACE_LD_CACHED(uw)(MemoryRegionCache *cache,
     assert(addr < cache->len && 2 <= cache->len - addr);
     fuzz_dma_read_cb(cache->xlat + addr, 2, cache->mrs.mr);
     if (likely(cache->ptr)) {
+        if (result) {
+            *result = MEMTX_OK;
+        }
         return LD_P(uw)(cache->ptr + addr);
     } else {
         return ADDRESS_SPACE_LD_CACHED_SLOW(uw)(cache, addr, attrs, result);
@@ -42,6 +45,9 @@  static inline uint32_t ADDRESS_SPACE_LD_CACHED(l)(MemoryRegionCache *cache,
     assert(addr < cache->len && 4 <= cache->len - addr);
     fuzz_dma_read_cb(cache->xlat + addr, 4, cache->mrs.mr);
     if (likely(cache->ptr)) {
+        if (result) {
+            *result = MEMTX_OK;
+        }
         return LD_P(l)(cache->ptr + addr);
     } else {
         return ADDRESS_SPACE_LD_CACHED_SLOW(l)(cache, addr, attrs, result);
@@ -54,6 +60,9 @@  static inline uint64_t ADDRESS_SPACE_LD_CACHED(q)(MemoryRegionCache *cache,
     assert(addr < cache->len && 8 <= cache->len - addr);
     fuzz_dma_read_cb(cache->xlat + addr, 8, cache->mrs.mr);
     if (likely(cache->ptr)) {
+        if (result) {
+            *result = MEMTX_OK;
+        }
         return LD_P(q)(cache->ptr + addr);
     } else {
         return ADDRESS_SPACE_LD_CACHED_SLOW(q)(cache, addr, attrs, result);
@@ -76,6 +85,9 @@  static inline void ADDRESS_SPACE_ST_CACHED(w)(MemoryRegionCache *cache,
 {
     assert(addr < cache->len && 2 <= cache->len - addr);
     if (likely(cache->ptr)) {
+        if (result) {
+            *result = MEMTX_OK;
+        }
         ST_P(w)(cache->ptr + addr, val);
     } else {
         ADDRESS_SPACE_ST_CACHED_SLOW(w)(cache, addr, val, attrs, result);
@@ -87,6 +99,9 @@  static inline void ADDRESS_SPACE_ST_CACHED(l)(MemoryRegionCache *cache,
 {
     assert(addr < cache->len && 4 <= cache->len - addr);
     if (likely(cache->ptr)) {
+        if (result) {
+            *result = MEMTX_OK;
+        }
         ST_P(l)(cache->ptr + addr, val);
     } else {
         ADDRESS_SPACE_ST_CACHED_SLOW(l)(cache, addr, val, attrs, result);
@@ -98,6 +113,9 @@  static inline void ADDRESS_SPACE_ST_CACHED(q)(MemoryRegionCache *cache,
 {
     assert(addr < cache->len && 8 <= cache->len - addr);
     if (likely(cache->ptr)) {
+        if (result) {
+            *result = MEMTX_OK;
+        }
         ST_P(q)(cache->ptr + addr, val);
     } else {
         ADDRESS_SPACE_ST_CACHED_SLOW(q)(cache, addr, val, attrs, result);