| Message ID | 20231005181629.4046-6-vikram.garhwal@amd.com (mailing list archive) |
| --- | --- |
| State | New, archived |
| Series | Xen: support grant mappings. |
On Thu, 5 Oct 2023, Vikram Garhwal wrote:
> From: Juergen Gross <jgross@suse.com>
>
> In order to support mapping and unmapping guest memory dynamically to
> and from qemu during address_space_[un]map() operations add the map()
> and unmap() callbacks to MemoryRegionOps.
>
> Those will be used e.g. for Xen grant mappings when performing guest
> I/Os.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
> Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>

Can't we just use the existing Xen hooks in qemu_ram_ptr_length and
xen_invalidate_map_cache_entry? Do we really need new ones?

> ---
>  include/exec/memory.h | 21 ++++++++++++++++++
>  softmmu/physmem.c     | 50 +++++++++++++++++++++++++++++++++----------
>  2 files changed, 60 insertions(+), 11 deletions(-)
> [...]
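For readers skimming the thread, here is a minimal sketch of what a provider of the two new callbacks could look like. It is illustrative only: the grant_mr_* ops table and the xen_grant_map()/xen_grant_unmap() helpers, including their signatures, are assumptions made up for this example; the real grant-mapping backend is added later in the series. The point is the contract: map() may shrink *plen and may replace *mr, and it returns the host pointer that unmap() later releases.

```c
/*
 * Illustrative sketch only, not part of this patch.  Shows the shape of a
 * MemoryRegionOps providing the new map()/unmap() hooks.
 */
#include "qemu/osdep.h"
#include "exec/memory.h"

/* Assumed helpers; stand-ins for whatever backend creates the mapping. */
void *xen_grant_map(hwaddr addr, hwaddr *plen, bool is_write);
void xen_grant_unmap(void *buffer, hwaddr len);

static void *grant_mr_map(MemoryRegion **mr, hwaddr addr, hwaddr *plen,
                          bool is_write, MemTxAttrs attrs)
{
    /*
     * Create the mapping on demand.  *plen may be reduced if less than the
     * requested length could be mapped; *mr may be replaced if a new
     * memory region had to be created for the mapping.
     */
    return xen_grant_map(addr, plen, is_write);
}

static void grant_mr_unmap(MemoryRegion *mr, void *buffer, ram_addr_t addr,
                           hwaddr len, bool is_write, hwaddr access_len)
{
    /* Release exactly the buffer that grant_mr_map() returned. */
    xen_grant_unmap(buffer, len);
}

/* Installed on the region that should be mapped dynamically. */
static const MemoryRegionOps grant_mr_ops = {
    .map = grant_mr_map,
    .unmap = grant_mr_unmap,
};
```

With such an ops table installed on a region, address_space_map() and address_space_cache_init() take the hook instead of qemu_ram_ptr_length(), as the diff at the bottom of this page shows.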
On 10.10.23 02:17, Stefano Stabellini wrote:
> On Thu, 5 Oct 2023, Vikram Garhwal wrote:
>> From: Juergen Gross <jgross@suse.com>
>>
>> In order to support mapping and unmapping guest memory dynamically to
>> and from qemu during address_space_[un]map() operations add the map()
>> and unmap() callbacks to MemoryRegionOps.
>>
>> Those will be used e.g. for Xen grant mappings when performing guest
>> I/Os.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
>
> Can't we just use the existing Xen hooks in qemu_ram_ptr_length and
> xen_invalidate_map_cache_entry? Do we really need new ones?

I tried your idea first and it didn't work out.

The existing hooks are invoked not only when explicitly [un]mapping memory
regions, but in some other cases, too. Have a look at the
qemu_ram_ptr_length() call in flatview_write_continue().


Juergen
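To make the call-path argument concrete, the two call sites of qemu_ram_ptr_length() differ in their lock argument (the snippets below are paraphrased for illustration, not quoted verbatim from the tree):

```c
/*
 * The same helper backs two very different paths, so hooking grant
 * mapping unconditionally inside it would also fire for ordinary guest
 * RAM accesses.  (Paraphrased call sites, for illustration only.)
 */

/* address_space_map(): an explicit, long-lived mapping, lock == true. */
ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);

/*
 * flatview_write_continue(): a transient pointer used for one memcpy,
 * lock == false.
 */
ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false);
```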
On Wed, 11 Oct 2023, Juergen Gross wrote:
> On 10.10.23 02:17, Stefano Stabellini wrote:
> > On Thu, 5 Oct 2023, Vikram Garhwal wrote:
> > > From: Juergen Gross <jgross@suse.com>
> > >
> > > In order to support mapping and unmapping guest memory dynamically to
> > > and from qemu during address_space_[un]map() operations add the map()
> > > and unmap() callbacks to MemoryRegionOps.
> > >
> > > Those will be used e.g. for Xen grant mappings when performing guest
> > > I/Os.
> > >
> > > Signed-off-by: Juergen Gross <jgross@suse.com>
> > > Signed-off-by: Vikram Garhwal <vikram.garhwal@amd.com>
> >
> > Can't we just use the existing Xen hooks in qemu_ram_ptr_length and
> > xen_invalidate_map_cache_entry? Do we really need new ones?
>
> I tried your idea first and it didn't work out.
>
> The existing hooks are invoked not only when explicitly [un]mapping memory
> regions, but in some other cases, too. Have a look at the
> qemu_ram_ptr_length() call in flatview_write_continue().

Hi Juergen, thanks for the explanation and sorry for my late reply. I missed
your email when it came out.

If that is the only problem, it seems to me that it could be solved. The call
to qemu_ram_ptr_length() in flatview_write_continue() is unlocked. It should
also be distinguishable by address (the grants have the top bit set?)

So from qemu_ram_ptr_length() we could call xen_map_grant_dyn only when
locked. And in xen_map_grant_dyn we could also check that XEN_GRANT_ADDR_OFF
is set before continuing.

Do you see any other issues with it?
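For concreteness, the check that qemu_ram_ptr_length() could grow might look roughly like the sketch below. This is only an illustration of the idea, not code from the series: xen_map_grant_dyn() and XEN_GRANT_ADDR_OFF appear elsewhere in the series, but the signature used here and the xen_is_grant_addr() predicate standing in for the "top bit set" test are assumptions.

```c
/*
 * Sketch of the alternative discussed above (assumptions, not code from
 * the series): divert to dynamic grant mapping only when the caller asked
 * for a locked mapping, i.e. on the address_space_map() path, and the
 * address is recognisable as a grant address.
 */
if (xen_enabled() && lock && xen_is_grant_addr(block, addr)) {
    /*
     * flatview_write_continue() requests its transient pointer with
     * lock == false, so ordinary guest RAM writes never reach this branch.
     * xen_is_grant_addr() is a placeholder for the "top bit set" check.
     */
    return xen_map_grant_dyn(block, addr, size);
}
```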
```diff
diff --git a/include/exec/memory.h b/include/exec/memory.h
index c99842d2fc..f3c62d2883 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -280,6 +280,27 @@ struct MemoryRegionOps {
                         unsigned size,
                         MemTxAttrs attrs);
 
+    /*
+     * Dynamically create mapping. @addr is the guest address to map; @plen
+     * is the pointer to the usable length of the buffer.
+     * @mr contents can be changed in case a new memory region is created for
+     * the mapping.
+     * Returns the buffer address for accessing the data.
+     */
+    void *(*map)(MemoryRegion **mr,
+                 hwaddr addr,
+                 hwaddr *plen,
+                 bool is_write,
+                 MemTxAttrs attrs);
+
+    /* Unmap an area obtained via map() before. */
+    void (*unmap)(MemoryRegion *mr,
+                  void *buffer,
+                  ram_addr_t addr,
+                  hwaddr len,
+                  bool is_write,
+                  hwaddr access_len);
+
     enum device_endian endianness;
     /* Guest-visible constraints: */
     struct {
diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 6e5e379dd0..5f425bea1c 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3135,6 +3135,7 @@ void *address_space_map(AddressSpace *as,
     hwaddr len = *plen;
     hwaddr l, xlat;
     MemoryRegion *mr;
+    void *ptr = NULL;
     FlatView *fv;
 
     if (len == 0) {
@@ -3168,12 +3169,20 @@ void *address_space_map(AddressSpace *as,
         return bounce.buffer;
     }
 
-
     memory_region_ref(mr);
+
+    if (mr->ops && mr->ops->map) {
+        ptr = mr->ops->map(&mr, addr, plen, is_write, attrs);
+    }
+
     *plen = flatview_extend_translation(fv, addr, len, mr, xlat,
                                         l, is_write, attrs);
     fuzz_dma_read_cb(addr, *plen, mr);
-    return qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
+    if (ptr == NULL) {
+        ptr = qemu_ram_ptr_length(mr->ram_block, xlat, plen, true);
+    }
+
+    return ptr;
 }
 
 /* Unmaps a memory region previously mapped by address_space_map().
@@ -3189,11 +3198,16 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
 
     mr = memory_region_from_host(buffer, &addr1);
     assert(mr != NULL);
-    if (is_write) {
-        invalidate_and_set_dirty(mr, addr1, access_len);
-    }
-    if (xen_enabled()) {
-        xen_invalidate_map_cache_entry(buffer);
+
+    if (mr->ops && mr->ops->unmap) {
+        mr->ops->unmap(mr, buffer, addr1, len, is_write, access_len);
+    } else {
+        if (is_write) {
+            invalidate_and_set_dirty(mr, addr1, access_len);
+        }
+        if (xen_enabled()) {
+            xen_invalidate_map_cache_entry(buffer);
+        }
     }
     memory_region_unref(mr);
     return;
@@ -3266,10 +3280,18 @@ int64_t address_space_cache_init(MemoryRegionCache *cache,
          * doing this if we found actual RAM, which behaves the same
          * regardless of attributes; so UNSPECIFIED is fine.
          */
+        if (mr->ops && mr->ops->map) {
+            cache->ptr = mr->ops->map(&mr, addr, &l, is_write,
+                                      MEMTXATTRS_UNSPECIFIED);
+        }
+
         l = flatview_extend_translation(cache->fv, addr, len, mr,
                                         cache->xlat, l, is_write,
                                         MEMTXATTRS_UNSPECIFIED);
-        cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l, true);
+        if (!cache->ptr) {
+            cache->ptr = qemu_ram_ptr_length(mr->ram_block, cache->xlat, &l,
+                                             true);
+        }
     } else {
         cache->ptr = NULL;
     }
@@ -3291,14 +3313,20 @@ void address_space_cache_invalidate(MemoryRegionCache *cache,
 
 void address_space_cache_destroy(MemoryRegionCache *cache)
 {
-    if (!cache->mrs.mr) {
+    MemoryRegion *mr = cache->mrs.mr;
+
+    if (!mr) {
         return;
     }
 
-    if (xen_enabled()) {
+    if (mr->ops && mr->ops->unmap) {
+        mr->ops->unmap(mr, cache->ptr, cache->xlat, cache->len,
+                       cache->is_write, cache->len);
+    } else if (xen_enabled()) {
         xen_invalidate_map_cache_entry(cache->ptr);
     }
-    memory_region_unref(cache->mrs.mr);
+
+    memory_region_unref(mr);
     flatview_unref(cache->fv);
     cache->mrs.mr = NULL;
     cache->fv = NULL;
```
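For completeness, nothing changes on the caller side: address_space_map() routes through mr->ops->map() transparently when the target region provides it, and address_space_unmap() likewise routes through mr->ops->unmap(). A minimal caller-side sketch, assuming only the public API shown in this patch (the copy_to_guest() helper name is made up for illustration):

```c
#include "qemu/osdep.h"
#include "exec/memory.h"

/*
 * Minimal caller-side sketch: the map/unmap hooks added by this patch are
 * taken (or not) inside address_space_map()/address_space_unmap(); the
 * caller code looks exactly as it did before.
 */
static MemTxResult copy_to_guest(AddressSpace *as, hwaddr gpa,
                                 const void *data, hwaddr size)
{
    hwaddr len = size;
    void *host = address_space_map(as, gpa, &len, true, MEMTXATTRS_UNSPECIFIED);

    if (!host || len < size) {
        if (host) {
            /* Nothing was written yet, so access_len is 0. */
            address_space_unmap(as, host, len, true, 0);
        }
        return MEMTX_ERROR;
    }

    memcpy(host, data, size);
    /* access_len tells unmap()/dirty tracking how much was actually written. */
    address_space_unmap(as, host, len, true, size);
    return MEMTX_OK;
}
```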