@@ -16,7 +16,6 @@
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "linux-headers/linux/vhost.h"
-#include "exec/ramblock.h"
/**
* Validate the transport device features that both guests can use with the SVQ
@@ -79,10 +78,11 @@ uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
* @vaddr: Translated IOVA addresses
* @iovec: Source qemu's VA addresses
* @num: Length of iovec and minimum length of vaddr
+ * @gpas: Descriptors' GPAs, if backed by guest memory
*/
static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
hwaddr *addrs, const struct iovec *iovec,
- size_t num)
+ size_t num, const hwaddr *gpas)
{
if (num == 0) {
return true;
@@ -91,22 +91,19 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
for (size_t i = 0; i < num; ++i) {
Int128 needle_last, map_last;
size_t off;
- RAMBlock *rb;
- hwaddr gpa;
- ram_addr_t offset;
const DMAMap *map;
DMAMap needle;
- rb = qemu_ram_block_from_host(iovec[i].iov_base, false, &offset);
- if (rb) {
- gpa = rb->offset + offset;
-
+ /* Check if the descriptor is backed by guest memory */
+ if (gpas) {
+ /* Search the GPA->IOVA tree */
needle = (DMAMap) {
- .translated_addr = gpa,
+ .translated_addr = gpas[i],
.size = iovec[i].iov_len,
};
map = vhost_iova_tree_find_gpa(svq->iova_tree, &needle);
} else {
+ /* Search the IOVA->HVA tree */
needle = (DMAMap) {
.translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base,
.size = iovec[i].iov_len,
@@ -148,6 +145,7 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
* @sg: Cache for hwaddr
* @iovec: The iovec from the guest
* @num: iovec length
+ * @addr: Descriptors' GPAs, if backed by guest memory
* @more_descs: True if more descriptors come in the chain
* @write: True if they are writeable descriptors
*
@@ -155,7 +153,8 @@ static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq,
*/
static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
const struct iovec *iovec, size_t num,
- bool more_descs, bool write)
+ const hwaddr *addr, bool more_descs,
+ bool write)
{
uint16_t i = svq->free_head, last = svq->free_head;
unsigned n;
@@ -167,7 +166,7 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
return true;
}
- ok = vhost_svq_translate_addr(svq, sg, iovec, num);
+ ok = vhost_svq_translate_addr(svq, sg, iovec, num, addr);
if (unlikely(!ok)) {
return false;
}
@@ -192,8 +191,9 @@ static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
const struct iovec *out_sg, size_t out_num,
+ const hwaddr *out_addr,
const struct iovec *in_sg, size_t in_num,
- unsigned *head)
+ const hwaddr *in_addr, unsigned *head)
{
unsigned avail_idx;
vring_avail_t *avail = svq->vring.avail;
@@ -209,13 +209,14 @@ static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
return false;
}
- ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0,
- false);
+ ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, out_addr,
+ in_num > 0, false);
if (unlikely(!ok)) {
return false;
}
- ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true);
+ ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, in_addr, false,
+ true);
if (unlikely(!ok)) {
return false;
}
@@ -265,8 +266,9 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
* Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
*/
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
- size_t out_num, const struct iovec *in_sg, size_t in_num,
- VirtQueueElement *elem)
+ size_t out_num, const hwaddr *out_addr,
+ const struct iovec *in_sg, size_t in_num,
+ const hwaddr *in_addr, VirtQueueElement *elem)
{
unsigned qemu_head;
unsigned ndescs = in_num + out_num;
@@ -276,7 +278,8 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
return -ENOSPC;
}
- ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head);
+ ok = vhost_svq_add_split(svq, out_sg, out_num, out_addr, in_sg, in_num,
+ in_addr, &qemu_head);
if (unlikely(!ok)) {
return -EINVAL;
}
@@ -292,8 +295,8 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
static int vhost_svq_add_element(VhostShadowVirtqueue *svq,
VirtQueueElement *elem)
{
- return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg,
- elem->in_num, elem);
+ return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->out_addr,
+ elem->in_sg, elem->in_num, elem->in_addr, elem);
}
/**
diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -118,8 +118,9 @@ uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq);
void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
const VirtQueueElement *elem, uint32_t len);
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
- size_t out_num, const struct iovec *in_sg, size_t in_num,
- VirtQueueElement *elem);
+ size_t out_num, const hwaddr *out_addr,
+ const struct iovec *in_sg, size_t in_num,
+ const hwaddr *in_addr, VirtQueueElement *elem);
size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num);
void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -649,7 +649,7 @@ static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
int r;
- r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
+ r = vhost_svq_add(svq, out_sg, out_num, NULL, in_sg, in_num, NULL, NULL);
if (unlikely(r != 0)) {
if (unlikely(r == -ENOSPC)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
Propagate the GPAs (in_addr/out_addr) of a VirtQueueElement to
vhost_svq_translate_addr() so that GPAs are translated to IOVAs via the
GPA->IOVA tree when descriptors are backed by guest memory. GPAs are
unique in the guest's address space, ensuring unambiguous IOVA
translations. This avoids the case where different GPAs map to the same
HVA and the HVA->IOVA translation could return an IOVA associated with
the wrong GPA.

For descriptors backed by host-only memory, the existing partial SVQ
IOVA->HVA tree is used.

Signed-off-by: Jonah Palmer <jonah.palmer@oracle.com>
---
 hw/virtio/vhost-shadow-virtqueue.c | 45 ++++++++++++++++--------------
 hw/virtio/vhost-shadow-virtqueue.h |  5 ++--
 net/vhost-vdpa.c                   |  2 +-
 3 files changed, 28 insertions(+), 24 deletions(-)
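
For reviewers who want the translation-key selection in isolation, here is a
minimal, self-contained sketch of the logic this patch adds to
vhost_svq_translate_addr(). It is not QEMU code: the hwaddr_t/map_entry types,
the static example trees, and the find_map()/translate_one() helpers are
hypothetical stand-ins for DMAMap, VhostIOVATree and the
vhost_iova_tree_find_*() calls. It only illustrates that a descriptor backed by
guest memory is looked up by its GPA, anything else falls back to a lookup by
HVA, and the offset inside the matched mapping is then applied to the mapping's
IOVA. In the example below the same HVA is reachable through both trees, which
is exactly the ambiguity the GPA-keyed lookup resolves.

/*
 * Illustrative sketch only -- simplified stand-ins, not QEMU's
 * DMAMap/VhostIOVATree API.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr_t;              /* stand-in for QEMU's hwaddr */

struct map_entry {                      /* stand-in for one tree node */
    hwaddr_t translated_addr;           /* GPA or HVA, depending on the tree */
    hwaddr_t iova;
    size_t size;
};

/* Example mappings: one tree keyed by GPA, one keyed by HVA */
static const struct map_entry gpa_tree[] = {
    { .translated_addr = 0x40000000, .iova = 0x1000, .size = 0x10000 },
};
static const struct map_entry hva_tree[] = {
    { .translated_addr = 0x7f0000000000ULL, .iova = 0x2000, .size = 0x10000 },
};

static const struct map_entry *find_map(const struct map_entry *tree, size_t n,
                                        hwaddr_t key, size_t len)
{
    for (size_t i = 0; i < n; i++) {
        if (key >= tree[i].translated_addr &&
            key + len <= tree[i].translated_addr + tree[i].size) {
            return &tree[i];
        }
    }
    return NULL;
}

/*
 * Guest-backed descriptors are translated through their GPA (unambiguous in
 * the guest's address space); host-only descriptors fall back to their HVA.
 */
static bool translate_one(const void *hva, size_t len, const hwaddr_t *gpa,
                          hwaddr_t *iova)
{
    hwaddr_t key = gpa ? *gpa : (hwaddr_t)(uintptr_t)hva;
    const struct map_entry *map = gpa ?
        find_map(gpa_tree, 1, key, len) :   /* GPA->IOVA lookup */
        find_map(hva_tree, 1, key, len);    /* HVA->IOVA lookup */

    if (!map) {
        return false;
    }
    /* Apply the offset within the matched mapping to its IOVA */
    *iova = map->iova + (key - map->translated_addr);
    return true;
}

int main(void)
{
    hwaddr_t iova;
    hwaddr_t gpa = 0x40001000;
    void *hva = (void *)(uintptr_t)0x7f0000000100ULL; /* same HVA both times */

    /* Guest-backed descriptor: looked up by GPA, not by its HVA */
    if (translate_one(hva, 0x100, &gpa, &iova)) {
        printf("guest-backed: iova=0x%" PRIx64 "\n", iova); /* 0x2000 */
    }
    /* Host-only descriptor (e.g. an SVQ-owned buffer): looked up by HVA */
    if (translate_one(hva, 0x100, NULL, &iova)) {
        printf("host-only:    iova=0x%" PRIx64 "\n", iova); /* 0x2100 */
    }
    return 0;
}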