@@ -1439,9 +1439,9 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
struct vring_virtqueue *vq = to_vvq(_vq);
struct vring_packed_desc *desc;
struct scatterlist *sg;
- unsigned int i, n, c, descs_used, err_idx;
+ unsigned int i, n, c, descs_used;
__le16 head_flags, flags;
- u16 head, id, prev, curr, avail_used_flags;
+ u16 head, id, prev, curr;
int err;
 
START_USE(vq);
@@ -1470,7 +1470,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
}
 
head = vq->packed.next_avail_idx;
- avail_used_flags = vq->packed.avail_used_flags;
 
WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
 
@@ -1488,15 +1487,15 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
id = vq->free_head;
BUG_ON(id == vq->packed.vring.num);
 
+ if (virtqueue_map_sgs(vq, sgs, total_sg, out_sgs, in_sgs)) {
+ END_USE(vq);
+ return -EIO;
+ }
+
curr = id;
c = 0;
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
- goto unmap_release;
-
flags = cpu_to_le16(vq->packed.avail_used_flags |
(++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
(n < out_sgs ? 0 : VRING_DESC_F_WRITE));
@@ -1505,12 +1504,12 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
else
desc[i].flags = flags;
 
- desc[i].addr = cpu_to_le64(addr);
+ desc[i].addr = cpu_to_le64(vring_sg_address(sg));
desc[i].len = cpu_to_le32(sg->length);
desc[i].id = cpu_to_le16(id);
 
if (unlikely(vq->use_dma_api)) {
- vq->packed.desc_extra[curr].addr = addr;
+ vq->packed.desc_extra[curr].addr = vring_sg_address(sg);
vq->packed.desc_extra[curr].len = sg->length;
vq->packed.desc_extra[curr].flags =
le16_to_cpu(flags);
@@ -1556,26 +1555,6 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
END_USE(vq);
 
return 0;
-
-unmap_release:
- err_idx = i;
- i = head;
- curr = vq->free_head;
-
- vq->packed.avail_used_flags = avail_used_flags;
-
- for (n = 0; n < total_sg; n++) {
- if (i == err_idx)
- break;
- vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
- curr = vq->packed.desc_extra[curr].next;
- i++;
- if (i >= vq->packed.vring.num)
- i = 0;
- }
-
- END_USE(vq);
- return -EIO;
}
 
static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)