From patchwork Wed Jul 16 08:47:36 2014
X-Patchwork-Submitter: Andrew Jones
X-Patchwork-Id: 4565521
From: Andrew Jones
To: kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: christoffer.dall@linaro.org, pbonzini@redhat.com
Subject: [PATCH v7 07/14] virtio: add minimal support for virtqueues
Date: Wed, 16 Jul 2014 10:47:36 +0200
Message-Id: <1405500463-20713-8-git-send-email-drjones@redhat.com>
In-Reply-To: <1405500463-20713-1-git-send-email-drjones@redhat.com>
References: <1405500463-20713-1-git-send-email-drjones@redhat.com>

Currently this only supports sending (outbufs) and doesn't have any bells
or whistles. Code adapted from the Linux kernel.

Signed-off-by: Andrew Jones
---
v7:
 - {alloc,alloc_aligned} -> {calloc,memalign}
 - changes now split between virtio.* and virtio-mmio.* files
---
 lib/virtio-mmio.c |  64 +++++++++++++++++++++++++++++
 lib/virtio-mmio.h |  18 +++++++++
 lib/virtio.c      | 117 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/virtio.h      |  73 ++++++++++++++++++++++++++++++++++
 4 files changed, 272 insertions(+)
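For orientation before the diff (an editorial sketch, not part of the patch):
a unit test would bind a device, ask the transport for its virtqueues through
the new find_vqs config op, post a buffer with virtqueue_add_outbuf(), then
notify the host with virtqueue_kick(). The device id and queue name below are
placeholders, and which queue index carries output is device specific.

	/* Illustrative sketch only -- not part of this patch. */
	#include "libcflat.h"
	#include "virtio.h"

	static char hello[] = "hello, virtqueue\n";

	static void send_hello(u32 devid)
	{
		struct virtio_device *vdev = virtio_bind(devid);
		struct virtqueue *vq;
		vq_callback_t *callbacks[] = { NULL };
		const char *names[] = { "out" };

		assert(vdev != NULL);

		/* Sets up queue index 0 only; a real device may use another index. */
		assert(vdev->config->find_vqs(vdev, 1, &vq, callbacks, names) == 0);

		/* Post the buffer on the avail ring, then notify the host. */
		assert(virtqueue_add_outbuf(vq, hello, sizeof(hello)) == 0);
		virtqueue_kick(vq);
	}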
diff --git a/lib/virtio-mmio.c b/lib/virtio-mmio.c
index 7331abf128cc5..3840838defa1c 100644
--- a/lib/virtio-mmio.c
+++ b/lib/virtio-mmio.c
@@ -1,4 +1,6 @@
 /*
+ * virtqueue support adapted from the Linux kernel.
+ *
  * Copyright (C) 2014, Red Hat Inc, Andrew Jones
  *
  * This work is licensed under the terms of the GNU LGPL, version 2.
@@ -6,6 +8,7 @@
 #include "libcflat.h"
 #include "devicetree.h"
 #include "alloc.h"
+#include "asm/page.h"
 #include "asm/io.h"
 #include "virtio.h"
 #include "virtio-mmio.h"
@@ -32,9 +35,68 @@ static void vm_set(struct virtio_device *vdev, unsigned offset,
 		writeb(p[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
 }
 
+static bool vm_notify(struct virtqueue *vq)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
+	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+	return true;
+}
+
+static struct virtqueue *vm_setup_vq(struct virtio_device *vdev,
+				     unsigned index,
+				     void (*callback)(struct virtqueue *vq),
+				     const char *name)
+{
+	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+	struct vring_virtqueue *vq;
+	void *queue;
+	unsigned num = VIRTIO_MMIO_QUEUE_NUM_MIN;
+
+	vq = calloc(1, sizeof(*vq));
+	queue = memalign(PAGE_SIZE, VIRTIO_MMIO_QUEUE_SIZE_MIN);
+	if (!vq || !queue)
+		return NULL;
+
+	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
+
+	assert(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX) >= num);
+
+	if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN) != 0) {
+		printf("%s: virtqueue %d already setup! base=%p\n",
+				__func__, index, vm_dev->base);
+		return NULL;
+	}
+
+	writel(num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
+	writel(VIRTIO_MMIO_VRING_ALIGN,
+			vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
+	writel(virt_to_pfn(queue), vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
+
+	vring_init_virtqueue(vq, index, num, VIRTIO_MMIO_VRING_ALIGN,
+			     vdev, queue, vm_notify, callback, name);
+
+	return &vq->vq;
+}
+
+static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
+		       const char *names[])
+{
+	unsigned i;
+
+	for (i = 0; i < nvqs; ++i) {
+		vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
+		if (vqs[i] == NULL)
+			return -1;
+	}
+
+	return 0;
+}
+
 static const struct virtio_config_ops vm_config_ops = {
 	.get = vm_get,
 	.set = vm_set,
+	.find_vqs = vm_find_vqs,
 };
 
 static void vm_device_init(struct virtio_mmio_device *vm_dev)
@@ -42,6 +104,8 @@ static void vm_device_init(struct virtio_mmio_device *vm_dev)
 	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
 	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 	vm_dev->vdev.config = &vm_config_ops;
+
+	writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 }
 
 /******************************************************
diff --git a/lib/virtio-mmio.h b/lib/virtio-mmio.h
index 7cd610428b486..8046a4747959a 100644
--- a/lib/virtio-mmio.h
+++ b/lib/virtio-mmio.h
@@ -8,6 +8,7 @@
  * This work is licensed under the terms of the GNU LGPL, version 2.
  */
 #include "libcflat.h"
+#include "asm/page.h"
 #include "virtio.h"
 
 #define VIRTIO_MMIO_MAGIC_VALUE		0x000
@@ -33,6 +34,23 @@
 #define VIRTIO_MMIO_INT_VRING		(1 << 0)
 #define VIRTIO_MMIO_INT_CONFIG		(1 << 1)
 
+#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE
+
+/*
+ * The minimum queue size is 2*VIRTIO_MMIO_VRING_ALIGN, which
+ * means the largest queue num for the minimum queue size is 128, i.e.
+ * 2*VIRTIO_MMIO_VRING_ALIGN = vring_size(128, VIRTIO_MMIO_VRING_ALIGN),
+ * where vring_size is
+ *
+ *	unsigned vring_size(unsigned num, unsigned long align)
+ *	{
+ *		return ((sizeof(struct vring_desc) * num + sizeof(u16) * (3 + num)
+ *			 + align - 1) & ~(align - 1))
+ *			+ sizeof(u16) * 3 + sizeof(struct vring_used_elem) * num;
+ *	}
+ */
+#define VIRTIO_MMIO_QUEUE_SIZE_MIN	(2*VIRTIO_MMIO_VRING_ALIGN)
+#define VIRTIO_MMIO_QUEUE_NUM_MIN	128
 
 #define to_virtio_mmio_device(vdev_ptr) \
 	container_of(vdev_ptr, struct virtio_mmio_device, vdev)
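A quick sanity check of the queue-size comment in lib/virtio-mmio.h above
(editorial, not part of the patch): plugging the struct sizes from lib/virtio.h
(16-byte vring_desc, 2-byte u16, 8-byte vring_used_elem) and a 4 KiB PAGE_SIZE
into the quoted vring_size() confirms that num = 128 is the largest queue size
whose vring fits in the two-page minimum allocation, which is what
VIRTIO_MMIO_QUEUE_NUM_MIN encodes.

	/*
	 * Standalone check of the sizes quoted above; the constants are
	 * assumptions taken from lib/virtio.h with 4 KiB pages.
	 */
	#include <assert.h>

	static unsigned vring_size(unsigned num, unsigned long align)
	{
		/*
		 * 16 = sizeof(struct vring_desc), 2 = sizeof(u16),
		 * 8 = sizeof(struct vring_used_elem)
		 */
		return ((16 * num + 2 * (3 + num) + align - 1) & ~(align - 1))
			+ 2 * 3 + 8 * num;
	}

	int main(void)
	{
		assert(vring_size(128, 4096) == 5126);	/* fits in 2*4096 = 8192 */
		assert(vring_size(256, 4096) == 10246);	/* does not fit */
		return 0;
	}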
diff --git a/lib/virtio.c b/lib/virtio.c
index b9c403cc71e05..cb496ff2eabd5 100644
--- a/lib/virtio.c
+++ b/lib/virtio.c
@@ -1,12 +1,129 @@
 /*
+ * virtqueue support adapted from the Linux kernel.
+ *
  * Copyright (C) 2014, Red Hat Inc, Andrew Jones
  *
  * This work is licensed under the terms of the GNU LGPL, version 2.
  */
 #include "libcflat.h"
+#include "asm/io.h"
 #include "virtio.h"
 #include "virtio-mmio.h"
 
+void vring_init(struct vring *vr, unsigned int num, void *p,
+		unsigned long align)
+{
+	vr->num = num;
+	vr->desc = p;
+	vr->avail = p + num*sizeof(struct vring_desc);
+	vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(u16)
+		+ align-1) & ~(align - 1));
+}
+
+void vring_init_virtqueue(struct vring_virtqueue *vq, unsigned index,
+			  unsigned num, unsigned vring_align,
+			  struct virtio_device *vdev, void *pages,
+			  bool (*notify)(struct virtqueue *),
+			  void (*callback)(struct virtqueue *),
+			  const char *name)
+{
+	unsigned i;
+
+	vring_init(&vq->vring, num, pages, vring_align);
+	vq->vq.callback = callback;
+	vq->vq.vdev = vdev;
+	vq->vq.name = name;
+	vq->vq.num_free = num;
+	vq->vq.index = index;
+	vq->notify = notify;
+	vq->last_used_idx = 0;
+	vq->num_added = 0;
+	vq->free_head = 0;
+
+	for (i = 0; i < num-1; i++) {
+		vq->vring.desc[i].next = i+1;
+		vq->data[i] = NULL;
+	}
+	vq->data[i] = NULL;
+}
+
+int virtqueue_add_outbuf(struct virtqueue *_vq, char *buf, size_t len)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	unsigned avail;
+	int head;
+
+	assert(buf != NULL);
+	assert(len != 0);
+
+	if (!vq->vq.num_free)
+		return -1;
+
+	--vq->vq.num_free;
+
+	head = vq->free_head;
+
+	vq->vring.desc[head].flags = 0;
+	vq->vring.desc[head].addr = virt_to_phys(buf);
+	vq->vring.desc[head].len = len;
+
+	vq->free_head = vq->vring.desc[head].next;
+
+	vq->data[head] = buf;
+
+	avail = (vq->vring.avail->idx & (vq->vring.num-1));
+	vq->vring.avail->ring[avail] = head;
+	wmb();
+	vq->vring.avail->idx++;
+	vq->num_added++;
+
+	return 0;
+}
+
+bool virtqueue_kick(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	mb();
+	return vq->notify(_vq);
+}
+
+void detach_buf(struct vring_virtqueue *vq, unsigned head)
+{
+	unsigned i = head;
+
+	vq->data[head] = NULL;
+
+	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
+		i = vq->vring.desc[i].next;
+		vq->vq.num_free++;
+	}
+
+	vq->vring.desc[i].next = vq->free_head;
+	vq->free_head = head;
+	vq->vq.num_free++;
+}
+
+void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 last_used;
+	unsigned i;
+	void *ret;
+
+	rmb();
+
+	last_used = (vq->last_used_idx & (vq->vring.num-1));
+	i = vq->vring.used->ring[last_used].id;
+	*len = vq->vring.used->ring[last_used].len;
+
+	ret = vq->data[i];
+	detach_buf(vq, i);
+
+	vq->last_used_idx++;
+
+	return ret;
+}
+
 struct virtio_device *virtio_bind(u32 devid)
 {
 	return virtio_mmio_bind(devid);
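As the commit message says, only the transmit direction is wired up;
VRING_DESC_F_WRITE is defined in lib/virtio.h but never set. For contrast,
a receive-side helper would look almost identical to virtqueue_add_outbuf()
above, the one structural difference being that flag, which marks the buffer
as device-writable. The sketch below is purely illustrative; a
virtqueue_add_inbuf() is not part of this series.

	/* Illustrative sketch only -- not part of this patch. */
	int virtqueue_add_inbuf(struct virtqueue *_vq, char *buf, size_t len)
	{
		struct vring_virtqueue *vq = to_vvq(_vq);
		unsigned avail;
		int head;

		assert(buf != NULL);
		assert(len != 0);

		if (!vq->vq.num_free)
			return -1;
		--vq->vq.num_free;

		head = vq->free_head;

		/* The only difference from the outbuf path: the device may write. */
		vq->vring.desc[head].flags = VRING_DESC_F_WRITE;
		vq->vring.desc[head].addr = virt_to_phys(buf);
		vq->vring.desc[head].len = len;

		vq->free_head = vq->vring.desc[head].next;
		vq->data[head] = buf;

		avail = vq->vring.avail->idx & (vq->vring.num - 1);
		vq->vring.avail->ring[avail] = head;
		wmb();
		vq->vring.avail->idx++;
		vq->num_added++;

		return 0;
	}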
diff --git a/lib/virtio.h b/lib/virtio.h
index 5941fa40a8655..37ce028b2c2bb 100644
--- a/lib/virtio.h
+++ b/lib/virtio.h
@@ -20,11 +20,25 @@ struct virtio_device {
 	const struct virtio_config_ops *config;
 };
 
+struct virtqueue {
+	void (*callback)(struct virtqueue *vq);
+	const char *name;
+	struct virtio_device *vdev;
+	unsigned int index;
+	unsigned int num_free;
+	void *priv;
+};
+
+typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
 	void (*get)(struct virtio_device *vdev, unsigned offset,
 		    void *buf, unsigned len);
 	void (*set)(struct virtio_device *vdev, unsigned offset,
 		    const void *buf, unsigned len);
+	int (*find_vqs)(struct virtio_device *vdev, unsigned nvqs,
+			struct virtqueue *vqs[],
+			vq_callback_t *callbacks[],
+			const char *names[]);
 };
 
 static inline u8
@@ -69,6 +83,65 @@ virtio_config_writel(struct virtio_device *vdev, unsigned offset, u32 val)
 	vdev->config->set(vdev, offset, &val, 4);
 }
 
+#define VRING_DESC_F_NEXT	1
+#define VRING_DESC_F_WRITE	2
+
+struct vring_desc {
+	u64 addr;
+	u32 len;
+	u16 flags;
+	u16 next;
+};
+
+struct vring_avail {
+	u16 flags;
+	u16 idx;
+	u16 ring[];
+};
+
+struct vring_used_elem {
+	u32 id;
+	u32 len;
+};
+
+struct vring_used {
+	u16 flags;
+	u16 idx;
+	struct vring_used_elem ring[];
+};
+
+struct vring {
+	unsigned int num;
+	struct vring_desc *desc;
+	struct vring_avail *avail;
+	struct vring_used *used;
+};
+
+struct vring_virtqueue {
+	struct virtqueue vq;
+	struct vring vring;
+	unsigned int free_head;
+	unsigned int num_added;
+	u16 last_used_idx;
+	bool (*notify)(struct virtqueue *vq);
+	void *data[];
+};
+
+#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+
+extern void vring_init(struct vring *vr, unsigned int num, void *p,
+		       unsigned long align);
+extern void vring_init_virtqueue(struct vring_virtqueue *vq, unsigned index,
+				 unsigned num, unsigned vring_align,
+				 struct virtio_device *vdev, void *pages,
+				 bool (*notify)(struct virtqueue *),
+				 void (*callback)(struct virtqueue *),
+				 const char *name);
+extern int virtqueue_add_outbuf(struct virtqueue *vq, char *buf, size_t len);
+extern bool virtqueue_kick(struct virtqueue *vq);
+extern void detach_buf(struct vring_virtqueue *vq, unsigned head);
+extern void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len);
+
 extern struct virtio_device *virtio_bind(u32 devid);
 
 #endif /* _VIRTIO_H_ */
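One caller-facing detail worth noting (editorial note, not part of the patch):
unlike the Linux implementation, virtqueue_get_buf() here never compares
last_used_idx against vring.used->idx, so it must only be called once per
buffer the caller already knows the device has consumed. Under that
assumption, a polling reclaim loop in a test might look like the hypothetical
helper below.

	/*
	 * Illustrative sketch only -- not part of this patch. Assumes "count"
	 * buffers were previously added and kicked on this queue.
	 */
	static void reclaim_bufs(struct vring_virtqueue *vq, unsigned count)
	{
		unsigned int len;

		while (count--) {
			/* Wait for the device to publish another used element. */
			while (vq->vring.used->idx == vq->last_used_idx)
				rmb();
			assert(virtqueue_get_buf(&vq->vq, &len) != NULL);
		}
	}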