@@ -163,6 +163,62 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
.pre_save = vhost_scsi_pre_save,
};
+static int vhost_scsi_set_workers(VHostSCSICommon *vsc, int vq_workers)
+{
+    struct vhost_dev *dev = &vsc->dev;
+    int workers_per_queue = 1, io_queues;
+    struct vhost_vring_worker w;
+    int i, ret, cnt = 0;
+
+    if (vq_workers < VHOST_VRING_NEW_WORKER) {
+        return -EINVAL;
+    }
+
+    if (vq_workers == 0 ||
+        dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
+        /* Use the single default worker */
+        return 0;
+    }
+
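+    /*
+     * workers_per_queue is the number of IO vqs to assign to each extra
+     * worker when fewer workers than IO vqs are requested.
+     */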
+    io_queues = dev->nvqs - VHOST_SCSI_VQ_NUM_FIXED;
+    if (vq_workers > 0 && io_queues > vq_workers) {
+        workers_per_queue = io_queues / vq_workers;
+    }
+
+    w.pid = VHOST_VRING_NEW_WORKER;
+    /*
+     * ctl/evt and the first IO vq stay on the default worker created at
+     * VHOST_SET_OWNER, since ctl/evt will rarely send cmds while IO is
+     * running. The remaining IO vqs are spread across the extra workers.
+     */
+    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
+        w.index = i;
+
+        switch (vq_workers) {
+        case VHOST_VRING_NEW_WORKER:
+            w.pid = VHOST_VRING_NEW_WORKER;
+            break;
+        default:
+            /*
+             * TODO: we should get the ISR to vq mapping and bind workers
+             * so vqs sharing an ISR share a worker.
+             */
+            if (cnt == workers_per_queue) {
+                w.pid = VHOST_VRING_NEW_WORKER;
+                cnt = 0;
+            } else {
+                cnt++;
+            }
+        }
+
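+        /*
+         * When w.pid is VHOST_VRING_NEW_WORKER, the backend returns the
+         * created worker's pid in w.pid, so the following vqs are bound to
+         * that worker until w.pid is reset above. -ENOTTY means the backend
+         * has no worker support, so fall back to the single default worker.
+         */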
+        ret = dev->vhost_ops->vhost_set_vring_worker(dev, &w);
+        if (ret == -ENOTTY) {
+            ret = 0;
+            break;
+        } else if (ret) {
+            break;
+        }
+    }
+
+    return ret;
+}
+
static void vhost_scsi_realize(DeviceState *dev, Error **errp)
{
VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
@@ -223,6 +279,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
goto free_vqs;
}
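+    /* Optionally spread the IO vqs across additional vhost workers. */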
+    ret = vhost_scsi_set_workers(vsc, vs->conf.virtqueue_workers);
+    if (ret < 0) {
+        error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
+                   strerror(-ret));
+        goto free_vqs;
+    }
+
/* At present, channel and lun both are 0 for bootable vhost-scsi disk */
vsc->channel = 0;
vsc->lun = 0;
@@ -281,6 +344,8 @@ static Property vhost_scsi_properties[] = {
VIRTIO_SCSI_F_T10_PI,
false),
DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
+    DEFINE_PROP_INT32("virtqueue_workers", VirtIOSCSICommon,
+                      conf.virtqueue_workers, 0),
DEFINE_PROP_END_OF_LIST(),
};
@@ -152,6 +152,13 @@ static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev,
return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s);
}
+static int vhost_kernel_set_vring_worker(struct vhost_dev *dev,
+                                         struct vhost_vring_worker *worker)
+{
+    return vhost_kernel_call(dev, VHOST_SET_VRING_WORKER, worker);
+}
+
static int vhost_kernel_set_features(struct vhost_dev *dev,
uint64_t features)
{
@@ -313,6 +320,7 @@ const VhostOps kernel_ops = {
.vhost_set_vring_call = vhost_kernel_set_vring_call,
.vhost_set_vring_busyloop_timeout =
vhost_kernel_set_vring_busyloop_timeout,
+ .vhost_set_vring_worker = vhost_kernel_set_vring_worker,
.vhost_set_features = vhost_kernel_set_features,
.vhost_get_features = vhost_kernel_get_features,
.vhost_set_backend_cap = vhost_kernel_set_backend_cap,
@@ -33,6 +33,7 @@ struct vhost_memory;
struct vhost_vring_file;
struct vhost_vring_state;
struct vhost_vring_addr;
+struct vhost_vring_worker;
struct vhost_scsi_target;
struct vhost_iotlb_msg;
struct vhost_virtqueue;
@@ -71,6 +72,8 @@ typedef int (*vhost_set_vring_call_op)(struct vhost_dev *dev,
struct vhost_vring_file *file);
typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev,
struct vhost_vring_state *r);
+typedef int (*vhost_set_vring_worker_op)(struct vhost_dev *dev,
+                                         struct vhost_vring_worker *worker);
typedef int (*vhost_set_features_op)(struct vhost_dev *dev,
uint64_t features);
typedef int (*vhost_get_features_op)(struct vhost_dev *dev,
@@ -146,6 +149,7 @@ typedef struct VhostOps {
vhost_set_vring_kick_op vhost_set_vring_kick;
vhost_set_vring_call_op vhost_set_vring_call;
vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout;
+ vhost_set_vring_worker_op vhost_set_vring_worker;
vhost_set_features_op vhost_set_features;
vhost_get_features_op vhost_get_features;
vhost_set_backend_cap_op vhost_set_backend_cap;
@@ -58,6 +58,7 @@ struct VirtIOSCSIConf {
#ifdef CONFIG_VHOST_SCSI
char *vhostfd;
char *wwpn;
+ int virtqueue_workers;
#endif
CharBackend chardev;
uint32_t boot_tpgt;
@@ -27,6 +27,18 @@ struct vhost_vring_file {
};
+#define VHOST_VRING_NEW_WORKER (-1)
+
+struct vhost_vring_worker {
+	unsigned int index;
+	/*
+	 * The pid of the vhost worker that the vq will be bound to. If pid
+	 * is VHOST_VRING_NEW_WORKER, a new worker will be created and its
+	 * pid will be returned in pid.
+	 */
+	pid_t pid;
+};
+
struct vhost_vring_addr {
unsigned int index;
/* Option flags. */
@@ -70,6 +70,17 @@
#define VHOST_VRING_BIG_ENDIAN 1
#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
+/* By default, a device gets one vhost_worker, created during VHOST_SET_OWNER,
+ * that its virtqueues share. This ioctl allows userspace to create a new
+ * vhost_worker and bind a virtqueue to it, or to bind a virtqueue to an
+ * existing worker.
+ *
+ * If pid > 0 and it matches an existing vhost_worker thread, the virtqueue
+ * will be bound to that worker. If pid is VHOST_VRING_NEW_WORKER, a new
+ * worker will be created and bound to the virtqueue, and its pid will be
+ * returned in pid.
+ *
+ * This must be called after VHOST_SET_OWNER and before the virtqueue is
+ * active.
+ */
+#define VHOST_SET_VRING_WORKER _IOWR(VHOST_VIRTIO, 0x15, struct vhost_vring_worker)
/* The following ioctls use eventfd file descriptors to signal and poll
* for events. */