[v4,5/7] xen/9pfs: send requests to the backend

Message ID 1489605821-29649-5-git-send-email-sstabellini@kernel.org (mailing list archive)
State New, archived

Commit Message

Stefano Stabellini March 15, 2017, 7:23 p.m. UTC
Implement the create and close functions of struct p9_trans_module by
looking up the given address among the available Xen 9pfs
frontend-backend connections. We don't expect many frontend-backend
connections, so walking a list is OK (see the sketch below).
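
For reference, a minimal sketch of the per-connection state the list
walk relies on; the real struct xen_9pfs_front_priv is defined earlier
in this series, so the layout here is illustrative and only shows the
members this patch uses:

    struct xen_9pfs_front_priv {
            struct list_head list;      /* entry in xen_9pfs_devs */
            char *tag;                  /* matched against addr in create */
            struct p9_client *client;   /* set in create, cleared in close */
            int num_rings;              /* data rings per connection */
            struct xen_9pfs_dataring *rings;
    };

Lookups take xen_9pfs_lock for reading; device probe/remove elsewhere
in the series presumably takes it for writing.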

Send requests to the backend by copying each request to one of the
available rings (each frontend-backend connection comes with multiple
rings). Handle the ring indices and notifications according to the Xen
9pfs transport specification. If there are not enough free bytes on the
ring for the request, wait on the wait queue: the backend will send a
notification after consuming more requests.
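
To illustrate, here is an approximate sketch of the flex-ring helpers
used below. The real xen_9pfs_mask()/xen_9pfs_queued()/
xen_9pfs_write_packet() come from the shared ring header, so treat
these bodies as a description of their intended semantics, not as the
actual implementation:

    /* Illustrative only: ring indices are free-running and
     * XEN_9PFS_RING_SIZE is a power of two, so unsigned subtraction
     * and masking handle wrap-around naturally.
     */
    static inline RING_IDX xen_9pfs_mask(RING_IDX idx, RING_IDX ring_size)
    {
            return idx & (ring_size - 1);
    }

    static inline RING_IDX xen_9pfs_queued(RING_IDX prod, RING_IDX cons,
                                           RING_IDX ring_size)
    {
            return prod - cons;    /* bytes written but not yet consumed */
    }

    /* Copy size bytes at *masked_prod, wrapping at the end of the ring.
     * The caller has already verified that size bytes are free;
     * masked_cons is unused in this simplified copy.
     */
    static inline void xen_9pfs_write_packet(unsigned char *buf,
                                             RING_IDX *masked_prod,
                                             RING_IDX masked_cons,
                                             RING_IDX ring_size,
                                             const void *opaque, size_t size)
    {
            const unsigned char *src = opaque;
            size_t len = min_t(size_t, size, ring_size - *masked_prod);

            memcpy(buf + *masked_prod, src, len);
            memcpy(buf, src + len, size - len);
            *masked_prod = xen_9pfs_mask(*masked_prod + size, ring_size);
    }

A request of size bytes fits when XEN_9PFS_RING_SIZE -
xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size, which is
exactly the condition p9_xen_write_todo() below checks.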

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: groug@kaod.org
CC: jgross@suse.com
CC: Eric Van Hensbergen <ericvh@gmail.com>
CC: Ron Minnich <rminnich@sandia.gov>
CC: Latchesar Ionkov <lucho@ionkov.net>
CC: v9fs-developer@lists.sourceforge.net
---
 net/9p/trans_xen.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 87 insertions(+), 2 deletions(-)

Patch

diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index ada2b0c..2b18da0 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -72,22 +72,107 @@  struct xen_9pfs_front_priv {
 static LIST_HEAD(xen_9pfs_devs);
 static DEFINE_RWLOCK(xen_9pfs_lock);
 
+/* We don't currently allow canceling of requests */
 static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
 {
-	return 0;
+	return 1;
 }
 
 static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
 {
-	return 0;
+	struct xen_9pfs_front_priv *priv;
+
+	read_lock(&xen_9pfs_lock);
+	list_for_each_entry(priv, &xen_9pfs_devs, list) {
+		if (!strcmp(priv->tag, addr)) {
+			priv->client = client;
+			read_unlock(&xen_9pfs_lock);
+			return 0;
+		}
+	}
+	read_unlock(&xen_9pfs_lock);
+	return -EINVAL;
 }
 
 static void p9_xen_close(struct p9_client *client)
 {
+	struct xen_9pfs_front_priv *priv;
+
+	read_lock(&xen_9pfs_lock);
+	list_for_each_entry(priv, &xen_9pfs_devs, list) {
+		if (priv->client == client) {
+			priv->client = NULL;
+			read_unlock(&xen_9pfs_lock);
+			return;
+		}
+	}
+	read_unlock(&xen_9pfs_lock);
+}
+
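+/* Is there enough room on the out ring for a request of 'size' bytes? */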
+static int p9_xen_write_todo(struct xen_9pfs_dataring *ring, RING_IDX size)
+{
+	RING_IDX cons, prod;
+
+	cons = ring->intf->out_cons;
+	prod = ring->intf->out_prod;
+	virt_mb();
+
+	return XEN_9PFS_RING_SIZE -
+		xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) >= size;
 }
 
 static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
 {
+	struct xen_9pfs_front_priv *priv = NULL;
+	RING_IDX cons, prod, masked_cons, masked_prod;
+	unsigned long flags;
+	uint32_t size = p9_req->tc->size;
+	struct xen_9pfs_dataring *ring;
+	int num;
+
+	read_lock(&xen_9pfs_lock);
+	list_for_each_entry(priv, &xen_9pfs_devs, list) {
+		if (priv->client == client)
+			break;
+	}
+	read_unlock(&xen_9pfs_lock);
+	if (priv == NULL || priv->client != client)
+		return -EINVAL;
+
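+	/* Spread requests over the connection's rings, keyed by tag */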
+	num = p9_req->tc->tag % priv->num_rings;
+	ring = &priv->rings[num];
+
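+	/* Wait for enough free bytes on the ring, then recheck under the lock */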
+again:
+	while (wait_event_interruptible(ring->wq,
+					p9_xen_write_todo(ring, size)) != 0)
+		;
+
+	spin_lock_irqsave(&ring->lock, flags);
+	cons = ring->intf->out_cons;
+	prod = ring->intf->out_prod;
+	virt_mb();
+
+	if (XEN_9PFS_RING_SIZE -
+	    xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < size) {
+		spin_unlock_irqrestore(&ring->lock, flags);
+		goto again;
+	}
+
+	masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+	masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+	xen_9pfs_write_packet(ring->data.out,
+				&masked_prod, masked_cons,
+				XEN_9PFS_RING_SIZE, p9_req->tc->sdata, size);
+
+	p9_req->status = REQ_STATUS_SENT;
+	virt_wmb();			/* write ring before updating pointer */
+	prod += size;
+	ring->intf->out_prod = prod;
+	spin_unlock_irqrestore(&ring->lock, flags);
+	notify_remote_via_irq(ring->irq);
+
 	return 0;
 }
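
With a connection established by the earlier patches in the series, the
transport should be selectable at mount time. Assuming the module
registers its struct p9_trans_module under the name "xen" and <tag> is
the tag the backend advertises via xenstore, usage would look roughly
like:

    mount -t 9p -o trans=xen <tag> /mnt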