
[v3,6/7] xen/9pfs: receive responses

Message ID 1489449019-13343-6-git-send-email-sstabellini@kernel.org (mailing list archive)
State New, archived

Commit Message

Stefano Stabellini March 13, 2017, 11:50 p.m. UTC
Upon receiving a notification from the backend, schedule the
p9_xen_response work_struct. p9_xen_response checks whether any responses
are available; if so, it reads them one by one, calling p9_client_cb to
send them up to the 9p layer (p9_client_cb completes the request). Handle
the ring following the Xen 9pfs specification.

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: jgross@suse.com
CC: Eric Van Hensbergen <ericvh@gmail.com>
CC: Ron Minnich <rminnich@sandia.gov>
CC: Latchesar Ionkov <lucho@ionkov.net>
CC: v9fs-developer@lists.sourceforge.net
---
 net/9p/trans_xen.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)
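
[Editor's note] The ring accounting helpers used by this patch
(xen_9pfs_queued, xen_9pfs_mask, xen_9pfs_read_packet) are not part of
this hunk; they are expected to be generated by the Xen "flex ring"
macros introduced earlier in the series. The sketch below is illustrative
only: it assumes a power-of-two XEN_9PFS_RING_SIZE, free-running
producer/consumer indices, and signatures matching the call sites in this
patch, not the authoritative definitions.

/* Sketch of the assumed helper semantics; RING_IDX comes from
 * include/xen/interface/io/ring.h. */
static inline RING_IDX xen_9pfs_queued(RING_IDX prod, RING_IDX cons,
					RING_IDX ring_size)
{
	/* Bytes available to the consumer between cons and prod. */
	return prod - cons;
}

static inline RING_IDX xen_9pfs_mask(RING_IDX idx, RING_IDX ring_size)
{
	/* Map a free-running index onto the ring buffer. */
	return idx & (ring_size - 1);
}

static inline void xen_9pfs_read_packet(const unsigned char *buf,
					RING_IDX masked_prod,
					RING_IDX *masked_cons,
					RING_IDX ring_size,
					void *opaque, size_t size)
{
	/* Copy "size" bytes out of the ring, handling wrap-around at the
	 * end of the buffer, then advance the masked consumer index. */
	if (*masked_cons < masked_prod || size <= ring_size - *masked_cons) {
		memcpy(opaque, buf + *masked_cons, size);
	} else {
		RING_IDX split = ring_size - *masked_cons;

		memcpy(opaque, buf + *masked_cons, split);
		memcpy((unsigned char *)opaque + split, buf, size - split);
	}
	*masked_cons = xen_9pfs_mask(*masked_cons + size, ring_size);
}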

Comments

Jürgen Groß March 14, 2017, 7:04 a.m. UTC | #1
On 14/03/17 00:50, Stefano Stabellini wrote:
> Upon receiving a notification from the backend, schedule the
> p9_xen_response work_struct. p9_xen_response checks whether any responses
> are available; if so, it reads them one by one, calling p9_client_cb to
> send them up to the 9p layer (p9_client_cb completes the request). Handle
> the ring following the Xen 9pfs specification.
> 
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> CC: jgross@suse.com
> CC: Eric Van Hensbergen <ericvh@gmail.com>
> CC: Ron Minnich <rminnich@sandia.gov>
> CC: Latchesar Ionkov <lucho@ionkov.net>
> CC: v9fs-developer@lists.sourceforge.net
> ---
>  net/9p/trans_xen.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 55 insertions(+)
> 
> diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
> index b40bbcb..1a7eb52 100644
> --- a/net/9p/trans_xen.c
> +++ b/net/9p/trans_xen.c
> @@ -168,6 +168,61 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
>  
>  static void p9_xen_response(struct work_struct *work)
>  {
> +	struct xen_9pfs_front_priv *priv;
> +	struct xen_9pfs_dataring *ring;
> +	RING_IDX cons, prod, masked_cons, masked_prod;
> +	struct xen_9pfs_header h;
> +	struct p9_req_t *req;
> +	int status;
> +
> +	ring = container_of(work, struct xen_9pfs_dataring, work);
> +	priv = ring->priv;
> +
> +	while (1) {
> +		cons = ring->intf->in_cons;
> +		prod = ring->intf->in_prod;
> +		virt_rmb();
> +
> +		if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < sizeof(h)) {
> +			notify_remote_via_irq(ring->irq);
> +			return;
> +		}
> +
> +		masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> +		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> +
> +		/* First, read just the header */
> +		xen_9pfs_read_packet(ring->data.in,
> +				masked_prod, &masked_cons,
> +				XEN_9PFS_RING_SIZE, &h, sizeof(h));
> +
> +		req = p9_tag_lookup(priv->client, h.tag);
> +		if (!req || req->status != REQ_STATUS_SENT) {
> +			dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
> +			cons += h.size;
> +			virt_mb();
> +			ring->intf->in_cons = cons;
> +			continue;
> +		}
> +
> +		memcpy(req->rc, &h, sizeof(h));
> +		req->rc->offset = 0;
> +
> +		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> +		/* Then, read the whole packet (including the header) */
> +		xen_9pfs_read_packet(ring->data.in,
> +				masked_prod, &masked_cons,
> +				XEN_9PFS_RING_SIZE, req->rc->sdata, h.size);

Please align the parameters to the same column.

> +
> +		virt_mb();
> +		cons += h.size;
> +		ring->intf->in_cons = cons;
> +
> +		status = (req->status != REQ_STATUS_ERROR) ?
> +			REQ_STATUS_RCVD : REQ_STATUS_ERROR;
> +
> +		p9_client_cb(priv->client, req, status);
> +	}
>  }
>  
>  static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
> 


Juergen
Stefano Stabellini March 14, 2017, 8:32 p.m. UTC | #2
On Tue, 14 Mar 2017, Juergen Gross wrote:
> On 14/03/17 00:50, Stefano Stabellini wrote:
> > Upon receiving a notification from the backend, schedule the
> > p9_xen_response work_struct. p9_xen_response checks whether any responses
> > are available; if so, it reads them one by one, calling p9_client_cb to
> > send them up to the 9p layer (p9_client_cb completes the request). Handle
> > the ring following the Xen 9pfs specification.
> > 
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> > CC: jgross@suse.com
> > CC: Eric Van Hensbergen <ericvh@gmail.com>
> > CC: Ron Minnich <rminnich@sandia.gov>
> > CC: Latchesar Ionkov <lucho@ionkov.net>
> > CC: v9fs-developer@lists.sourceforge.net
> > ---
> >  net/9p/trans_xen.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 55 insertions(+)
> > 
> > diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
> > index b40bbcb..1a7eb52 100644
> > --- a/net/9p/trans_xen.c
> > +++ b/net/9p/trans_xen.c
> > @@ -168,6 +168,61 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
> >  
> >  static void p9_xen_response(struct work_struct *work)
> >  {
> > +	struct xen_9pfs_front_priv *priv;
> > +	struct xen_9pfs_dataring *ring;
> > +	RING_IDX cons, prod, masked_cons, masked_prod;
> > +	struct xen_9pfs_header h;
> > +	struct p9_req_t *req;
> > +	int status;
> > +
> > +	ring = container_of(work, struct xen_9pfs_dataring, work);
> > +	priv = ring->priv;
> > +
> > +	while (1) {
> > +		cons = ring->intf->in_cons;
> > +		prod = ring->intf->in_prod;
> > +		virt_rmb();
> > +
> > +		if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < sizeof(h)) {
> > +			notify_remote_via_irq(ring->irq);
> > +			return;
> > +		}
> > +
> > +		masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
> > +		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> > +
> > +		/* First, read just the header */
> > +		xen_9pfs_read_packet(ring->data.in,
> > +				masked_prod, &masked_cons,
> > +				XEN_9PFS_RING_SIZE, &h, sizeof(h));
> > +
> > +		req = p9_tag_lookup(priv->client, h.tag);
> > +		if (!req || req->status != REQ_STATUS_SENT) {
> > +			dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
> > +			cons += h.size;
> > +			virt_mb();
> > +			ring->intf->in_cons = cons;
> > +			continue;
> > +		}
> > +
> > +		memcpy(req->rc, &h, sizeof(h));
> > +		req->rc->offset = 0;
> > +
> > +		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
> > +		/* Then, read the whole packet (including the header) */
> > +		xen_9pfs_read_packet(ring->data.in,
> > +				masked_prod, &masked_cons,
> > +				XEN_9PFS_RING_SIZE, req->rc->sdata, h.size);
> 
> Please align the parameters to the same column.

I am one of those heretics that use tabstop=4 :-)
I'll fix this though.


> > +
> > +		virt_mb();
> > +		cons += h.size;
> > +		ring->intf->in_cons = cons;
> > +
> > +		status = (req->status != REQ_STATUS_ERROR) ?
> > +			REQ_STATUS_RCVD : REQ_STATUS_ERROR;
> > +
> > +		p9_client_cb(priv->client, req, status);
> > +	}
> >  }
> >  
> >  static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
> > 
> 
> 
> Juergen
>

Patch

diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index b40bbcb..1a7eb52 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -168,6 +168,61 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
 
 static void p9_xen_response(struct work_struct *work)
 {
+	struct xen_9pfs_front_priv *priv;
+	struct xen_9pfs_dataring *ring;
+	RING_IDX cons, prod, masked_cons, masked_prod;
+	struct xen_9pfs_header h;
+	struct p9_req_t *req;
+	int status;
+
+	ring = container_of(work, struct xen_9pfs_dataring, work);
+	priv = ring->priv;
+
+	while (1) {
+		cons = ring->intf->in_cons;
+		prod = ring->intf->in_prod;
+		virt_rmb();
+
+		if (xen_9pfs_queued(prod, cons, XEN_9PFS_RING_SIZE) < sizeof(h)) {
+			notify_remote_via_irq(ring->irq);
+			return;
+		}
+
+		masked_prod = xen_9pfs_mask(prod, XEN_9PFS_RING_SIZE);
+		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+
+		/* First, read just the header */
+		xen_9pfs_read_packet(ring->data.in,
+				masked_prod, &masked_cons,
+				XEN_9PFS_RING_SIZE, &h, sizeof(h));
+
+		req = p9_tag_lookup(priv->client, h.tag);
+		if (!req || req->status != REQ_STATUS_SENT) {
+			dev_warn(&priv->dev->dev, "Wrong req tag=%x\n", h.tag);
+			cons += h.size;
+			virt_mb();
+			ring->intf->in_cons = cons;
+			continue;
+		}
+
+		memcpy(req->rc, &h, sizeof(h));
+		req->rc->offset = 0;
+
+		masked_cons = xen_9pfs_mask(cons, XEN_9PFS_RING_SIZE);
+		/* Then, read the whole packet (including the header) */
+		xen_9pfs_read_packet(ring->data.in,
+				masked_prod, &masked_cons,
+				XEN_9PFS_RING_SIZE, req->rc->sdata, h.size);
+
+		virt_mb();
+		cons += h.size;
+		ring->intf->in_cons = cons;
+
+		status = (req->status != REQ_STATUS_ERROR) ?
+			REQ_STATUS_RCVD : REQ_STATUS_ERROR;
+
+		p9_client_cb(priv->client, req, status);
+	}
 }
 
 static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
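
[Editor's note] The interrupt handler whose signature closes this hunk is
what ends up scheduling p9_xen_response. It is introduced earlier in the
series; a minimal sketch of how it might look, assuming each ring carries
a work_struct (ring->work) and a wait queue (ring->wq) initialized at
connect time:

static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
{
	struct xen_9pfs_dataring *ring = r;

	if (!ring || !ring->priv->client)
		return IRQ_NONE;

	/* Wake any writer waiting for ring space and defer response
	 * processing to the workqueue, where p9_xen_response runs. */
	wake_up_interruptible(&ring->wq);
	schedule_work(&ring->work);

	return IRQ_HANDLED;
}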