[v5,06/18] xen/pvcalls: handle commands from the frontend

Message ID 1498158867-25426-6-git-send-email-sstabellini@kernel.org (mailing list archive)
State New, archived

Commit Message

Stefano Stabellini June 22, 2017, 7:14 p.m. UTC
When the other end notifies us that there are commands to be read
(pvcalls_back_event), wake up the backend thread to parse the commands.

The command ring works like most other Xen rings, so use the usual
ring macros to read and write to it. The functions implementing the
commands are empty stubs for now.

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
---
 drivers/xen/pvcalls-back.c | 119 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 119 insertions(+)

Comments

Juergen Gross July 3, 2017, 11:23 a.m. UTC | #1
On 22/06/17 21:14, Stefano Stabellini wrote:
> When the other end notifies us that there are commands to be read
> (pvcalls_back_event), wake up the backend thread to parse the commands.
> 
> The command ring works like most other Xen rings, so use the usual
> ring macros to read and write to it. The functions implementing the
> commands are empty stubs for now.
> 
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: boris.ostrovsky@oracle.com
> CC: jgross@suse.com
> ---
>  drivers/xen/pvcalls-back.c | 119 +++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 119 insertions(+)
> 
> diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
> index e4c2e46..437c2ad 100644
> --- a/drivers/xen/pvcalls-back.c
> +++ b/drivers/xen/pvcalls-back.c
> @@ -51,12 +51,131 @@ struct pvcalls_fedata {
>  	struct work_struct register_work;
>  };
>  
> +static int pvcalls_back_socket(struct xenbus_device *dev,
> +		struct xen_pvcalls_request *req)
> +{
> +	return 0;
> +}
> +
> +static int pvcalls_back_connect(struct xenbus_device *dev,
> +				struct xen_pvcalls_request *req)
> +{
> +	return 0;
> +}
> +
> +static int pvcalls_back_release(struct xenbus_device *dev,
> +				struct xen_pvcalls_request *req)
> +{
> +	return 0;
> +}
> +
> +static int pvcalls_back_bind(struct xenbus_device *dev,
> +			     struct xen_pvcalls_request *req)
> +{
> +	return 0;
> +}
> +
> +static int pvcalls_back_listen(struct xenbus_device *dev,
> +			       struct xen_pvcalls_request *req)
> +{
> +	return 0;
> +}
> +
> +static int pvcalls_back_accept(struct xenbus_device *dev,
> +			       struct xen_pvcalls_request *req)
> +{
> +	return 0;
> +}
> +
> +static int pvcalls_back_poll(struct xenbus_device *dev,
> +			     struct xen_pvcalls_request *req)
> +{
> +	return 0;
> +}
> +
> +static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
> +				   struct xen_pvcalls_request *req)
> +{
> +	int ret = 0;
> +
> +	switch (req->cmd) {
> +	case PVCALLS_SOCKET:
> +		ret = pvcalls_back_socket(dev, req);
> +		break;
> +	case PVCALLS_CONNECT:
> +		ret = pvcalls_back_connect(dev, req);
> +		break;
> +	case PVCALLS_RELEASE:
> +		ret = pvcalls_back_release(dev, req);
> +		break;
> +	case PVCALLS_BIND:
> +		ret = pvcalls_back_bind(dev, req);
> +		break;
> +	case PVCALLS_LISTEN:
> +		ret = pvcalls_back_listen(dev, req);
> +		break;
> +	case PVCALLS_ACCEPT:
> +		ret = pvcalls_back_accept(dev, req);
> +		break;
> +	case PVCALLS_POLL:
> +		ret = pvcalls_back_poll(dev, req);
> +		break;
> +	default:
> +		ret = -ENOTSUPP;
> +		break;
> +	}
> +	return ret;
> +}
> +
>  static void pvcalls_back_work(struct work_struct *work)
>  {
> +	struct pvcalls_fedata *fedata = container_of(work,
> +		struct pvcalls_fedata, register_work);
> +	int notify, notify_all = 0, more = 1;
> +	struct xen_pvcalls_request req;
> +	struct xenbus_device *dev = fedata->dev;
> +
> +	while (more) {
> +		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
> +			RING_COPY_REQUEST(&fedata->ring,
> +					  fedata->ring.req_cons++,
> +					  &req);
> +
> +			if (!pvcalls_back_handle_cmd(dev, &req)) {

Hmm, no response in case of an unsupported command?

> +				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
> +					&fedata->ring, notify);
> +				notify_all += notify;
> +			}
> +		}
> +
> +		if (notify_all)
> +			notify_remote_via_irq(fedata->irq);

Want to reset notify_all in the above if?
It could have been an "accept" which didn't queue a response.

> +
> +		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
> +	}
>  }
>  
>  static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
>  {
> +	struct xenbus_device *dev = dev_id;
> +	struct pvcalls_fedata *fedata = NULL;
> +
> +	if (dev == NULL)
> +		return IRQ_HANDLED;
> +
> +	fedata = dev_get_drvdata(&dev->dev);
> +	if (fedata == NULL)
> +		return IRQ_HANDLED;
> +
> +	/*
> +	 * TODO: a small theoretical race exists if we try to queue work
> +	 * after pvcalls_back_work checked for final requests and before
> +	 * it returns. The queuing will fail, and pvcalls_back_work
> +	 * won't do the work because it is about to return. In that
> +	 * case, we lose the notification.
> +	 */
> +	queue_work(fedata->wq, &fedata->register_work);

I know you like workqueues more than IRQ threads. But perhaps the above
TODO could be handled more easily via an IRQ thread?

I think you should either solve the above race, add a comment explaining
why it is not problematic, or show us why an IRQ thread doesn't solve
the problem.


Juergen
Stefano Stabellini July 3, 2017, 8:57 p.m. UTC | #2
On Mon, 3 Jul 2017, Juergen Gross wrote:
> On 22/06/17 21:14, Stefano Stabellini wrote:
> > When the other end notifies us that there are commands to be read
> > (pvcalls_back_event), wake up the backend thread to parse the commands.
> > 
> > The command ring works like most other Xen rings, so use the usual
> > ring macros to read and write to it. The functions implementing the
> > commands are empty stubs for now.
> > 
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: boris.ostrovsky@oracle.com
> > CC: jgross@suse.com
> > ---
> >  drivers/xen/pvcalls-back.c | 119 +++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 119 insertions(+)
> > 
> > diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
> > index e4c2e46..437c2ad 100644
> > --- a/drivers/xen/pvcalls-back.c
> > +++ b/drivers/xen/pvcalls-back.c
> > @@ -51,12 +51,131 @@ struct pvcalls_fedata {
> >  	struct work_struct register_work;
> >  };
> >  
> > +static int pvcalls_back_socket(struct xenbus_device *dev,
> > +		struct xen_pvcalls_request *req)
> > +{
> > +	return 0;
> > +}
> > +
> > +static int pvcalls_back_connect(struct xenbus_device *dev,
> > +				struct xen_pvcalls_request *req)
> > +{
> > +	return 0;
> > +}
> > +
> > +static int pvcalls_back_release(struct xenbus_device *dev,
> > +				struct xen_pvcalls_request *req)
> > +{
> > +	return 0;
> > +}
> > +
> > +static int pvcalls_back_bind(struct xenbus_device *dev,
> > +			     struct xen_pvcalls_request *req)
> > +{
> > +	return 0;
> > +}
> > +
> > +static int pvcalls_back_listen(struct xenbus_device *dev,
> > +			       struct xen_pvcalls_request *req)
> > +{
> > +	return 0;
> > +}
> > +
> > +static int pvcalls_back_accept(struct xenbus_device *dev,
> > +			       struct xen_pvcalls_request *req)
> > +{
> > +	return 0;
> > +}
> > +
> > +static int pvcalls_back_poll(struct xenbus_device *dev,
> > +			     struct xen_pvcalls_request *req)
> > +{
> > +	return 0;
> > +}
> > +
> > +static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
> > +				   struct xen_pvcalls_request *req)
> > +{
> > +	int ret = 0;
> > +
> > +	switch (req->cmd) {
> > +	case PVCALLS_SOCKET:
> > +		ret = pvcalls_back_socket(dev, req);
> > +		break;
> > +	case PVCALLS_CONNECT:
> > +		ret = pvcalls_back_connect(dev, req);
> > +		break;
> > +	case PVCALLS_RELEASE:
> > +		ret = pvcalls_back_release(dev, req);
> > +		break;
> > +	case PVCALLS_BIND:
> > +		ret = pvcalls_back_bind(dev, req);
> > +		break;
> > +	case PVCALLS_LISTEN:
> > +		ret = pvcalls_back_listen(dev, req);
> > +		break;
> > +	case PVCALLS_ACCEPT:
> > +		ret = pvcalls_back_accept(dev, req);
> > +		break;
> > +	case PVCALLS_POLL:
> > +		ret = pvcalls_back_poll(dev, req);
> > +		break;
> > +	default:
> > +		ret = -ENOTSUPP;
> > +		break;
> > +	}
> > +	return ret;
> > +}
> > +
> >  static void pvcalls_back_work(struct work_struct *work)
> >  {
> > +	struct pvcalls_fedata *fedata = container_of(work,
> > +		struct pvcalls_fedata, register_work);
> > +	int notify, notify_all = 0, more = 1;
> > +	struct xen_pvcalls_request req;
> > +	struct xenbus_device *dev = fedata->dev;
> > +
> > +	while (more) {
> > +		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
> > +			RING_COPY_REQUEST(&fedata->ring,
> > +					  fedata->ring.req_cons++,
> > +					  &req);
> > +
> > +			if (!pvcalls_back_handle_cmd(dev, &req)) {
> 
> Hmm, no response in case of an unsupported command?

Good point, I'll add one.
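
Something like this, maybe (untested sketch): the default case queues
the response itself and leaves ret at 0, so the caller pushes and
notifies it like any other response:

	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		/* queue an error response for the unknown command */
		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}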


> > +				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
> > +					&fedata->ring, notify);
> > +				notify_all += notify;
> > +			}
> > +		}
> > +
> > +		if (notify_all)
> > +			notify_remote_via_irq(fedata->irq);
> 
> Want to reset notify_all in the above if?
> It could have been an "accept" which didn't queue a response.

Yes, I'll do that.
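
I.e., something like:

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}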


> > +
> > +		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
> > +	}
> >  }
> >  
> >  static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
> >  {
> > +	struct xenbus_device *dev = dev_id;
> > +	struct pvcalls_fedata *fedata = NULL;
> > +
> > +	if (dev == NULL)
> > +		return IRQ_HANDLED;
> > +
> > +	fedata = dev_get_drvdata(&dev->dev);
> > +	if (fedata == NULL)
> > +		return IRQ_HANDLED;
> > +
> > +	/*
> > +	 * TODO: a small theoretical race exists if we try to queue work
> > +	 * after pvcalls_back_work checked for final requests and before
> > +	 * it returns. The queuing will fail, and pvcalls_back_work
> > +	 * won't do the work because it is about to return. In that
> > +	 * case, we lose the notification.
> > +	 */
> > +	queue_work(fedata->wq, &fedata->register_work);
> 
> I know you like workqueues more than IRQ threads. But perhaps the above
> TODO could be handled more easily via an IRQ thread?
> 
> I think you should either solve the above race, add a comment explaining
> why it is not problematic, or show us why an IRQ thread doesn't solve
> the problem.

I actually think an IRQ thread is exactly what we need to solve this
race. Thanks for the suggestion! I'll change the code to use one.
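
Something along these lines, I think (untested sketch; it assumes the
bind_interdomain_evtchn_to_irqhandler() call from the earlier patch is
split into bind_interdomain_evtchn_to_irq() plus request_threaded_irq(),
with "evtchn" being the event channel read from xenstore there):

	irq = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn);
	if (irq < 0)
		goto error;
	fedata->irq = irq;
	/*
	 * No hard-irq handler and IRQF_ONESHOT: the interrupt stays
	 * masked until the thread function returns, so a notification
	 * that arrives while the ring is being drained cannot be lost.
	 */
	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

pvcalls_back_event() can then drain the ring directly in thread
context, and the workqueue (together with the TODO above) goes away.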

Patch

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index e4c2e46..437c2ad 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -51,12 +51,131 @@  struct pvcalls_fedata {
 	struct work_struct register_work;
 };
 
+static int pvcalls_back_socket(struct xenbus_device *dev,
+		struct xen_pvcalls_request *req)
+{
+	return 0;
+}
+
+static int pvcalls_back_connect(struct xenbus_device *dev,
+				struct xen_pvcalls_request *req)
+{
+	return 0;
+}
+
+static int pvcalls_back_release(struct xenbus_device *dev,
+				struct xen_pvcalls_request *req)
+{
+	return 0;
+}
+
+static int pvcalls_back_bind(struct xenbus_device *dev,
+			     struct xen_pvcalls_request *req)
+{
+	return 0;
+}
+
+static int pvcalls_back_listen(struct xenbus_device *dev,
+			       struct xen_pvcalls_request *req)
+{
+	return 0;
+}
+
+static int pvcalls_back_accept(struct xenbus_device *dev,
+			       struct xen_pvcalls_request *req)
+{
+	return 0;
+}
+
+static int pvcalls_back_poll(struct xenbus_device *dev,
+			     struct xen_pvcalls_request *req)
+{
+	return 0;
+}
+
+static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
+				   struct xen_pvcalls_request *req)
+{
+	int ret = 0;
+
+	switch (req->cmd) {
+	case PVCALLS_SOCKET:
+		ret = pvcalls_back_socket(dev, req);
+		break;
+	case PVCALLS_CONNECT:
+		ret = pvcalls_back_connect(dev, req);
+		break;
+	case PVCALLS_RELEASE:
+		ret = pvcalls_back_release(dev, req);
+		break;
+	case PVCALLS_BIND:
+		ret = pvcalls_back_bind(dev, req);
+		break;
+	case PVCALLS_LISTEN:
+		ret = pvcalls_back_listen(dev, req);
+		break;
+	case PVCALLS_ACCEPT:
+		ret = pvcalls_back_accept(dev, req);
+		break;
+	case PVCALLS_POLL:
+		ret = pvcalls_back_poll(dev, req);
+		break;
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	return ret;
+}
+
 static void pvcalls_back_work(struct work_struct *work)
 {
+	struct pvcalls_fedata *fedata = container_of(work,
+		struct pvcalls_fedata, register_work);
+	int notify, notify_all = 0, more = 1;
+	struct xen_pvcalls_request req;
+	struct xenbus_device *dev = fedata->dev;
+
+	while (more) {
+		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
+			RING_COPY_REQUEST(&fedata->ring,
+					  fedata->ring.req_cons++,
+					  &req);
+
+			if (!pvcalls_back_handle_cmd(dev, &req)) {
+				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
+					&fedata->ring, notify);
+				notify_all += notify;
+			}
+		}
+
+		if (notify_all)
+			notify_remote_via_irq(fedata->irq);
+
+		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
+	}
 }
 
 static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
 {
+	struct xenbus_device *dev = dev_id;
+	struct pvcalls_fedata *fedata = NULL;
+
+	if (dev == NULL)
+		return IRQ_HANDLED;
+
+	fedata = dev_get_drvdata(&dev->dev);
+	if (fedata == NULL)
+		return IRQ_HANDLED;
+
+	/*
+	 * TODO: a small theoretical race exists if we try to queue work
+	 * after pvcalls_back_work checked for final requests and before
+	 * it returns. The queuing will fail, and pvcalls_back_work
+	 * won't do the work because it is about to return. In that
+	 * case, we lose the notification.
+	 */
+	queue_work(fedata->wq, &fedata->register_work);
+
 	return IRQ_HANDLED;
 }