[v5,02/13] xen/pvcalls: implement frontend disconnect

Message ID 1507336227-20477-2-git-send-email-sstabellini@kernel.org (mailing list archive)
State New, archived

Commit Message

Stefano Stabellini Oct. 7, 2017, 12:30 a.m. UTC
Introduce a data structure named pvcalls_bedata. It contains pointers to
the command ring, the event channel, a list of active sockets and a list
of passive sockets. List accesses are protected by a spin_lock.

Introduce a waitqueue to allow waiting for a response on commands sent
to the backend.

Introduce an array of struct xen_pvcalls_response to store command
responses.

pvcalls_refcount is used to keep count of the outstanding pvcalls users.
Only remove connections once the refcount is zero.

Implement the pvcalls frontend removal function. Go through the list of
active and passive sockets and free them all, one at a time.

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
---
 drivers/xen/pvcalls-front.c | 67 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
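
The patch defines pvcalls_enter()/pvcalls_exit() without adding any
callers yet. A minimal sketch of the intended pattern, with a
hypothetical operation standing in for the real ones added later in the
series:

	/* Hypothetical caller, not part of this patch: each frontend
	 * operation holds the refcount for its whole duration, so that
	 * pvcalls_front_remove() can spin until the count drops to
	 * zero before tearing anything down. */
	int pvcalls_front_some_op(void)
	{
		struct pvcalls_bedata *bedata;

		pvcalls_enter();
		if (!pvcalls_front_dev) {
			pvcalls_exit();
			return -ENOTCONN;
		}
		bedata = dev_get_drvdata(&pvcalls_front_dev->dev);

		/* ... send a request on bedata->ring and wait on
		 * bedata->inflight_req for the matching response ... */

		pvcalls_exit();
		return 0;
	}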

Comments

Boris Ostrovsky Oct. 17, 2017, 4:01 p.m. UTC | #1
On 10/06/2017 08:30 PM, Stefano Stabellini wrote:
> Introduce a data structure named pvcalls_bedata. It contains pointers to
> the command ring, the event channel, a list of active sockets and a list
> of passive sockets. List accesses are protected by a spin_lock.
>
> Introduce a waitqueue to allow waiting for a response on commands sent
> to the backend.
>
> Introduce an array of struct xen_pvcalls_response to store command
> responses.
>
> pvcalls_refcount is used to keep count of the outstanding pvcalls users.
> Only remove connections once the refcount is zero.
>
> Implement the pvcalls frontend removal function. Go through the list of
> active and passive sockets and free them all, one at a time.
>
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> CC: boris.ostrovsky@oracle.com
> CC: jgross@suse.com
> ---
>  drivers/xen/pvcalls-front.c | 67 +++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 67 insertions(+)
>
> diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
> index a8d38c2..d8b7a04 100644
> --- a/drivers/xen/pvcalls-front.c
> +++ b/drivers/xen/pvcalls-front.c
> @@ -20,6 +20,46 @@
>  #include <xen/xenbus.h>
>  #include <xen/interface/io/pvcalls.h>
>  
> +#define PVCALLS_INVALID_ID UINT_MAX
> +#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
> +#define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
> +
> +struct pvcalls_bedata {
> +	struct xen_pvcalls_front_ring ring;
> +	grant_ref_t ref;
> +	int irq;
> +
> +	struct list_head socket_mappings;
> +	struct list_head socketpass_mappings;
> +	spinlock_t socket_lock;
> +
> +	wait_queue_head_t inflight_req;
> +	struct xen_pvcalls_response rsp[PVCALLS_NR_REQ_PER_RING];

Did you mean _REQ_ or _RSP_ in the macro name?

> +};
> +/* Only one front/back connection supported. */
> +static struct xenbus_device *pvcalls_front_dev;
> +static atomic_t pvcalls_refcount;
> +
> +/* first increment refcount, then proceed */
> +#define pvcalls_enter() {               \
> +	atomic_inc(&pvcalls_refcount);      \
> +}
> +
> +/* first complete other operations, then decrement refcount */
> +#define pvcalls_exit() {                \
> +	atomic_dec(&pvcalls_refcount);      \
> +}
> +
> +static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
> +{
> +	return IRQ_HANDLED;
> +}
> +
> +static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
> +				   struct sock_mapping *map)
> +{
> +}
> +
>  static const struct xenbus_device_id pvcalls_front_ids[] = {
>  	{ "pvcalls" },
>  	{ "" }
> @@ -27,6 +67,33 @@
>  
>  static int pvcalls_front_remove(struct xenbus_device *dev)
>  {
> +	struct pvcalls_bedata *bedata;
> +	struct sock_mapping *map = NULL, *n;
> +
> +	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
> +	dev_set_drvdata(&dev->dev, NULL);
> +	pvcalls_front_dev = NULL;
> +	if (bedata->irq >= 0)
> +		unbind_from_irqhandler(bedata->irq, dev);
> +
> +	smp_mb();
> +	while (atomic_read(&pvcalls_refcount) > 0)
> +		cpu_relax();
> +	list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
> +		pvcalls_front_free_map(bedata, map);
> +		kfree(map);
> +	}
> +	list_for_each_entry_safe(map, n, &bedata->socketpass_mappings, list) {
> +		spin_lock(&bedata->socket_lock);
> +		list_del_init(&map->list);
> +		spin_unlock(&bedata->socket_lock);
> +		kfree(map);

Why do you re-init the entry if you are freeing it? And do you really
need the locks around it? This looks similar to the case we've discussed
for other patches --- if we are concerned that someone may grab this
entry then something must be wrong.

(Sorry, this must have been here in earlier versions but I only now
noticed it.)

-boris

> +	}
> +	if (bedata->ref >= 0)
> +		gnttab_end_foreign_access(bedata->ref, 0, 0);
> +	kfree(bedata->ring.sring);
> +	kfree(bedata);
> +	xenbus_switch_state(dev, XenbusStateClosed);
>  	return 0;
>  }
>
Stefano Stabellini Oct. 23, 2017, 10:44 p.m. UTC | #2
On Tue, 17 Oct 2017, Boris Ostrovsky wrote:
> On 10/06/2017 08:30 PM, Stefano Stabellini wrote:
> > Introduce a data structure named pvcalls_bedata. It contains pointers to
> > the command ring, the event channel, a list of active sockets and a list
> > of passive sockets. List accesses are protected by a spin_lock.
> >
> > Introduce a waitqueue to allow waiting for a response on commands sent
> > to the backend.
> >
> > Introduce an array of struct xen_pvcalls_response to store command
> > responses.
> >
> > pvcalls_refcount is used to keep count of the outstanding pvcalls users.
> > Only remove connections once the refcount is zero.
> >
> > Implement the pvcalls frontend removal function. Go through the list of
> > active and passive sockets and free them all, one at a time.
> >
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > CC: boris.ostrovsky@oracle.com
> > CC: jgross@suse.com
> > ---
> >  drivers/xen/pvcalls-front.c | 67 +++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 67 insertions(+)
> >
> > diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
> > index a8d38c2..d8b7a04 100644
> > --- a/drivers/xen/pvcalls-front.c
> > +++ b/drivers/xen/pvcalls-front.c
> > @@ -20,6 +20,46 @@
> >  #include <xen/xenbus.h>
> >  #include <xen/interface/io/pvcalls.h>
> >  
> > +#define PVCALLS_INVALID_ID UINT_MAX
> > +#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
> > +#define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
> > +
> > +struct pvcalls_bedata {
> > +	struct xen_pvcalls_front_ring ring;
> > +	grant_ref_t ref;
> > +	int irq;
> > +
> > +	struct list_head socket_mappings;
> > +	struct list_head socketpass_mappings;
> > +	spinlock_t socket_lock;
> > +
> > +	wait_queue_head_t inflight_req;
> > +	struct xen_pvcalls_response rsp[PVCALLS_NR_REQ_PER_RING];
> 
> Did you mean _REQ_ or _RSP_ in the macro name?

For each request there is one response, so it doesn't make a difference.
But for clarity, I will rename.
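
Assuming the definition itself stays the same, the rename would
presumably end up as:

	#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)

		struct xen_pvcalls_response rsp[PVCALLS_NR_RSP_PER_RING];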


> > +};
> > +/* Only one front/back connection supported. */
> > +static struct xenbus_device *pvcalls_front_dev;
> > +static atomic_t pvcalls_refcount;
> > +
> > +/* first increment refcount, then proceed */
> > +#define pvcalls_enter() {               \
> > +	atomic_inc(&pvcalls_refcount);      \
> > +}
> > +
> > +/* first complete other operations, then decrement refcount */
> > +#define pvcalls_exit() {                \
> > +	atomic_dec(&pvcalls_refcount);      \
> > +}
> > +
> > +static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
> > +{
> > +	return IRQ_HANDLED;
> > +}
> > +
> > +static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
> > +				   struct sock_mapping *map)
> > +{
> > +}
> > +
> >  static const struct xenbus_device_id pvcalls_front_ids[] = {
> >  	{ "pvcalls" },
> >  	{ "" }
> > @@ -27,6 +67,33 @@
> >  
> >  static int pvcalls_front_remove(struct xenbus_device *dev)
> >  {
> > +	struct pvcalls_bedata *bedata;
> > +	struct sock_mapping *map = NULL, *n;
> > +
> > +	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
> > +	dev_set_drvdata(&dev->dev, NULL);
> > +	pvcalls_front_dev = NULL;
> > +	if (bedata->irq >= 0)
> > +		unbind_from_irqhandler(bedata->irq, dev);
> > +
> > +	smp_mb();
> > +	while (atomic_read(&pvcalls_refcount) > 0)
> > +		cpu_relax();
> > +	list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
> > +		pvcalls_front_free_map(bedata, map);
> > +		kfree(map);
> > +	}
> > +	list_for_each_entry_safe(map, n, &bedata->socketpass_mappings, list) {
> > +		spin_lock(&bedata->socket_lock);
> > +		list_del_init(&map->list);
> > +		spin_unlock(&bedata->socket_lock);
> > +		kfree(map);
> 
> Why do you re-init the entry if you are freeing it?

Fair enough, I'll just list_del.


> And do you really
> need the locks around it? This looks similar to the case we've discussed
> for other patches --- if we are concerned that someone may grab this
> entry then something must be wrong.
> 
> (Sorry, this must have been here in earlier versions but I only now
> noticed it.)

Yes, you are right, it is already protected by the global refcount, I'll
remove.
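
With both changes applied (plain list_del, no lock), the loop would
presumably reduce to something like:

	list_for_each_entry_safe(map, n, &bedata->socketpass_mappings, list) {
		list_del(&map->list);
		kfree(map);
	}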


> > +	}
> > +	if (bedata->ref >= 0)
> > +		gnttab_end_foreign_access(bedata->ref, 0, 0);
> > +	kfree(bedata->ring.sring);
> > +	kfree(bedata);
> > +	xenbus_switch_state(dev, XenbusStateClosed);
> >  	return 0;
> >  }
> >  
>

Patch

diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index a8d38c2..d8b7a04 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -20,6 +20,46 @@ 
 #include <xen/xenbus.h>
 #include <xen/interface/io/pvcalls.h>
 
+#define PVCALLS_INVALID_ID UINT_MAX
+#define PVCALLS_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
+#define PVCALLS_NR_REQ_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
+
+struct pvcalls_bedata {
+	struct xen_pvcalls_front_ring ring;
+	grant_ref_t ref;
+	int irq;
+
+	struct list_head socket_mappings;
+	struct list_head socketpass_mappings;
+	spinlock_t socket_lock;
+
+	wait_queue_head_t inflight_req;
+	struct xen_pvcalls_response rsp[PVCALLS_NR_REQ_PER_RING];
+};
+/* Only one front/back connection supported. */
+static struct xenbus_device *pvcalls_front_dev;
+static atomic_t pvcalls_refcount;
+
+/* first increment refcount, then proceed */
+#define pvcalls_enter() {               \
+	atomic_inc(&pvcalls_refcount);      \
+}
+
+/* first complete other operations, then decrement refcount */
+#define pvcalls_exit() {                \
+	atomic_dec(&pvcalls_refcount);      \
+}
+
+static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
+{
+	return IRQ_HANDLED;
+}
+
+static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
+				   struct sock_mapping *map)
+{
+}
+
 static const struct xenbus_device_id pvcalls_front_ids[] = {
 	{ "pvcalls" },
 	{ "" }
@@ -27,6 +67,33 @@ 
 
 static int pvcalls_front_remove(struct xenbus_device *dev)
 {
+	struct pvcalls_bedata *bedata;
+	struct sock_mapping *map = NULL, *n;
+
+	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+	dev_set_drvdata(&dev->dev, NULL);
+	pvcalls_front_dev = NULL;
+	if (bedata->irq >= 0)
+		unbind_from_irqhandler(bedata->irq, dev);
+
+	smp_mb();
+	while (atomic_read(&pvcalls_refcount) > 0)
+		cpu_relax();
+	list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
+		pvcalls_front_free_map(bedata, map);
+		kfree(map);
+	}
+	list_for_each_entry_safe(map, n, &bedata->socketpass_mappings, list) {
+		spin_lock(&bedata->socket_lock);
+		list_del_init(&map->list);
+		spin_unlock(&bedata->socket_lock);
+		kfree(map);
+	}
+	if (bedata->ref >= 0)
+		gnttab_end_foreign_access(bedata->ref, 0, 0);
+	kfree(bedata->ring.sring);
+	kfree(bedata);
+	xenbus_switch_state(dev, XenbusStateClosed);
 	return 0;
 }