[v4,05/13] xen/pvcalls: implement connect command

Message ID 1505516440-11111-5-git-send-email-sstabellini@kernel.org (mailing list archive)
State New, archived

Commit Message

Stefano Stabellini Sept. 15, 2017, 11 p.m. UTC
Send PVCALLS_CONNECT to the backend. Allocate a new ring and evtchn for
the active socket.

Introduce fields in struct sock_mapping to keep track of active sockets.
Introduce a waitqueue to allow the frontend to wait on data coming from
the backend on the active socket (recvmsg command).

Two mutexes (one for reads and one for writes) will be used to protect
the active socket's in and out rings from concurrent accesses.

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
---
 drivers/xen/pvcalls-front.c | 163 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/xen/pvcalls-front.h |   2 +
 2 files changed, 165 insertions(+)
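For context, the waitqueue and mutexes introduced here are consumed by the
recvmsg/sendmsg patches later in this series. A minimal sketch of the
expected read-side pattern, per the description above (the
pvcalls_front_read_todo() helper is an assumption for illustration, not
part of this patch):

	/* Reads from the "in" ring take in_mutex, then sleep on
	 * inflight_conn_req until pvcalls_front_conn_handler() wakes the
	 * waiter after the backend signals the event channel. */
	mutex_lock(&map->active.in_mutex);
	wait_event_interruptible(map->active.inflight_conn_req,
				 pvcalls_front_read_todo(map));
	/* ... copy data out of map->active.data.in ... */
	mutex_unlock(&map->active.in_mutex);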

Comments

Boris Ostrovsky Sept. 21, 2017, 6:28 p.m. UTC | #1
On 09/15/2017 07:00 PM, Stefano Stabellini wrote:
> Send PVCALLS_CONNECT to the backend. Allocate a new ring and evtchn for
> the active socket.
> 
> Introduce fields in struct sock_mapping to keep track of active sockets.
> Introduce a waitqueue to allow the frontend to wait on data coming from
> the backend on the active socket (recvmsg command).
> 
> Two mutexes (one for reads and one for writes) will be used to protect
> the active socket's in and out rings from concurrent accesses.
> 
> Signed-off-by: Stefano Stabellini <stefano@aporeto.com>

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>

with a couple of nits below and comments from previous patch applicable 
here.


> +	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
> +					PVCALLS_RING_ORDER);

I don't think the cast is needed.


> +	map = (struct sock_mapping *) sock->sk->sk_send_head;

Space between cast and variable.
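Presumably meaning the space should be dropped, i.e.:

	map = (struct sock_mapping *)sock->sk->sk_send_head;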

> +	req = RING_GET_REQUEST(&bedata->ring, req_id);
> +	req->req_id = req_id;
> +	req->cmd = PVCALLS_CONNECT;
> +	req->u.connect.id = (uint64_t)map;
> +	memcpy(req->u.connect.addr, addr, sizeof(*addr));

Move this down (I don't think there are any dependencies)

> +	req->u.connect.len = addr_len;
> +	req->u.connect.flags = flags;
> +	req->u.connect.ref = map->active.ref;
> +	req->u.connect.evtchn = evtchn;
> +
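That is, presumably ending up with the memcpy() after the scalar
assignments, something like:

	req->u.connect.len = addr_len;
	req->u.connect.flags = flags;
	req->u.connect.ref = map->active.ref;
	req->u.connect.evtchn = evtchn;
	memcpy(req->u.connect.addr, addr, sizeof(*addr));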


-boris
Stefano Stabellini Oct. 6, 2017, 5:44 p.m. UTC | #2
On Thu, 21 Sep 2017, Boris Ostrovsky wrote:
> On 09/15/2017 07:00 PM, Stefano Stabellini wrote:
> > Send PVCALLS_CONNECT to the backend. Allocate a new ring and evtchn for
> > the active socket.
> > 
> > Introduce fields in struct sock_mapping to keep track of active sockets.
> > Introduce a waitqueue to allow the frontend to wait on data coming from
> > the backend on the active socket (recvmsg command).
> > 
> > Two mutexes (one for reads and one for writes) will be used to protect
> > the active socket's in and out rings from concurrent accesses.
> > 
> > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> 
> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> 
> with a couple of nits below and comments from previous patch applicable here.
> 
> 
> > +	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
> > +					PVCALLS_RING_ORDER);
> 
> I don't think the cast is needed.
> 
> 
> > +	map = (struct sock_mapping *) sock->sk->sk_send_head;
> 
> Space between cast and variable.
> 
> > +	req = RING_GET_REQUEST(&bedata->ring, req_id);
> > +	req->req_id = req_id;
> > +	req->cmd = PVCALLS_CONNECT;
> > +	req->u.connect.id = (uint64_t)map;
> > +	memcpy(req->u.connect.addr, addr, sizeof(*addr));
> 
> Move this down (I don't think there are any dependencies)

All done, thanks!


> > +	req->u.connect.len = addr_len;
> > +	req->u.connect.flags = flags;
> > +	req->u.connect.ref = map->active.ref;
> > +	req->u.connect.evtchn = evtchn;
> > +
Stefano Stabellini Oct. 6, 2017, 5:54 p.m. UTC | #3
On Fri, 6 Oct 2017, Stefano Stabellini wrote:
> On Thu, 21 Sep 2017, Boris Ostrovsky wrote:
> > On 09/15/2017 07:00 PM, Stefano Stabellini wrote:
> > > Send PVCALLS_CONNECT to the backend. Allocate a new ring and evtchn for
> > > the active socket.
> > > 
> > > Introduce fields in struct sock_mapping to keep track of active sockets.
> > > Introduce a waitqueue to allow the frontend to wait on data coming from
> > > the backend on the active socket (recvmsg command).
> > > 
> > > Two mutexes (one for reads and one for writes) will be used to protect
> > > the active socket's in and out rings from concurrent accesses.
> > > 
> > > Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
> > 
> > Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> > 
> > with a couple of nits below and comments from previous patch applicable here.
> > 
> > 
> > > +	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
> > > +					PVCALLS_RING_ORDER);
> > 
> > I don't think the cast is needed.

Actually, this cast is needed: __get_free_pages() returns an unsigned
long, not a pointer. I'll keep it.
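The prototype in linux/gfp.h is:

	unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);

so assigning the result to a pointer does require the explicit conversion:

	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 PVCALLS_RING_ORDER);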

 
> > > +	map = (struct sock_mapping *) sock->sk->sk_send_head;
> > 
> > Space between cast and variable.
> > 
> > > +	req = RING_GET_REQUEST(&bedata->ring, req_id);
> > > +	req->req_id = req_id;
> > > +	req->cmd = PVCALLS_CONNECT;
> > > +	req->u.connect.id = (uint64_t)map;
> > > +	memcpy(req->u.connect.addr, addr, sizeof(*addr));
> > 
> > Move this down (I don't think there are any dependencies)
> 
> All done, thanks!
> 
> 
> > > +	req->u.connect.len = addr_len;
> > > +	req->u.connect.flags = flags;
> > > +	req->u.connect.ref = map->active.ref;
> > > +	req->u.connect.evtchn = evtchn;
> > > +
>

Patch

diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 1bad1b1..ef511b6 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -13,6 +13,10 @@ 
  */
 
 #include <linux/module.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+
+#include <net/sock.h>
 
 #include <xen/events.h>
 #include <xen/grant_table.h>
@@ -57,6 +61,18 @@  struct sock_mapping {
 	bool active_socket;
 	struct list_head list;
 	struct socket *sock;
+	union {
+		struct {
+			int irq;
+			grant_ref_t ref;
+			struct pvcalls_data_intf *ring;
+			struct pvcalls_data data;
+			struct mutex in_mutex;
+			struct mutex out_mutex;
+
+			wait_queue_head_t inflight_conn_req;
+		} active;
+	};
 };
 
 static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
@@ -114,6 +130,18 @@  static irqreturn_t pvcalls_front_event_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t pvcalls_front_conn_handler(int irq, void *sock_map)
+{
+	struct sock_mapping *map = sock_map;
+
+	if (map == NULL)
+		return IRQ_HANDLED;
+
+	wake_up_interruptible(&map->active.inflight_conn_req);
+
+	return IRQ_HANDLED;
+}
+
 int pvcalls_front_socket(struct socket *sock)
 {
 	struct pvcalls_bedata *bedata;
@@ -191,6 +219,133 @@  int pvcalls_front_socket(struct socket *sock)
 	return ret;
 }
 
+static int create_active(struct sock_mapping *map, int *evtchn)
+{
+	void *bytes;
+	int ret = -ENOMEM, irq = -1, i;
+
+	*evtchn = -1;
+	init_waitqueue_head(&map->active.inflight_conn_req);
+
+	map->active.ring = (struct pvcalls_data_intf *)
+		__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	if (map->active.ring == NULL)
+		goto out_error;
+	map->active.ring->ring_order = PVCALLS_RING_ORDER;
+	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					PVCALLS_RING_ORDER);
+	if (bytes == NULL)
+		goto out_error;
+	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
+		map->active.ring->ref[i] = gnttab_grant_foreign_access(
+			pvcalls_front_dev->otherend_id,
+			pfn_to_gfn(virt_to_pfn(bytes) + i), 0);
+
+	map->active.ref = gnttab_grant_foreign_access(
+		pvcalls_front_dev->otherend_id,
+		pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
+
+	map->active.data.in = bytes;
+	map->active.data.out = bytes +
+		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+	ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
+	if (ret)
+		goto out_error;
+	irq = bind_evtchn_to_irqhandler(*evtchn, pvcalls_front_conn_handler,
+					0, "pvcalls-frontend", map);
+	if (irq < 0) {
+		ret = irq;
+		goto out_error;
+	}
+
+	map->active.irq = irq;
+	map->active_socket = true;
+	mutex_init(&map->active.in_mutex);
+	mutex_init(&map->active.out_mutex);
+
+	return 0;
+
+out_error:
+	if (irq >= 0)
+		unbind_from_irqhandler(irq, map);
+	else if (*evtchn >= 0)
+		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
+	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
+	free_page((unsigned long)map->active.ring);
+	return ret;
+}
+
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+				int addr_len, int flags)
+{
+	struct pvcalls_bedata *bedata;
+	struct sock_mapping *map = NULL;
+	struct xen_pvcalls_request *req;
+	int notify, req_id, ret, evtchn;
+
+	pvcalls_enter();
+	if (!pvcalls_front_dev) {
+		pvcalls_exit();
+		return -ENETUNREACH;
+	}
+	if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM) {
+		pvcalls_exit();
+		return -ENOTSUPP;
+	}
+
+	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+	map = (struct sock_mapping *) sock->sk->sk_send_head;
+	if (!map) {
+		pvcalls_exit();
+		return -ENOTSOCK;
+	}
+
+	spin_lock(&bedata->socket_lock);
+	ret = get_request(bedata, &req_id);
+	if (ret < 0) {
+		spin_unlock(&bedata->socket_lock);
+		pvcalls_exit();
+		return ret;
+	}
+	ret = create_active(map, &evtchn);
+	if (ret < 0) {
+		spin_unlock(&bedata->socket_lock);
+		pvcalls_exit();
+		return ret;
+	}
+
+	req = RING_GET_REQUEST(&bedata->ring, req_id);
+	req->req_id = req_id;
+	req->cmd = PVCALLS_CONNECT;
+	req->u.connect.id = (uint64_t)map;
+	memcpy(req->u.connect.addr, addr, sizeof(*addr));
+	req->u.connect.len = addr_len;
+	req->u.connect.flags = flags;
+	req->u.connect.ref = map->active.ref;
+	req->u.connect.evtchn = evtchn;
+
+	map->sock = sock;
+
+	bedata->ring.req_prod_pvt++;
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+	spin_unlock(&bedata->socket_lock);
+
+	if (notify)
+		notify_remote_via_irq(bedata->irq);
+
+	wait_event(bedata->inflight_req,
+		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+	ret = bedata->rsp[req_id].ret;
+	/* read ret, then set this rsp slot to be reused */
+	smp_mb();
+	WRITE_ONCE(bedata->rsp[req_id].req_id, PVCALLS_INVALID_ID);
+	pvcalls_exit();
+	return ret;
+}
+
 static const struct xenbus_device_id pvcalls_front_ids[] = {
 	{ "pvcalls" },
 	{ "" }
@@ -207,6 +362,14 @@  static int pvcalls_front_remove(struct xenbus_device *dev)
 	if (bedata->irq >= 0)
 		unbind_from_irqhandler(bedata->irq, dev);
 
+	list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
+		map->sock->sk->sk_send_head = NULL;
+		if (map->active_socket) {
+			map->active.ring->in_error = -EBADF;
+			wake_up_interruptible(&map->active.inflight_conn_req);
+		}
+	}
+
 	smp_mb();
 	while (atomic_read(&pvcalls_refcount) > 0)
 		cpu_relax();
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
index b7dabed..63b0417 100644
--- a/drivers/xen/pvcalls-front.h
+++ b/drivers/xen/pvcalls-front.h
@@ -4,5 +4,7 @@ 
 #include <linux/net.h>
 
 int pvcalls_front_socket(struct socket *sock);
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+			  int addr_len, int flags);
 
 #endif
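For context, pvcalls_front_connect() retrieves the struct sock_mapping
pointer that pvcalls_front_socket() stashed in sk->sk_send_head, so any
caller has to chain the two. A hypothetical sketch (the caller name is
invented for illustration; the in-kernel consumer is outside this series):

	static int xen_stream_connect(struct socket *sock, struct sockaddr *addr,
				      int addr_len, int flags)
	{
		int ret = pvcalls_front_socket(sock); /* sets sk_send_head */

		if (ret < 0)
			return ret;
		return pvcalls_front_connect(sock, addr, addr_len, flags);
	}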