[RESEND,4/4] qemu-xen-dir/hw/block: Cache local buffers used in grant copy

Message ID 1464669898-28495-5-git-send-email-paulinaszubarczyk@gmail.com (mailing list archive)
State New, archived

Commit Message

Paulina Szubarczyk May 31, 2016, 4:44 a.m. UTC
If there are still pending requests, the buffers are not free()d but
cached in an array of size max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST.

---
 hw/block/xen_disk.c | 60 +++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 47 insertions(+), 13 deletions(-)

Comments

Roger Pau Monne June 2, 2016, 2:19 p.m. UTC | #1
On Tue, May 31, 2016 at 06:44:58AM +0200, Paulina Szubarczyk wrote:
> If there are still pending requests, the buffers are not free()d but
> cached in an array of size max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST.
> 
> ---
>  hw/block/xen_disk.c | 60 +++++++++++++++++++++++++++++++++++++++++------------
>  1 file changed, 47 insertions(+), 13 deletions(-)
> 
> diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> index 43cd9c9..cf80897 100644
> --- a/hw/block/xen_disk.c
> +++ b/hw/block/xen_disk.c
> @@ -125,6 +125,10 @@ struct XenBlkDev {
>      /* */
>      gboolean            feature_discard;
>  
> +    /* request buffer cache */
> +    void                **buf_cache;
> +    int                 buf_cache_free;

Have you checked if there's some already available FIFO queue structure that 
you can use?

GLib Trash Stacks looks like a suitable candidate:

https://developer.gnome.org/glib/stable/glib-Trash-Stacks.html

> +
>      /* qemu block driver */
>      DriveInfo           *dinfo;
>      BlockBackend        *blk;
> @@ -284,12 +288,16 @@ err:
>      return -1;
>  }
>  
> -
> -static void* get_buffer(void) {
> +static void* get_buffer(struct XenBlkDev *blkdev) {
>      void *buf;
>  
> -    buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE, 
> +    if(blkdev->buf_cache_free <= 0) {
> +        buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE, 
>                 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
> +    } else {
> +        blkdev->buf_cache_free--;
> +        buf = blkdev->buf_cache[blkdev->buf_cache_free];
> +    }
>  
>      if (unlikely(buf == MAP_FAILED))
>          return NULL;
> @@ -301,21 +309,40 @@ static int free_buffer(void* buf) {
>      return munmap(buf, 1 << XC_PAGE_SHIFT);
>  }
>  
> -static int free_buffers(void** page, int count) 
> +static int free_buffers(void** page, int count, struct XenBlkDev *blkdev) 
>  {
> -    int i, r = 0;
> +    int i, put_buf_cache = 0, r = 0;
> +
> +    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {

Shouldn't this be <=?

Or else you will only cache at most 341 pages instead of the maximum
number of pages that can be in-flight (352); with max_requests = 32 and
BLKIF_MAX_SEGMENTS_PER_REQUEST = 11, that is (32 - 1) * 11 = 341 rather
than 32 * 11 = 352.

Roger.
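
For reference, a minimal sketch of how the suggestion could look with
GTrashStack (the helper names are hypothetical, not part of the patch;
GTrashStack keeps its link pointer inside the freed buffer itself, so
the backing array goes away entirely):

#include <glib.h>
#include <xenctrl.h>     /* XC_PAGE_SHIFT */
#include <sys/mman.h>

/* Hypothetical: this stack head would live in struct XenBlkDev,
 * replacing buf_cache and buf_cache_free. */
static GTrashStack *buf_cache;

static void *get_buffer_sketch(void)
{
    /* Reuse a cached page if one is available, else mmap a fresh one. */
    void *buf = g_trash_stack_pop(&buf_cache);

    if (buf == NULL) {
        buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
            return NULL;
        }
    }
    return buf;
}

static void put_buffer_sketch(void *buf)
{
    /* Push onto the stack instead of munmap()ing, for later reuse. */
    g_trash_stack_push(&buf_cache, buf);
}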
Paulina Szubarczyk June 7, 2016, 1:13 p.m. UTC | #2
On Thu, 2016-06-02 at 16:19 +0200, Roger Pau Monné wrote:
> On Tue, May 31, 2016 at 06:44:58AM +0200, Paulina Szubarczyk wrote:
> > If there are still pending requests, the buffers are not free()d but
> > cached in an array of size max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST.
> > 
> > ---
> >  hw/block/xen_disk.c | 60 +++++++++++++++++++++++++++++++++++++++++------------
> >  1 file changed, 47 insertions(+), 13 deletions(-)
> > 
> > diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> > index 43cd9c9..cf80897 100644
> > --- a/hw/block/xen_disk.c
> > +++ b/hw/block/xen_disk.c
> > @@ -125,6 +125,10 @@ struct XenBlkDev {
> >      /* */
> >      gboolean            feature_discard;
> >  
> > +    /* request buffer cache */
> > +    void                **buf_cache;
> > +    int                 buf_cache_free;
> 
> Have you checked if there's some already available FIFO queue structure that 
> you can use?
> 
> GLib Trash Stacks looks like a suitable candidate:
> 
> https://developer.gnome.org/glib/stable/glib-Trash-Stacks.html

Persistent regions use a singly-linked list (GSList), and I was
thinking that using that structure here would be better, since the link
you sent says that Trash Stacks are deprecated since 2.48.
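
A minimal sketch of such a GSList-based pool, with hypothetical helper
names (not part of the patch):

#include <glib.h>

/* Hypothetical: the list head would live in struct XenBlkDev, next to
 * the persistent-grant lists. */
static GSList *buf_cache_list;

static void cache_buffer(void *buf)
{
    /* O(1) push onto the head of the list. */
    buf_cache_list = g_slist_prepend(buf_cache_list, buf);
}

static void *pop_cached_buffer(void)
{
    void *buf = NULL;

    if (buf_cache_list != NULL) {
        buf = buf_cache_list->data;
        /* Unlink and free the list node; the page itself is kept. */
        buf_cache_list = g_slist_delete_link(buf_cache_list,
                                             buf_cache_list);
    }
    return buf;
}

Unlike GTrashStack, GSList allocates a separate node per cached page,
but it is not deprecated and matches the style already used for
persistent grants.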

But I have some problems with debugging qemu-system-i386. gdb is not
able to load symbols; it reports "qemu-system-i386...(no debugging
symbols found)...done." This was not an issue earlier, and I have tried
running configure with --enable-debug before the build, as well as
setting 'strip_opt="yes"'.
> 
> > +
> >      /* qemu block driver */
> >      DriveInfo           *dinfo;
> >      BlockBackend        *blk;
> > @@ -284,12 +288,16 @@ err:
> >      return -1;
> >  }
> >  
> > -
> > -static void* get_buffer(void) {
> > +static void* get_buffer(struct XenBlkDev *blkdev) {
> >      void *buf;
> >  
> > -    buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE, 
> > +    if(blkdev->buf_cache_free <= 0) {
> > +        buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE, 
> >                 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
> > +    } else {
> > +        blkdev->buf_cache_free--;
> > +        buf = blkdev->buf_cache[blkdev->buf_cache_free];
> > +    }
> >  
> >      if (unlikely(buf == MAP_FAILED))
> >          return NULL;
> > @@ -301,21 +309,40 @@ static int free_buffer(void* buf) {
> >      return munmap(buf, 1 << XC_PAGE_SHIFT);
> >  }
> >  
> > -static int free_buffers(void** page, int count) 
> > +static int free_buffers(void** page, int count, struct XenBlkDev *blkdev) 
> >  {
> > -    int i, r = 0;
> > +    int i, put_buf_cache = 0, r = 0;
> > +
> > +    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
> 
> Shouldn't this be <=?
> 
> Or else you will only cache at most 341 pages instead of the maximum 
> number of pages that can be in-flight (352).

At the moment when a request is completing and freeing its pages, it is
still counted as part of the in-flight requests, so I think no more than
max_requests - 1 other requests should be scheduled, which gives at most
(max_requests - 1) * BLKIF_MAX_SEGMENTS_PER_REQUEST = 341 pages to cache.

Paulina

Patch

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index 43cd9c9..cf80897 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -125,6 +125,10 @@  struct XenBlkDev {
     /* */
     gboolean            feature_discard;
 
+    /* request buffer cache */
+    void                **buf_cache;
+    int                 buf_cache_free;
+
     /* qemu block driver */
     DriveInfo           *dinfo;
     BlockBackend        *blk;
@@ -284,12 +288,16 @@  err:
     return -1;
 }
 
-
-static void* get_buffer(void) {
+static void* get_buffer(struct XenBlkDev *blkdev) {
     void *buf;
 
-    buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE, 
+    if(blkdev->buf_cache_free <= 0) {
+        buf = mmap(NULL, 1 << XC_PAGE_SHIFT, PROT_READ | PROT_WRITE, 
                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    } else {
+        blkdev->buf_cache_free--;
+        buf = blkdev->buf_cache[blkdev->buf_cache_free];
+    }
 
     if (unlikely(buf == MAP_FAILED))
         return NULL;
@@ -301,21 +309,40 @@  static int free_buffer(void* buf) {
     return munmap(buf, 1 << XC_PAGE_SHIFT);
 }
 
-static int free_buffers(void** page, int count) 
+static int free_buffers(void** page, int count, struct XenBlkDev *blkdev) 
 {
-    int i, r = 0;
+    int i, put_buf_cache = 0, r = 0;
+
+    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
+        put_buf_cache = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST
+                        - blkdev->buf_cache_free;
+    }
 
     for (i = 0; i < count; i++) { 
-        
-        if(free_buffer(page[i])) 
-            r = 1;
-        
+        if(put_buf_cache > 0) {
+            blkdev->buf_cache[blkdev->buf_cache_free++] = page[i];
+            put_buf_cache--;
+        } else { 
+            if(free_buffer(page[i])) 
+                r = 1;
+        }
+
         page[i] = NULL;
     }
 
     return r;
 }
 
+static void free_buf_cache(struct XenBlkDev *blkdev) {
+    int i;
+    for(i = 0; i < blkdev->buf_cache_free; i++) {
+        free_buffer(blkdev->buf_cache[i]);
+    }
+
+    blkdev->buf_cache_free = 0;
+    free(blkdev->buf_cache);
+}
+
 static int ioreq_write(struct ioreq *ioreq) 
 {
     XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
@@ -343,7 +370,7 @@  static int ioreq_write(struct ioreq *ioreq)
         offset[i] = ioreq->req.seg[i].first_sect * ioreq->blkdev->file_blk;
         len[i] = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) 
                   * ioreq->blkdev->file_blk;
-        pages[i]  = get_buffer();
+        pages[i]  = get_buffer(ioreq->blkdev);
 
         if(!pages[i]) {
             xen_be_printf(&ioreq->blkdev->xendev, 0, 
@@ -357,7 +384,7 @@  static int ioreq_write(struct ioreq *ioreq)
         xen_be_printf(&ioreq->blkdev->xendev, 0, 
                       "failed to copy data for write %d \n", rc);
 
-        if(free_buffers(ioreq->page, ioreq->v.niov)) {
+        if(free_buffers(ioreq->page, ioreq->v.niov, ioreq->blkdev)) {
             xen_be_printf(&ioreq->blkdev->xendev, 0, 
                           "failed to free page, errno %d \n", errno);
         }
@@ -383,7 +410,7 @@  static int ioreq_read_init(struct ioreq *ioreq)
     }
 
     for (i = 0; i < ioreq->v.niov; i++) {
-        ioreq->page[i] = get_buffer();
+        ioreq->page[i] = get_buffer(ioreq->blkdev);
         if(!ioreq->page[i]) {
             return -1;
         }
@@ -469,7 +496,7 @@  static void qemu_aio_complete(void *opaque, int ret)
                           "failed to copy read data to guest\n");
         }
     case BLKIF_OP_WRITE:
-        if(free_buffers(ioreq->page, ioreq->v.niov)) {
+        if(free_buffers(ioreq->page, ioreq->v.niov, ioreq->blkdev)) {
             xen_be_printf(&ioreq->blkdev->xendev, 0, 
                           "failed to free page, errno %d \n", errno);
         }
@@ -936,6 +963,11 @@  static int blk_connect(struct XenDevice *xendev)
     }
     blkdev->cnt_map++;
 
+    /* create buffer cache for grant copy operations*/
+    blkdev->buf_cache_free = 0;
+    blkdev->buf_cache = calloc(max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST, 
+                               sizeof(void *));
+
     switch (blkdev->protocol) {
     case BLKIF_PROTOCOL_NATIVE:
     {
@@ -972,6 +1004,8 @@  static void blk_disconnect(struct XenDevice *xendev)
 {
     struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
 
+    free_buf_cache(blkdev);
+
     if (blkdev->blk) {
         blk_detach_dev(blkdev->blk, blkdev);
         blk_unref(blkdev->blk);