Message ID | 20190619223338.1231.52537.stgit@localhost.localdomain (mailing list archive) |
---|---|
State | New, archived |
Series | mm / virtio: Provide support for paravirtual waste page treatment |
On Wed, Jun 19, 2019 at 03:33:38PM -0700, Alexander Duyck wrote:
> From: Alexander Duyck <alexander.h.duyck@linux.intel.com>
>
> Add support for aerating memory using the hinting feature provided by
> virtio-balloon. Hinting differs from the regular balloon functionality
> in that it is much less durable than a standard memory balloon. Instead
> of creating a list of pages that cannot be accessed, the pages are only
> inaccessible while they are being indicated to the virtio interface.
> Once the interface has acknowledged them they are placed back into
> their respective free lists and are once again accessible by the guest
> system.
>
> Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
> ---
>  drivers/virtio/Kconfig              |    1
>  drivers/virtio/virtio_balloon.c     |  110 ++++++++++++++++++++++++++++++++++-
>  include/uapi/linux/virtio_balloon.h |    1
>  3 files changed, 108 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
> index 023fc3bc01c6..9cdaccf92c3a 100644
> --- a/drivers/virtio/Kconfig
> +++ b/drivers/virtio/Kconfig
> @@ -47,6 +47,7 @@ config VIRTIO_BALLOON
>  	tristate "Virtio balloon driver"
>  	depends on VIRTIO
>  	select MEMORY_BALLOON
> +	select AERATION
>  	---help---
>  	 This driver supports increasing and decreasing the amount
>  	 of memory within a KVM guest.
> diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
> index 44339fc87cc7..91f1e8c9017d 100644
> --- a/drivers/virtio/virtio_balloon.c
> +++ b/drivers/virtio/virtio_balloon.c
> @@ -18,6 +18,7 @@
>  #include <linux/mm.h>
>  #include <linux/mount.h>
>  #include <linux/magic.h>
> +#include <linux/memory_aeration.h>
>
>  /*
>   * Balloon device works in 4K page units. So each page is pointed to by
> @@ -26,6 +27,7 @@
>   */
>  #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
>  #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
> +#define VIRTIO_BALLOON_ARRAY_HINTS_MAX 32
>  #define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
>
>  #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
> @@ -45,6 +47,7 @@ enum virtio_balloon_vq {
>  	VIRTIO_BALLOON_VQ_DEFLATE,
>  	VIRTIO_BALLOON_VQ_STATS,
>  	VIRTIO_BALLOON_VQ_FREE_PAGE,
> +	VIRTIO_BALLOON_VQ_HINTING,
>  	VIRTIO_BALLOON_VQ_MAX
>  };
>
> @@ -54,7 +57,8 @@ enum virtio_balloon_config_read {
>
>  struct virtio_balloon {
>  	struct virtio_device *vdev;
> -	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
> +	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq,
> +			 *hinting_vq;
>
>  	/* Balloon's own wq for cpu-intensive work items */
>  	struct workqueue_struct *balloon_wq;
> @@ -103,9 +107,21 @@ struct virtio_balloon {
>  	/* Synchronize access/update to this struct virtio_balloon elements */
>  	struct mutex balloon_lock;
>
> -	/* The array of pfns we tell the Host about. */
> -	unsigned int num_pfns;
> -	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
> +
> +	union {
> +		/* The array of pfns we tell the Host about. */
> +		struct {
> +			unsigned int num_pfns;
> +			__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
> +		};
> +		/* The array of physical addresses we are hinting on */
> +		struct {
> +			unsigned int num_hints;
> +			__virtio64 hints[VIRTIO_BALLOON_ARRAY_HINTS_MAX];
> +		};
> +	};
> +
> +	struct aerator_dev_info a_dev_info;
>
>  	/* Memory statistics */
>  	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
> @@ -151,6 +167,68 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
>
>  }
>
> +static u64 page_to_hints_pa_order(struct page *page)
> +{
> +	unsigned char order;
> +	dma_addr_t pa;
> +
> +	BUILD_BUG_ON((64 - VIRTIO_BALLOON_PFN_SHIFT) >=
> +		     (1 << VIRTIO_BALLOON_PFN_SHIFT));
> +
> +	/*
> +	 * Record physical page address combined with page order.
> +	 * Order will never exceed 64 - VIRTIO_BALLOON_PFN_SHIFT
> +	 * since the size has to fit into a 64b value. So as long
> +	 * as VIRTIO_BALLOON_PFN_SHIFT is greater than this, combining
> +	 * the two values should be safe.
> +	 */
> +	pa = page_to_phys(page);
> +	order = page_private(page) +
> +		PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT;
> +
> +	return (u64)(pa | order);
> +}
> +
> +void virtballoon_aerator_react(struct aerator_dev_info *a_dev_info)
> +{
> +	struct virtio_balloon *vb = container_of(a_dev_info,
> +						 struct virtio_balloon,
> +						 a_dev_info);
> +	struct virtqueue *vq = vb->hinting_vq;
> +	struct scatterlist sg;
> +	unsigned int unused;
> +	struct page *page;
> +
> +	mutex_lock(&vb->balloon_lock);
> +
> +	vb->num_hints = 0;
> +
> +	list_for_each_entry(page, &a_dev_info->batch, lru) {
> +		vb->hints[vb->num_hints++] =
> +				cpu_to_virtio64(vb->vdev,
> +						page_to_hints_pa_order(page));
> +	}
> +
> +	/* We shouldn't have been called if there is nothing to process */
> +	if (WARN_ON(vb->num_hints == 0))
> +		goto out;
> +
> +	sg_init_one(&sg, vb->hints,
> +		    sizeof(vb->hints[0]) * vb->num_hints);
> +
> +	/*
> +	 * We should always be able to add one buffer to an
> +	 * empty queue.
> +	 */
> +	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
> +	virtqueue_kick(vq);
> +
> +	/* When host has read buffer, this completes via balloon_ack */
> +	wait_event(vb->acked, virtqueue_get_buf(vq, &unused));
> +out:
> +	mutex_unlock(&vb->balloon_lock);
> +}
> +
>  static void set_page_pfns(struct virtio_balloon *vb,
>  			  __virtio32 pfns[], struct page *page)
>  {
> @@ -475,6 +553,7 @@ static int init_vqs(struct virtio_balloon *vb)
>  	names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
>  	names[VIRTIO_BALLOON_VQ_STATS] = NULL;
>  	names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
> +	names[VIRTIO_BALLOON_VQ_HINTING] = NULL;
>
>  	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
>  		names[VIRTIO_BALLOON_VQ_STATS] = "stats";
> @@ -486,11 +565,19 @@ static int init_vqs(struct virtio_balloon *vb)
>  		callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
>  	}
>
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING)) {
> +		names[VIRTIO_BALLOON_VQ_HINTING] = "hinting_vq";
> +		callbacks[VIRTIO_BALLOON_VQ_HINTING] = balloon_ack;
> +	}
> +
>  	err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
>  					 vqs, callbacks, names, NULL, NULL);
>  	if (err)
>  		return err;
>
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING))
> +		vb->hinting_vq = vqs[VIRTIO_BALLOON_VQ_HINTING];
> +
>  	vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
>  	vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
>  	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
> @@ -929,12 +1016,24 @@ static int virtballoon_probe(struct virtio_device *vdev)
>  		if (err)
>  			goto out_del_balloon_wq;
>  	}
> +
> +	vb->a_dev_info.react = virtballoon_aerator_react;
> +	vb->a_dev_info.capacity = VIRTIO_BALLOON_ARRAY_HINTS_MAX;
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING)) {
> +		err = aerator_startup(&vb->a_dev_info);
> +		if (err)
> +			goto out_unregister_shrinker;
> +	}
> +
>  	virtio_device_ready(vdev);
>
>  	if (towards_target(vb))
>  		virtballoon_changed(vdev);
>  	return 0;
>
> +out_unregister_shrinker:
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
> +		virtio_balloon_unregister_shrinker(vb);
>  out_del_balloon_wq:
>  	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
>  		destroy_workqueue(vb->balloon_wq);
> @@ -963,6 +1062,8 @@ static void virtballoon_remove(struct virtio_device *vdev)
>  {
>  	struct virtio_balloon *vb = vdev->priv;
>
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING))
> +		aerator_shutdown();
>  	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
>  		virtio_balloon_unregister_shrinker(vb);
>  	spin_lock_irq(&vb->stop_update_lock);
> @@ -1032,6 +1133,7 @@ static int virtballoon_validate(struct virtio_device *vdev)
>  	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
>  	VIRTIO_BALLOON_F_FREE_PAGE_HINT,
>  	VIRTIO_BALLOON_F_PAGE_POISON,
> +	VIRTIO_BALLOON_F_HINTING,
>  };
>
>  static struct virtio_driver virtio_balloon_driver = {
> diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
> index a1966cd7b677..2b0f62814e22 100644
> --- a/include/uapi/linux/virtio_balloon.h
> +++ b/include/uapi/linux/virtio_balloon.h
> @@ -36,6 +36,7 @@
>  #define VIRTIO_BALLOON_F_DEFLATE_ON_OOM	2 /* Deflate balloon on OOM */
>  #define VIRTIO_BALLOON_F_FREE_PAGE_HINT	3 /* VQ to report free pages */
>  #define VIRTIO_BALLOON_F_PAGE_POISON	4 /* Guest is using page poisoning */
> +#define VIRTIO_BALLOON_F_HINTING	5 /* Page hinting virtqueue */
>
>  /* Size of a PFN in the balloon interface. */
>  #define VIRTIO_BALLOON_PFN_SHIFT 12

The approach here is very close to what the on-demand hinting that is
already upstream does. This should have resulted in most of the code
being shared, but that does not seem to happen here.

Can we unify the code in some way? It can still use a separate feature
flag, but there are things I like very much about the current hinting
code, such as using s/g instead of passing PFNs in a buffer.

If this doesn't work, could you elaborate on why?
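For concreteness: page_to_hints_pa_order() above packs a block's physical address and its order into a single 64-bit value, relying on the low VIRTIO_BALLOON_PFN_SHIFT bits of an aligned address being zero. A minimal userspace sketch of that math, assuming PAGE_SHIFT == VIRTIO_BALLOON_PFN_SHIFT == 12 as on x86 (the decode half is an assumption about what a host would do, not code from the patch):

```c
#include <stdint.h>
#include <stdio.h>

#define PFN_SHIFT 12	/* stands in for VIRTIO_BALLOON_PFN_SHIFT */
#define PG_SHIFT  12	/* stands in for PAGE_SHIFT */

/* Guest side: pack address and order, as the patch does. The block is
 * at least 4K-aligned, so the low 12 bits are free to carry the order
 * (expressed in 4K-page units). */
static uint64_t pack_hint(uint64_t pa, unsigned int order)
{
	return pa | (order + PG_SHIFT - PFN_SHIFT);
}

int main(void)
{
	/* hypothetical 2 MiB free block (order 9) at physical 1 GiB */
	uint64_t hint  = pack_hint(0x40000000ULL, 9);

	/* Host side (assumed): split the hint back apart */
	uint64_t order = hint & ((1ULL << PFN_SHIFT) - 1);
	uint64_t pa    = hint & ~((1ULL << PFN_SHIFT) - 1);

	/* prints: hint=0x40000009 pa=0x40000000 len=2097152 */
	printf("hint=0x%llx pa=0x%llx len=%llu\n",
	       (unsigned long long)hint, (unsigned long long)pa,
	       (unsigned long long)1 << (order + PFN_SHIFT));
	return 0;
}
```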
On 7/16/19 2:55 AM, Michael S. Tsirkin wrote:
> The approach here is very close to what the on-demand hinting that is
> already upstream does.

Are you referring to the s390 (and powerpc) stuff that is hidden behind
arch_free_page()?
On 16.07.19 16:00, Dave Hansen wrote:
> On 7/16/19 2:55 AM, Michael S. Tsirkin wrote:
>> The approach here is very close to what the on-demand hinting that is
>> already upstream does.
>
> Are you referring to the s390 (and powerpc) stuff that is hidden behind
> arch_free_page()?

I assume Michael meant "free page reporting".
On 16.07.19 16:12, David Hildenbrand wrote:
> On 16.07.19 16:00, Dave Hansen wrote:
>> Are you referring to the s390 (and powerpc) stuff that is hidden behind
>> arch_free_page()?
>
> I assume Michael meant "free page reporting".

(https://lwn.net/Articles/759413/)
On 7/16/19 7:12 AM, David Hildenbrand wrote:
> On 16.07.19 16:00, Dave Hansen wrote:
>> Are you referring to the s390 (and powerpc) stuff that is hidden behind
>> arch_free_page()?
>
> I assume Michael meant "free page reporting".

Where is the page allocator integration? The set you linked to has 5
patches, but only 4 were merged. This one is missing:

https://lore.kernel.org/patchwork/patch/961038/
On Tuesday, July 16, 2019 10:41 PM, Hansen, Dave wrote:
> Where is the page allocator integration? The set you linked to has 5
> patches, but only 4 were merged. This one is missing:
>
> https://lore.kernel.org/patchwork/patch/961038/

For some reason, we used the regular page allocation to get pages from
the free list at that stage. This part could be improved by Alex or
Nitesh's approach.

The page address transmission from the balloon driver to the host
device could reuse what's upstreamed there. I think you could add a new
VIRTIO_BALLOON_CMD_xx for your usages.

Best,
Wei
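A new command would presumably slot into the command ID namespace the free-page-hint protocol already uses. A sketch only: the STOP/DONE values follow the merged free-page-hint code as I understand it, and the HINT entry is purely hypothetical:

```c
/* include/uapi/linux/virtio_balloon.h, sketch */
#define VIRTIO_BALLOON_CMD_ID_STOP	0	/* stop reporting free pages */
#define VIRTIO_BALLOON_CMD_ID_DONE	1	/* reporting pass finished */
#define VIRTIO_BALLOON_CMD_ID_HINT	2	/* hypothetical: on-demand hint pass */
```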
On 16.07.19 16:41, Dave Hansen wrote:
> Where is the page allocator integration? The set you linked to has 5
> patches, but only 4 were merged. This one is missing:
>
> https://lore.kernel.org/patchwork/patch/961038/

I don't recall which version was actually merged (there were too many :) ).
I think it was v37:

https://lore.kernel.org/patchwork/cover/977804/

And I remember that there was a comment from Linus that led to the patch
you mentioned being dropped.
On Tue, Jul 16, 2019 at 04:17:13PM +0200, David Hildenbrand wrote:
> On 16.07.19 16:12, David Hildenbrand wrote:
> > I assume Michael meant "free page reporting".
> >
> > (https://lwn.net/Articles/759413/)

Yes - VIRTIO_BALLOON_F_FREE_PAGE_HINT.
On Tue, Jul 16, 2019 at 2:55 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Wed, Jun 19, 2019 at 03:33:38PM -0700, Alexander Duyck wrote:
> > From: Alexander Duyck <alexander.h.duyck@linux.intel.com>
> >
> > Add support for aerating memory using the hinting feature provided by
> > virtio-balloon. [...]
>
> [full patch quoted above, snipped]
>
> The approach here is very close to what the on-demand hinting that is
> already upstream does. This should have resulted in most of the code
> being shared, but that does not seem to happen here.
>
> Can we unify the code in some way? It can still use a separate feature
> flag, but there are things I like very much about the current hinting
> code, such as using s/g instead of passing PFNs in a buffer.
>
> If this doesn't work, could you elaborate on why?

As far as sending a scatter-gather goes, that shouldn't be too much of
an issue; however, I need to double check that I will still be able to
keep the completions as a single block.

One significant spot where the VIRTIO_BALLOON_F_FREE_PAGE_HINT code and
my code differ: my code is processing a fixed, discrete block of pages
at a time, whereas the FREE_PAGE_HINT code is slurping up all available
high-order memory and stuffing it into a giant balloon, and has more of
a streaming setup, as it doesn't return things until either forced to
by the shrinker or once it has processed all available memory.

The basic idea with the bubble hinting was to essentially create mini
balloons. As such I had based the code off of the balloon inflation
code. The only spot where it really differs is that I needed the
ability to pass higher-order pages, so I tweaked things and passed
"hints" instead of "pfns".
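To make the "mini balloon" round trip concrete, here is a sketch of what one batch looks like from the aerator core's side, per the commit message. The aerator_dev_info fields and virtballoon_aerator_react() are from the posted series; aerator_fill() and aerator_drain() are hypothetical stand-ins for the mm-side patches not shown in this thread:

```c
/*
 * One "mini balloon" pass: pages are inaccessible only for the
 * duration of the react() call, then go straight back to the buddy
 * free lists.
 */
static void aerate_one_batch(struct aerator_dev_info *a_dev_info)
{
	/* 1. Pull up to ->capacity high-order pages off the free lists
	 *    (hypothetical mm-side helper). */
	aerator_fill(&a_dev_info->batch, a_dev_info->capacity);

	/* 2. Hand the batch to the driver; for virtio-balloon this is
	 *    virtballoon_aerator_react(), which blocks until the host
	 *    acks the whole batch. */
	a_dev_info->react(a_dev_info);

	/* 3. Return every page in the batch to its free list
	 *    (hypothetical mm-side helper). */
	aerator_drain(&a_dev_info->batch);
}
```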
On Tue, Jul 16, 2019 at 08:37:06AM -0700, Alexander Duyck wrote:
> On Tue, Jul 16, 2019 at 2:55 AM Michael S. Tsirkin <mst@redhat.com> wrote:
> >
> > [full patch and review comments quoted above, snipped]
>
> As far as sending a scatter-gather goes, that shouldn't be too much of
> an issue; however, I need to double check that I will still be able to
> keep the completions as a single block.
>
> One significant spot where the VIRTIO_BALLOON_F_FREE_PAGE_HINT code and
> my code differ: my code is processing a fixed, discrete block of pages
> at a time, whereas the FREE_PAGE_HINT code is slurping up all available
> high-order memory and stuffing it into a giant balloon, and has more of
> a streaming setup, as it doesn't return things until either forced to
> by the shrinker or once it has processed all available memory.

This is what I am saying. Having watched that patchset being developed,
I think that's simply because processing blocks required mm core
changes, which Wei was not up to pushing through.

If we did

	while (1) {
		alloc_pages
		add_buf
		get_buf
		free_pages
	}

we'd end up passing the same page to the balloon again and again. So we
end up reserving lots of memory with alloc_pages instead.

What I am saying is that now that you are developing infrastructure to
iterate over free pages, FREE_PAGE_HINT should be able to use it too.
Whether that's possible might be a good indication of whether the new
mm APIs make sense.

> The basic idea with the bubble hinting was to essentially create mini
> balloons. As such I had based the code off of the balloon inflation
> code. The only spot where it really differs is that I needed the
> ability to pass higher-order pages, so I tweaked things and passed
> "hints" instead of "pfns".

And that is fine. But there isn't really such a big difference from
FREE_PAGE_HINT, except that FREE_PAGE_HINT triggers upon host request
and not in response to guest load.
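Spelling Michael's loop out in driver terms may help show the failure mode. A hypothetical sketch: VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG appears in the patch context above, VIRTIO_BALLOON_FREE_PAGE_ORDER follows the merged FREE_PAGE_HINT driver, and the function itself is illustrative only:

```c
static void report_free_pages_naive(struct virtio_balloon *vb)
{
	struct virtqueue *vq = vb->free_page_vq;
	struct scatterlist sg;
	unsigned int unused;
	struct page *page;

	while ((page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
				   VIRTIO_BALLOON_FREE_PAGE_ORDER))) {
		/* add_buf: hand the chunk to the host */
		sg_init_table(&sg, 1);
		sg_set_page(&sg, page,
			    PAGE_SIZE << VIRTIO_BALLOON_FREE_PAGE_ORDER, 0);
		virtqueue_add_inbuf(vq, &sg, 1, page, GFP_KERNEL);
		virtqueue_kick(vq);

		/* get_buf: wait until the host has consumed it */
		wait_event(vb->acked, virtqueue_get_buf(vq, &unused));

		/*
		 * free_pages: the chunk goes back on the buddy free list,
		 * so the next alloc_pages() may return the same chunk and
		 * the loop re-reports pages indefinitely. FREE_PAGE_HINT
		 * avoids this by keeping pages allocated until the whole
		 * pass completes, which is the large reservation Michael
		 * describes.
		 */
		__free_pages(page, VIRTIO_BALLOON_FREE_PAGE_ORDER);
	}
}
```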
On Tue, Jul 16, 2019 at 03:01:52PM +0000, Wang, Wei W wrote:
> On Tuesday, July 16, 2019 10:41 PM, Hansen, Dave wrote:
> > Where is the page allocator integration? The set you linked to has 5
> > patches, but only 4 were merged. This one is missing:
> >
> > https://lore.kernel.org/patchwork/patch/961038/
>
> For some reason, we used the regular page allocation to get pages
> from the free list at that stage.

This is what Linus suggested, that is why:

https://lkml.org/lkml/2018/6/27/461
and
https://lkml.org/lkml/2018/7/11/795

See also https://lkml.org/lkml/2018/7/10/1157 for some failed attempts
to upstream mm core changes related to this.

> This part could be improved by Alex or Nitesh's approach.
>
> The page address transmission from the balloon driver to the host
> device could reuse what's upstreamed there. I think you could add a
> new VIRTIO_BALLOON_CMD_xx for your usages.
On Tue, Jul 16, 2019 at 9:08 AM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> [earlier quotes snipped]
>
> If we did
>
> 	while (1) {
> 		alloc_pages
> 		add_buf
> 		get_buf
> 		free_pages
> 	}
>
> we'd end up passing the same page to the balloon again and again. So we
> end up reserving lots of memory with alloc_pages instead.
>
> What I am saying is that now that you are developing infrastructure to
> iterate over free pages, FREE_PAGE_HINT should be able to use it too.
> Whether that's possible might be a good indication of whether the new
> mm APIs make sense.

The problem is the infrastructure as implemented isn't designed to do
that. I am pretty certain this interface will have issues with being
given small blocks to process at a time.

Basically the design for the FREE_PAGE_HINT feature doesn't really have
the concept of doing things a bit at a time. It is either filling,
stopped, or done. From what I can tell, it requires a configuration
change for the virtio balloon interface to toggle between those states.

> > The basic idea with the bubble hinting was to essentially create mini
> > balloons. [...]
>
> And that is fine. But there isn't really such a big difference from
> FREE_PAGE_HINT, except that FREE_PAGE_HINT triggers upon host request
> and not in response to guest load.

I disagree, I believe there is a significant difference. The
FREE_PAGE_HINT code was implemented to be more of a streaming
interface. This is one of the things Linus kept complaining about in
his comments. This code attempts to pull in ALL of the higher-order
pages, not just a smaller block of them. Honestly, the difference is
more in the hypervisor interface than in what is needed for the kernel
interface; however, the design of the hypervisor interface would make
doing things more incrementally much more difficult.

With that said, I will take a look into at least using the
scatter-gather interface directly rather than sending the list. I think
I can probably do that much. However, it will actually reduce code
reuse, as I have to check and verify the pages have been processed
before I can free them back to the host.

- Alex
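For reference, the scatter-gather variant Alex mentions might look roughly like the following: one scatterlist entry per page in the batch, still submitted and completed as a single request. This is a hypothetical rework reusing names from the posted series, not code from any patch:

```c
static void virtballoon_hint_batch_sg(struct virtio_balloon *vb)
{
	/* capacity is small (32), so a stack table is tolerable here */
	struct scatterlist sgl[VIRTIO_BALLOON_ARRAY_HINTS_MAX];
	struct virtqueue *vq = vb->hinting_vq;
	unsigned int unused, nents = 0;
	struct page *page;

	sg_init_table(sgl, VIRTIO_BALLOON_ARRAY_HINTS_MAX);

	list_for_each_entry(page, &vb->a_dev_info.batch, lru) {
		/* each entry carries the page address and its true length;
		 * the posted series stores the order in page_private() */
		sg_set_page(&sgl[nents++], page,
			    PAGE_SIZE << page_private(page), 0);
	}

	if (WARN_ON(nents == 0))
		return;
	sg_mark_end(&sgl[nents - 1]);

	/* one request covering the whole batch keeps a single completion */
	virtqueue_add_outbuf(vq, sgl, nents, vb, GFP_KERNEL);
	virtqueue_kick(vq);
	wait_event(vb->acked, virtqueue_get_buf(vq, &unused));
}
```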
On Tue, Jul 16, 2019 at 09:54:37AM -0700, Alexander Duyck wrote: > On Tue, Jul 16, 2019 at 9:08 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > On Tue, Jul 16, 2019 at 08:37:06AM -0700, Alexander Duyck wrote: > > > On Tue, Jul 16, 2019 at 2:55 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > On Wed, Jun 19, 2019 at 03:33:38PM -0700, Alexander Duyck wrote: > > > > > From: Alexander Duyck <alexander.h.duyck@linux.intel.com> > > > > > > > > > > Add support for aerating memory using the hinting feature provided by > > > > > virtio-balloon. Hinting differs from the regular balloon functionality in > > > > > that is is much less durable than a standard memory balloon. Instead of > > > > > creating a list of pages that cannot be accessed the pages are only > > > > > inaccessible while they are being indicated to the virtio interface. Once > > > > > the interface has acknowledged them they are placed back into their > > > > > respective free lists and are once again accessible by the guest system. > > > > > > > > > > Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com> > > > > > --- > > > > > drivers/virtio/Kconfig | 1 > > > > > drivers/virtio/virtio_balloon.c | 110 ++++++++++++++++++++++++++++++++++- > > > > > include/uapi/linux/virtio_balloon.h | 1 > > > > > 3 files changed, 108 insertions(+), 4 deletions(-) > > > > > > > > > > diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig > > > > > index 023fc3bc01c6..9cdaccf92c3a 100644 > > > > > --- a/drivers/virtio/Kconfig > > > > > +++ b/drivers/virtio/Kconfig > > > > > @@ -47,6 +47,7 @@ config VIRTIO_BALLOON > > > > > tristate "Virtio balloon driver" > > > > > depends on VIRTIO > > > > > select MEMORY_BALLOON > > > > > + select AERATION > > > > > ---help--- > > > > > This driver supports increasing and decreasing the amount > > > > > of memory within a KVM guest. > > > > > diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c > > > > > index 44339fc87cc7..91f1e8c9017d 100644 > > > > > --- a/drivers/virtio/virtio_balloon.c > > > > > +++ b/drivers/virtio/virtio_balloon.c > > > > > @@ -18,6 +18,7 @@ > > > > > #include <linux/mm.h> > > > > > #include <linux/mount.h> > > > > > #include <linux/magic.h> > > > > > +#include <linux/memory_aeration.h> > > > > > > > > > > /* > > > > > * Balloon device works in 4K page units. 
So each page is pointed to by > > > > > @@ -26,6 +27,7 @@ > > > > > */ > > > > > #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT) > > > > > #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256 > > > > > +#define VIRTIO_BALLOON_ARRAY_HINTS_MAX 32 > > > > > #define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80 > > > > > > > > > > #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \ > > > > > @@ -45,6 +47,7 @@ enum virtio_balloon_vq { > > > > > VIRTIO_BALLOON_VQ_DEFLATE, > > > > > VIRTIO_BALLOON_VQ_STATS, > > > > > VIRTIO_BALLOON_VQ_FREE_PAGE, > > > > > + VIRTIO_BALLOON_VQ_HINTING, > > > > > VIRTIO_BALLOON_VQ_MAX > > > > > }; > > > > > > > > > > @@ -54,7 +57,8 @@ enum virtio_balloon_config_read { > > > > > > > > > > struct virtio_balloon { > > > > > struct virtio_device *vdev; > > > > > - struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq; > > > > > + struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq, > > > > > + *hinting_vq; > > > > > > > > > > /* Balloon's own wq for cpu-intensive work items */ > > > > > struct workqueue_struct *balloon_wq; > > > > > @@ -103,9 +107,21 @@ struct virtio_balloon { > > > > > /* Synchronize access/update to this struct virtio_balloon elements */ > > > > > struct mutex balloon_lock; > > > > > > > > > > - /* The array of pfns we tell the Host about. */ > > > > > - unsigned int num_pfns; > > > > > - __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX]; > > > > > + > > > > > + union { > > > > > + /* The array of pfns we tell the Host about. */ > > > > > + struct { > > > > > + unsigned int num_pfns; > > > > > + __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX]; > > > > > + }; > > > > > + /* The array of physical addresses we are hinting on */ > > > > > + struct { > > > > > + unsigned int num_hints; > > > > > + __virtio64 hints[VIRTIO_BALLOON_ARRAY_HINTS_MAX]; > > > > > + }; > > > > > + }; > > > > > + > > > > > + struct aerator_dev_info a_dev_info; > > > > > > > > > > /* Memory statistics */ > > > > > struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR]; > > > > > @@ -151,6 +167,68 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) > > > > > > > > > > } > > > > > > > > > > +static u64 page_to_hints_pa_order(struct page *page) > > > > > +{ > > > > > + unsigned char order; > > > > > + dma_addr_t pa; > > > > > + > > > > > + BUILD_BUG_ON((64 - VIRTIO_BALLOON_PFN_SHIFT) >= > > > > > + (1 << VIRTIO_BALLOON_PFN_SHIFT)); > > > > > + > > > > > + /* > > > > > + * Record physical page address combined with page order. > > > > > + * Order will never exceed 64 - VIRTIO_BALLON_PFN_SHIFT > > > > > + * since the size has to fit into a 64b value. So as long > > > > > + * as VIRTIO_BALLOON_SHIFT is greater than this combining > > > > > + * the two values should be safe. 
> > > > > + */ > > > > > + pa = page_to_phys(page); > > > > > + order = page_private(page) + > > > > > + PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT; > > > > > + > > > > > + return (u64)(pa | order); > > > > > +} > > > > > + > > > > > +void virtballoon_aerator_react(struct aerator_dev_info *a_dev_info) > > > > > +{ > > > > > + struct virtio_balloon *vb = container_of(a_dev_info, > > > > > + struct virtio_balloon, > > > > > + a_dev_info); > > > > > + struct virtqueue *vq = vb->hinting_vq; > > > > > + struct scatterlist sg; > > > > > + unsigned int unused; > > > > > + struct page *page; > > > > > + > > > > > + mutex_lock(&vb->balloon_lock); > > > > > + > > > > > + vb->num_hints = 0; > > > > > + > > > > > + list_for_each_entry(page, &a_dev_info->batch, lru) { > > > > > + vb->hints[vb->num_hints++] = > > > > > + cpu_to_virtio64(vb->vdev, > > > > > + page_to_hints_pa_order(page)); > > > > > + } > > > > > + > > > > > + /* We shouldn't have been called if there is nothing to process */ > > > > > + if (WARN_ON(vb->num_hints == 0)) > > > > > + goto out; > > > > > + > > > > > + sg_init_one(&sg, vb->hints, > > > > > + sizeof(vb->hints[0]) * vb->num_hints); > > > > > + > > > > > + /* > > > > > + * We should always be able to add one buffer to an > > > > > + * empty queue. > > > > > + */ > > > > > + virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); > > > > > + virtqueue_kick(vq); > > > > > + > > > > > + /* When host has read buffer, this completes via balloon_ack */ > > > > > + wait_event(vb->acked, virtqueue_get_buf(vq, &unused)); > > > > > +out: > > > > > + mutex_unlock(&vb->balloon_lock); > > > > > +} > > > > > + > > > > > static void set_page_pfns(struct virtio_balloon *vb, > > > > > __virtio32 pfns[], struct page *page) > > > > > { > > > > > @@ -475,6 +553,7 @@ static int init_vqs(struct virtio_balloon *vb) > > > > > names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate"; > > > > > names[VIRTIO_BALLOON_VQ_STATS] = NULL; > > > > > names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; > > > > > + names[VIRTIO_BALLOON_VQ_HINTING] = NULL; > > > > > > > > > > if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { > > > > > names[VIRTIO_BALLOON_VQ_STATS] = "stats"; > > > > > @@ -486,11 +565,19 @@ static int init_vqs(struct virtio_balloon *vb) > > > > > callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; > > > > > } > > > > > > > > > > + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING)) { > > > > > + names[VIRTIO_BALLOON_VQ_HINTING] = "hinting_vq"; > > > > > + callbacks[VIRTIO_BALLOON_VQ_HINTING] = balloon_ack; > > > > > + } > > > > > + > > > > > err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, > > > > > vqs, callbacks, names, NULL, NULL); > > > > > if (err) > > > > > return err; > > > > > > > > > > + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING)) > > > > > + vb->hinting_vq = vqs[VIRTIO_BALLOON_VQ_HINTING]; > > > > > + > > > > > vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE]; > > > > > vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE]; > > > > > if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { > > > > > @@ -929,12 +1016,24 @@ static int virtballoon_probe(struct virtio_device *vdev) > > > > > if (err) > > > > > goto out_del_balloon_wq; > > > > > } > > > > > + > > > > > + vb->a_dev_info.react = virtballoon_aerator_react; > > > > > + vb->a_dev_info.capacity = VIRTIO_BALLOON_ARRAY_HINTS_MAX; > > > > > + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING)) { > > > > > + err = aerator_startup(&vb->a_dev_info); > > > > > + if (err) > > > > > + goto out_unregister_shrinker; > 
> > > > + } > > > > > + > > > > > virtio_device_ready(vdev); > > > > > > > > > > if (towards_target(vb)) > > > > > virtballoon_changed(vdev); > > > > > return 0; > > > > > > > > > > +out_unregister_shrinker: > > > > > + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) > > > > > + virtio_balloon_unregister_shrinker(vb); > > > > > out_del_balloon_wq: > > > > > if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) > > > > > destroy_workqueue(vb->balloon_wq); > > > > > @@ -963,6 +1062,8 @@ static void virtballoon_remove(struct virtio_device *vdev) > > > > > { > > > > > struct virtio_balloon *vb = vdev->priv; > > > > > > > > > > + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING)) > > > > > + aerator_shutdown(); > > > > > if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) > > > > > virtio_balloon_unregister_shrinker(vb); > > > > > spin_lock_irq(&vb->stop_update_lock); > > > > > @@ -1032,6 +1133,7 @@ static int virtballoon_validate(struct virtio_device *vdev) > > > > > VIRTIO_BALLOON_F_DEFLATE_ON_OOM, > > > > > VIRTIO_BALLOON_F_FREE_PAGE_HINT, > > > > > VIRTIO_BALLOON_F_PAGE_POISON, > > > > > + VIRTIO_BALLOON_F_HINTING, > > > > > }; > > > > > > > > > > static struct virtio_driver virtio_balloon_driver = { > > > > > diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h > > > > > index a1966cd7b677..2b0f62814e22 100644 > > > > > --- a/include/uapi/linux/virtio_balloon.h > > > > > +++ b/include/uapi/linux/virtio_balloon.h > > > > > @@ -36,6 +36,7 @@ > > > > > #define VIRTIO_BALLOON_F_DEFLATE_ON_OOM 2 /* Deflate balloon on OOM */ > > > > > #define VIRTIO_BALLOON_F_FREE_PAGE_HINT 3 /* VQ to report free pages */ > > > > > #define VIRTIO_BALLOON_F_PAGE_POISON 4 /* Guest is using page poisoning */ > > > > > +#define VIRTIO_BALLOON_F_HINTING 5 /* Page hinting virtqueue */ > > > > > > > > > > /* Size of a PFN in the balloon interface. */ > > > > > #define VIRTIO_BALLOON_PFN_SHIFT 12 > > > > > > > > > > > > > > > > The approach here is very close to what the on-demand hinting that is > > > > already upstream does. > > > > > > > > This should have resulted in most of the code being shared > > > > but this does not seem to happen here. > > > > > > > > Can we unify the code in some way? > > > > It can still use a separate feature flag, but there are things > > > > I like very much about the current hinting code, such as > > > > using s/g instead of passing PFNs in a buffer. > > > > > > > > If this doesn't work could you elaborate on why? > > > > > > As far as sending a scatter-gather goes, that shouldn't be too much of an > > > issue, however I need to double check that I will still be able to > > > keep the completions as a single block. > > > > > > There is one significant spot where the "VIRTIO_BALLOON_F_FREE_PAGE_HINT" code > > > and my code differ. My code is processing a fixed discrete block of > > > pages at a time, whereas the FREE_PAGE_HINT code is slurping up all > > > available high-order memory and stuffing it into a giant balloon and > > > has more of a streaming setup as it doesn't return things until either > > > forced to by the shrinker or once it has processed all available > > > memory. > > This is what I am saying. Having watched that patchset being developed, > > I think that's simply because processing blocks required mm core > > changes, which Wei was not up to pushing through.
> > If we did > > while (1) { > > alloc_pages > > add_buf > > get_buf > > free_pages > > } > > We'd end up passing the same page to balloon again and again. > > So we end up reserving lots of memory with alloc_pages instead. > > What I am saying is that now that you are developing > > infrastructure to iterate over free pages, > > FREE_PAGE_HINT should be able to use it too. > > Whether that's possible might be a good indication of > > whether the new mm APIs make sense. > > The problem is the infrastructure as implemented isn't designed to do > that. I am pretty certain this interface will have issues with being > given small blocks to process at a time. > > Basically the design for the FREE_PAGE_HINT feature doesn't really > have the concept of doing things a bit at a time. It is either > filling, stopped, or done. From what I can tell it requires a > configuration change for the virtio balloon interface to toggle > between those states. Maybe I misunderstand what you are saying. Filling state can definitely report things a bit at a time. It does not assume that all of guest free memory can fit in a VQ. > > > The basic idea with the bubble hinting was to essentially create mini > > > balloons. As such I had based the code off of the balloon inflation > > > code. The only spot where it really differs is that I needed the > > > ability to pass higher order pages so I tweaked things and passed > > > "hints" instead of "pfns". > > > > And that is fine. But there isn't really such a big difference with > > FREE_PAGE_HINT except FREE_PAGE_HINT triggers upon host request and not > > in response to guest load. > > I disagree, I believe there is a significant difference. Yes there is, I just don't think it's in the iteration. The iteration seems to be useful to hinting. > The > FREE_PAGE_HINT code was implemented to be more of a streaming > interface. It's implemented like this but it does not follow from the interface. The implementation is a combination of attempts to minimize # of exits and minimize mm core changes. > This is one of the things Linus kept complaining about in > his comments. This code attempts to pull in ALL of the higher order > pages, not just a smaller block of them. It wants to report all higher order pages eventually, yes. But it's absolutely fine to report a chunk and then wait for host to process the chunk before reporting more. However, interfaces we came up with for this would call into virtio with a bunch of locks taken. The solution was to take pages off the free list completely. That in turn means we can't return them until we have processed all free memory. > Honestly the difference is > mostly in the hypervisor interface rather than what is needed for the kernel > interface, however the design of the hypervisor interface would make > doing things more incrementally much more difficult. OK that's interesting. The hypervisor interface is not documented in the spec yet. Let me take a stab at a writeup now. So: - hypervisor requests reporting by modifying command ID field in config space, and interrupting guest - in response, guest sends the command ID value on a special free page hinting VQ, followed by any number of buffers. Each buffer is assumed to be the address and length of memory that was unused *at some point after the time when command ID was sent*. Note that hypervisor takes pains to handle the case where memory is actually no longer free by the time it gets the memory.
This allows guest driver to take more liberties and free pages without waiting for the host to use the buffers. This is also one of the reasons we call this a free page hint - the guarantee that page is free is a weak one, in that sense it's more of a hint than a promise. That helps guarantee we don't create OOM out of the blue. - guest eventually sends a special buffer signalling to host that it's done sending free pages. It then stops reporting until command id changes. - host can restart the process at any time by updating command ID. That will make guest stop and start from the beginning. - host can also stop the process by specifying a special command ID value. ========= Now let's compare to what you have here: - At any time after boot, guest walks over free memory and sends addresses as buffers to the host - Memory reported is then guaranteed to be unused until host has used the buffers Is above a fair summary? So yes there's a difference but the specific bit of chunking is same imho. > With that said I will take a look into at least using the scatter > gather interface directly rather than sending the list. I think I can > probably do that much. However it will actually reduce code reuse as I > have to check and verify the pages have been processed before I can > free them back to the guest. > > - Alex
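For illustration, the guest-side cycle described in the writeup above might look roughly like this (a minimal sketch only; send_cmd_id(), have_free_pages(), send_free_page_range() and cmd_id_changed() are hypothetical helpers, and the exact config field name is an assumption rather than the upstream implementation):

	/*
	 * Sketch of one FREE_PAGE_HINT run as described in the writeup.
	 * All helpers are hypothetical; only the virtio config accessor
	 * and the STOP command ID are taken from the real interface.
	 */
	static void free_page_hint_run(struct virtio_balloon *vb)
	{
		u32 cmd_id = virtio_cread32(vb->vdev,
				offsetof(struct virtio_balloon_config,
					 free_page_report_cmd_id));

		/* Echo the command ID so the host can match this run. */
		send_cmd_id(vb, cmd_id);

		/*
		 * Stream buffers describing memory that was free at some
		 * point after cmd_id was read.  Since the host ignores
		 * hints for pages that have since been dirtied, pages can
		 * go back to the allocator without waiting for the host.
		 */
		while (have_free_pages(vb) && !cmd_id_changed(vb, cmd_id))
			send_free_page_range(vb);

		/* A special buffer tells the host this run is done. */
		send_cmd_id(vb, VIRTIO_BALLOON_CMD_ID_STOP);
	}

The key property the sketch tries to show is the one stressed in the writeup: correctness comes from the host ignoring stale hints, not from the guest holding pages back.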
On Tue, Jul 16, 2019 at 10:41 AM Michael S. Tsirkin <mst@redhat.com> wrote: <snip> > > > This is what I am saying. Having watched that patchset being developed, > > > I think that's simply because processing blocks required mm core > > > changes, which Wei was not up to pushing through. > > > > > > > > > If we did > > > > > > while (1) { > > > alloc_pages > > > add_buf > > > get_buf > > > free_pages > > > } > > > > > > We'd end up passing the same page to balloon again and again. > > > > > > So we end up reserving lots of memory with alloc_pages instead. > > > > > > What I am saying is that now that you are developing > > > infrastructure to iterate over free pages, > > > FREE_PAGE_HINT should be able to use it too. > > > Whether that's possible might be a good indication of > > > whether the new mm APIs make sense. > > > > The problem is the infrastructure as implemented isn't designed to do > > that. I am pretty certain this interface will have issues with being > > given small blocks to process at a time. > > > > Basically the design for the FREE_PAGE_HINT feature doesn't really > > have the concept of doing things a bit at a time. It is either > > filling, stopped, or done. From what I can tell it requires a > > configuration change for the virtio balloon interface to toggle > > between those states. > > Maybe I misunderstand what you are saying. > > Filling state can definitely report things > a bit at a time. It does not assume that > all of guest free memory can fit in a VQ. I think where you and I may differ is that you are okay with just pulling pages until you hit OOM, or allocation failures. Do I have that right? In my mind I am wanting to perform the hinting on a small block at a time and work through things iteratively. The problem is the FREE_PAGE_HINT doesn't have the option of returning pages until all pages have been pulled. It is run to completion and will keep filling the balloon until an allocation fails and the host says it is done. I would prefer to avoid that as I prefer to simply notify the host of a fixed block of pages at a time and let it process without having to have a thread on each side actively pushing pages, or listening for the incoming pages. > > > > The basic idea with the bubble hinting was to essentially create mini > > > > balloons. As such I had based the code off of the balloon inflation > > > > code. The only spot where it really differs is that I needed the > > > > ability to pass higher order pages so I tweaked thinks and passed > > > > "hints" instead of "pfns". > > > > > > And that is fine. But there isn't really such a big difference with > > > FREE_PAGE_HINT except FREE_PAGE_HINT triggers upon host request and not > > > in response to guest load. > > > > I disagree, I believe there is a significant difference. > > Yes there is, I just don't think it's in the iteration. > The iteration seems to be useful to hinting. I agree that iteration is useful to hinting. The problem is the FREE_PAGE_HINT code isn't really designed to be iterative. It is designed to run with a polling thread on each side and it is meant to be run to completion. > > The > > FREE_PAGE_HINT code was implemented to be more of a streaming > > interface. > > It's implemented like this but it does not follow from > the interface. The implementation is a combination of > attempts to minimize # of exits and minimize mm core changes. The problem is the interface doesn't have a good way of indicating that it is done with a block of pages. 
So what I am probably looking at if I do a sg implementation for my hinting is to provide one large sg block for all 32 of the pages I might be holding. I'm assuming that will still be processed as one contiguous block. With that I can then at least maintain a single response per request. > > This is one of the things Linus kept complaining about in > > his comments. This code attempts to pull in ALL of the higher order > > pages, not just a smaller block of them. > > It wants to report all higher order pages eventually, yes. > But it's absolutely fine to report a chunk and then wait > for host to process the chunk before reporting more. > > However, interfaces we came up with for this would call > into virtio with a bunch of locks taken. > The solution was to take pages off the free list completely. > That in turn means we can't return them until > we have processed all free memory. I get that. The problem is the interface is designed around run to completion. For example it will sit there in a busy loop waiting for a free buffer because it knows the other side is supposed to be processing the pages already. > > Honestly the difference is > > mostly in the hypervisor interface rather than what is needed for the kernel > > interface, however the design of the hypervisor interface would make > > doing things more incrementally much more difficult. > > OK that's interesting. The hypervisor interface is not > documented in the spec yet. Let me take a stab at a writeup now. So: > > > > - hypervisor requests reporting by modifying command ID > field in config space, and interrupting guest > > - in response, guest sends the command ID value on a special > free page hinting VQ, > followed by any number of buffers. Each buffer is assumed > to be the address and length of memory that was > unused *at some point after the time when command ID was sent*. > > Note that hypervisor takes pains to handle the case > where memory is actually no longer free by the time > it gets the memory. > This allows guest driver to take more liberties > and free pages without waiting for the host to > use the buffers. > > This is also one of the reasons we call this a free page hint - > the guarantee that page is free is a weak one, > in that sense it's more of a hint than a promise. > That helps guarantee we don't create OOM out of the blue. > > - guest eventually sends a special buffer signalling to > host that it's done sending free pages. > It then stops reporting until command id changes. The pages are not freed back to the guest until the host reports that it is "DONE" via a configuration change. Doing that stops any further progress, and attempting to resume will just restart from the beginning. The big piece this design is missing is the incremental notification that pages have been processed. The existing code just fills the vq with pages and keeps doing it until it cannot allocate any more pages. We would have to add logic to stop, flush, and resume to the existing framework. > - host can restart the process at any time by > updating command ID. That will make guest stop > and start from the beginning. > > - host can also stop the process by specifying a special > command ID value. > > > ========= > > > Now let's compare to what you have here: > > - At any time after boot, guest walks over free memory and sends > addresses as buffers to the host > > - Memory reported is then guaranteed to be unused > until host has used the buffers > > > Is above a fair summary?
> > So yes there's a difference but the specific bit of chunking is same > imho. The big difference is that I am returning the pages after they are processed, while FREE_PAGE_HINT doesn't and isn't designed to. The problem is the interface doesn't allow for a good way to identify that any given block of pages has been processed and can be returned. Instead pages go in, but they don't come out until the configuration is changed and "DONE" is reported. The act of reporting "DONE" will reset things and start them all over which kind of defeats the point.
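As a concrete reference for the single-response s/g idea above, here is a minimal sketch (illustrative only, not from the series; it reuses the batch list and the page_private() order encoding of the posted patch, and assumes it runs in virtballoon_aerator_react()'s place under the balloon lock):

	/*
	 * Sketch: report one fixed batch as a single request with one
	 * s/g entry per page, so the whole batch still completes as a
	 * single buffer and keeps one response per request.
	 */
	struct scatterlist sgl[VIRTIO_BALLOON_ARRAY_HINTS_MAX];
	unsigned int nents = 0, i = 0, unused;
	struct page *page;

	list_for_each_entry(page, &a_dev_info->batch, lru)
		nents++;

	sg_init_table(sgl, nents);
	list_for_each_entry(page, &a_dev_info->batch, lru)
		sg_set_page(&sgl[i++], page,
			    PAGE_SIZE << page_private(page), 0);

	virtqueue_add_outbuf(vq, sgl, nents, vb, GFP_KERNEL);
	virtqueue_kick(vq);

	/* One completion still covers the entire batch. */
	wait_event(vb->acked, virtqueue_get_buf(vq, &unused));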
On Tue, Jul 16, 2019 at 02:06:59PM -0700, Alexander Duyck wrote: > On Tue, Jul 16, 2019 at 10:41 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > <snip> > > > > > This is what I am saying. Having watched that patchset being developed, > > > > I think that's simply because processing blocks required mm core > > > > changes, which Wei was not up to pushing through. > > > > > > > > > > > > If we did > > > > > > > > while (1) { > > > > alloc_pages > > > > add_buf > > > > get_buf > > > > free_pages > > > > } > > > > > > > > We'd end up passing the same page to balloon again and again. > > > > > > > > So we end up reserving lots of memory with alloc_pages instead. > > > > > > > > What I am saying is that now that you are developing > > > > infrastructure to iterate over free pages, > > > > FREE_PAGE_HINT should be able to use it too. > > > > Whether that's possible might be a good indication of > > > > whether the new mm APIs make sense. > > > > > > The problem is the infrastructure as implemented isn't designed to do > > > that. I am pretty certain this interface will have issues with being > > > given small blocks to process at a time. > > > > > > Basically the design for the FREE_PAGE_HINT feature doesn't really > > > have the concept of doing things a bit at a time. It is either > > > filling, stopped, or done. From what I can tell it requires a > > > configuration change for the virtio balloon interface to toggle > > > between those states. > > > > Maybe I misunderstand what you are saying. > > > > Filling state can definitely report things > > a bit at a time. It does not assume that > > all of guest free memory can fit in a VQ. > > I think where you and I may differ is that you are okay with just > pulling pages until you hit OOM, or allocation failures. Do I have > that right? This is exactly what the current code does. But that's an implementation detail which came about because we failed to find any other way to iterate over free blocks. > In my mind I am wanting to perform the hinting on a small > block at a time and work through things iteratively. > > The problem is the FREE_PAGE_HINT doesn't have the option of returning > pages until all pages have been pulled. It is run to completion and > will keep filling the balloon until an allocation fails and the host > says it is done. OK so there are two points. One is that FREE_PAGE_HINT does not need to allocate a page at all. It really just wants to iterate over free pages. The reason FREE_PAGE_HINT does not free up pages until we finished iterating over the free list is not a hypervisor API. The reason is we don't want to keep getting the same address over and over again. > I would prefer to avoid that as I prefer to simply > notify the host of a fixed block of pages at a time and let it process > without having to have a thread on each side actively pushing pages, > or listening for the incoming pages. Right. And FREE_PAGE_HINT can go even further. It can push a page and let linux use it immediately. It does not even need to wait for host to process anything unless the VQ gets full. > > > > > > The basic idea with the bubble hinting was to essentially create mini > > > > > balloons. As such I had based the code off of the balloon inflation > > > > > code. The only spot where it really differs is that I needed the > > > > > ability to pass higher order pages so I tweaked things and passed > > > > > "hints" instead of "pfns". > > > > > > > > And that is fine.
But there isn't really such a big difference with > > > > FREE_PAGE_HINT except FREE_PAGE_HINT triggers upon host request and not > > > > in response to guest load. > > > > > > I disagree, I believe there is a significant difference. > > > > Yes there is, I just don't think it's in the iteration. > > The iteration seems to be useful to hinting. > > I agree that iteration is useful to hinting. The problem is the > FREE_PAGE_HINT code isn't really designed to be iterative. It is > designed to run with a polling thread on each side and it is meant to > be run to completion. Absolutely. But that's a bug I think. > > > The > > > FREE_PAGE_HINT code was implemented to be more of a streaming > > > interface. > > > > It's implemented like this but it does not follow from > > the interface. The implementation is a combination of > > attempts to minimize # of exits and minimize mm core changes. > > The problem is the interface doesn't have a good way of indicating > that it is done with a block of pages. > > So what I am probably looking at if I do a sg implementation for my > hinting is to provide one large sg block for all 32 of the pages I > might be holding. Right now if you pass an sg it will try to allocate a buffer on demand for you. If this is a problem I could come up with a new API that lets caller allocate the buffer. Let me know. > I'm assuming that will still be processed as one > contiguous block. With that I can then at least maintain a single > response per request. Why do you care? Won't a counter of outstanding pages be enough? Down the road maybe we could actually try to pipeline things a bit. So send 32 pages once you get 16 of these back send 16 more. Better for SMP configs and does not hurt non-SMP too much. I am not saying we need to do it right away though. > > > This is one of the things Linus kept complaining about in > > > his comments. This code attempts to pull in ALL of the higher order > > > pages, not just a smaller block of them. > > > > It wants to report all higher order pages eventually, yes. > > But it's absolutely fine to report a chunk and then wait > > for host to process the chunk before reporting more. > > > > However, interfaces we came up with for this would call > > into virtio with a bunch of locks taken. > > The solution was to take pages off the free list completely. > > That in turn means we can't return them until > > we have processed all free memory. > > I get that. The problem is the interface is designed around run to > completion. For example it will sit there in a busy loop waiting for a > free buffer because it knows the other side is suppose to be > processing the pages already. I didn't get this part. > > > Honestly the difference is > > > mostly in the hypervisor interface than what is needed for the kernel > > > interface, however the design of the hypervisor interface would make > > > doing things more incrementally much more difficult. > > > > OK that's interesting. The hypervisor interface is not > > documented in the spec yet. Let me take a stub at a writeup now. So: > > > > > > > > - hypervisor requests reporting by modifying command ID > > field in config space, and interrupting guest > > > > - in response, guest sends the command ID value on a special > > free page hinting VQ, > > followed by any number of buffers. Each buffer is assumed > > to be the address and length of memory that was > > unused *at some point after the time when command ID was sent*. 
> > > > Note that hypervisor takes pains to handle the case > > where memory is actually no longer free by the time > > it gets the memory. > > This allows guest driver to take more liberties > > and free pages without waiting for the host to > > use the buffers. > > > > This is also one of the reasons we call this a free page hint - > > the guarantee that page is free is a weak one, > > in that sense it's more of a hint than a promise. > > That helps guarantee we don't create OOM out of the blue. I would like to stress the last paragraph above. > > > > - guest eventually sends a special buffer signalling to > > host that it's done sending free pages. > > It then stops reporting until command id changes. > > The pages are not freed back to the guest until the host reports that > it is "DONE" via a configuration change. Doing that stops any further > progress, and attempting to resume will just restart from the > beginning. Right but it's not a requirement. Host does not assume this at all. It's done like this simply because we can't iterate over pages with the existing API. > The big piece this design is missing is the incremental notification > that pages have been processed. The existing code just fills the vq with > pages and keeps doing it until it cannot allocate any more pages. We > would have to add logic to stop, flush, and resume to the existing > framework. But not to the hypervisor interface. Hypervisor is fine with pages being reused immediately. In fact, even before they are processed. > > - host can restart the process at any time by > > updating command ID. That will make guest stop > > and start from the beginning. > > > > - host can also stop the process by specifying a special > > command ID value. > > > > > > ========= > > > > > > Now let's compare to what you have here: > > > > - At any time after boot, guest walks over free memory and sends > > addresses as buffers to the host > > > > - Memory reported is then guaranteed to be unused > > until host has used the buffers > > > > > > Is above a fair summary? > > > > So yes there's a difference but the specific bit of chunking is same > > imho. > > The big difference is that I am returning the pages after they are > processed, while FREE_PAGE_HINT doesn't and isn't designed to. It doesn't but the hypervisor *is* designed to support that. > The > problem is the interface doesn't allow for a good way to identify that > any given block of pages has been processed and can be returned. And that's because FREE_PAGE_HINT does not care. It can return any page at any point even before hypervisor saw it. > Instead pages go in, but they don't come out until the configuration > is changed and "DONE" is reported. The act of reporting "DONE" will > reset things and start them all over which kind of defeats the point. Right. But if you consider how we are using the shrinker you will see that it's kind of broken. For example not keeping track of allocated pages means the count we return is broken while reporting is active. I looked at fixing it but really if we can just stop allocating memory that would be way cleaner. For example we allocate pages until shrinker kicks in. Fair enough but in fact maybe it would be better to do the reverse: trigger shrinker and then send as many free pages as we can to host.
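To make the "counter of outstanding pages" suggestion above concrete, here is one possible sketch (hypothetical code, not from the series; return_page_to_allocator() is an assumed helper, and the page pointer itself is used as the completion token):

	/*
	 * Sketch of pipelined hinting: each page is its own buffer and
	 * its own completion token, and a page goes back to the free
	 * lists only once its buffer has been used by the host.
	 */
	static void hint_pages_pipelined(struct virtio_balloon *vb,
					 struct list_head *pages)
	{
		struct virtqueue *vq = vb->hinting_vq;
		unsigned int outstanding = 0, unused;
		struct page *page, *done;
		struct scatterlist sg;

		list_for_each_entry(page, pages, lru) {
			sg_init_one(&sg, page_address(page),
				    PAGE_SIZE << page_private(page));
			if (!virtqueue_add_outbuf(vq, &sg, 1, page, GFP_KERNEL))
				outstanding++;
			virtqueue_kick(vq);

			/* Opportunistically reclaim finished pages. */
			while ((done = virtqueue_get_buf(vq, &unused))) {
				return_page_to_allocator(done); /* assumed */
				outstanding--;
			}
		}

		/* Drain the remaining completions. */
		while (outstanding) {
			wait_event(vb->acked,
				   (done = virtqueue_get_buf(vq, &unused)));
			return_page_to_allocator(done); /* assumed */
			outstanding--;
		}
	}

The point of the design is that the submit and completion sides can run ahead of each other, which is what allows sending more pages before all earlier ones have come back.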
On Wed, Jul 17, 2019 at 3:28 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Tue, Jul 16, 2019 at 02:06:59PM -0700, Alexander Duyck wrote: > > On Tue, Jul 16, 2019 at 10:41 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > <snip> > > > > > > > This is what I am saying. Having watched that patchset being developed, > > > > > I think that's simply because processing blocks required mm core > > > > > changes, which Wei was not up to pushing through. > > > > > > > > > > > > > > > If we did > > > > > > > > > > while (1) { > > > > > alloc_pages > > > > > add_buf > > > > > get_buf > > > > > free_pages > > > > > } > > > > > > > > > > We'd end up passing the same page to balloon again and again. > > > > > > > > > > So we end up reserving lots of memory with alloc_pages instead. > > > > > > > > > > What I am saying is that now that you are developing > > > > > infrastructure to iterate over free pages, > > > > > FREE_PAGE_HINT should be able to use it too. > > > > > Whether that's possible might be a good indication of > > > > > whether the new mm APIs make sense. > > > > > > > > The problem is the infrastructure as implemented isn't designed to do > > > > that. I am pretty certain this interface will have issues with being > > > > given small blocks to process at a time. > > > > > > > > Basically the design for the FREE_PAGE_HINT feature doesn't really > > > > have the concept of doing things a bit at a time. It is either > > > > filling, stopped, or done. From what I can tell it requires a > > > > configuration change for the virtio balloon interface to toggle > > > > between those states. > > > > > > Maybe I misunderstand what you are saying. > > > > > > Filling state can definitely report things > > > a bit at a time. It does not assume that > > > all of guest free memory can fit in a VQ. > > > > I think where you and I may differ is that you are okay with just > > pulling pages until you hit OOM, or allocation failures. Do I have > > that right? > > This is exactly what the current code does. But that's an implementation > detail which came about because we failed to find any other way to > iterate over free blocks. I get that. However my concern is that it has permeated other areas of the implementation, which makes taking another approach much more difficult than it needs to be. > > In my mind I am wanting to perform the hinting on a small > > block at a time and work through things iteratively. > > > > The problem is the FREE_PAGE_HINT doesn't have the option of returning > > pages until all pages have been pulled. It is run to completion and > > will keep filling the balloon until an allocation fails and the host > > says it is done. > > OK so there are two points. One is that FREE_PAGE_HINT does not > need to allocate a page at all. It really just wants to > iterate over free pages. I agree that it should just want to iterate over pages. However the issue I am trying to point out is that it doesn't have any guarantees on ordering and that is my concern. What I want to avoid is potentially corrupting memory. So for example with my current hinting approach I am using the list of hints because I get back one completion indicating all of the hints have been processed. It is only at that point that I can go back and make the memory available for allocation again. So one big issue right now with the FREE_PAGE_HINT approach is that it is designed to be all or nothing. Using the balloon makes it impossible for us to be incremental as all the pages are contained in one spot.
What we would need is some way to associate a page with a given vq buffer. Ultimately in order to really make the FREE_PAGE_HINT logic work with something like my page hinting logic it would need to work more like a network Rx ring in that we would associate a page per buffer and have some way of knowing the two are associated. > The reason FREE_PAGE_HINT does not free up pages until we finished > iterating over the free list it not a hypervisor API. The reason is we > don't want to keep getting the same address over and over again. > > > I would prefer to avoid that as I prefer to simply > > notify the host of a fixed block of pages at a time and let it process > > without having to have a thread on each side actively pushing pages, > > or listening for the incoming pages. > > Right. And FREE_PAGE_HINT can go even further. It can push a page and > let linux use it immediately. It does not even need to wait for host to > process anything unless the VQ gets full. If it is doing what you are saying it will be corrupting memory. At a minimum it has to wait until the page has been processed and the dirty bit cleared before it can let linux use it again. It is all a matter of keeping the dirty bit coherent. If we let linux use it again immediately and then cleared the dirty bit we would open up a possible data corruption race during migration as a dirty page might not be marked as such. > > > > > > > > The basic idea with the bubble hinting was to essentially create mini > > > > > > balloons. As such I had based the code off of the balloon inflation > > > > > > code. The only spot where it really differs is that I needed the > > > > > > ability to pass higher order pages so I tweaked thinks and passed > > > > > > "hints" instead of "pfns". > > > > > > > > > > And that is fine. But there isn't really such a big difference with > > > > > FREE_PAGE_HINT except FREE_PAGE_HINT triggers upon host request and not > > > > > in response to guest load. > > > > > > > > I disagree, I believe there is a significant difference. > > > > > > Yes there is, I just don't think it's in the iteration. > > > The iteration seems to be useful to hinting. > > > > I agree that iteration is useful to hinting. The problem is the > > FREE_PAGE_HINT code isn't really designed to be iterative. It is > > designed to run with a polling thread on each side and it is meant to > > be run to completion. > > Absolutely. But that's a bug I think. I think it is a part of the design. Basically in order to avoid corrupting memory it cannot return the page to the guest kernel until it has finished clearing the dirty bits associated with the pages. > > > > The > > > > FREE_PAGE_HINT code was implemented to be more of a streaming > > > > interface. > > > > > > It's implemented like this but it does not follow from > > > the interface. The implementation is a combination of > > > attempts to minimize # of exits and minimize mm core changes. > > > > The problem is the interface doesn't have a good way of indicating > > that it is done with a block of pages. > > > > So what I am probably looking at if I do a sg implementation for my > > hinting is to provide one large sg block for all 32 of the pages I > > might be holding. > > Right now if you pass an sg it will try to allocate a buffer > on demand for you. If this is a problem I could come up > with a new API that lets caller allocate the buffer. > Let me know. > > > I'm assuming that will still be processed as one > > contiguous block. 
With that I can then at least maintain a single > > response per request. > > Why do you care? Won't a counter of outstanding pages be enough? > Down the road maybe we could actually try to pipeline > things a bit. So send 32 pages once you get 16 of these back > send 16 more. Better for SMP configs and does not hurt > non-SMP too much. I am not saying we need to do it right away though. So the big thing is we cannot give the page back to the guest kernel until we know the processing has been completed. In the case of the MADV_DONTNEED call it will zero out the entire page on the next access. If the guest kernel had already written data by the time we get to that it would cause a data corruption and kill the whole guest. > > > > This is one of the things Linus kept complaining about in > > > > his comments. This code attempts to pull in ALL of the higher order > > > > pages, not just a smaller block of them. > > > > > > It wants to report all higher order pages eventually, yes. > > > But it's absolutely fine to report a chunk and then wait > > > for host to process the chunk before reporting more. > > > > > > However, interfaces we came up with for this would call > > > into virtio with a bunch of locks taken. > > > The solution was to take pages off the free list completely. > > > That in turn means we can't return them until > > > we have processed all free memory. > > > > I get that. The problem is the interface is designed around run to > > completion. For example it will sit there in a busy loop waiting for a > > free buffer because it knows the other side is supposed to be > > processing the pages already. > > I didn't get this part. I think the part you may not be getting is that we cannot let the guest use the page until the hint has been processed. Otherwise we risk corrupting memory. That is the piece that has me paranoid. If we end up performing a hint on a page that is in use somewhere in the kernel it will corrupt memory one way or another. That is the thing I have to avoid at all cost. That is why I have to have a way to know exactly which pages have been processed and which haven't before I return pages to the guest. Otherwise I am just corrupting memory. > > > > Honestly the difference is > > > > mostly in the hypervisor interface rather than what is needed for the kernel > > > > interface, however the design of the hypervisor interface would make > > > > doing things more incrementally much more difficult. > > > > > > OK that's interesting. The hypervisor interface is not > > > documented in the spec yet. Let me take a stab at a writeup now. So: > > > > > > > > > > > > - hypervisor requests reporting by modifying command ID > > > field in config space, and interrupting guest > > > > > > - in response, guest sends the command ID value on a special > > > free page hinting VQ, > > > followed by any number of buffers. Each buffer is assumed > > > to be the address and length of memory that was > > > unused *at some point after the time when command ID was sent*. > > > > > > Note that hypervisor takes pains to handle the case > > > where memory is actually no longer free by the time > > > it gets the memory. > > > This allows guest driver to take more liberties > > > and free pages without waiting for the host to > > > use the buffers. > > > > > > This is also one of the reasons we call this a free page hint - > > > the guarantee that page is free is a weak one, > > > in that sense it's more of a hint than a promise. > > > That helps guarantee we don't create OOM out of the blue.
> > I would like to stress the last paragraph above. The problem is we don't want to give bad hints. What we do based on the hint is clear the dirty bit. If we clear it in error when the page is actually in use it will lead to data corruption after migration. The idea with the hint is that you are saying the page is currently not in use, however if you send that hint late and have already freed the page back you can corrupt memory. > > > > > > - guest eventually sends a special buffer signalling to > > > host that it's done sending free pages. > > > It then stops reporting until command id changes. > > > > The pages are not freed back to the guest until the host reports that > > it is "DONE" via a configuration change. Doing that stops any further > > progress, and attempting to resume will just restart from the > > beginning. > > Right but it's not a requirement. Host does not assume this at all. > It's done like this simply because we can't iterate over pages > with the existing API. The problem is nothing about the implementation was designed for iteration. What I would have to do is likely gut and rewrite the entire guest side of the FREE_PAGE_HINT code in order to make it work iteratively. As I mentioned it would probably have to look more like a NIC Rx ring in handling because we would have to have some sort of way to associate the pages 1:1 to the buffers. > > The big piece this design is missing is the incremental notification > > that pages have been processed. The existing code just fills the vq with > > pages and keeps doing it until it cannot allocate any more pages. We > > would have to add logic to stop, flush, and resume to the existing > > framework. > > But not to the hypervisor interface. Hypervisor is fine > with pages being reused immediately. In fact, even before they > are processed. I don't think that is actually the case. If it does that I am pretty sure it will corrupt memory during migration. Take a look at qemu_guest_free_page_hint: https://github.com/qemu/qemu/blob/master/migration/ram.c#L3342 I'm pretty sure that code is going in and clearing the dirty bitmap for memory. If we were to allow a page to be allocated and used and then perform the hint it is going to introduce a race where the page might be missed for migration and could result in memory corruption. > > > - host can restart the process at any time by > > > updating command ID. That will make guest stop > > > and start from the beginning. > > > > > > - host can also stop the process by specifying a special > > > command ID value. > > > > > > > > > ========= > > > > > > > > > Now let's compare to what you have here: > > > > > > - At any time after boot, guest walks over free memory and sends > > > addresses as buffers to the host > > > > > > - Memory reported is then guaranteed to be unused > > > until host has used the buffers > > > > > > > > > Is above a fair summary? > > > > > > So yes there's a difference but the specific bit of chunking is same > > > imho. > > > > The big difference is that I am returning the pages after they are > > processed, while FREE_PAGE_HINT doesn't and isn't designed to. > > It doesn't but the hypervisor *is* designed to support that. Not really, it seems like it is more just a side effect of things. Also as I mentioned before I am also not a huge fan of polling on both sides as it is just going to burn through CPU.
If we are iterative and polling it is going to end up with us potentially pushing one CPU at 100%, and if the one CPU doing the polling cannot keep up with the page updates coming from the other CPUs we would be stuck in that state for a while. I would have preferred to see something where the CPU would at least allow other tasks to occur while it is waiting for buffers to be returned by the host. > > The > > problem is the interface doesn't allow for a good way to identify that > > any given block of pages has been processed and can be returned. > > And that's because FREE_PAGE_HINT does not care. > It can return any page at any point even before hypervisor > saw it. I disagree, see my comment above. > > Instead pages go in, but they don't come out until the configuration > > is changed and "DONE" is reported. The act of reporting "DONE" will > > reset things and start them all over which kind of defeats the point. > > Right. > > But if you consider how we are using the shrinker you will > see that it's kind of broken. > For example not keeping track of allocated > pages means the count we return is broken > while reporting is active. > > I looked at fixing it but really if we can just > stop allocating memory that would be way cleaner. Agreed. If we hit an OOM we should probably just stop the free page hinting and treat that as the equivalent to an allocation failure. As-is I think this also has the potential for corrupting memory since it will likely be returning the most recent pages added to the balloon so the pages are likely still on the processing queue. > For example we allocate pages until shrinker kicks in. > Fair enough but in fact many it would be better to > do the reverse: trigger shrinker and then send as many > free pages as we can to host. I'm not sure I understand this last part.
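For context on why ordering matters so much for the bubble-hinting scheme: on the host side, a discard-style hint could plausibly be handled as below (a hypothetical sketch, not actual QEMU code), and this is exactly what makes reuse-before-completion dangerous for it:

	/*
	 * Hypothetical host-side handling of a bubble hint (sketch
	 * only).  MADV_DONTNEED drops the backing memory, so the next
	 * guest access reads zeroes -- which is why the guest must not
	 * hand the page back to its allocator until the hint has been
	 * processed and acknowledged.
	 */
	#include <sys/mman.h>

	static int discard_hinted_range(void *hva, size_t len)
	{
		return madvise(hva, len, MADV_DONTNEED);
	}

By contrast, as described earlier in the thread, FREE_PAGE_HINT only clears dirty-log state for pages that were free after the request, so it does not need this ordering guarantee.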
On Wed, Jul 17, 2019 at 09:43:52AM -0700, Alexander Duyck wrote: > On Wed, Jul 17, 2019 at 3:28 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > On Tue, Jul 16, 2019 at 02:06:59PM -0700, Alexander Duyck wrote: > > > On Tue, Jul 16, 2019 at 10:41 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > <snip> > > > > > > > > > This is what I am saying. Having watched that patchset being developed, > > > > > > I think that's simply because processing blocks required mm core > > > > > > changes, which Wei was not up to pushing through. > > > > > > > > > > > > > > > > > > If we did > > > > > > > > > > > > while (1) { > > > > > > alloc_pages > > > > > > add_buf > > > > > > get_buf > > > > > > free_pages > > > > > > } > > > > > > > > > > > > We'd end up passing the same page to balloon again and again. > > > > > > > > > > > > So we end up reserving lots of memory with alloc_pages instead. > > > > > > > > > > > > What I am saying is that now that you are developing > > > > > > infrastructure to iterate over free pages, > > > > > > FREE_PAGE_HINT should be able to use it too. > > > > > > Whether that's possible might be a good indication of > > > > > > whether the new mm APIs make sense. > > > > > > > > > > The problem is the infrastructure as implemented isn't designed to do > > > > > that. I am pretty certain this interface will have issues with being > > > > > given small blocks to process at a time. > > > > > > > > > > Basically the design for the FREE_PAGE_HINT feature doesn't really > > > > > have the concept of doing things a bit at a time. It is either > > > > > filling, stopped, or done. From what I can tell it requires a > > > > > configuration change for the virtio balloon interface to toggle > > > > > between those states. > > > > > > > > Maybe I misunderstand what you are saying. > > > > > > > > Filling state can definitely report things > > > > a bit at a time. It does not assume that > > > > all of guest free memory can fit in a VQ. > > > > > > I think where you and I may differ is that you are okay with just > > > pulling pages until you hit OOM, or allocation failures. Do I have > > > that right? > > > > This is exactly what the current code does. But that's an implementation > > detail which came about because we failed to find any other way to > > iterate over free blocks. > > I get that. However my concern is that permeated other areas of the > implementation that make taking another approach much more difficult > than it needs to be. Implementation would have to change to use an iterator obviously. But I don't see that it leaked out to a hypervisor interface. In fact take a look at virtio_balloon_shrinker_scan and you will see that it calls shrink_free_pages without waiting for the device at all. > > > In my mind I am wanting to perform the hinting on a small > > > block at a time and work through things iteratively. > > > > > > The problem is the FREE_PAGE_HINT doesn't have the option of returning > > > pages until all pages have been pulled. It is run to completion and > > > will keep filling the balloon until an allocation fails and the host > > > says it is done. > > > > OK so there are two points. One is that FREE_PAGE_HINT does not > > need to allocate a page at all. It really just wants to > > iterate over free pages. > > I agree that it should just want to iterate over pages. However the > issue I am trying to point out is that it doesn't have any guarantees > on ordering and that is my concern. What I want to avoid is > potentially corrupting memory. I get that. 
I am just trying to make sure you are aware that for FREE_PAGE_HINT specifically ordering does not matter because it does not care when hypervisor used the buffers. It only cares that page was free after it got the request. Used buffers are only tracked to avoid overflowing the VQ. This is different from your hinting where you make it the responsibility of the guest to not allocate a page before it was used. > > So for example with my current hinting approach I am using the list of > hints because I get back one completion indicating all of the hints > have been processed. It is only at that point that I can go back and > make the memory available for allocation again. Right. But just counting them would work just as well, no? At least as long as you wait for everything to complete... If you want to pipeline, see below > > So one big issue right now with the FREE_PAGE_HINT approach is that it > is designed to be all or nothing. Using the balloon makes it > impossible for us to be incremental as all the pages are contained in > one spot. What we would need is some way to associate a page with a > given vq buffer. Sorry if I'm belaboring the obvious, but isn't this what 'void *data' in virtqueue_add_inbuf is designed for? And if you only ever use virtqueue_add_inbuf and virtqueue_add_outbuf on a given VQ, then you can track two pointers using virtqueue_add_inbuf_ctx. > Ultimately in order to really make the FREE_PAGE_HINT > logic work with something like my page hinting logic it would need to > work more like a network Rx ring in that we would associate a page per > buffer and have some way of knowing the two are associated. Right. That's exactly how virtio net does it btw. > > The reason FREE_PAGE_HINT does not free up pages until we finished > > iterating over the free list is not a hypervisor API. The reason is we > > don't want to keep getting the same address over and over again. > > > > > I would prefer to avoid that as I prefer to simply > > > notify the host of a fixed block of pages at a time and let it process > > > without having to have a thread on each side actively pushing pages, > > > or listening for the incoming pages. > > > > Right. And FREE_PAGE_HINT can go even further. It can push a page and > > let linux use it immediately. It does not even need to wait for host to > > process anything unless the VQ gets full. > > If it is doing what you are saying it will be corrupting memory. No and that is hypervisor's responsibility. I think you are missing part of the picture here. Here is a valid implementation: Before asking for hints, hypervisor write-protects all memory, and logs all write faults. When hypervisor gets the hint, if page has since been modified, the hint is ignored. > At a > minimum it has to wait until the page has been processed and the dirty > bit cleared before it can let linux use it again. It is all a matter > of keeping the dirty bit coherent. If we let linux use it again > immediately and then cleared the dirty bit we would open up a possible > data corruption race during migration as a dirty page might not be > marked as such. I think you are talking about the dirty bit on the host, right? The implication is that calling MADV_FREE from qemu would not be a good implementation of FREE_PAGE_HINT. And indeed, as far as I can see it does nothing of the sort. > > > > > > > > > > The basic idea with the bubble hinting was to essentially create mini > > > > > > > balloons. As such I had based the code off of the balloon inflation > > > > > > > code.
The only spot where it really differs is that I needed the > > > > > > > ability to pass higher order pages so I tweaked things and passed > > > > > > > "hints" instead of "pfns". > > > > > > > > > > > > And that is fine. But there isn't really such a big difference with > > > > > > FREE_PAGE_HINT except FREE_PAGE_HINT triggers upon host request and not > > > > > > in response to guest load. > > > > > > > > > > I disagree, I believe there is a significant difference. > > > > > > > > Yes there is, I just don't think it's in the iteration. > > > > The iteration seems to be useful to hinting. > > > > > > I agree that iteration is useful to hinting. The problem is the > > > FREE_PAGE_HINT code isn't really designed to be iterative. It is > > > designed to run with a polling thread on each side and it is meant to > > > be run to completion. > > > > Absolutely. But that's a bug I think. > > I think it is a part of the design. Basically in order to avoid > corrupting memory it cannot return the page to the guest kernel until > it has finished clearing the dirty bits associated with the pages. OK I hope I clarified that that's not supposed to be the case. > > > > > The > > > > > FREE_PAGE_HINT code was implemented to be more of a streaming > > > > > interface. > > > > > > > > It's implemented like this but it does not follow from > > > > the interface. The implementation is a combination of > > > > attempts to minimize # of exits and minimize mm core changes. > > > > > > The problem is the interface doesn't have a good way of indicating > > > that it is done with a block of pages. > > > > > > So what I am probably looking at if I do a sg implementation for my > > > hinting is to provide one large sg block for all 32 of the pages I > > > might be holding. > > > > Right now if you pass an sg it will try to allocate a buffer > > on demand for you. If this is a problem I could come up > > with a new API that lets caller allocate the buffer. > > Let me know. > > > > > I'm assuming that will still be processed as one > > > contiguous block. With that I can then at least maintain a single > > > response per request. > > > > Why do you care? Won't a counter of outstanding pages be enough? > > Down the road maybe we could actually try to pipeline > > things a bit. So send 32 pages once you get 16 of these back > > send 16 more. Better for SMP configs and does not hurt > > non-SMP too much. I am not saying we need to do it right away though. > > So the big thing is we cannot give the page back to the guest kernel > until we know the processing has been completed. In the case of the > MADV_DONTNEED call it will zero out the entire page on the next > access. If the guest kernel had already written data by the time we > get to that it would cause a data corruption and kill the whole guest. Exactly but FREE_PAGE_HINT does not cause qemu to call MADV_DONTNEED. > > > > > This is one of the things Linus kept complaining about in > > > > > his comments. This code attempts to pull in ALL of the higher order > > > > > pages, not just a smaller block of them. > > > > > > > > It wants to report all higher order pages eventually, yes. > > > > But it's absolutely fine to report a chunk and then wait > > > > for host to process the chunk before reporting more. > > > > > > > > However, interfaces we came up with for this would call > > > > into virtio with a bunch of locks taken. > > > > The solution was to take pages off the free list completely.
> > > > That in turn means we can't return them until > > > > we have processed all free memory. > > > > > > I get that. The problem is the interface is designed around run to > > > completion. For example it will sit there in a busy loop waiting for a > > > free buffer because it knows the other side is suppose to be > > > processing the pages already. > > > > I didn't get this part. > > I think the part you may not be getting is that we cannot let the > guest use the page until the hint has been processed. Otherwise we > risk corrupting memory. That is the piece that has me paranoid. If we > end up performing a hint on a page that is use somewhere in the kernel > it will corrupt memory one way or another. That is the thing I have to > avoid at all cost. You have to do it, sure. And that is because you do not assume that hypervisor does it for you. But FREE_PAGE_HINT doesn't, hypervisor takes care of that. > That is why I have to have a way to know exactly which pages have been > processed and which haven't before I return pages to the guest. > Otherwise I am just corrupting memory. Sure. That isn't really hard though. > > > > > > Honestly the difference is > > > > > mostly in the hypervisor interface than what is needed for the kernel > > > > > interface, however the design of the hypervisor interface would make > > > > > doing things more incrementally much more difficult. > > > > > > > > OK that's interesting. The hypervisor interface is not > > > > documented in the spec yet. Let me take a stub at a writeup now. So: > > > > > > > > > > > > > > > > - hypervisor requests reporting by modifying command ID > > > > field in config space, and interrupting guest > > > > > > > > - in response, guest sends the command ID value on a special > > > > free page hinting VQ, > > > > followed by any number of buffers. Each buffer is assumed > > > > to be the address and length of memory that was > > > > unused *at some point after the time when command ID was sent*. > > > > > > > > Note that hypervisor takes pains to handle the case > > > > where memory is actually no longer free by the time > > > > it gets the memory. > > > > This allows guest driver to take more liberties > > > > and free pages without waiting for guest to > > > > use the buffers. > > > > > > > > This is also one of the reason we call this a free page hint - > > > > the guarantee that page is free is a weak one, > > > > in that sense it's more of a hint than a promise. > > > > That helps guarantee we don't create OOM out of blue. > > > > I would like to stress the last paragraph above. > > The problem is we don't want to give bad hints. What we do based on > the hint is clear the dirty bit. If we clear it in err when the page > is actually in use it will lead to data corruption after migration. That's true for your patches. I get that. > The idea with the hint is that you are saying the page is currently > not in use, however if you send that hint late and have already freed > the page back you can corrupt memory. That part is I think wrong - assuming "you" means upstream code. > > > > > > > > - guest eventually sends a special buffer signalling to > > > > host that it's done sending free pages. > > > > It then stops reporting until command id changes. > > > > > > The pages are not freed back to the guest until the host reports that > > > it is "DONE" via a configuration change. Doing that stops any further > > > progress, and attempting to resume will just restart from the > > > beginning. > > > > Right but it's not a requirement. 
Host does not assume this at all. > > It's done like this simply because we can't iterate over pages > > with the existing API. > > The problem is nothing about the implementation was designed for > iteration. What I would have to do is likely gut and rewrite the > entire guest side of the FREE_PAGE_HINT code in order to make it work > iteratively. Right. I agree. > As I mentioned it would probably have to look more like a > NIC Rx ring in handling because we would have to have some sort of way > to associate the pages 1:1 to the buffers. > > > > The big piece this design is missing is the incremental notification > > > pages have been processed. The existing code just fills the vq with > > > pages and keeps doing it until it cannot allocate any more pages. We > > > would have to add logic to stop, flush, and resume to the existing > > > framework. > > > > But not to the hypervisor interface. Hypervisor is fine > > with pages being reused immediately. In fact, even before they > > are processed. > > I don't think that is actually the case. If it does that I am pretty > sure it will corrupt memory during migration. > > Take a look at qemu_guest_free_page_hint: > https://github.com/qemu/qemu/blob/master/migration/ram.c#L3342 > > I'm pretty sure that code is going in and clearing the dirty bitmap > for memory. Yes it does. However the trick is that meanwhile kvm is logging new writes. So the bitmap that is being cleared is the bitmap that was logged before the request was sent to guest. > If we were to allow a page to be allocated and used and > then perform the hint it is going to introduce a race where the page > might be missed for migration and could result in memory corruption. commit c13c4153f76db23cac06a12044bf4dd346764059 has this explanation: Note: balloon will report pages which were free at the time of this call. As the reporting happens asynchronously, dirty bit logging must be enabled before this free_page_start call is made. Guest reporting must be disabled before the migration dirty bitmap is synchronized. but over multiple iterations this seems to have been dropped from code comments. Wei, would you mind going back and documenting the APIs you used? They seem to be causing confusion ... > > > > > - host can restart the process at any time by > > > > updating command ID. That will make guest stop > > > > and start from the beginning. > > > > > > > > - host can also stop the process by specifying a special > > > > command ID value. > > > > > > > > > > > > ========= > > > > > > > > > > > > Now let's compare to what you have here: > > > > > > > > - At any time after boot, guest walks over free memory and sends > > > > addresses as buffers to the host > > > > > > > > - Memory reported is then guaranteed to be unused > > > > until host has used the buffers > > > > > > > > > > > > Is above a fair summary? > > > > > > > > So yes there's a difference but the specific bit of chunking is same > > > > imho. > > > > > > The big difference is that I am returning the pages after they are > > > processed, while FREE_PAGE_HINT doesn't and isn't designed to. > > > > It doesn't but the hypervisor *is* designed to support that. > > Not really, it seems like it is more just a side effect of things. I hope the commit log above is enough to convince you we did think about this. > Also as I mentioned before I am also not a huge fan of polling on both > sides as it is just going to burn through CPU. If we are iterative and > polling it is going to end up with us potentially pushing one CPU at > 100%, and if the one CPU doing the polling cannot keep up with the > page updates coming from the other CPUs we would be stuck in that > state for a while. I would have preferred to see something where the > CPU would at least allow other tasks to occur while it is waiting for > buffers to be returned by the host. You lost me here. What does polling have to do with it? > > > The > > > problem is the interface doesn't allow for a good way to identify that > > > any given block of pages has been processed and can be returned. > > > > And that's because FREE_PAGE_HINT does not care. > > It can return any page at any point even before hypervisor > > saw it. > > I disagree, see my comment above. OK let's see if above is enough to convince you. Or maybe we have a bug when shrinker is invoked :) But I don't think so. > > > Instead pages go in, but they don't come out until the configuration > > > is changed and "DONE" is reported. The act of reporting "DONE" will > > > reset things and start them all over which kind of defeats the point. > > > > Right. > > > > But if you consider how we are using the shrinker you will > > see that it's kind of broken. > > For example not keeping track of allocated > > pages means the count we return is broken > > while reporting is active. > > > > I looked at fixing it but really if we can just > > stop allocating memory that would be way cleaner. > > Agreed. If we hit an OOM we should probably just stop the free page > hinting and treat that as the equivalent to an allocation failure. And fix the shrinker count to include the pages in the vq. Yea. > > As-is I think this also has the potential for corrupting memory since > it will likely be returning the most recent pages added to the balloon > so the pages are likely still on the processing queue. That part is fine I think because of the above. > > > For example we allocate pages until shrinker kicks in. > > Fair enough but in fact maybe it would be better to > > do the reverse: trigger shrinker and then send as many > > free pages as we can to host. > > I'm not sure I understand this last part. Oh basically what I am saying is this: one of the reasons to use page hinting is when host is short on memory. In that case, why don't we use shrinker to ask kernel drivers to free up memory? Any memory freed could then be reported to host.
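To make that shrinker suggestion concrete, here is a minimal sketch of what a pressure-driven path could look like. All of the vb_hint_* names and helpers below are invented for illustration; this is not code from either patchset:

#include <linux/shrinker.h>

static unsigned long vb_hint_count(struct shrinker *s,
                                   struct shrink_control *sc)
{
        /* Advertise reclaimable work so ->scan_objects is invoked under
         * memory pressure. vb_pages_worth_hinting() is hypothetical. */
        return vb_pages_worth_hinting();
}

static unsigned long vb_hint_scan(struct shrinker *s,
                                  struct shrink_control *sc)
{
        /* Free what we can first, then report the freed ranges on the
         * hinting VQ. Both helpers here are hypothetical. */
        unsigned long freed = vb_release_cached_pages(sc->nr_to_scan);

        vb_report_freed_to_host();
        return freed;
}

static struct shrinker vb_hint_shrinker = {
        .count_objects  = vb_hint_count,
        .scan_objects   = vb_hint_scan,
        .seeks          = DEFAULT_SEEKS,
};

/* registered once at probe time via register_shrinker(&vb_hint_shrinker) */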
On Wed, Jul 17, 2019 at 10:14 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Wed, Jul 17, 2019 at 09:43:52AM -0700, Alexander Duyck wrote: > > On Wed, Jul 17, 2019 at 3:28 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > On Tue, Jul 16, 2019 at 02:06:59PM -0700, Alexander Duyck wrote: > > > > On Tue, Jul 16, 2019 at 10:41 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > <snip> > > > > > > > > > > > This is what I am saying. Having watched that patchset being developed, > > > > > > > I think that's simply because processing blocks required mm core > > > > > > > changes, which Wei was not up to pushing through. > > > > > > > > > > > > > > > > > > > > > If we did > > > > > > > > > > > > > > while (1) { > > > > > > > alloc_pages > > > > > > > add_buf > > > > > > > get_buf > > > > > > > free_pages > > > > > > > } > > > > > > > > > > > > > > We'd end up passing the same page to balloon again and again. > > > > > > > > > > > > > > So we end up reserving lots of memory with alloc_pages instead. > > > > > > > > > > > > > > What I am saying is that now that you are developing > > > > > > > infrastructure to iterate over free pages, > > > > > > > FREE_PAGE_HINT should be able to use it too. > > > > > > > Whether that's possible might be a good indication of > > > > > > > whether the new mm APIs make sense. > > > > > > > > > > > > The problem is the infrastructure as implemented isn't designed to do > > > > > > that. I am pretty certain this interface will have issues with being > > > > > > given small blocks to process at a time. > > > > > > > > > > > > Basically the design for the FREE_PAGE_HINT feature doesn't really > > > > > > have the concept of doing things a bit at a time. It is either > > > > > > filling, stopped, or done. From what I can tell it requires a > > > > > > configuration change for the virtio balloon interface to toggle > > > > > > between those states. > > > > > > > > > > Maybe I misunderstand what you are saying. > > > > > > > > > > Filling state can definitely report things > > > > > a bit at a time. It does not assume that > > > > > all of guest free memory can fit in a VQ. > > > > > > > > I think where you and I may differ is that you are okay with just > > > > pulling pages until you hit OOM, or allocation failures. Do I have > > > > that right? > > > > > > This is exactly what the current code does. But that's an implementation > > > detail which came about because we failed to find any other way to > > > iterate over free blocks. > > > > I get that. However my concern is that permeated other areas of the > > implementation that make taking another approach much more difficult > > than it needs to be. > > Implementation would have to change to use an iterator obviously. But I don't see > that it leaked out to a hypervisor interface. > > In fact take a look at virtio_balloon_shrinker_scan > and you will see that it calls shrink_free_pages > without waiting for the device at all. Yes, and in case you missed it earlier I am pretty sure that leads to possible memory corruption. I don't think it was tested enough to be able to say that is safe. Specifically we cannot be clearing the dirty flag on pages that are in use. We should only be clearing that flag for pages that are guaranteed to not be in use. > > > > In my mind I am wanting to perform the hinting on a small > > > > block at a time and work through things iteratively. 
> > > > > > > > The problem is the FREE_PAGE_HINT doesn't have the option of returning > > > > pages until all pages have been pulled. It is run to completion and > > > > will keep filling the balloon until an allocation fails and the host > > > > says it is done. > > > > > > OK so there are two points. One is that FREE_PAGE_HINT does not > > > need to allocate a page at all. It really just wants to > > > iterate over free pages. > > > > I agree that it should just want to iterate over pages. However the > > issue I am trying to point out is that it doesn't have any guarantees > > on ordering and that is my concern. What I want to avoid is > > potentially corrupting memory. > > I get that. I am just trying to make sure you are aware that for > FREE_PAGE_HINT specifically ordering does not matter because it does not > care when hypervisor used the buffers. It only cares that page was > free after it got the request. used buffers are only tracked to avoid > overflowing the VQ. This is different from your hinting where you make > it the responsibility of the guest to not allocate page before it was > used. Prove to me that the ordering does not matter. As far as I can tell it should since this is being used to clear the bitmap and will affect migration. I'm pretty certain the page should not be freed until it has been processed. Otherwise I believe there is a risk of the page not being migrated and leading to a memory corruption when the VM is finally migrated. > > > > So for example with my current hinting approach I am using the list of > > hints because I get back one completion indicating all of the hints > > have been processed. It is only at that point that I can go back and > > make the memory available for allocation again. > > Right. But just counting them would work just as well, no? > At least as long as you wait for everything to complete... > If you want to pipeline, see below Yes, but if possible I would also want to try and keep the batch behavior that I have. We could count the descriptors processed, however that is still essentially done all via busy waiting in the FREE_PAGE_HINT logic. > > > > So one big issue right now with the FREE_PAGE_HINT approach is that it > > is designed to be all or nothing. Using the balloon makes it > > impossible for us to be incremental as all the pages are contained in > > one spot. What we would need is some way to associate a page with a > > given vq buffer. > > Sorry if I'm belaboring the obvious, but isn't this what 'void *data' in > virtqueue_add_inbuf is designed for? And if you only ever use > virtqueue_add_inbuf and virtqueue_add_outbuf on a given VQ, then you can > track two pointers using virtqueue_add_inbuf_ctx. I am still learning virtio so I wasn't aware of this piece until yesterday. For FREE_PAGE_HINT it would probably work as we would then have that association. For my page hinting I am still thinking I would prefer to just pass around a scatterlist since that is the structure I would likely fill and then later drain of pages versus just maintaining a list. > > Ultimately in order to really make the FREE_PAGE_HINT > > logic work with something like my page hinting logic it would need to > > work more like a network Rx ring in that we would associate a page per > > buffer and have some way of knowing the two are associated. > > Right. That's exactly how virtio net does it btw. Yeah, I saw that after reviewing the code yesterday. 
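For reference, the 'data' token mechanics being discussed would let the page pointer itself ride along with each buffer, NIC-Rx style. A rough sketch under that assumption (the vb_hint_* names are invented; only the virtqueue calls are the real API):

#include <linux/virtio.h>
#include <linux/scatterlist.h>
#include <linux/mm.h>

static int vb_hint_post_page(struct virtqueue *vq, struct page *page)
{
        struct scatterlist sg;

        sg_init_one(&sg, page_address(page), PAGE_SIZE);

        /* The page pointer is the per-buffer token; it comes back from
         * virtqueue_get_buf() once the host has consumed the buffer. */
        return virtqueue_add_inbuf(vq, &sg, 1, page, GFP_KERNEL);
}

static void vb_hint_drain_done(struct virtqueue *vq)
{
        unsigned int len;
        struct page *page;

        /* Only pages the host has acknowledged become allocatable again */
        while ((page = virtqueue_get_buf(vq, &len)))
                __free_page(page);
}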
> > > The reason FREE_PAGE_HINT does not free up pages until we finished > > > iterating over the free list is not a hypervisor API. The reason is we > > > don't want to keep getting the same address over and over again. > > > > > > > I would prefer to avoid that as I prefer to simply > > > > notify the host of a fixed block of pages at a time and let it process > > > > without having to have a thread on each side actively pushing pages, > > > > or listening for the incoming pages. > > > > > > Right. And FREE_PAGE_HINT can go even further. It can push a page and > > > let linux use it immediately. It does not even need to wait for host to > > > process anything unless the VQ gets full. > > > > If it is doing what you are saying it will be corrupting memory. > > No and that is hypervisor's responsibility. > > I think you are missing part of the picture here. > > Here is a valid implementation: > > Before asking for hints, hypervisor write-protects all memory, and logs > all write faults. When hypervisor gets the hint, if page has since been > modified, the hint is ignored. No, here is the part where I think you missed the point. I was already aware of this. So my concern is this scenario. If you put a hint on the VQ and then free the memory back to the guest, what about the scenario where another process could allocate the memory and dirty it before we process the hint request on the host? In that case the page was dirtied, the hypervisor will have correctly write faulted and dirtied it, and then we came through and incorrectly marked it as being free. That is the scenario I am worried about as I am pretty certain that leads to memory corruption. > > > At a > > minimum it has to wait until the page has been processed and the dirty > > bit cleared before it can let linux use it again. It is all a matter > > of keeping the dirty bit coherent. If we let linux use it again > > immediately and then cleared the dirty bit we would open up a possible > > data corruption race during migration as a dirty page might not be > > marked as such. > > I think you are talking about the dirty bit on the host, right? > > The implication is that calling MADV_FREE from qemu would > not be a good implementation of FREE_PAGE_HINT. > And indeed, as far as I can see it does nothing of the sort. I don't mean the dirty bit on the host; I am talking about the bitmap used to determine which pages need to be migrated. That is what this hint is updating and it is also being tracked via the write protection of the pages at the start of migration. My concern is that we can end up losing track of pages that are updated if we are hinting after they have been freed back to the guest for reallocation. > > > > > > > > > > > > The basic idea with the bubble hinting was to essentially create mini > > > > > > > > balloons. As such I had based the code off of the balloon inflation > > > > > > > > code. The only spot where it really differs is that I needed the > > > > > > > > ability to pass higher order pages so I tweaked things and passed > > > > > > > > "hints" instead of "pfns". > > > > > > > > > > > > > > And that is fine. But there isn't really such a big difference with > > > > > > > FREE_PAGE_HINT except FREE_PAGE_HINT triggers upon host request and not > > > > > > > in response to guest load. > > > > > > > > > > > > I disagree, I believe there is a significant difference. > > > > > > > > > > Yes there is, I just don't think it's in the iteration. > > > > > The iteration seems to be useful to hinting. 
> > > > > > > > I agree that iteration is useful to hinting. The problem is the > > > > FREE_PAGE_HINT code isn't really designed to be iterative. It is > > > > designed to run with a polling thread on each side and it is meant to > > > > be run to completion. > > > > > > Absolutely. But that's a bug I think. > > > > I think it is a part of the design. Basically in order to avoid > > corrupting memory it cannot return the page to the guest kernel until > > it has finished clearing the dirty bits associated with the pages. > > OK I hope I clarified by that's not supposed to be the case. I think you might have missed something. I am pretty certain issues are still present. > > > > > > The > > > > > > FREE_PAGE_HINT code was implemented to be more of a streaming > > > > > > interface. > > > > > > > > > > It's implemented like this but it does not follow from > > > > > the interface. The implementation is a combination of > > > > > attempts to minimize # of exits and minimize mm core changes. > > > > > > > > The problem is the interface doesn't have a good way of indicating > > > > that it is done with a block of pages. > > > > > > > > So what I am probably looking at if I do a sg implementation for my > > > > hinting is to provide one large sg block for all 32 of the pages I > > > > might be holding. > > > > > > Right now if you pass an sg it will try to allocate a buffer > > > on demand for you. If this is a problem I could come up > > > with a new API that lets caller allocate the buffer. > > > Let me know. > > > > > > > I'm assuming that will still be processed as one > > > > contiguous block. With that I can then at least maintain a single > > > > response per request. > > > > > > Why do you care? Won't a counter of outstanding pages be enough? > > > Down the road maybe we could actually try to pipeline > > > things a bit. So send 32 pages once you get 16 of these back > > > send 16 more. Better for SMP configs and does not hurt > > > non-SMP too much. I am not saying we need to do it right away though. > > > > So the big thing is we cannot give the page back to the guest kernel > > until we know the processing has been completed. In the case of the > > MADV_DONT_NEED call it will zero out the entire page on the next > > access. If the guest kernel had already written data by the time we > > get to that it would cause a data corruption and kill the whole guest. > > > Exactly but FREE_PAGE_HINT does not cause qemu to call MADV_DONT_NEED. No, instead it clears the bit indicating that the page is supposed to be migrated. The effect will not be all that different, just delayed until the VM is actually migrated. > > > > > > This is one of the things Linus kept complaining about in > > > > > > his comments. This code attempts to pull in ALL of the higher order > > > > > > pages, not just a smaller block of them. > > > > > > > > > > It wants to report all higher order pages eventually, yes. > > > > > But it's absolutely fine to report a chunk and then wait > > > > > for host to process the chunk before reporting more. > > > > > > > > > > However, interfaces we came up with for this would call > > > > > into virtio with a bunch of locks taken. > > > > > The solution was to take pages off the free list completely. > > > > > That in turn means we can't return them until > > > > > we have processed all free memory. > > > > > > > > I get that. The problem is the interface is designed around run to > > > > completion. 
For example it will sit there in a busy loop waiting for a > > > > free buffer because it knows the other side is suppose to be > > > > processing the pages already. > > > > > > I didn't get this part. > > > > I think the part you may not be getting is that we cannot let the > > guest use the page until the hint has been processed. Otherwise we > > risk corrupting memory. That is the piece that has me paranoid. If we > > end up performing a hint on a page that is use somewhere in the kernel > > it will corrupt memory one way or another. That is the thing I have to > > avoid at all cost. > > You have to do it, sure. And that is because you do not > assume that hypervisor does it for you. But FREE_PAGE_HINT doesn't, > hypervisor takes care of that. Sort of. The hypervisor is trying to do dirty page tracking, however the FREE_PAGE_HINT interferes with that. That is the problem. If we get that out of order then the hypervisor work will be undone and we just make a mess of memory. > > That is why I have to have a way to know exactly which pages have been > > processed and which haven't before I return pages to the guest. > > Otherwise I am just corrupting memory. > > Sure. That isn't really hard though. Agreed. > > > > > > > > Honestly the difference is > > > > > > mostly in the hypervisor interface than what is needed for the kernel > > > > > > interface, however the design of the hypervisor interface would make > > > > > > doing things more incrementally much more difficult. > > > > > > > > > > OK that's interesting. The hypervisor interface is not > > > > > documented in the spec yet. Let me take a stub at a writeup now. So: > > > > > > > > > > > > > > > > > > > > - hypervisor requests reporting by modifying command ID > > > > > field in config space, and interrupting guest > > > > > > > > > > - in response, guest sends the command ID value on a special > > > > > free page hinting VQ, > > > > > followed by any number of buffers. Each buffer is assumed > > > > > to be the address and length of memory that was > > > > > unused *at some point after the time when command ID was sent*. > > > > > > > > > > Note that hypervisor takes pains to handle the case > > > > > where memory is actually no longer free by the time > > > > > it gets the memory. > > > > > This allows guest driver to take more liberties > > > > > and free pages without waiting for guest to > > > > > use the buffers. > > > > > > > > > > This is also one of the reason we call this a free page hint - > > > > > the guarantee that page is free is a weak one, > > > > > in that sense it's more of a hint than a promise. > > > > > That helps guarantee we don't create OOM out of blue. > > > > > > I would like to stress the last paragraph above. > > > > The problem is we don't want to give bad hints. What we do based on > > the hint is clear the dirty bit. If we clear it in err when the page > > is actually in use it will lead to data corruption after migration. > > That's true for your patches. I get that. No, it should be true for FREE_PAGE_HINT as well. The fact that it isn't is a bug as far as I am concerned. If you are doing dirty page tracking in the hypervisor you cannot expect it to behave well if the guest is providing it with bad data. > > The idea with the hint is that you are saying the page is currently > > not in use, however if you send that hint late and have already freed > > the page back you can corrupt memory. > > > That part is I think wrong - assuming "you" means upstream code. 
Yes, I am referring to someone running FREE_PAGE_HINT code. I usually try to replace them with "we" to make it clear I am not talking about someone personally; it is a bad habit. > > > > > > > > > > - guest eventually sends a special buffer signalling to > > > > > host that it's done sending free pages. > > > > > It then stops reporting until command id changes. > > > > The pages are not freed back to the guest until the host reports that > > > > it is "DONE" via a configuration change. Doing that stops any further > > > > progress, and attempting to resume will just restart from the > > > > beginning. > > > Right but it's not a requirement. Host does not assume this at all. > > > It's done like this simply because we can't iterate over pages > > > with the existing API. > > The problem is nothing about the implementation was designed for > > iteration. What I would have to do is likely gut and rewrite the > > entire guest side of the FREE_PAGE_HINT code in order to make it work > > iteratively. > > > Right. I agree. > > > As I mentioned it would probably have to look more like a > > NIC Rx ring in handling because we would have to have some sort of way > > to associate the pages 1:1 to the buffers. > > > > > > The big piece this design is missing is the incremental notification > > > > pages have been processed. The existing code just fills the vq with > > > > pages and keeps doing it until it cannot allocate any more pages. We > > > > would have to add logic to stop, flush, and resume to the existing > > > > framework. > > > > > > But not to the hypervisor interface. Hypervisor is fine > > > with pages being reused immediately. In fact, even before they > > > are processed. > > > > I don't think that is actually the case. If it does that I am pretty > > sure it will corrupt memory during migration. > > > > Take a look at qemu_guest_free_page_hint: > > https://github.com/qemu/qemu/blob/master/migration/ram.c#L3342 > > > > I'm pretty sure that code is going in and clearing the dirty bitmap > > for memory. > > Yes it does. However the trick is that meanwhile > kvm is logging new writes. So the bitmap that > is being cleared is the bitmap that was logged before the request > was sent to guest. > > > If we were to allow a page to be allocated and used and > > then perform the hint it is going to introduce a race where the page > > might be missed for migration and could result in memory corruption. > > commit c13c4153f76db23cac06a12044bf4dd346764059 has this explanation: > > Note: balloon will report pages which were free at the time of this call. > As the reporting happens asynchronously, dirty bit logging must be > enabled before this free_page_start call is made. Guest reporting must be > disabled before the migration dirty bitmap is synchronized. > > but over multiple iterations this seems to have been dropped > from code comments. Wei, would you mind going back > and documenting the APIs you used? > They seem to be causing confusion ... The "Note" is the behavior I am seeing. Specifically there is nothing in place to prevent the freed pages from causing corruption if they are freed before being hinted. The requirement should be that they cannot be freed until after they are hinted; that way the dirty bit logging will mark the page as dirty if it is accessed AFTER being hinted. If you do not guarantee the hinting has happened first you could end up logging the dirty bit before the hint is processed and then clear the dirty bit due to the hint. It is pretty straightforward to resolve by just not putting the page into the balloon until after the hint has been processed. > > > > > > > - host can restart the process at any time by > > > > > updating command ID. That will make guest stop > > > > > and start from the beginning. > > > > > > > > > > - host can also stop the process by specifying a special > > > > > command ID value. > > > > > > > > > > > > > > > ========= > > > > > > > > > > > > > > > Now let's compare to what you have here: > > > > > > > > > > - At any time after boot, guest walks over free memory and sends > > > > > addresses as buffers to the host > > > > > > > > > > - Memory reported is then guaranteed to be unused > > > > > until host has used the buffers > > > > > > > > > > > > > > > Is above a fair summary? > > > > > > > > > > So yes there's a difference but the specific bit of chunking is same > > > > > imho. > > > > > > > > The big difference is that I am returning the pages after they are > > > > processed, while FREE_PAGE_HINT doesn't and isn't designed to. > > > > > > It doesn't but the hypervisor *is* designed to support that. > > > > Not really, it seems like it is more just a side effect of things. > > I hope the commit log above is enough to convince you we did > think about this. Sorry, but no. I think the "note" convinced me there is a race condition, specifically in the shrinker case. We cannot free the page back to host memory until the hint has been processed; otherwise we will race with the dirty bit logging. > > Also as I mentioned before I am also not a huge fan of polling on both > > sides as it is just going to burn through CPU. If we are iterative and > > polling it is going to end up with us potentially pushing one CPU at > > 100%, and if the one CPU doing the polling cannot keep up with the > > page updates coming from the other CPUs we would be stuck in that > > state for a while. I would have preferred to see something where the > > CPU would at least allow other tasks to occur while it is waiting for > > buffers to be returned by the host. > > You lost me here. What does polling have to do with it? This is just another issue I found. Specifically busy polling while waiting on the host to process the hints. I'm not a fan of it and was just pointing it out. > > > > The > > > > problem is the interface doesn't allow for a good way to identify that > > > > any given block of pages has been processed and can be returned. > > > > > > And that's because FREE_PAGE_HINT does not care. > > > It can return any page at any point even before hypervisor > > > saw it. > > > > I disagree, see my comment above. > > OK let's see if above is enough to convince you. Or maybe we > have a bug when shrinker is invoked :) But I don't think so. I'm pretty sure there is a bug. > > > > > Instead pages go in, but they don't come out until the configuration > > > > is changed and "DONE" is reported. The act of reporting "DONE" will > > > > reset things and start them all over which kind of defeats the point. > > > > > > Right. > > > > > > But if you consider how we are using the shrinker you will > > > see that it's kind of broken. > > > For example not keeping track of allocated > > > pages means the count we return is broken > > > while reporting is active. > > > > > > I looked at fixing it but really if we can just > > > stop allocating memory that would be way cleaner. > > > > Agreed. If we hit an OOM we should probably just stop the free page > > hinting and treat that as the equivalent to an allocation failure. 
> > And fix the shrinker count to include the pages in the vq. Yea. I don't know if we really want to touch the pages in the VQ. I would say that we should leave them alone. > > > > As-is I think this also has the potential for corrupting memory since > > it will likely be returning the most recent pages added to the balloon > > so the pages are likely still on the processing queue. > > That part is fine I think because of the above. > > > > > For example we allocate pages until shrinker kicks in. > > > Fair enough but in fact maybe it would be better to > > > do the reverse: trigger shrinker and then send as many > > > free pages as we can to host. > > > > I'm not sure I understand this last part. > > Oh basically what I am saying is this: one of the reasons to use page > hinting is when host is short on memory. In that case, why don't we use > shrinker to ask kernel drivers to free up memory? Any memory freed could > then be reported to host. Didn't the balloon driver already have a feature like that where it could start shrinking memory if the host was under memory pressure? If so, how would adding another one add much value? The idea here is if the memory is free we just mark it as such. As long as we can do so with no noticeable overhead on the guest or host, why not just do it?
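As a sketch of the ordering being argued for here, reusing the hints[] array and hinting_vq from the patch at the head of this thread: the hint_done completion and the vq callback below are invented for illustration, to show how the batch could be awaited without a busy loop. Pages would only be handed back to the allocator after the wait returns:

#include <linux/completion.h>
#include <linux/virtio.h>
#include <linux/scatterlist.h>

static void vb_hint_vq_cb(struct virtqueue *vq)
{
        struct virtio_balloon *vb = vq->vdev->priv;

        complete(&vb->hint_done);       /* hint_done is hypothetical */
}

static int vb_report_hints_and_wait(struct virtio_balloon *vb)
{
        struct scatterlist sg;
        int err;

        reinit_completion(&vb->hint_done);
        sg_init_one(&sg, vb->hints, sizeof(vb->hints[0]) * vb->num_hints);

        err = virtqueue_add_outbuf(vb->hinting_vq, &sg, 1, vb, GFP_KERNEL);
        if (err)
                return err;
        virtqueue_kick(vb->hinting_vq);

        /* Sleep rather than spin; the isolated pages go back to the
         * page allocator only after the host has consumed this batch,
         * so dirty logging cannot be undone for a page already in use. */
        wait_for_completion(&vb->hint_done);
        return 0;
}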
On 7/18/19 11:34 AM, Alexander Duyck wrote: > On Wed, Jul 17, 2019 at 10:14 PM Michael S. Tsirkin <mst@redhat.com> wrote: >> On Wed, Jul 17, 2019 at 09:43:52AM -0700, Alexander Duyck wrote: >>> On Wed, Jul 17, 2019 at 3:28 AM Michael S. Tsirkin <mst@redhat.com> wrote: >>>> On Tue, Jul 16, 2019 at 02:06:59PM -0700, Alexander Duyck wrote: >>>>> On Tue, Jul 16, 2019 at 10:41 AM Michael S. Tsirkin <mst@redhat.com> wrote: >>>>> >>>>> <snip> >>>>> >>>>>>>> This is what I am saying. Having watched that patchset being developed, >>>>>>>> I think that's simply because processing blocks required mm core >>>>>>>> changes, which Wei was not up to pushing through. >>>>>>>> >>>>>>>> >>>>>>>> If we did >>>>>>>> >>>>>>>> while (1) { >>>>>>>> alloc_pages >>>>>>>> add_buf >>>>>>>> get_buf >>>>>>>> free_pages >>>>>>>> } >>>>>>>> >>>>>>>> We'd end up passing the same page to balloon again and again. >>>>>>>> >>>>>>>> So we end up reserving lots of memory with alloc_pages instead. >>>>>>>> >>>>>>>> What I am saying is that now that you are developing >>>>>>>> infrastructure to iterate over free pages, >>>>>>>> FREE_PAGE_HINT should be able to use it too. >>>>>>>> Whether that's possible might be a good indication of >>>>>>>> whether the new mm APIs make sense. >>>>>>> The problem is the infrastructure as implemented isn't designed to do >>>>>>> that. I am pretty certain this interface will have issues with being >>>>>>> given small blocks to process at a time. >>>>>>> >>>>>>> Basically the design for the FREE_PAGE_HINT feature doesn't really >>>>>>> have the concept of doing things a bit at a time. It is either >>>>>>> filling, stopped, or done. From what I can tell it requires a >>>>>>> configuration change for the virtio balloon interface to toggle >>>>>>> between those states. >>>>>> Maybe I misunderstand what you are saying. >>>>>> >>>>>> Filling state can definitely report things >>>>>> a bit at a time. It does not assume that >>>>>> all of guest free memory can fit in a VQ. >>>>> I think where you and I may differ is that you are okay with just >>>>> pulling pages until you hit OOM, or allocation failures. Do I have >>>>> that right? >>>> This is exactly what the current code does. But that's an implementation >>>> detail which came about because we failed to find any other way to >>>> iterate over free blocks. >>> I get that. However my concern is that permeated other areas of the >>> implementation that make taking another approach much more difficult >>> than it needs to be. >> Implementation would have to change to use an iterator obviously. But I don't see >> that it leaked out to a hypervisor interface. >> >> In fact take a look at virtio_balloon_shrinker_scan >> and you will see that it calls shrink_free_pages >> without waiting for the device at all. > Yes, and in case you missed it earlier I am pretty sure that leads to > possible memory corruption. I don't think it was tested enough to be > able to say that is safe. > > Specifically we cannot be clearing the dirty flag on pages that are in > use. We should only be clearing that flag for pages that are > guaranteed to not be in use. > >>>>> In my mind I am wanting to perform the hinting on a small >>>>> block at a time and work through things iteratively. >>>>> >>>>> The problem is the FREE_PAGE_HINT doesn't have the option of returning >>>>> pages until all pages have been pulled. It is run to completion and >>>>> will keep filling the balloon until an allocation fails and the host >>>>> says it is done. >>>> OK so there are two points. 
One is that FREE_PAGE_HINT does not >>>> need to allocate a page at all. It really just wants to >>>> iterate over free pages. >>> I agree that it should just want to iterate over pages. However the >>> issue I am trying to point out is that it doesn't have any guarantees >>> on ordering and that is my concern. What I want to avoid is >>> potentially corrupting memory. >> I get that. I am just trying to make sure you are aware that for >> FREE_PAGE_HINT specifically ordering does not matter because it does not >> care when hypervisor used the buffers. It only cares that page was >> free after it got the request. used buffers are only tracked to avoid >> overflowing the VQ. This is different from your hinting where you make >> it the responsibility of the guest to not allocate page before it was >> used. > Prove to me that the ordering does not matter. As far as I can tell it > should since this is being used to clear the bitmap and will affect > migration. I'm pretty certain the page should not be freed until it > has been processed. Otherwise I believe there is a risk of the page > not being migrated and leading to a memory corruption when the VM is > finally migrated. > >>> So for example with my current hinting approach I am using the list of >>> hints because I get back one completion indicating all of the hints >>> have been processed. It is only at that point that I can go back and >>> make the memory available for allocation again. >> Right. But just counting them would work just as well, no? >> At least as long as you wait for everything to complete... >> If you want to pipeline, see below > Yes, but if possible I would also want to try and keep the batch > behavior that I have. We could count the descriptors processed, > however that is still essentially done all via busy waiting in the > FREE_PAGE_HINT logic. > >>> So one big issue right now with the FREE_PAGE_HINT approach is that it >>> is designed to be all or nothing. Using the balloon makes it >>> impossible for us to be incremental as all the pages are contained in >>> one spot. What we would need is some way to associate a page with a >>> given vq buffer. >> Sorry if I'm belaboring the obvious, but isn't this what 'void *data' in >> virtqueue_add_inbuf is designed for? And if you only ever use >> virtqueue_add_inbuf and virtqueue_add_outbuf on a given VQ, then you can >> track two pointers using virtqueue_add_inbuf_ctx. > I am still learning virtio so I wasn't aware of this piece until > yesterday. For FREE_PAGE_HINT it would probably work as we would then > have that association. For my page hinting I am still thinking I would > prefer to just pass around a scatterlist since that is the structure I > would likely fill and then later drain of pages versus just > maintaining a list. > >>> Ultimately in order to really make the FREE_PAGE_HINT >>> logic work with something like my page hinting logic it would need to >>> work more like a network Rx ring in that we would associate a page per >>> buffer and have some way of knowing the two are associated. >> Right. That's exactly how virtio net does it btw. > Yeah, I saw that after reviewing the code yesterday. > >>>> The reason FREE_PAGE_HINT does not free up pages until we finished >>>> iterating over the free list it not a hypervisor API. The reason is we >>>> don't want to keep getting the same address over and over again. 
>>>> >>>>> I would prefer to avoid that as I prefer to simply >>>>> notify the host of a fixed block of pages at a time and let it process >>>>> without having to have a thread on each side actively pushing pages, >>>>> or listening for the incoming pages. >>>> Right. And FREE_PAGE_HINT can go even further. It can push a page and >>>> let linux use it immediately. It does not even need to wait for host to >>>> process anything unless the VQ gets full. >>> If it is doing what you are saying it will be corrupting memory. >> No and that is hypervisor's responsibility. >> >> I think you are missing part of the picture here. >> >> Here is a valid implementation: >> >> Before asking for hints, hypervisor write-protects all memory, and logs >> all write faults. When hypervisor gets the hint, if page has since been >> modified, the hint is ignored. > No here is the part where I think you missed the point. I was already > aware of this. So my concern is this scenario. > > If you put a hint on the VQ and then free the memory back to the > guest, what about the scenario where another process could allocate > the memory and dirty it before we process the hint request on the > host? In that case the page was dirtied, the hypervisor will have > correctly write faulted and dirtied it, and then we came though and > incorrectly marked it as being free. That is the scenario I am worried > about as I am pretty certain that leads to memory corruption. > > >>> At a >>> minimum it has to wait until the page has been processed and the dirty >>> bit cleared before it can let linux use it again. It is all a matter >>> of keeping the dirty bit coherent. If we let linux use it again >>> immediately and then cleared the dirty bit we would open up a possible >>> data corruption race during migration as a dirty page might not be >>> marked as such. >> I think you are talking about the dirty bit on the host, right? >> >> The implication is that calling MADV_FREE from qemu would >> not be a good implementation of FREE_PAGE_HINT. >> And indeed, as far as I can see it does nothing of the sort. > I don't mean the dirty bit on the host, I am talking about the bitmap > used to determine which pages need to be migrated. That is what this > hint is updating and it is also being tracked via the write protection > of the pages at the start of migration. > > My concern is that we can end up losing track of pages that are > updated if we are hinting after they have been freed back to the guest > for reallocation. > >>>>>>>>> The basic idea with the bubble hinting was to essentially create mini >>>>>>>>> balloons. As such I had based the code off of the balloon inflation >>>>>>>>> code. The only spot where it really differs is that I needed the >>>>>>>>> ability to pass higher order pages so I tweaked thinks and passed >>>>>>>>> "hints" instead of "pfns". >>>>>>>> And that is fine. But there isn't really such a big difference with >>>>>>>> FREE_PAGE_HINT except FREE_PAGE_HINT triggers upon host request and not >>>>>>>> in response to guest load. >>>>>>> I disagree, I believe there is a significant difference. >>>>>> Yes there is, I just don't think it's in the iteration. >>>>>> The iteration seems to be useful to hinting. >>>>> I agree that iteration is useful to hinting. The problem is the >>>>> FREE_PAGE_HINT code isn't really designed to be iterative. It is >>>>> designed to run with a polling thread on each side and it is meant to >>>>> be run to completion. >>>> Absolutely. But that's a bug I think. 
>>> I think it is a part of the design. Basically in order to avoid >>> corrupting memory it cannot return the page to the guest kernel until >>> it has finished clearing the dirty bits associated with the pages. >> OK I hope I clarified by that's not supposed to be the case. > I think you might have missed something. I am pretty certain issues > are still present. > >>>>>>> The >>>>>>> FREE_PAGE_HINT code was implemented to be more of a streaming >>>>>>> interface. >>>>>> It's implemented like this but it does not follow from >>>>>> the interface. The implementation is a combination of >>>>>> attempts to minimize # of exits and minimize mm core changes. >>>>> The problem is the interface doesn't have a good way of indicating >>>>> that it is done with a block of pages. >>>>> >>>>> So what I am probably looking at if I do a sg implementation for my >>>>> hinting is to provide one large sg block for all 32 of the pages I >>>>> might be holding. >>>> Right now if you pass an sg it will try to allocate a buffer >>>> on demand for you. If this is a problem I could come up >>>> with a new API that lets caller allocate the buffer. >>>> Let me know. >>>> >>>>> I'm assuming that will still be processed as one >>>>> contiguous block. With that I can then at least maintain a single >>>>> response per request. >>>> Why do you care? Won't a counter of outstanding pages be enough? >>>> Down the road maybe we could actually try to pipeline >>>> things a bit. So send 32 pages once you get 16 of these back >>>> send 16 more. Better for SMP configs and does not hurt >>>> non-SMP too much. I am not saying we need to do it right away though. >>> So the big thing is we cannot give the page back to the guest kernel >>> until we know the processing has been completed. In the case of the >>> MADV_DONT_NEED call it will zero out the entire page on the next >>> access. If the guest kernel had already written data by the time we >>> get to that it would cause a data corruption and kill the whole guest. >> >> Exactly but FREE_PAGE_HINT does not cause qemu to call MADV_DONT_NEED. > No, instead it clears the bit indicating that the page is supposed to > be migrated. The effect will not be all that different, just delayed > until the VM is actually migrated. > >>>>>>> This is one of the things Linus kept complaining about in >>>>>>> his comments. This code attempts to pull in ALL of the higher order >>>>>>> pages, not just a smaller block of them. >>>>>> It wants to report all higher order pages eventually, yes. >>>>>> But it's absolutely fine to report a chunk and then wait >>>>>> for host to process the chunk before reporting more. >>>>>> >>>>>> However, interfaces we came up with for this would call >>>>>> into virtio with a bunch of locks taken. >>>>>> The solution was to take pages off the free list completely. >>>>>> That in turn means we can't return them until >>>>>> we have processed all free memory. >>>>> I get that. The problem is the interface is designed around run to >>>>> completion. For example it will sit there in a busy loop waiting for a >>>>> free buffer because it knows the other side is suppose to be >>>>> processing the pages already. >>>> I didn't get this part. >>> I think the part you may not be getting is that we cannot let the >>> guest use the page until the hint has been processed. Otherwise we >>> risk corrupting memory. That is the piece that has me paranoid. If we >>> end up performing a hint on a page that is use somewhere in the kernel >>> it will corrupt memory one way or another. 
That is the thing I have to >>> avoid at all cost. >> You have to do it, sure. And that is because you do not >> assume that hypervisor does it for you. But FREE_PAGE_HINT doesn't, >> hypervisor takes care of that. > Sort of. The hypervisor is trying to do dirty page tracking, however > the FREE_PAGE_HINT interferes with that. That is the problem. If we > get that out of order then the hypervisor work will be undone and we > just make a mess of memory. > >>> That is why I have to have a way to know exactly which pages have been >>> processed and which haven't before I return pages to the guest. >>> Otherwise I am just corrupting memory. >> Sure. That isn't really hard though. > Agreed. > >>>>>>> Honestly the difference is >>>>>>> mostly in the hypervisor interface than what is needed for the kernel >>>>>>> interface, however the design of the hypervisor interface would make >>>>>>> doing things more incrementally much more difficult. >>>>>> OK that's interesting. The hypervisor interface is not >>>>>> documented in the spec yet. Let me take a stub at a writeup now. So: >>>>>> >>>>>> >>>>>> >>>>>> - hypervisor requests reporting by modifying command ID >>>>>> field in config space, and interrupting guest >>>>>> >>>>>> - in response, guest sends the command ID value on a special >>>>>> free page hinting VQ, >>>>>> followed by any number of buffers. Each buffer is assumed >>>>>> to be the address and length of memory that was >>>>>> unused *at some point after the time when command ID was sent*. >>>>>> >>>>>> Note that hypervisor takes pains to handle the case >>>>>> where memory is actually no longer free by the time >>>>>> it gets the memory. >>>>>> This allows guest driver to take more liberties >>>>>> and free pages without waiting for guest to >>>>>> use the buffers. >>>>>> >>>>>> This is also one of the reason we call this a free page hint - >>>>>> the guarantee that page is free is a weak one, >>>>>> in that sense it's more of a hint than a promise. >>>>>> That helps guarantee we don't create OOM out of blue. >>>> I would like to stress the last paragraph above. >>> The problem is we don't want to give bad hints. What we do based on >>> the hint is clear the dirty bit. If we clear it in err when the page >>> is actually in use it will lead to data corruption after migration. >> That's true for your patches. I get that. > No, it should be true for FREE_PAGE_HINT as well. The fact that it > isn't is a bug as far as I am concerned. If you are doing dirty page > tracking in the hypervisor you cannot expect it to behave well if the > guest is providing it with bad data. > >>> The idea with the hint is that you are saying the page is currently >>> not in use, however if you send that hint late and have already freed >>> the page back you can corrupt memory. >> >> That part is I think wrong - assuming "you" means upstream code. > Yes, I am referring to someone running FREE_PAGE_HINT code. I usually > try to replace them with "we" to make it clear I am not talking about > someone personally, it is a bad habit. > >>>>>> - guest eventually sends a special buffer signalling to >>>>>> host that it's done sending free pages. >>>>>> It then stops reporting until command id changes. >>>>> The pages are not freed back to the guest until the host reports that >>>>> it is "DONE" via a configuration change. Doing that stops any further >>>>> progress, and attempting to resume will just restart from the >>>>> beginning. >>>> Right but it's not a requirement. Host does not assume this at all. 
>>>> It's done like this simply because we can't iterate over pages >>>> with the existing API. >>> The problem is nothing about the implementation was designed for >>> iteration. What I would have to do is likely gut and rewrite the >>> entire guest side of the FREE_PAGE_HINT code in order to make it work >>> iteratively. >> >> Right. I agree. >> >>> As I mentioned it would probably have to look more like a >>> NIC Rx ring in handling because we would have to have some sort of way >>> to associate the pages 1:1 to the buffers. >>> >>>>> The big piece this design is missing is the incremental notification >>>>> pages have been processed. The existing code just fills the vq with >>>>> pages and keeps doing it until it cannot allocate any more pages. We >>>>> would have to add logic to stop, flush, and resume to the existing >>>>> framework. >>>> But not to the hypervisor interface. Hypervisor is fine >>>> with pages being reused immediately. In fact, even before they >>>> are processed. >>> I don't think that is actually the case. If it does that I am pretty >>> sure it will corrupt memory during migration. >>> >>> Take a look at qemu_guest_free_page_hint: >>> https://github.com/qemu/qemu/blob/master/migration/ram.c#L3342 >>> >>> I'm pretty sure that code is going in and clearing the dirty bitmap >>> for memory. >> Yes it does. However the trick is that meanwhile >> kvm is logging new writes. So the bitmap that >> is being cleared is the bitmap that was logged before the request >> was sent to guest. >> >>> If we were to allow a page to be allocated and used and >>> then perform the hint it is going to introduce a race where the page >>> might be missed for migration and could result in memory corruption. >> commit c13c4153f76db23cac06a12044bf4dd346764059 has this explanation: >> >> Note: balloon will report pages which were free at the time of this call. >> As the reporting happens asynchronously, dirty bit logging must be >> enabled before this free_page_start call is made. Guest reporting must be >> disabled before the migration dirty bitmap is synchronized. >> >> but over multiple iterations this seems to have been dropped >> from code comments. Wei, would you mind going back >> and documenting the APIs you used? >> They seem to be causing confusion ... > The "Note" is the behavior I am seeing. Specifically there is nothing > in place to prevent the freed pages from causing corruption if they > are freed before being hinted. The requirement should be that they > cannot be freed until after they are hinted that way the dirty bit > logging will mark the page as dirty if it is accessed AFTER being > hinted. > > If you do not guarantee the hinting has happened first you could end > up logging the dirty bit before the hint is processed and then clear > the dirty bit due to the hint. It is pretty straight forward to > resolve by just not putting the page into the balloon until after the > hint has been processed. > >>>>>> - host can restart the process at any time by >>>>>> updating command ID. That will make guest stop >>>>>> and start from the beginning. >>>>>> >>>>>> - host can also stop the process by specifying a special >>>>>> command ID value. 
>>>>>> >>>>>> >>>>>> ========= >>>>>> >>>>>> >>>>>> Now let's compare to what you have here: >>>>>> >>>>>> - At any time after boot, guest walks over free memory and sends >>>>>> addresses as buffers to the host >>>>>> >>>>>> - Memory reported is then guaranteed to be unused >>>>>> until host has used the buffers >>>>>> >>>>>> >>>>>> Is above a fair summary? >>>>>> >>>>>> So yes there's a difference but the specific bit of chunking is same >>>>>> imho. >>>>> The big difference is that I am returning the pages after they are >>>>> processed, while FREE_PAGE_HINT doesn't and isn't designed to. >>>> It doesn't but the hypervisor *is* designed to support that. >>> Not really, it seems like it is more just a side effect of things. >> I hope the commit log above is enough to convice you we did >> think about this. > Sorry, but no. I think the "note" convinced me there is a race > condition, specifically in the shrinker case. We cannot free the page > back to host memory until the hint has been processed, otherwise we > will race with the dirty bit logging. > >>> Also as I mentioned before I am also not a huge fan of polling on both >>> sides as it is just going to burn through CPU. If we are iterative and >>> polling it is going to end up with us potentially pushing one CPU at >>> 100%, and if the one CPU doing the polling cannot keep up with the >>> page updates coming from the other CPUs we would be stuck in that >>> state for a while. I would have preferred to see something where the >>> CPU would at least allow other tasks to occur while it is waiting for >>> buffers to be returned by the host. >> You lost me here. What does polling have to do with it? > This is just another issue I found. Specifically busy polling while > waiting on the host to process the hints. I'm not a fan of it and was > just pointing it out. > >>>>> The >>>>> problem is the interface doesn't allow for a good way to identify that >>>>> any given block of pages has been processed and can be returned. >>>> And that's because FREE_PAGE_HINT does not care. >>>> It can return any page at any point even before hypervisor >>>> saw it. >>> I disagree, see my comment above. >> OK let's see if above is enough to convice you. Or maybe we >> have a bug when shrinker is invoked :) But I don't think so. > I'm pretty sure there is a bug. > >>>>> Instead pages go in, but they don't come out until the configuration >>>>> is changed and "DONE" is reported. The act of reporting "DONE" will >>>>> reset things and start them all over which kind of defeats the point. >>>> Right. >>>> >>>> But if you consider how we are using the shrinker you will >>>> see that it's kind of broken. >>>> For example not keeping track of allocated >>>> pages means the count we return is broken >>>> while reporting is active. >>>> >>>> I looked at fixing it but really if we can just >>>> stop allocating memory that would be way cleaner. >>> Agreed. If we hit an OOM we should probably just stop the free page >>> hinting and treat that as the equivalent to an allocation failure. >> And fix the shrinker count to include the pages in the vq. Yea. > I don't know if we really want to touch the pages in the VQ. I would > say that we should leave them alone. > >>> As-is I think this also has the potential for corrupting memory since >>> it will likely be returning the most recent pages added to the balloon >>> so the pages are likely still on the processing queue. >> That part is fine I think because of the above. 
>> >>>> For example we allocate pages until shrinker kicks in. >>>> Fair enough but in fact maybe it would be better to >>>> do the reverse: trigger shrinker and then send as many >>>> free pages as we can to host. >>> I'm not sure I understand this last part. >> Oh basically what I am saying is this: one of the reasons to use page >> hinting is when host is short on memory. In that case, why don't we use >> shrinker to ask kernel drivers to free up memory? Any memory freed could >> then be reported to host. > Didn't the balloon driver already have a feature like that where it > could start shrinking memory if the host was under memory pressure? If you are referring to auto-ballooning (I don't think it is merged), it has its own set of disadvantages: it could easily lead to OOM, memory corruption, and so on. VIRTIO_BALLOON_F_FREE_PAGE_HINT does address some of those issues. However, it still requires external control to initiate/stop the memory transaction. > If > so how would adding another one add much value. > The idea here is if the memory is free we just mark it as such. As > long as we can do so with no noticeable overhead on the guest or host > why not just do it? +1. This is the advantage that both of the hinting solutions are trying to provide.
On Thu, Jul 18, 2019 at 08:34:37AM -0700, Alexander Duyck wrote: > On Wed, Jul 17, 2019 at 10:14 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > On Wed, Jul 17, 2019 at 09:43:52AM -0700, Alexander Duyck wrote: > > > On Wed, Jul 17, 2019 at 3:28 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > On Tue, Jul 16, 2019 at 02:06:59PM -0700, Alexander Duyck wrote: > > > > > On Tue, Jul 16, 2019 at 10:41 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > > > <snip> > > > > > > > > > > > > > This is what I am saying. Having watched that patchset being developed, > > > > > > > > I think that's simply because processing blocks required mm core > > > > > > > > changes, which Wei was not up to pushing through. > > > > > > > > > > > > > > > > > > > > > > > > If we did > > > > > > > > > > > > > > > > while (1) { > > > > > > > > alloc_pages > > > > > > > > add_buf > > > > > > > > get_buf > > > > > > > > free_pages > > > > > > > > } > > > > > > > > > > > > > > > > We'd end up passing the same page to balloon again and again. > > > > > > > > > > > > > > > > So we end up reserving lots of memory with alloc_pages instead. > > > > > > > > > > > > > > > > What I am saying is that now that you are developing > > > > > > > > infrastructure to iterate over free pages, > > > > > > > > FREE_PAGE_HINT should be able to use it too. > > > > > > > > Whether that's possible might be a good indication of > > > > > > > > whether the new mm APIs make sense. > > > > > > > > > > > > > > The problem is the infrastructure as implemented isn't designed to do > > > > > > > that. I am pretty certain this interface will have issues with being > > > > > > > given small blocks to process at a time. > > > > > > > > > > > > > > Basically the design for the FREE_PAGE_HINT feature doesn't really > > > > > > > have the concept of doing things a bit at a time. It is either > > > > > > > filling, stopped, or done. From what I can tell it requires a > > > > > > > configuration change for the virtio balloon interface to toggle > > > > > > > between those states. > > > > > > > > > > > > Maybe I misunderstand what you are saying. > > > > > > > > > > > > Filling state can definitely report things > > > > > > a bit at a time. It does not assume that > > > > > > all of guest free memory can fit in a VQ. > > > > > > > > > > I think where you and I may differ is that you are okay with just > > > > > pulling pages until you hit OOM, or allocation failures. Do I have > > > > > that right? > > > > > > > > This is exactly what the current code does. But that's an implementation > > > > detail which came about because we failed to find any other way to > > > > iterate over free blocks. > > > > > > I get that. However my concern is that permeated other areas of the > > > implementation that make taking another approach much more difficult > > > than it needs to be. > > > > Implementation would have to change to use an iterator obviously. But I don't see > > that it leaked out to a hypervisor interface. > > > > In fact take a look at virtio_balloon_shrinker_scan > > and you will see that it calls shrink_free_pages > > without waiting for the device at all. > > Yes, and in case you missed it earlier I am pretty sure that leads to > possible memory corruption. I don't think it was tested enough to be > able to say that is safe. More testing would be good, for sure. > Specifically we cannot be clearing the dirty flag on pages that are in > use. We should only be clearing that flag for pages that are > guaranteed to not be in use. 
I think that clearing the dirty flag is safe if the flag was originally set and the page has been write-protected before reporting was requested. In that case we know that page has not been changed. Right? > > > > > In my mind I am wanting to perform the hinting on a small > > > > > block at a time and work through things iteratively. > > > > > > > > > > The problem is the FREE_PAGE_HINT doesn't have the option of returning > > > > > pages until all pages have been pulled. It is run to completion and > > > > > will keep filling the balloon until an allocation fails and the host > > > > > says it is done. > > > > > > > > OK so there are two points. One is that FREE_PAGE_HINT does not > > > > need to allocate a page at all. It really just wants to > > > > iterate over free pages. > > > > > > I agree that it should just want to iterate over pages. However the > > > issue I am trying to point out is that it doesn't have any guarantees > > > on ordering and that is my concern. What I want to avoid is > > > potentially corrupting memory. > > > > I get that. I am just trying to make sure you are aware that for > > FREE_PAGE_HINT specifically ordering does not matter because it does not > > care when hypervisor used the buffers. It only cares that page was > > free after it got the request. used buffers are only tracked to avoid > > overflowing the VQ. This is different from your hinting where you make > > it the responsibility of the guest to not allocate page before it was > > used. > > Prove to me that the ordering does not matter. As far as I can tell it > should since this is being used to clear the bitmap and will affect > migration. OK I will try. Imagine a page that is used by Linux. It has been write protected by sync dirty bitmap. Note how that does not happen while reporting is active: it happens before and next after reporting is done. Now what are the bits that will be cleared by hinting? These are dirty bits from page use from before hinting was requested. We do not care about these because we know that page was free at some point afterwards. So any data it had can be safely discarded. All this should have been documented in qemu source but unfortunately wasn't :( Is the above convincing? > I'm pretty certain the page should not be freed until it > has been processed. Otherwise I believe there is a risk of the page > not being migrated and leading to a memory corruption when the VM is > finally migrated. I understand the concern, it was definitely on my mind and I think it was addressed. But do let me know. > > > > > > So for example with my current hinting approach I am using the list of > > > hints because I get back one completion indicating all of the hints > > > have been processed. It is only at that point that I can go back and > > > make the memory available for allocation again. > > > > Right. But just counting them would work just as well, no? > > At least as long as you wait for everything to complete... > > If you want to pipeline, see below > > Yes, but if possible I would also want to try and keep the batch > behavior that I have. As in pass a batch to host at once? Sure I think it's a good idea. > We could count the descriptors processed, > however that is still essentially done all via busy waiting in the > FREE_PAGE_HINT logic. OK let's discuss FREE_PAGE_HINT separately above. Until we agree on whether it's safe to free up pages before they are used for that usecase, we are just going in circles. 
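To make the "counter of outstanding pages" / pipelining idea above concrete, here is a rough sketch of keeping a window of in-flight hints. HINT_BATCH, have_free_pages_to_hint() and add_one_hint() are invented for illustration; only virtqueue_kick() and virtqueue_get_buf() are the real virtio core calls:

#define HINT_BATCH	32

/*
 * Illustrative only: pipelined hinting driven by a counter of
 * outstanding pages. A real version would sleep on the VQ callback
 * instead of spinning, per the busy-polling complaint later in the
 * thread.
 */
static void hint_pipelined(struct virtqueue *vq)
{
	unsigned int outstanding = 0, len;

	while (have_free_pages_to_hint() || outstanding) {
		/* top up the window: send more as completions come back */
		while (outstanding < HINT_BATCH && add_one_hint(vq) == 0)
			outstanding++;
		virtqueue_kick(vq);

		/* each used buffer retires one outstanding hint */
		while (virtqueue_get_buf(vq, &len))
			outstanding--;
	}
}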
> > > > > > So one big issue right now with the FREE_PAGE_HINT approach is that it > > > is designed to be all or nothing. Using the balloon makes it > > > impossible for us to be incremental as all the pages are contained in > > > one spot. What we would need is some way to associate a page with a > > > given vq buffer. > > > > Sorry if I'm belaboring the obvious, but isn't this what 'void *data' in > > virtqueue_add_inbuf is designed for? And if you only ever use > > virtqueue_add_inbuf and virtqueue_add_outbuf on a given VQ, then you can > > track two pointers using virtqueue_add_inbuf_ctx. > > I am still learning virtio so I wasn't aware of this piece until > yesterday. For FREE_PAGE_HINT it would probably work as we would then > have that association. For my page hinting I am still thinking I would > prefer to just pass around a scatterlist since that is the structure I > would likely fill and then later drain of pages versus just > maintaining a list. OK. That might need an API extension. We do support scatter lists but ATM they allocate memory internally. Not something you want to do when you are playing with free lists I think. > > > Ultimately in order to really make the FREE_PAGE_HINT > > > logic work with something like my page hinting logic it would need to > > > work more like a network Rx ring in that we would associate a page per > > > buffer and have some way of knowing the two are associated. > > > > Right. That's exactly how virtio net does it btw. > > Yeah, I saw that after reviewing the code yesterday. > > > > > The reason FREE_PAGE_HINT does not free up pages until we finished > > > > iterating over the free list it not a hypervisor API. The reason is we > > > > don't want to keep getting the same address over and over again. > > > > > > > > > I would prefer to avoid that as I prefer to simply > > > > > notify the host of a fixed block of pages at a time and let it process > > > > > without having to have a thread on each side actively pushing pages, > > > > > or listening for the incoming pages. > > > > > > > > Right. And FREE_PAGE_HINT can go even further. It can push a page and > > > > let linux use it immediately. It does not even need to wait for host to > > > > process anything unless the VQ gets full. > > > > > > If it is doing what you are saying it will be corrupting memory. > > > > No and that is hypervisor's responsibility. > > > > I think you are missing part of the picture here. > > > > Here is a valid implementation: > > > > Before asking for hints, hypervisor write-protects all memory, and logs > > all write faults. When hypervisor gets the hint, if page has since been > > modified, the hint is ignored. > > No here is the part where I think you missed the point. I was already > aware of this. So my concern is this scenario. > > If you put a hint on the VQ and then free the memory back to the > guest, what about the scenario where another process could allocate > the memory and dirty it before we process the hint request on the > host? In that case the page was dirtied, the hypervisor will have > correctly write faulted and dirtied it, and then we came though and > incorrectly marked it as being free. That is the scenario I am worried > about as I am pretty certain that leads to memory corruption. It would for sure. There are actually two dirty bit data structures. One is maintained by KVM, I'd like to call it a "write log" here. the other is maintained by qemu, that's the "dirty bitmap". 
sync is the step where we atomically copy write log to dirty bitmap and write-protect memory. It works like this in theory:

sync
command id ++
request hints from guest with command id
XXX -> get hint - if command id matches - clear dirty bitmap bit
sync

The code underwent enough changes that I couldn't easily verify that's still the case, but it was very clear originally :) Can you see how if a hint crosses a sync then it has a different command id and so is ignored? And if not, then writes are logged. > > > > > > At a > > > minimum it has to wait until the page has been processed and the dirty > > > bit cleared before it can let linux use it again. It is all a matter > > > of keeping the dirty bit coherent. If we let linux use it again > > > immediately and then cleared the dirty bit we would open up a possible > > > data corruption race during migration as a dirty page might not be > > > marked as such. > > > > I think you are talking about the dirty bit on the host, right? > > > > The implication is that calling MADV_FREE from qemu would > > not be a good implementation of FREE_PAGE_HINT. > > And indeed, as far as I can see it does nothing of the sort. > > I don't mean the dirty bit on the host, I am talking about the bitmap > used to determine which pages need to be migrated. That is what this > hint is updating and it is also being tracked via the write protection > of the pages at the start of migration. > > My concern is that we can end up losing track of pages that are > updated if we are hinting after they have been freed back to the guest > for reallocation. > > > > > > > > > > > > > > > The basic idea with the bubble hinting was to essentially create mini > > > > > > > > > balloons. As such I had based the code off of the balloon inflation > > > > > > > > > code. The only spot where it really differs is that I needed the > > > > > > > > > ability to pass higher order pages so I tweaked thinks and passed > > > > > > > > > "hints" instead of "pfns". > > > > > > > > > > > > > > > > And that is fine. But there isn't really such a big difference with > > > > > > > > FREE_PAGE_HINT except FREE_PAGE_HINT triggers upon host request and not > > > > > > > > in response to guest load. > > > > > > > > > > > > > > I disagree, I believe there is a significant difference. > > > > > > > > > > > > Yes there is, I just don't think it's in the iteration. > > > > > > The iteration seems to be useful to hinting. > > > > > > > > > > I agree that iteration is useful to hinting. The problem is the > > > > > FREE_PAGE_HINT code isn't really designed to be iterative. It is > > > > > designed to run with a polling thread on each side and it is meant to > > > > > be run to completion. > > > > > > > > Absolutely. But that's a bug I think. > > > > > > I think it is a part of the design. Basically in order to avoid > > > corrupting memory it cannot return the page to the guest kernel until > > > it has finished clearing the dirty bits associated with the pages. > > > > OK I hope I clarified by that's not supposed to be the case. > > I think you might have missed something. I am pretty certain issues > are still present. > > > > > > > > The > > > > > > > FREE_PAGE_HINT code was implemented to be more of a streaming > > > > > > > interface. > > > > > > > > > > > > It's implemented like this but it does not follow from > > > > > > the interface. The implementation is a combination of > > > > > > attempts to minimize # of exits and minimize mm core changes.
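Restating the sync/command-id flow sketched above as hypervisor-side pseudocode may help; none of these names are the real QEMU/KVM symbols, only the ordering matters:

static unsigned int command_id;

static void migration_sync(void)
{
	/*
	 * Atomically fold KVM's write log into the migration dirty
	 * bitmap and re-write-protect guest memory ...
	 */
	fold_write_log_into_dirty_bitmap();

	/* ... then open a new hinting round under a fresh id */
	command_id++;
	request_hints_from_guest(command_id);
}

static void on_free_page_hint(unsigned long pfn, unsigned int hint_id)
{
	/* a hint that crossed a sync carries a stale id: drop it */
	if (hint_id != command_id)
		return;

	/*
	 * The page was free at some point after the last write-protect,
	 * so its pre-sync dirty bit can be discarded; any later write
	 * is still caught by the active write log.
	 */
	clear_dirty_bitmap_bit(pfn);
}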
> > > > > > > > > > The problem is the interface doesn't have a good way of indicating > > > > > that it is done with a block of pages. > > > > > > > > > > So what I am probably looking at if I do a sg implementation for my > > > > > hinting is to provide one large sg block for all 32 of the pages I > > > > > might be holding. > > > > > > > > Right now if you pass an sg it will try to allocate a buffer > > > > on demand for you. If this is a problem I could come up > > > > with a new API that lets caller allocate the buffer. > > > > Let me know. > > > > > > > > > I'm assuming that will still be processed as one > > > > > contiguous block. With that I can then at least maintain a single > > > > > response per request. > > > > > > > > Why do you care? Won't a counter of outstanding pages be enough? > > > > Down the road maybe we could actually try to pipeline > > > > things a bit. So send 32 pages once you get 16 of these back > > > > send 16 more. Better for SMP configs and does not hurt > > > > non-SMP too much. I am not saying we need to do it right away though. > > > > > > So the big thing is we cannot give the page back to the guest kernel > > > until we know the processing has been completed. In the case of the > > > MADV_DONT_NEED call it will zero out the entire page on the next > > > access. If the guest kernel had already written data by the time we > > > get to that it would cause a data corruption and kill the whole guest. > > > > > > Exactly but FREE_PAGE_HINT does not cause qemu to call MADV_DONT_NEED. > > No, instead it clears the bit indicating that the page is supposed to > be migrated. The effect will not be all that different, just delayed > until the VM is actually migrated. > > > > > > > > This is one of the things Linus kept complaining about in > > > > > > > his comments. This code attempts to pull in ALL of the higher order > > > > > > > pages, not just a smaller block of them. > > > > > > > > > > > > It wants to report all higher order pages eventually, yes. > > > > > > But it's absolutely fine to report a chunk and then wait > > > > > > for host to process the chunk before reporting more. > > > > > > > > > > > > However, interfaces we came up with for this would call > > > > > > into virtio with a bunch of locks taken. > > > > > > The solution was to take pages off the free list completely. > > > > > > That in turn means we can't return them until > > > > > > we have processed all free memory. > > > > > > > > > > I get that. The problem is the interface is designed around run to > > > > > completion. For example it will sit there in a busy loop waiting for a > > > > > free buffer because it knows the other side is suppose to be > > > > > processing the pages already. > > > > > > > > I didn't get this part. > > > > > > I think the part you may not be getting is that we cannot let the > > > guest use the page until the hint has been processed. Otherwise we > > > risk corrupting memory. That is the piece that has me paranoid. If we > > > end up performing a hint on a page that is use somewhere in the kernel > > > it will corrupt memory one way or another. That is the thing I have to > > > avoid at all cost. > > > > You have to do it, sure. And that is because you do not > > assume that hypervisor does it for you. But FREE_PAGE_HINT doesn't, > > hypervisor takes care of that. > > Sort of. The hypervisor is trying to do dirty page tracking, however > the FREE_PAGE_HINT interferes with that. That is the problem. 
If we > get that out of order then the hypervisor work will be undone and we > just make a mess of memory. > > > > That is why I have to have a way to know exactly which pages have been > > > processed and which haven't before I return pages to the guest. > > > Otherwise I am just corrupting memory. > > > > Sure. That isn't really hard though. > > Agreed. > > > > > > > > > > > Honestly the difference is > > > > > > > mostly in the hypervisor interface than what is needed for the kernel > > > > > > > interface, however the design of the hypervisor interface would make > > > > > > > doing things more incrementally much more difficult. > > > > > > > > > > > > OK that's interesting. The hypervisor interface is not > > > > > > documented in the spec yet. Let me take a stub at a writeup now. So: > > > > > > > > > > > > > > > > > > > > > > > > - hypervisor requests reporting by modifying command ID > > > > > > field in config space, and interrupting guest > > > > > > > > > > > > - in response, guest sends the command ID value on a special > > > > > > free page hinting VQ, > > > > > > followed by any number of buffers. Each buffer is assumed > > > > > > to be the address and length of memory that was > > > > > > unused *at some point after the time when command ID was sent*. > > > > > > > > > > > > Note that hypervisor takes pains to handle the case > > > > > > where memory is actually no longer free by the time > > > > > > it gets the memory. > > > > > > This allows guest driver to take more liberties > > > > > > and free pages without waiting for guest to > > > > > > use the buffers. > > > > > > > > > > > > This is also one of the reason we call this a free page hint - > > > > > > the guarantee that page is free is a weak one, > > > > > > in that sense it's more of a hint than a promise. > > > > > > That helps guarantee we don't create OOM out of blue. > > > > > > > > I would like to stress the last paragraph above. > > > > > > The problem is we don't want to give bad hints. What we do based on > > > the hint is clear the dirty bit. If we clear it in err when the page > > > is actually in use it will lead to data corruption after migration. > > > > That's true for your patches. I get that. > > No, it should be true for FREE_PAGE_HINT as well. The fact that it > isn't is a bug as far as I am concerned. If you are doing dirty page > tracking in the hypervisor you cannot expect it to behave well if the > guest is providing it with bad data. > > > > The idea with the hint is that you are saying the page is currently > > > not in use, however if you send that hint late and have already freed > > > the page back you can corrupt memory. > > > > > > That part is I think wrong - assuming "you" means upstream code. > > Yes, I am referring to someone running FREE_PAGE_HINT code. I usually > try to replace them with "we" to make it clear I am not talking about > someone personally, it is a bad habit. > > > > > > > > > > > > > - guest eventually sends a special buffer signalling to > > > > > > host that it's done sending free pages. > > > > > > It then stops reporting until command id changes. > > > > > > > > > > The pages are not freed back to the guest until the host reports that > > > > > it is "DONE" via a configuration change. Doing that stops any further > > > > > progress, and attempting to resume will just restart from the > > > > > beginning. > > > > > > > > Right but it's not a requirement. Host does not assume this at all. 
> > > > It's done like this simply because we can't iterate over pages > > > > with the existing API. > > > > > > The problem is nothing about the implementation was designed for > > > iteration. What I would have to do is likely gut and rewrite the > > > entire guest side of the FREE_PAGE_HINT code in order to make it work > > > iteratively. > > > > > > Right. I agree. > > > > > As I mentioned it would probably have to look more like a > > > NIC Rx ring in handling because we would have to have some sort of way > > > to associate the pages 1:1 to the buffers. > > > > > > > > The big piece this design is missing is the incremental notification > > > > > pages have been processed. The existing code just fills the vq with > > > > > pages and keeps doing it until it cannot allocate any more pages. We > > > > > would have to add logic to stop, flush, and resume to the existing > > > > > framework. > > > > > > > > But not to the hypervisor interface. Hypervisor is fine > > > > with pages being reused immediately. In fact, even before they > > > > are processed. > > > > > > I don't think that is actually the case. If it does that I am pretty > > > sure it will corrupt memory during migration. > > > > > > Take a look at qemu_guest_free_page_hint: > > > https://github.com/qemu/qemu/blob/master/migration/ram.c#L3342 > > > > > > I'm pretty sure that code is going in and clearing the dirty bitmap > > > for memory. > > > > Yes it does. However the trick is that meanwhile > > kvm is logging new writes. So the bitmap that > > is being cleared is the bitmap that was logged before the request > > was sent to guest. > > > > > If we were to allow a page to be allocated and used and > > > then perform the hint it is going to introduce a race where the page > > > might be missed for migration and could result in memory corruption. > > > > commit c13c4153f76db23cac06a12044bf4dd346764059 has this explanation: > > > > Note: balloon will report pages which were free at the time of this call. > > As the reporting happens asynchronously, dirty bit logging must be > > enabled before this free_page_start call is made. Guest reporting must be > > disabled before the migration dirty bitmap is synchronized. > > > > but over multiple iterations this seems to have been dropped > > from code comments. Wei, would you mind going back > > and documenting the APIs you used? > > They seem to be causing confusion ... > > The "Note" is the behavior I am seeing. Specifically there is nothing > in place to prevent the freed pages from causing corruption if they > are freed before being hinted. The requirement should be that they > cannot be freed until after they are hinted that way the dirty bit > logging will mark the page as dirty if it is accessed AFTER being > hinted. > > If you do not guarantee the hinting has happened first you could end > up logging the dirty bit before the hint is processed and then clear > the dirty bit due to the hint. It is pretty straight forward to > resolve by just not putting the page into the balloon until after the > hint has been processed. > > > > > > > > > > - host can restart the process at any time by > > > > > > updating command ID. That will make guest stop > > > > > > and start from the beginning. > > > > > > > > > > > > - host can also stop the process by specifying a special > > > > > > command ID value. 
> > > > > > > > > > > > > > > > > > ========= > > > > > > > > > > > > > > > > > > Now let's compare to what you have here: > > > > > > > > > > > > - At any time after boot, guest walks over free memory and sends > > > > > > addresses as buffers to the host > > > > > > > > > > > > - Memory reported is then guaranteed to be unused > > > > > > until host has used the buffers > > > > > > > > > > > > > > > > > > Is above a fair summary? > > > > > > > > > > > > So yes there's a difference but the specific bit of chunking is same > > > > > > imho. > > > > > > > > > > The big difference is that I am returning the pages after they are > > > > > processed, while FREE_PAGE_HINT doesn't and isn't designed to. > > > > > > > > It doesn't but the hypervisor *is* designed to support that. > > > > > > Not really, it seems like it is more just a side effect of things. > > > > I hope the commit log above is enough to convice you we did > > think about this. > > Sorry, but no. I think the "note" convinced me there is a race > condition, specifically in the shrinker case. We cannot free the page > back to host memory until the hint has been processed, otherwise we > will race with the dirty bit logging. > > > > Also as I mentioned before I am also not a huge fan of polling on both > > > sides as it is just going to burn through CPU. If we are iterative and > > > polling it is going to end up with us potentially pushing one CPU at > > > 100%, and if the one CPU doing the polling cannot keep up with the > > > page updates coming from the other CPUs we would be stuck in that > > > state for a while. I would have preferred to see something where the > > > CPU would at least allow other tasks to occur while it is waiting for > > > buffers to be returned by the host. > > > > You lost me here. What does polling have to do with it? > > This is just another issue I found. Specifically busy polling while > waiting on the host to process the hints. I'm not a fan of it and was > just pointing it out. > > > > > > The > > > > > problem is the interface doesn't allow for a good way to identify that > > > > > any given block of pages has been processed and can be returned. > > > > > > > > And that's because FREE_PAGE_HINT does not care. > > > > It can return any page at any point even before hypervisor > > > > saw it. > > > > > > I disagree, see my comment above. > > > > OK let's see if above is enough to convice you. Or maybe we > > have a bug when shrinker is invoked :) But I don't think so. > > I'm pretty sure there is a bug. > > > > > > Instead pages go in, but they don't come out until the configuration > > > > > is changed and "DONE" is reported. The act of reporting "DONE" will > > > > > reset things and start them all over which kind of defeats the point. > > > > > > > > Right. > > > > > > > > But if you consider how we are using the shrinker you will > > > > see that it's kind of broken. > > > > For example not keeping track of allocated > > > > pages means the count we return is broken > > > > while reporting is active. > > > > > > > > I looked at fixing it but really if we can just > > > > stop allocating memory that would be way cleaner. > > > > > > Agreed. If we hit an OOM we should probably just stop the free page > > > hinting and treat that as the equivalent to an allocation failure. > > > > And fix the shrinker count to include the pages in the vq. Yea. > > I don't know if we really want to touch the pages in the VQ. I would > say that we should leave them alone. 
> > > > > > > As-is I think this also has the potential for corrupting memory since > > > it will likely be returning the most recent pages added to the balloon > > > so the pages are likely still on the processing queue. > > > > That part is fine I think because of the above. > > > > > > > > > For example we allocate pages until shrinker kicks in. > > > > Fair enough but in fact many it would be better to > > > > do the reverse: trigger shrinker and then send as many > > > > free pages as we can to host. > > > > > > I'm not sure I understand this last part. > > > > Oh basically what I am saying is this: one of the reasons to use page > > hinting is when host is short on memory. In that case, why don't we use > > shrinker to ask kernel drivers to free up memory? Any memory freed could > > then be reported to host. > > Didn't the balloon driver already have a feature like that where it > could start shrinking memory if the host was under memory pressure? If > so how would adding another one add much value. > > The idea here is if the memory is free we just mark it as such. As > long as we can do so with no noticeable overhead on the guest or host > why not just do it?
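Coming back to the 'void *data' suggestion earlier in this message: a 1:1 page-to-buffer association, virtio-net Rx style, could look roughly like the sketch below, where free_hinted_page() is a made-up helper and the rest is the existing virtio core API:

/* hand one free page to the host, using the page itself as the token */
static int hint_one_page(struct virtqueue *vq, struct page *page)
{
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, page_address(page), PAGE_SIZE);

	/* the token reappears from virtqueue_get_buf() on completion */
	err = virtqueue_add_inbuf(vq, &sg, 1, page, GFP_KERNEL);
	if (err)
		return err;

	virtqueue_kick(vq);
	return 0;
}

/* return only pages the host has actually finished processing */
static void reclaim_hinted_pages(struct virtqueue *vq)
{
	unsigned int len;
	struct page *page;

	while ((page = virtqueue_get_buf(vq, &len)))
		free_hinted_page(page);	/* hypothetical helper */
}

This is what would give the incremental stop/flush/resume behavior discussed above: pages come back in completion order, one per buffer, instead of all at once at "DONE".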
On Thu, Jul 18, 2019 at 08:34:37AM -0700, Alexander Duyck wrote: > > > > For example we allocate pages until shrinker kicks in. > > > > Fair enough but in fact many it would be better to > > > > do the reverse: trigger shrinker and then send as many > > > > free pages as we can to host. > > > > > > I'm not sure I understand this last part. > > > > Oh basically what I am saying is this: one of the reasons to use page > > hinting is when host is short on memory. In that case, why don't we use > > shrinker to ask kernel drivers to free up memory? Any memory freed could > > then be reported to host. > > Didn't the balloon driver already have a feature like that where it > could start shrinking memory if the host was under memory pressure? If > so how would adding another one add much value. Well fundamentally the basic balloon inflate kind of does this, yes :) The difference with what I am suggesting is that balloon inflate tries to aggressively achieve a specific goal of freed memory. We could have a weaker "free as much as you can" that is still stronger than free page hint which as you point out below does not try to free at all, just hints what is already free. > The idea here is if the memory is free we just mark it as such. As > long as we can do so with no noticeable overhead on the guest or host > why not just do it?
On Thu, Jul 18, 2019 at 12:03:23PM -0400, Nitesh Narayan Lal wrote: > >>>> For example we allocate pages until shrinker kicks in. > >>>> Fair enough but in fact many it would be better to > >>>> do the reverse: trigger shrinker and then send as many > >>>> free pages as we can to host. > >>> I'm not sure I understand this last part. > >> Oh basically what I am saying is this: one of the reasons to use page > >> hinting is when host is short on memory. In that case, why don't we use > >> shrinker to ask kernel drivers to free up memory? Any memory freed could > >> then be reported to host. > > Didn't the balloon driver already have a feature like that where it > > could start shrinking memory if the host was under memory pressure? > If you are referring to auto-ballooning (I don't think it is merged). It > has its own set of disadvantages such as it could easily lead to OOM, > memory corruption and so on. Right. So what I am saying is: we could have a flag that triggers a shrinker once before sending memory hints. Worth considering.
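A minimal sketch of that "shrink once, then hint" flag, assuming an invented feature bit; VIRTIO_BALLOON_F_SHRINK_BEFORE_HINT and start_reporting_free_pages() do not exist, and drop_slab() is not exported to modules today, so this is purely illustrative:

static void balloon_begin_hinting(struct virtio_balloon *vb)
{
	/*
	 * Optionally shake out reclaimable caches first so the
	 * free-page report covers as much memory as possible.
	 */
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_SHRINK_BEFORE_HINT))
		drop_slab();	/* would need an export to be callable here */

	start_reporting_free_pages(vb);	/* hypothetical */
}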
On Thu, Jul 18, 2019 at 9:07 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Thu, Jul 18, 2019 at 08:34:37AM -0700, Alexander Duyck wrote: > > On Wed, Jul 17, 2019 at 10:14 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > On Wed, Jul 17, 2019 at 09:43:52AM -0700, Alexander Duyck wrote: > > > > On Wed, Jul 17, 2019 at 3:28 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > > > On Tue, Jul 16, 2019 at 02:06:59PM -0700, Alexander Duyck wrote: > > > > > > On Tue, Jul 16, 2019 at 10:41 AM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > > > > > > > <snip> > > > > > > > > > > > > > > > This is what I am saying. Having watched that patchset being developed, > > > > > > > > > I think that's simply because processing blocks required mm core > > > > > > > > > changes, which Wei was not up to pushing through. > > > > > > > > > > > > > > > > > > > > > > > > > > > If we did > > > > > > > > > > > > > > > > > > while (1) { > > > > > > > > > alloc_pages > > > > > > > > > add_buf > > > > > > > > > get_buf > > > > > > > > > free_pages > > > > > > > > > } > > > > > > > > > > > > > > > > > > We'd end up passing the same page to balloon again and again. > > > > > > > > > > > > > > > > > > So we end up reserving lots of memory with alloc_pages instead. > > > > > > > > > > > > > > > > > > What I am saying is that now that you are developing > > > > > > > > > infrastructure to iterate over free pages, > > > > > > > > > FREE_PAGE_HINT should be able to use it too. > > > > > > > > > Whether that's possible might be a good indication of > > > > > > > > > whether the new mm APIs make sense. > > > > > > > > > > > > > > > > The problem is the infrastructure as implemented isn't designed to do > > > > > > > > that. I am pretty certain this interface will have issues with being > > > > > > > > given small blocks to process at a time. > > > > > > > > > > > > > > > > Basically the design for the FREE_PAGE_HINT feature doesn't really > > > > > > > > have the concept of doing things a bit at a time. It is either > > > > > > > > filling, stopped, or done. From what I can tell it requires a > > > > > > > > configuration change for the virtio balloon interface to toggle > > > > > > > > between those states. > > > > > > > > > > > > > > Maybe I misunderstand what you are saying. > > > > > > > > > > > > > > Filling state can definitely report things > > > > > > > a bit at a time. It does not assume that > > > > > > > all of guest free memory can fit in a VQ. > > > > > > > > > > > > I think where you and I may differ is that you are okay with just > > > > > > pulling pages until you hit OOM, or allocation failures. Do I have > > > > > > that right? > > > > > > > > > > This is exactly what the current code does. But that's an implementation > > > > > detail which came about because we failed to find any other way to > > > > > iterate over free blocks. > > > > > > > > I get that. However my concern is that permeated other areas of the > > > > implementation that make taking another approach much more difficult > > > > than it needs to be. > > > > > > Implementation would have to change to use an iterator obviously. But I don't see > > > that it leaked out to a hypervisor interface. > > > > > > In fact take a look at virtio_balloon_shrinker_scan > > > and you will see that it calls shrink_free_pages > > > without waiting for the device at all. > > > > Yes, and in case you missed it earlier I am pretty sure that leads to > > possible memory corruption. 
I don't think it was tested enough to be > > able to say that is safe. > > More testing would be good, for sure. > > > Specifically we cannot be clearing the dirty flag on pages that are in > > use. We should only be clearing that flag for pages that are > > guaranteed to not be in use. > > I think that clearing the dirty flag is safe if the flag was originally > set and the page has been > write-protected before reporting was requested. > In that case we know that page has not been changed. > Right? I am just going to drop the rest of this thread as I agree we have been running ourselves around in circles. The part I had missed was that there are 2 bitmaps and that you are using migration_bitmap_sync_precopy() to align the two. This is just running at the same time as the precopy code and is only really meant to try and clear the bit before the precopy gets to it, from what I can tell. So one thing that is still an issue then is that my approach would only work on the first migration. The problem is the logic I have implemented assumes that once we have hinted on a page we don't need to do it again. However in order to support migration you would need to reset the hinting entirely and start over again after doing a migration.
On Thu, Jul 18, 2019 at 1:24 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Thu, Jul 18, 2019 at 08:34:37AM -0700, Alexander Duyck wrote: > > > > > For example we allocate pages until shrinker kicks in. > > > > > Fair enough but in fact many it would be better to > > > > > do the reverse: trigger shrinker and then send as many > > > > > free pages as we can to host. > > > > > > > > I'm not sure I understand this last part. > > > > > > Oh basically what I am saying is this: one of the reasons to use page > > > hinting is when host is short on memory. In that case, why don't we use > > > shrinker to ask kernel drivers to free up memory? Any memory freed could > > > then be reported to host. > > > > Didn't the balloon driver already have a feature like that where it > > could start shrinking memory if the host was under memory pressure? If > > so how would adding another one add much value. > > Well fundamentally the basic balloon inflate kind of does this, yes :) > > The difference with what I am suggesting is that balloon inflate tries > to aggressively achieve a specific goal of freed memory. We could have a > weaker "free as much as you can" that is still stronger than free page > hint which as you point out below does not try to free at all, just > hints what is already free. Yes, but why wait until the host is low on memory? With my implementation we can perform the hints in the background for a low cost already. So why should we wait to free up memory when we could do it immediately? Why let things get to the state where the host is under memory pressure when the guests can be proactively freeing up the pages and improving performance as a result by reducing swap usage?
On Thu, Jul 18, 2019 at 01:29:14PM -0700, Alexander Duyck wrote: > So one thing that is still an issue then is that my approach would > only work on the first migration. The problem is the logic I have > implemented assumes that once we have hinted on a page we don't need > to do it again. However in order to support migration you would need > to reset the hinting entirely and start over again after doing a > migration. Well with precopy at least it's simple: just clear the dirty bit, it won't be sent, and then on destination you get a zero page and later COW on first write. Right? With postcopy it is trickier as destination waits until it gets all of memory. I think we could use some trick to make source pretend it's a zero page, that is cheap to send.
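As pseudocode, the precopy side of "just clear the dirty bit" could look like the following; the names are illustrative, not the actual QEMU migration code:

static void precopy_send_pass(void)
{
	unsigned long pfn;

	for (pfn = 0; pfn < nr_guest_pages; pfn++) {
		/*
		 * A hinted-free page has its bit cleared and is simply
		 * never queued; the destination keeps an untouched page
		 * that reads as zeros and is COWed on first guest write.
		 */
		if (!test_bit(pfn, migration_dirty_bitmap))
			continue;

		send_page_to_destination(pfn);
		clear_bit(pfn, migration_dirty_bitmap);
	}
}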
On Thu, Jul 18, 2019 at 01:34:03PM -0700, Alexander Duyck wrote: > On Thu, Jul 18, 2019 at 1:24 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > On Thu, Jul 18, 2019 at 08:34:37AM -0700, Alexander Duyck wrote: > > > > > > For example we allocate pages until shrinker kicks in. > > > > > > Fair enough but in fact many it would be better to > > > > > > do the reverse: trigger shrinker and then send as many > > > > > > free pages as we can to host. > > > > > > > > > > I'm not sure I understand this last part. > > > > > > > > Oh basically what I am saying is this: one of the reasons to use page > > > > hinting is when host is short on memory. In that case, why don't we use > > > > shrinker to ask kernel drivers to free up memory? Any memory freed could > > > > then be reported to host. > > > > > > Didn't the balloon driver already have a feature like that where it > > > could start shrinking memory if the host was under memory pressure? If > > > so how would adding another one add much value. > > > > Well fundamentally the basic balloon inflate kind of does this, yes :) > > > > The difference with what I am suggesting is that balloon inflate tries > > to aggressively achieve a specific goal of freed memory. We could have a > > weaker "free as much as you can" that is still stronger than free page > > hint which as you point out below does not try to free at all, just > > hints what is already free. > > Yes, but why wait until the host is low on memory? It can come about for a variety of reasons, such as other VMs being aggressive, or ours aggressively caching stuff in memory. > With my > implementation we can perform the hints in the background for a low > cost already. So why should we wait to free up memory when we could do > it immediately. Why let things get to the state where the host is > under memory pressure when the guests can be proactively freeing up > the pages and improving performance as a result be reducing swap > usage? You are talking about sending free memory to host. Fair enough but if you have drivers that aggressively allocate memory then there won't be that much free guest memory without invoking a shrinker.
On Thu, Jul 18, 2019 at 1:37 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Thu, Jul 18, 2019 at 01:29:14PM -0700, Alexander Duyck wrote: > > So one thing that is still an issue then is that my approach would > > only work on the first migration. The problem is the logic I have > > implemented assumes that once we have hinted on a page we don't need > > to do it again. However in order to support migration you would need > > to reset the hinting entirely and start over again after doing a > > migration. > > Well with precopy at least it's simple: just clear the > dirty bit, it won't be sent, and then on destination > you get a zero page and later COW on first write. > Right? Are you talking about adding MADV_DONTNEED functionality to FREE_PAGE_HINTS? > With precopy it is tricker as destination waits until it gets > all of memory. I think we could use some trick to > make source pretend it's a zero page, that is cheap to send. So I am confused again. What I was getting at is that if I am not mistaken block->bmap is set to all 1s for each page in ram_list_init_bitmaps(). After that the precopy starts and begins moving memory over. We need to be able to go in and hint away all the free pages from that initial bitmap. To do that we would need to have the "Hinted" flag I added in the patch set cleared for all pages, and then go through all free memory and start over in order to hint on which pages are actually free. Otherwise all we are doing is hinting on which pages have been freed since the last round of hints. Essentially this is another case where being incremental is problematic for this design. What I would need to do is reset the "Hinted" flag in all of the free pages after the migration has been completed.
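One possible shape for that reset, written against the free-list layout of this era; __ClearPageHinted() stands in for whatever per-page tracking bit the series would use, so this is a sketch rather than the actual patch code:

/*
 * Hypothetical post-migration reset: clear the tracking bit on every
 * free page so the next migration re-reports all of them.
 */
static void aerator_reset_hints(void)
{
	unsigned int order, mt;
	unsigned long flags;
	struct zone *zone;
	struct page *page;

	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++)
			for (mt = 0; mt < MIGRATE_TYPES; mt++)
				list_for_each_entry(page,
						    &zone->free_area[order].free_list[mt],
						    lru)
					__ClearPageHinted(page);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}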
On Thu, Jul 18, 2019 at 1:49 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > On Thu, Jul 18, 2019 at 01:34:03PM -0700, Alexander Duyck wrote: > > On Thu, Jul 18, 2019 at 1:24 PM Michael S. Tsirkin <mst@redhat.com> wrote: > > > > > > On Thu, Jul 18, 2019 at 08:34:37AM -0700, Alexander Duyck wrote: > > > > > > > For example we allocate pages until shrinker kicks in. > > > > > > > Fair enough but in fact many it would be better to > > > > > > > do the reverse: trigger shrinker and then send as many > > > > > > > free pages as we can to host. > > > > > > > > > > > > I'm not sure I understand this last part. > > > > > > > > > > Oh basically what I am saying is this: one of the reasons to use page > > > > > hinting is when host is short on memory. In that case, why don't we use > > > > > shrinker to ask kernel drivers to free up memory? Any memory freed could > > > > > then be reported to host. > > > > > > > > Didn't the balloon driver already have a feature like that where it > > > > could start shrinking memory if the host was under memory pressure? If > > > > so how would adding another one add much value. > > > > > > Well fundamentally the basic balloon inflate kind of does this, yes :) > > > > > > The difference with what I am suggesting is that balloon inflate tries > > > to aggressively achieve a specific goal of freed memory. We could have a > > > weaker "free as much as you can" that is still stronger than free page > > > hint which as you point out below does not try to free at all, just > > > hints what is already free. > > > > Yes, but why wait until the host is low on memory? > > It can come about for a variety of reasons, such as > other VMs being aggressive, or ours aggressively caching > stuff in memory. > > > With my > > implementation we can perform the hints in the background for a low > > cost already. So why should we wait to free up memory when we could do > > it immediately. Why let things get to the state where the host is > > under memory pressure when the guests can be proactively freeing up > > the pages and improving performance as a result be reducing swap > > usage? > > You are talking about sending free memory to host. > Fair enough but if you have drivers that aggressively > allocate memory then there won't be that much free guest > memory without invoking a shrinker. So then what we really need is a way for the host to trigger the shrinker via a call to drop_slab() on the guest don't we? Then we could automatically hint the free pages to the host.
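If the host is to trigger that, the balloon's config-changed interrupt is the natural vehicle; the sketch below invents a host_cmd config field and a command bit, neither of which exists in the driver or the spec:

#define VIRTIO_BALLOON_CMD_SHRINK	(1 << 0)	/* invented */

static void virtballoon_handle_shrink_request(struct virtio_balloon *vb)
{
	u32 cmd;

	/* hypothetical config field, read the way the real fields are */
	virtio_cread(vb->vdev, struct virtio_balloon_config, host_cmd, &cmd);

	if (cmd & VIRTIO_BALLOON_CMD_SHRINK) {
		drop_slab();	/* release reclaimable caches (needs export) */
		queue_work(vb->balloon_wq, &vb->report_free_page_work);
	}
}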
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 023fc3bc01c6..9cdaccf92c3a 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -47,6 +47,7 @@ config VIRTIO_BALLOON
 	tristate "Virtio balloon driver"
 	depends on VIRTIO
 	select MEMORY_BALLOON
+	select AERATION
 	---help---
 	 This driver supports increasing and decreasing the amount
 	 of memory within a KVM guest.
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 44339fc87cc7..91f1e8c9017d 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <linux/mount.h>
 #include <linux/magic.h>
+#include <linux/memory_aeration.h>
 
 /*
  * Balloon device works in 4K page units. So each page is pointed to by
@@ -26,6 +27,7 @@
  */
 #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
 #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
+#define VIRTIO_BALLOON_ARRAY_HINTS_MAX 32
 #define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
 
 #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
@@ -45,6 +47,7 @@ enum virtio_balloon_vq {
 	VIRTIO_BALLOON_VQ_DEFLATE,
 	VIRTIO_BALLOON_VQ_STATS,
 	VIRTIO_BALLOON_VQ_FREE_PAGE,
+	VIRTIO_BALLOON_VQ_HINTING,
 	VIRTIO_BALLOON_VQ_MAX
 };
 
@@ -54,7 +57,8 @@ enum virtio_balloon_config_read {
 
 struct virtio_balloon {
 	struct virtio_device *vdev;
-	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
+	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq,
+			 *hinting_vq;
 
 	/* Balloon's own wq for cpu-intensive work items */
 	struct workqueue_struct *balloon_wq;
@@ -103,9 +107,21 @@ struct virtio_balloon {
 	/* Synchronize access/update to this struct virtio_balloon elements */
 	struct mutex balloon_lock;
 
-	/* The array of pfns we tell the Host about. */
-	unsigned int num_pfns;
-	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
+
+	union {
+		/* The array of pfns we tell the Host about. */
+		struct {
+			unsigned int num_pfns;
+			__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
+		};
+		/* The array of physical addresses we are hinting on */
+		struct {
+			unsigned int num_hints;
+			__virtio64 hints[VIRTIO_BALLOON_ARRAY_HINTS_MAX];
+		};
+	};
+
+	struct aerator_dev_info a_dev_info;
 
 	/* Memory statistics */
 	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
@@ -151,6 +167,68 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 
 }
 
+static u64 page_to_hints_pa_order(struct page *page)
+{
+	unsigned char order;
+	dma_addr_t pa;
+
+	BUILD_BUG_ON((64 - VIRTIO_BALLOON_PFN_SHIFT) >=
+		     (1 << VIRTIO_BALLOON_PFN_SHIFT));
+
+	/*
+	 * Record physical page address combined with page order.
+	 * Order will never exceed 64 - VIRTIO_BALLON_PFN_SHIFT
+	 * since the size has to fit into a 64b value. So as long
+	 * as VIRTIO_BALLOON_SHIFT is greater than this combining
+	 * the two values should be safe.
+	 */
+	pa = page_to_phys(page);
+	order = page_private(page) +
+		PAGE_SHIFT - VIRTIO_BALLOON_PFN_SHIFT;
+
+	return (u64)(pa | order);
+}
+
+void virtballoon_aerator_react(struct aerator_dev_info *a_dev_info)
+{
+	struct virtio_balloon *vb = container_of(a_dev_info,
+						 struct virtio_balloon,
+						 a_dev_info);
+	struct virtqueue *vq = vb->hinting_vq;
+	struct scatterlist sg;
+	unsigned int unused;
+	struct page *page;
+
+	mutex_lock(&vb->balloon_lock);
+
+	vb->num_hints = 0;
+
+	list_for_each_entry(page, &a_dev_info->batch, lru) {
+		vb->hints[vb->num_hints++] =
+			cpu_to_virtio64(vb->vdev,
+					page_to_hints_pa_order(page));
+	}
+
+	/* We shouldn't have been called if there is nothing to process */
+	if (WARN_ON(vb->num_hints == 0))
+		goto out;
+
+	sg_init_one(&sg, vb->hints,
+		    sizeof(vb->hints[0]) * vb->num_hints);
+
+	/*
+	 * We should always be able to add one buffer to an
+	 * empty queue.
+	 */
+	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
+	virtqueue_kick(vq);
+
+	/* When host has read buffer, this completes via balloon_ack */
+	wait_event(vb->acked, virtqueue_get_buf(vq, &unused));
+out:
+	mutex_unlock(&vb->balloon_lock);
+}
+
 static void set_page_pfns(struct virtio_balloon *vb,
 			  __virtio32 pfns[], struct page *page)
 {
@@ -475,6 +553,7 @@ static int init_vqs(struct virtio_balloon *vb)
 	names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
 	names[VIRTIO_BALLOON_VQ_STATS] = NULL;
 	names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+	names[VIRTIO_BALLOON_VQ_HINTING] = NULL;
 
 	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
 		names[VIRTIO_BALLOON_VQ_STATS] = "stats";
@@ -486,11 +565,19 @@ static int init_vqs(struct virtio_balloon *vb)
 		callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
 	}
 
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING)) {
+		names[VIRTIO_BALLOON_VQ_HINTING] = "hinting_vq";
+		callbacks[VIRTIO_BALLOON_VQ_HINTING] = balloon_ack;
+	}
+
 	err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
 					 vqs, callbacks, names, NULL, NULL);
 	if (err)
 		return err;
 
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING))
+		vb->hinting_vq = vqs[VIRTIO_BALLOON_VQ_HINTING];
+
 	vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
 	vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
 	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
@@ -929,12 +1016,24 @@ static int virtballoon_probe(struct virtio_device *vdev)
 		if (err)
 			goto out_del_balloon_wq;
 	}
+
+	vb->a_dev_info.react = virtballoon_aerator_react;
+	vb->a_dev_info.capacity = VIRTIO_BALLOON_ARRAY_HINTS_MAX;
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING)) {
+		err = aerator_startup(&vb->a_dev_info);
+		if (err)
+			goto out_unregister_shrinker;
+	}
+
 	virtio_device_ready(vdev);
 
 	if (towards_target(vb))
 		virtballoon_changed(vdev);
 	return 0;
 
+out_unregister_shrinker:
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+		virtio_balloon_unregister_shrinker(vb);
 out_del_balloon_wq:
 	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
 		destroy_workqueue(vb->balloon_wq);
@@ -963,6 +1062,8 @@ static void virtballoon_remove(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb = vdev->priv;
 
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_HINTING))
+		aerator_shutdown();
 	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
 		virtio_balloon_unregister_shrinker(vb);
 	spin_lock_irq(&vb->stop_update_lock);
@@ -1032,6 +1133,7 @@ static int virtballoon_validate(struct virtio_device *vdev)
 	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
 	VIRTIO_BALLOON_F_FREE_PAGE_HINT,
 	VIRTIO_BALLOON_F_PAGE_POISON,
+	VIRTIO_BALLOON_F_HINTING,
 };
 
 static struct virtio_driver virtio_balloon_driver = {
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index a1966cd7b677..2b0f62814e22 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -36,6 +36,7 @@
 #define VIRTIO_BALLOON_F_DEFLATE_ON_OOM	2 /* Deflate balloon on OOM */
 #define VIRTIO_BALLOON_F_FREE_PAGE_HINT	3 /* VQ to report free pages */
 #define VIRTIO_BALLOON_F_PAGE_POISON	4 /* Guest is using page poisoning */
+#define VIRTIO_BALLOON_F_HINTING	5 /* Page hinting virtqueue */
 
 /* Size of a PFN in the balloon interface. */
 #define VIRTIO_BALLOON_PFN_SHIFT 12