diff mbox

[v22,2/3] virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_VQ

Message ID 1516165812-3995-3-git-send-email-wei.w.wang@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Wang, Wei W Jan. 17, 2018, 5:10 a.m. UTC
Negotiation of the VIRTIO_BALLOON_F_FREE_PAGE_VQ feature indicates the
support of reporting hints of guest free pages to host via virtio-balloon.

Host requests the guest to report free pages by sending a new cmd
id to the guest via the free_page_report_cmd_id configuration register.

When the guest starts to report, the first element added to the free page
vq is the cmd id given by host. When the guest finishes the reporting
of all the free pages, VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID is added
to the vq to tell host that the reporting is done. Host may also request
the guest to stop the reporting in advance by sending the stop cmd id to
the guest via the configuration register.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
Signed-off-by: Liang Li <liang.z.li@intel.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
---
 drivers/virtio/virtio_balloon.c     | 242 +++++++++++++++++++++++++++++++-----
 include/uapi/linux/virtio_balloon.h |   4 +
 2 files changed, 214 insertions(+), 32 deletions(-)

Comments

Pankaj Gupta Jan. 17, 2018, 8:21 a.m. UTC | #1
> 
> Negotiation of the VIRTIO_BALLOON_F_FREE_PAGE_VQ feature indicates the
> support of reporting hints of guest free pages to host via virtio-balloon.
> 
> Host requests the guest to report free pages by sending a new cmd
> id to the guest via the free_page_report_cmd_id configuration register.
> 
> When the guest starts to report, the first element added to the free page
> vq is the cmd id given by host. When the guest finishes the reporting
> of all the free pages, VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID is added
> to the vq to tell host that the reporting is done. Host may also requests
> the guest to stop the reporting in advance by sending the stop cmd id to
> the guest via the configuration register.
> 
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> Signed-off-by: Liang Li <liang.z.li@intel.com>
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Cc: Michal Hocko <mhocko@kernel.org>
> ---
>  drivers/virtio/virtio_balloon.c     | 242
>  +++++++++++++++++++++++++++++++-----
>  include/uapi/linux/virtio_balloon.h |   4 +
>  2 files changed, 214 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/virtio/virtio_balloon.c
> b/drivers/virtio/virtio_balloon.c
> index a1fb52c..b9561a5 100644
> --- a/drivers/virtio/virtio_balloon.c
> +++ b/drivers/virtio/virtio_balloon.c
> @@ -53,7 +53,12 @@ static struct vfsmount *balloon_mnt;
>  
>  struct virtio_balloon {
>  	struct virtio_device *vdev;
> -	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
> +	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
> +
> +	/* Balloon's own wq for cpu-intensive work items */
> +	struct workqueue_struct *balloon_wq;
> +	/* The free page reporting work item submitted to the balloon wq */
> +	struct work_struct report_free_page_work;
>  
>  	/* The balloon servicing is delegated to a freezable workqueue. */
>  	struct work_struct update_balloon_stats_work;
> @@ -63,6 +68,13 @@ struct virtio_balloon {
>  	spinlock_t stop_update_lock;
>  	bool stop_update;
>  
> +	/* Start to report free pages */
> +	bool report_free_page;
> +	/* Stores the cmd id given by host to start the free page reporting */
> +	uint32_t start_cmd_id;
> +	/* Stores STOP_ID as a sign to tell host that the reporting is done */
> +	uint32_t stop_cmd_id;
> +
>  	/* Waiting for host to ack the pages we released. */
>  	wait_queue_head_t acked;
>  
> @@ -281,6 +293,71 @@ static unsigned int update_balloon_stats(struct
> virtio_balloon *vb)
>  	return idx;
>  }
>  
> +static void add_one_sg(struct virtqueue *vq, unsigned long pfn, uint32_t
> len)
> +{
> +	struct scatterlist sg;
> +	unsigned int unused;
> +	int err;
> +
> +	sg_init_table(&sg, 1);
> +	sg_set_page(&sg, pfn_to_page(pfn), len, 0);
> +
> +	/* Detach all the used buffers from the vq */
> +	while (virtqueue_get_buf(vq, &unused))
> +		;
> +
> +	/*
> +	 * Since this is an optimization feature, losing a couple of free
> +	 * pages to report isn't important. We simply resturn without adding
> +	 * the page if the vq is full. We are adding one entry each time,
> +	 * which essentially results in no memory allocation, so the
> +	 * GFP_KERNEL flag below can be ignored.
> +	 */
> +	if (vq->num_free) {
> +		err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
> +		/*
> +		 * This is expected to never fail, because there is always an
> +		 * entry available on the vq.
> +		 */
> +		BUG_ON(err);
> +	}
> +}
> +
> +static void batch_free_page_sg(struct virtqueue *vq,
> +			       unsigned long pfn,
> +			       uint32_t len)
> +{
> +	add_one_sg(vq, pfn, len);
> +
> +	/* Batch till the vq is full */
> +	if (!vq->num_free)
> +		virtqueue_kick(vq);
> +}
> +
> +static void send_cmd_id(struct virtqueue *vq, void *addr)
> +{
> +	struct scatterlist sg;
> +	unsigned int unused;
> +	int err;
> +
> +	sg_init_one(&sg, addr, sizeof(uint32_t));
> +
> +	/*
> +	 * This handles the cornercase that the vq happens to be full when
> +	 * adding a cmd id. Rarely happen in practice.
> +	 */
> +	while (!vq->num_free)
> +		virtqueue_get_buf(vq, &unused);
> +
> +	err = virtqueue_add_outbuf(vq, &sg, 1, vq, GFP_KERNEL);
> +	/*
> +	 * This is expected to never fail, because there is always an
> +	 * entry available on the vq.
> +	 */
> +	BUG_ON(err);
> +	virtqueue_kick(vq);
> +}
> +
>  /*
>   * While most virtqueues communicate guest-initiated requests to the
>   hypervisor,
>   * the stats queue operates in reverse.  The driver initializes the
>   virtqueue
> @@ -316,17 +393,6 @@ static void stats_handle_request(struct virtio_balloon
> *vb)
>  	virtqueue_kick(vq);
>  }
>  
> -static void virtballoon_changed(struct virtio_device *vdev)
> -{
> -	struct virtio_balloon *vb = vdev->priv;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&vb->stop_update_lock, flags);
> -	if (!vb->stop_update)
> -		queue_work(system_freezable_wq, &vb->update_balloon_size_work);
> -	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
> -}
> -
>  static inline s64 towards_target(struct virtio_balloon *vb)
>  {
>  	s64 target;
> @@ -343,6 +409,36 @@ static inline s64 towards_target(struct virtio_balloon
> *vb)
>  	return target - vb->num_pages;
>  }
>  
> +static void virtballoon_changed(struct virtio_device *vdev)
> +{
> +	struct virtio_balloon *vb = vdev->priv;
> +	unsigned long flags;
> +	__u32 cmd_id;
> +	s64 diff = towards_target(vb);
> +
> +	if (diff) {
> +		spin_lock_irqsave(&vb->stop_update_lock, flags);
> +		if (!vb->stop_update)
> +			queue_work(system_freezable_wq,
> +				   &vb->update_balloon_size_work);
> +		spin_unlock_irqrestore(&vb->stop_update_lock, flags);
> +	}
> +
> +	virtio_cread(vb->vdev, struct virtio_balloon_config,
> +		     free_page_report_cmd_id, &cmd_id);
> +	if (cmd_id == VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID) {
> +		WRITE_ONCE(vb->report_free_page, false);
> +	} else if (cmd_id != vb->start_cmd_id) {
> +		/*
> +		 * Host requests to start the reporting by sending a new cmd
> +		 * id.
> +		 */
> +		WRITE_ONCE(vb->report_free_page, true);
> +		vb->start_cmd_id = cmd_id;
> +		queue_work(vb->balloon_wq, &vb->report_free_page_work);
> +	}
> +}
> +
>  static void update_balloon_size(struct virtio_balloon *vb)
>  {
>  	u32 actual = vb->num_pages;
> @@ -417,40 +513,113 @@ static void update_balloon_size_func(struct
> work_struct *work)
>  
>  static int init_vqs(struct virtio_balloon *vb)
>  {
> -	struct virtqueue *vqs[3];
> -	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
> -	static const char * const names[] = { "inflate", "deflate", "stats" };
> -	int err, nvqs;
> +	struct virtqueue **vqs;
> +	vq_callback_t **callbacks;
> +	const char **names;
> +	struct scatterlist sg;
> +	int i, nvqs, err = -ENOMEM;
> +
> +	/* Inflateq and deflateq are used unconditionally */
> +	nvqs = 2;
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ))
> +		nvqs++;
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
> +		nvqs++;
> +
> +	/* Allocate space for find_vqs parameters */
> +	vqs = kcalloc(nvqs, sizeof(*vqs), GFP_KERNEL);
> +	if (!vqs)
> +		goto err_vq;
> +	callbacks = kmalloc_array(nvqs, sizeof(*callbacks), GFP_KERNEL);
> +	if (!callbacks)
> +		goto err_callback;
> +	names = kmalloc_array(nvqs, sizeof(*names), GFP_KERNEL);
> +	if (!names)
> +		goto err_names;
> +
> +	callbacks[0] = balloon_ack;
> +	names[0] = "inflate";
> +	callbacks[1] = balloon_ack;
> +	names[1] = "deflate";
> +
> +	i = 2;
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
> +		callbacks[i] = stats_request;
> +		names[i] = "stats";
> +		i++;
> +	}
>  
> -	/*
> -	 * We expect two virtqueues: inflate and deflate, and
> -	 * optionally stat.
> -	 */
> -	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
> -	err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL);
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
> +		callbacks[i] = NULL;
> +		names[i] = "free_page_vq";
> +	}
> +
> +	err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names,
> +					 NULL, NULL);
>  	if (err)
> -		return err;
> +		goto err_find;
>  
>  	vb->inflate_vq = vqs[0];
>  	vb->deflate_vq = vqs[1];
> +	i = 2;
>  	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
> -		struct scatterlist sg;
> -		unsigned int num_stats;
> -		vb->stats_vq = vqs[2];
> -
> +		vb->stats_vq = vqs[i++];
>  		/*
>  		 * Prime this virtqueue with one buffer so the hypervisor can
>  		 * use it to signal us later (it can't be broken yet!).
>  		 */
> -		num_stats = update_balloon_stats(vb);
> -
> -		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
> +		sg_init_one(&sg, vb->stats, sizeof(vb->stats));
>  		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
> -		    < 0)
> -			BUG();
> +		    < 0) {
> +			dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
> +				 __func__);
> +			goto err_find;
> +		}
>  		virtqueue_kick(vb->stats_vq);
>  	}
> +
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
> +		vb->free_page_vq = vqs[i];
> +
> +	kfree(names);
> +	kfree(callbacks);
> +	kfree(vqs);
>  	return 0;
 
We can assign err=0 and remove above duplicate code?
 
> +
> +err_find:
> +	kfree(names);
> +err_names:
> +	kfree(callbacks);
> +err_callback:
> +	kfree(vqs);
> +err_vq:
> +	return err;
> +}
> +
> +static bool virtio_balloon_send_free_pages(void *opaque, unsigned long pfn,
> +					   unsigned long nr_pages)
> +{
> +	struct virtio_balloon *vb = (struct virtio_balloon *)opaque;
> +	uint32_t len = nr_pages << PAGE_SHIFT;
> +
> +	if (!READ_ONCE(vb->report_free_page))
> +		return false;
> +
> +	batch_free_page_sg(vb->free_page_vq, pfn, len);
> +
> +	return true;
> +}
> +
> +static void report_free_page_func(struct work_struct *work)
> +{
> +	struct virtio_balloon *vb;
> +
> +	vb = container_of(work, struct virtio_balloon, report_free_page_work);
> +	/* Start by sending the obtained cmd id to the host with an outbuf */
> +	send_cmd_id(vb->free_page_vq, &vb->start_cmd_id);
> +	walk_free_mem_block(vb, 0, &virtio_balloon_send_free_pages);
> +	/* End by sending the stop id to the host with an outbuf */
> +	send_cmd_id(vb->free_page_vq, &vb->stop_cmd_id);
>  }
>  
>  #ifdef CONFIG_BALLOON_COMPACTION
> @@ -566,6 +735,13 @@ static int virtballoon_probe(struct virtio_device *vdev)
>  	if (err)
>  		goto out_free_vb;
>  
> +	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
> +		vb->balloon_wq = alloc_workqueue("balloon-wq",
> +					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
> +		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
> +		vb->stop_cmd_id = VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID;
> +	}
> +
>  	vb->nb.notifier_call = virtballoon_oom_notify;
>  	vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
>  	err = register_oom_notifier(&vb->nb);
> @@ -630,6 +806,7 @@ static void virtballoon_remove(struct virtio_device
> *vdev)
>  	spin_unlock_irq(&vb->stop_update_lock);
>  	cancel_work_sync(&vb->update_balloon_size_work);
>  	cancel_work_sync(&vb->update_balloon_stats_work);
> +	cancel_work_sync(&vb->report_free_page_work);
>  
>  	remove_common(vb);
>  #ifdef CONFIG_BALLOON_COMPACTION
> @@ -682,6 +859,7 @@ static unsigned int features[] = {
>  	VIRTIO_BALLOON_F_MUST_TELL_HOST,
>  	VIRTIO_BALLOON_F_STATS_VQ,
>  	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
> +	VIRTIO_BALLOON_F_FREE_PAGE_VQ,
>  };
>  
>  static struct virtio_driver virtio_balloon_driver = {
> diff --git a/include/uapi/linux/virtio_balloon.h
> b/include/uapi/linux/virtio_balloon.h
> index 343d7dd..55e2456 100644
> --- a/include/uapi/linux/virtio_balloon.h
> +++ b/include/uapi/linux/virtio_balloon.h
> @@ -34,15 +34,19 @@
>  #define VIRTIO_BALLOON_F_MUST_TELL_HOST	0 /* Tell before reclaiming pages */
>  #define VIRTIO_BALLOON_F_STATS_VQ	1 /* Memory Stats virtqueue */
>  #define VIRTIO_BALLOON_F_DEFLATE_ON_OOM	2 /* Deflate balloon on OOM */
> +#define VIRTIO_BALLOON_F_FREE_PAGE_VQ	3 /* VQ to report free pages */
>  
>  /* Size of a PFN in the balloon interface. */
>  #define VIRTIO_BALLOON_PFN_SHIFT 12
>  
> +#define VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID		0
>  struct virtio_balloon_config {
>  	/* Number of pages host wants Guest to give up. */
>  	__u32 num_pages;
>  	/* Number of pages we've actually got in balloon. */
>  	__u32 actual;
> +	/* Free page report command id, readonly by guest */
> +	__u32 free_page_report_cmd_id;
>  };
>  
>  #define VIRTIO_BALLOON_S_SWAP_IN  0   /* Amount of memory swapped in */
> --
> 2.7.4
> 
>
Wang, Wei W Jan. 17, 2018, 9 a.m. UTC | #2
On 01/17/2018 04:21 PM, Pankaj Gupta wrote:
>> Negotiation of the VIRTIO_BALLOON_F_FREE_PAGE_VQ feature indicates the
>> support of reporting hints of guest free pages to host via virtio-balloon.
>>
>> Host requests the guest to report free pages by sending a new cmd
>> id to the guest via the free_page_report_cmd_id configuration register.
>>
>> When the guest starts to report, the first element added to the free page
>> vq is the cmd id given by host. When the guest finishes the reporting
>> of all the free pages, VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID is added
>> to the vq to tell host that the reporting is done. Host may also requests
>> the guest to stop the reporting in advance by sending the stop cmd id to
>> the guest via the configuration register.
>>
>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>> Signed-off-by: Liang Li <liang.z.li@intel.com>
>> Cc: Michael S. Tsirkin <mst@redhat.com>
>> Cc: Michal Hocko <mhocko@kernel.org>
>> ---
>>   drivers/virtio/virtio_balloon.c     | 242
>>   +++++++++++++++++++++++++++++++-----
>>   include/uapi/linux/virtio_balloon.h |   4 +
>>   2 files changed, 214 insertions(+), 32 deletions(-)
>>
>> diff --git a/drivers/virtio/virtio_balloon.c
>> b/drivers/virtio/virtio_balloon.c
>> index a1fb52c..b9561a5 100644
>> --- a/drivers/virtio/virtio_balloon.c
>> +++ b/drivers/virtio/virtio_balloon.c
>> @@ -53,7 +53,12 @@ static struct vfsmount *balloon_mnt;
>>   
>>   struct virtio_balloon {
>>   	struct virtio_device *vdev;
>> -	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
>> +	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
>> +
>> +	/* Balloon's own wq for cpu-intensive work items */
>> +	struct workqueue_struct *balloon_wq;
>> +	/* The free page reporting work item submitted to the balloon wq */
>> +	struct work_struct report_free_page_work;
>>   
>>   	/* The balloon servicing is delegated to a freezable workqueue. */
>>   	struct work_struct update_balloon_stats_work;
>> @@ -63,6 +68,13 @@ struct virtio_balloon {
>>   	spinlock_t stop_update_lock;
>>   	bool stop_update;
>>   
>> +	/* Start to report free pages */
>> +	bool report_free_page;
>> +	/* Stores the cmd id given by host to start the free page reporting */
>> +	uint32_t start_cmd_id;
>> +	/* Stores STOP_ID as a sign to tell host that the reporting is done */
>> +	uint32_t stop_cmd_id;
>> +
>>   	/* Waiting for host to ack the pages we released. */
>>   	wait_queue_head_t acked;
>>   
>> @@ -281,6 +293,71 @@ static unsigned int update_balloon_stats(struct
>> virtio_balloon *vb)
>>   	return idx;
>>   }
>>   
>> +static void add_one_sg(struct virtqueue *vq, unsigned long pfn, uint32_t
>> len)
>> +{
>> +	struct scatterlist sg;
>> +	unsigned int unused;
>> +	int err;
>> +
>> +	sg_init_table(&sg, 1);
>> +	sg_set_page(&sg, pfn_to_page(pfn), len, 0);
>> +
>> +	/* Detach all the used buffers from the vq */
>> +	while (virtqueue_get_buf(vq, &unused))
>> +		;
>> +
>> +	/*
>> +	 * Since this is an optimization feature, losing a couple of free
>> +	 * pages to report isn't important. We simply resturn without adding
>> +	 * the page if the vq is full. We are adding one entry each time,
>> +	 * which essentially results in no memory allocation, so the
>> +	 * GFP_KERNEL flag below can be ignored.
>> +	 */
>> +	if (vq->num_free) {
>> +		err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
>> +		/*
>> +		 * This is expected to never fail, because there is always an
>> +		 * entry available on the vq.
>> +		 */
>> +		BUG_ON(err);
>> +	}
>> +}
>> +
>> +static void batch_free_page_sg(struct virtqueue *vq,
>> +			       unsigned long pfn,
>> +			       uint32_t len)
>> +{
>> +	add_one_sg(vq, pfn, len);
>> +
>> +	/* Batch till the vq is full */
>> +	if (!vq->num_free)
>> +		virtqueue_kick(vq);
>> +}
>> +
>> +static void send_cmd_id(struct virtqueue *vq, void *addr)
>> +{
>> +	struct scatterlist sg;
>> +	unsigned int unused;
>> +	int err;
>> +
>> +	sg_init_one(&sg, addr, sizeof(uint32_t));
>> +
>> +	/*
>> +	 * This handles the cornercase that the vq happens to be full when
>> +	 * adding a cmd id. Rarely happen in practice.
>> +	 */
>> +	while (!vq->num_free)
>> +		virtqueue_get_buf(vq, &unused);
>> +
>> +	err = virtqueue_add_outbuf(vq, &sg, 1, vq, GFP_KERNEL);
>> +	/*
>> +	 * This is expected to never fail, because there is always an
>> +	 * entry available on the vq.
>> +	 */
>> +	BUG_ON(err);
>> +	virtqueue_kick(vq);
>> +}
>> +
>>   /*
>>    * While most virtqueues communicate guest-initiated requests to the
>>    hypervisor,
>>    * the stats queue operates in reverse.  The driver initializes the
>>    virtqueue
>> @@ -316,17 +393,6 @@ static void stats_handle_request(struct virtio_balloon
>> *vb)
>>   	virtqueue_kick(vq);
>>   }
>>   
>> -static void virtballoon_changed(struct virtio_device *vdev)
>> -{
>> -	struct virtio_balloon *vb = vdev->priv;
>> -	unsigned long flags;
>> -
>> -	spin_lock_irqsave(&vb->stop_update_lock, flags);
>> -	if (!vb->stop_update)
>> -		queue_work(system_freezable_wq, &vb->update_balloon_size_work);
>> -	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
>> -}
>> -
>>   static inline s64 towards_target(struct virtio_balloon *vb)
>>   {
>>   	s64 target;
>> @@ -343,6 +409,36 @@ static inline s64 towards_target(struct virtio_balloon
>> *vb)
>>   	return target - vb->num_pages;
>>   }
>>   
>> +static void virtballoon_changed(struct virtio_device *vdev)
>> +{
>> +	struct virtio_balloon *vb = vdev->priv;
>> +	unsigned long flags;
>> +	__u32 cmd_id;
>> +	s64 diff = towards_target(vb);
>> +
>> +	if (diff) {
>> +		spin_lock_irqsave(&vb->stop_update_lock, flags);
>> +		if (!vb->stop_update)
>> +			queue_work(system_freezable_wq,
>> +				   &vb->update_balloon_size_work);
>> +		spin_unlock_irqrestore(&vb->stop_update_lock, flags);
>> +	}
>> +
>> +	virtio_cread(vb->vdev, struct virtio_balloon_config,
>> +		     free_page_report_cmd_id, &cmd_id);
>> +	if (cmd_id == VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID) {
>> +		WRITE_ONCE(vb->report_free_page, false);
>> +	} else if (cmd_id != vb->start_cmd_id) {
>> +		/*
>> +		 * Host requests to start the reporting by sending a new cmd
>> +		 * id.
>> +		 */
>> +		WRITE_ONCE(vb->report_free_page, true);
>> +		vb->start_cmd_id = cmd_id;
>> +		queue_work(vb->balloon_wq, &vb->report_free_page_work);
>> +	}
>> +}
>> +
>>   static void update_balloon_size(struct virtio_balloon *vb)
>>   {
>>   	u32 actual = vb->num_pages;
>> @@ -417,40 +513,113 @@ static void update_balloon_size_func(struct
>> work_struct *work)
>>   
>>   static int init_vqs(struct virtio_balloon *vb)
>>   {
>> -	struct virtqueue *vqs[3];
>> -	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
>> -	static const char * const names[] = { "inflate", "deflate", "stats" };
>> -	int err, nvqs;
>> +	struct virtqueue **vqs;
>> +	vq_callback_t **callbacks;
>> +	const char **names;
>> +	struct scatterlist sg;
>> +	int i, nvqs, err = -ENOMEM;
>> +
>> +	/* Inflateq and deflateq are used unconditionally */
>> +	nvqs = 2;
>> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ))
>> +		nvqs++;
>> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
>> +		nvqs++;
>> +
>> +	/* Allocate space for find_vqs parameters */
>> +	vqs = kcalloc(nvqs, sizeof(*vqs), GFP_KERNEL);
>> +	if (!vqs)
>> +		goto err_vq;
>> +	callbacks = kmalloc_array(nvqs, sizeof(*callbacks), GFP_KERNEL);
>> +	if (!callbacks)
>> +		goto err_callback;
>> +	names = kmalloc_array(nvqs, sizeof(*names), GFP_KERNEL);
>> +	if (!names)
>> +		goto err_names;
>> +
>> +	callbacks[0] = balloon_ack;
>> +	names[0] = "inflate";
>> +	callbacks[1] = balloon_ack;
>> +	names[1] = "deflate";
>> +
>> +	i = 2;
>> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
>> +		callbacks[i] = stats_request;
>> +		names[i] = "stats";
>> +		i++;
>> +	}
>>   
>> -	/*
>> -	 * We expect two virtqueues: inflate and deflate, and
>> -	 * optionally stat.
>> -	 */
>> -	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
>> -	err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL);
>> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
>> +		callbacks[i] = NULL;
>> +		names[i] = "free_page_vq";
>> +	}
>> +
>> +	err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names,
>> +					 NULL, NULL);
>>   	if (err)
>> -		return err;
>> +		goto err_find;
>>   
>>   	vb->inflate_vq = vqs[0];
>>   	vb->deflate_vq = vqs[1];
>> +	i = 2;
>>   	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
>> -		struct scatterlist sg;
>> -		unsigned int num_stats;
>> -		vb->stats_vq = vqs[2];
>> -
>> +		vb->stats_vq = vqs[i++];
>>   		/*
>>   		 * Prime this virtqueue with one buffer so the hypervisor can
>>   		 * use it to signal us later (it can't be broken yet!).
>>   		 */
>> -		num_stats = update_balloon_stats(vb);
>> -
>> -		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
>> +		sg_init_one(&sg, vb->stats, sizeof(vb->stats));
>>   		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
>> -		    < 0)
>> -			BUG();
>> +		    < 0) {
>> +			dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
>> +				 __func__);
>> +			goto err_find;
>> +		}
>>   		virtqueue_kick(vb->stats_vq);
>>   	}
>> +
>> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
>> +		vb->free_page_vq = vqs[i];
>> +
>> +	kfree(names);
>> +	kfree(callbacks);
>> +	kfree(vqs);
>>   	return 0;
>   
> We can assign err=0 and remove above duplicate code?
>   

Where do you want to assign err=0? Could you show it using code?


Best,
Wei
Pankaj Gupta Jan. 17, 2018, 9:27 a.m. UTC | #3
> On 01/17/2018 04:21 PM, Pankaj Gupta wrote:
> >> Negotiation of the VIRTIO_BALLOON_F_FREE_PAGE_VQ feature indicates the
> >> support of reporting hints of guest free pages to host via virtio-balloon.
> >>
> >> Host requests the guest to report free pages by sending a new cmd
> >> id to the guest via the free_page_report_cmd_id configuration register.
> >>
> >> When the guest starts to report, the first element added to the free page
> >> vq is the cmd id given by host. When the guest finishes the reporting
> >> of all the free pages, VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID is added
> >> to the vq to tell host that the reporting is done. Host may also requests
> >> the guest to stop the reporting in advance by sending the stop cmd id to
> >> the guest via the configuration register.
> >>
> >> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> >> Signed-off-by: Liang Li <liang.z.li@intel.com>
> >> Cc: Michael S. Tsirkin <mst@redhat.com>
> >> Cc: Michal Hocko <mhocko@kernel.org>
> >> ---
> >>   drivers/virtio/virtio_balloon.c     | 242
> >>   +++++++++++++++++++++++++++++++-----
> >>   include/uapi/linux/virtio_balloon.h |   4 +
> >>   2 files changed, 214 insertions(+), 32 deletions(-)
> >>
> >> diff --git a/drivers/virtio/virtio_balloon.c
> >> b/drivers/virtio/virtio_balloon.c
> >> index a1fb52c..b9561a5 100644
> >> --- a/drivers/virtio/virtio_balloon.c
> >> +++ b/drivers/virtio/virtio_balloon.c
> >> @@ -53,7 +53,12 @@ static struct vfsmount *balloon_mnt;
> >>   
> >>   struct virtio_balloon {
> >>           struct virtio_device *vdev;
> >> -        struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
> >> +        struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
> >> +
> >> +        /* Balloon's own wq for cpu-intensive work items */
> >> +        struct workqueue_struct *balloon_wq;
> >> +        /* The free page reporting work item submitted to the balloon wq */
> >> +        struct work_struct report_free_page_work;
> >>   
> >>           /* The balloon servicing is delegated to a freezable workqueue. */
> >>           struct work_struct update_balloon_stats_work;
> >> @@ -63,6 +68,13 @@ struct virtio_balloon {
> >>           spinlock_t stop_update_lock;
> >>           bool stop_update;
> >>   
> >> +        /* Start to report free pages */
> >> +        bool report_free_page;
> >> +        /* Stores the cmd id given by host to start the free page reporting */
> >> +        uint32_t start_cmd_id;
> >> +        /* Stores STOP_ID as a sign to tell host that the reporting is done */
> >> +        uint32_t stop_cmd_id;
> >> +
> >>           /* Waiting for host to ack the pages we released. */
> >>           wait_queue_head_t acked;
> >>   
> >> @@ -281,6 +293,71 @@ static unsigned int update_balloon_stats(struct
> >> virtio_balloon *vb)
> >>           return idx;
> >>   }
> >>   
> >> +static void add_one_sg(struct virtqueue *vq, unsigned long pfn, uint32_t
> >> len)
> >> +{
> >> +        struct scatterlist sg;
> >> +        unsigned int unused;
> >> +        int err;
> >> +
> >> +        sg_init_table(&sg, 1);
> >> +        sg_set_page(&sg, pfn_to_page(pfn), len, 0);
> >> +
> >> +        /* Detach all the used buffers from the vq */
> >> +        while (virtqueue_get_buf(vq, &unused))
> >> +                ;
> >> +
> >> +        /*
> >> +         * Since this is an optimization feature, losing a couple of free
> >> +         * pages to report isn't important. We simply resturn without adding
> >> +         * the page if the vq is full. We are adding one entry each time,
> >> +         * which essentially results in no memory allocation, so the
> >> +         * GFP_KERNEL flag below can be ignored.
> >> +         */
> >> +        if (vq->num_free) {
> >> +                err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
> >> +                /*
> >> +                 * This is expected to never fail, because there is always an
> >> +                 * entry available on the vq.
> >> +                 */
> >> +                BUG_ON(err);
> >> +        }
> >> +}
> >> +
> >> +static void batch_free_page_sg(struct virtqueue *vq,
> >> +                               unsigned long pfn,
> >> +                               uint32_t len)
> >> +{
> >> +        add_one_sg(vq, pfn, len);
> >> +
> >> +        /* Batch till the vq is full */
> >> +        if (!vq->num_free)
> >> +                virtqueue_kick(vq);
> >> +}
> >> +
> >> +static void send_cmd_id(struct virtqueue *vq, void *addr)
> >> +{
> >> +        struct scatterlist sg;
> >> +        unsigned int unused;
> >> +        int err;
> >> +
> >> +        sg_init_one(&sg, addr, sizeof(uint32_t));
> >> +
> >> +        /*
> >> +         * This handles the cornercase that the vq happens to be full when
> >> +         * adding a cmd id. Rarely happen in practice.
> >> +         */
> >> +        while (!vq->num_free)
> >> +                virtqueue_get_buf(vq, &unused);
> >> +
> >> +        err = virtqueue_add_outbuf(vq, &sg, 1, vq, GFP_KERNEL);
> >> +        /*
> >> +         * This is expected to never fail, because there is always an
> >> +         * entry available on the vq.
> >> +         */
> >> +        BUG_ON(err);
> >> +        virtqueue_kick(vq);
> >> +}
> >> +
> >>   /*
> >>    * While most virtqueues communicate guest-initiated requests to the
> >>    hypervisor,
> >>    * the stats queue operates in reverse.  The driver initializes the
> >>    virtqueue
> >> @@ -316,17 +393,6 @@ static void stats_handle_request(struct
> >> virtio_balloon
> >> *vb)
> >>           virtqueue_kick(vq);
> >>   }
> >>   
> >> -static void virtballoon_changed(struct virtio_device *vdev)
> >> -{
> >> -        struct virtio_balloon *vb = vdev->priv;
> >> -        unsigned long flags;
> >> -
> >> -        spin_lock_irqsave(&vb->stop_update_lock, flags);
> >> -        if (!vb->stop_update)
> >> -                queue_work(system_freezable_wq, &vb->update_balloon_size_work);
> >> -        spin_unlock_irqrestore(&vb->stop_update_lock, flags);
> >> -}
> >> -
> >>   static inline s64 towards_target(struct virtio_balloon *vb)
> >>   {
> >>           s64 target;
> >> @@ -343,6 +409,36 @@ static inline s64 towards_target(struct
> >> virtio_balloon
> >> *vb)
> >>           return target - vb->num_pages;
> >>   }
> >>   
> >> +static void virtballoon_changed(struct virtio_device *vdev)
> >> +{
> >> +        struct virtio_balloon *vb = vdev->priv;
> >> +        unsigned long flags;
> >> +        __u32 cmd_id;
> >> +        s64 diff = towards_target(vb);
> >> +
> >> +        if (diff) {
> >> +                spin_lock_irqsave(&vb->stop_update_lock, flags);
> >> +                if (!vb->stop_update)
> >> +                        queue_work(system_freezable_wq,
> >> +                                   &vb->update_balloon_size_work);
> >> +                spin_unlock_irqrestore(&vb->stop_update_lock, flags);
> >> +        }
> >> +
> >> +        virtio_cread(vb->vdev, struct virtio_balloon_config,
> >> +                     free_page_report_cmd_id, &cmd_id);
> >> +        if (cmd_id == VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID) {
> >> +                WRITE_ONCE(vb->report_free_page, false);
> >> +        } else if (cmd_id != vb->start_cmd_id) {
> >> +                /*
> >> +                 * Host requests to start the reporting by sending a new cmd
> >> +                 * id.
> >> +                 */
> >> +                WRITE_ONCE(vb->report_free_page, true);
> >> +                vb->start_cmd_id = cmd_id;
> >> +                queue_work(vb->balloon_wq, &vb->report_free_page_work);
> >> +        }
> >> +}
> >> +
> >>   static void update_balloon_size(struct virtio_balloon *vb)
> >>   {
> >>           u32 actual = vb->num_pages;
> >> @@ -417,40 +513,113 @@ static void update_balloon_size_func(struct
> >> work_struct *work)
> >>   
> >>   static int init_vqs(struct virtio_balloon *vb)
> >>   {
> >> -        struct virtqueue *vqs[3];
> >> -        vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request
> >> };
> >> -        static const char * const names[] = { "inflate", "deflate", "stats" };
> >> -        int err, nvqs;
> >> +        struct virtqueue **vqs;
> >> +        vq_callback_t **callbacks;
> >> +        const char **names;
> >> +        struct scatterlist sg;
> >> +        int i, nvqs, err = -ENOMEM;
> >> +
> >> +        /* Inflateq and deflateq are used unconditionally */
> >> +        nvqs = 2;
> >> +        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ))
> >> +                nvqs++;
> >> +        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
> >> +                nvqs++;
> >> +
> >> +        /* Allocate space for find_vqs parameters */
> >> +        vqs = kcalloc(nvqs, sizeof(*vqs), GFP_KERNEL);
> >> +        if (!vqs)
> >> +                goto err_vq;
> >> +        callbacks = kmalloc_array(nvqs, sizeof(*callbacks), GFP_KERNEL);
> >> +        if (!callbacks)
> >> +                goto err_callback;
> >> +        names = kmalloc_array(nvqs, sizeof(*names), GFP_KERNEL);
> >> +        if (!names)
> >> +                goto err_names;
> >> +
> >> +        callbacks[0] = balloon_ack;
> >> +        names[0] = "inflate";
> >> +        callbacks[1] = balloon_ack;
> >> +        names[1] = "deflate";
> >> +
> >> +        i = 2;
> >> +        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
> >> +                callbacks[i] = stats_request;
> >> +                names[i] = "stats";
> >> +                i++;
> >> +        }
> >>   
> >> -        /*
> >> -         * We expect two virtqueues: inflate and deflate, and
> >> -         * optionally stat.
> >> -         */
> >> -        nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
> >> -        err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL);
> >> +        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
> >> +                callbacks[i] = NULL;
> >> +                names[i] = "free_page_vq";
> >> +        }
> >> +
> >> +        err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names,
> >> +                                         NULL, NULL);
> >>           if (err)
> >> -                return err;
> >> +                goto err_find;
> >>   
> >>           vb->inflate_vq = vqs[0];
> >>           vb->deflate_vq = vqs[1];
> >> +        i = 2;
> >>           if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
> >> -                struct scatterlist sg;
> >> -                unsigned int num_stats;
> >> -                vb->stats_vq = vqs[2];
> >> -
> >> +                vb->stats_vq = vqs[i++];
> >>                   /*
> >>                    * Prime this virtqueue with one buffer so the hypervisor can
> >>                    * use it to signal us later (it can't be broken yet!).
> >>                    */
> >> -                num_stats = update_balloon_stats(vb);
> >> -
> >> -                sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
> >> +                sg_init_one(&sg, vb->stats, sizeof(vb->stats));
> >>                   if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
> >> -                    < 0)
> >> -                        BUG();
> >> +                    < 0) {
> >> +                        dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
> >> +                                 __func__);
> >> +                        goto err_find;
> >> +                }
> >>                   virtqueue_kick(vb->stats_vq);
> >>           }
> >> +
> >> +        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
> >> +                vb->free_page_vq = vqs[i];
> >> +
> >> +        kfree(names);
> >> +        kfree(callbacks);
> >> +        kfree(vqs);
> >>           return 0;
> >   
> > We can assign err=0 and remove above duplicate code?
> >   
> 
> Where do you want to assign err=0? Could you show it using code?

O.K., you have initialized "err = -ENOMEM;"

Remove these four lines.
 
 -        kfree(names);
 -        kfree(callbacks);
 -        kfree(vqs);
 -        return 0;

 +        err = 0;              // if executed without any error

The code below is already there, so on error, err is already '-ENOMEM'
and we jump to the appropriate label. 

 +
 +err_find:
 +        kfree(names);
 +err_names:
 +        kfree(callbacks);
 +err_callback:
 +        kfree(vqs);
> +err_vq:
> +       return err;
> +}


Thanks,
Pankaj

> 
> 
> Best,
> Wei
> 
> 
>
Wang, Wei W Jan. 17, 2018, 10:47 a.m. UTC | #4
On 01/17/2018 05:27 PM, Pankaj Gupta wrote:
>> On 01/17/2018 04:21 PM, Pankaj Gupta wrote:
>>
> o.k  you have initialize "err = -ENOMEM;"
>
> Remove these four lines.
>   
>   -        kfree(names);
>   -        kfree(callbacks);
>   -        kfree(vqs);
>   -        return 0;
>
>   +        err = 0;              // if executed without any error
>

OK, thanks. "error = 0" is not needed actually.

Best,
Wei
Michael S. Tsirkin Jan. 17, 2018, 4:44 p.m. UTC | #5
On Wed, Jan 17, 2018 at 01:10:11PM +0800, Wei Wang wrote:
> Negotiation of the VIRTIO_BALLOON_F_FREE_PAGE_VQ feature indicates the
> support of reporting hints of guest free pages to host via virtio-balloon.
> 
> Host requests the guest to report free pages by sending a new cmd
> id to the guest via the free_page_report_cmd_id configuration register.
> 
> When the guest starts to report, the first element added to the free page
> vq is the cmd id given by host. When the guest finishes the reporting
> of all the free pages, VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID is added
> to the vq to tell host that the reporting is done. Host may also requests
> the guest to stop the reporting in advance by sending the stop cmd id to
> the guest via the configuration register.
> 
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> Signed-off-by: Liang Li <liang.z.li@intel.com>
> Cc: Michael S. Tsirkin <mst@redhat.com>
> Cc: Michal Hocko <mhocko@kernel.org>
> ---
>  drivers/virtio/virtio_balloon.c     | 242 +++++++++++++++++++++++++++++++-----
>  include/uapi/linux/virtio_balloon.h |   4 +
>  2 files changed, 214 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
> index a1fb52c..b9561a5 100644
> --- a/drivers/virtio/virtio_balloon.c
> +++ b/drivers/virtio/virtio_balloon.c
> @@ -53,7 +53,12 @@ static struct vfsmount *balloon_mnt;
>  
>  struct virtio_balloon {
>  	struct virtio_device *vdev;
> -	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
> +	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
> +
> +	/* Balloon's own wq for cpu-intensive work items */
> +	struct workqueue_struct *balloon_wq;
> +	/* The free page reporting work item submitted to the balloon wq */
> +	struct work_struct report_free_page_work;
>  
>  	/* The balloon servicing is delegated to a freezable workqueue. */
>  	struct work_struct update_balloon_stats_work;
> @@ -63,6 +68,13 @@ struct virtio_balloon {
>  	spinlock_t stop_update_lock;
>  	bool stop_update;
>  
> +	/* Start to report free pages */
> +	bool report_free_page;
> +	/* Stores the cmd id given by host to start the free page reporting */
> +	uint32_t start_cmd_id;
> +	/* Stores STOP_ID as a sign to tell host that the reporting is done */
> +	uint32_t stop_cmd_id;
> +
>  	/* Waiting for host to ack the pages we released. */
>  	wait_queue_head_t acked;
>  
> @@ -281,6 +293,71 @@ static unsigned int update_balloon_stats(struct virtio_balloon *vb)
>  	return idx;
>  }
>  
> +static void add_one_sg(struct virtqueue *vq, unsigned long pfn, uint32_t len)
> +{
> +	struct scatterlist sg;
> +	unsigned int unused;
> +	int err;
> +
> +	sg_init_table(&sg, 1);
> +	sg_set_page(&sg, pfn_to_page(pfn), len, 0);
> +
> +	/* Detach all the used buffers from the vq */
> +	while (virtqueue_get_buf(vq, &unused))
> +		;
> +
> +	/*
> +	 * Since this is an optimization feature, losing a couple of free
> +	 * pages to report isn't important.
> We simply resturn

return

> without adding
> +	 * the page if the vq is full. We are adding one entry each time,
> +	 * which essentially results in no memory allocation, so the
> +	 * GFP_KERNEL flag below can be ignored.
> +	 */
> +	if (vq->num_free) {
> +		err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);

Should we kick here? At least when ring is close to
being full. Kick at half way full?
Otherwise it's unlikely ring will
ever be cleaned until we finish the scan.

> +		/*
> +		 * This is expected to never fail, because there is always an
> +		 * entry available on the vq.
> +		 */
> +		BUG_ON(err);
> +	}
> +}
> +
> +static void batch_free_page_sg(struct virtqueue *vq,
> +			       unsigned long pfn,
> +			       uint32_t len)

Not sure what does batch refer to here.
I'd just open-code this.

> +{
> +	add_one_sg(vq, pfn, len);
> +
> +	/* Batch till the vq is full */
> +	if (!vq->num_free)
> +		virtqueue_kick(vq);
> +}
> +
> +static void send_cmd_id(struct virtqueue *vq, void *addr)

Why void *? Should be a specific type.
then you can use sizeof *addr as size.

> +{
> +	struct scatterlist sg;
> +	unsigned int unused;
> +	int err;
> +
> +	sg_init_one(&sg, addr, sizeof(uint32_t));

This passes a guest-endian value to host. This is a problem:
should always pass LE values.

> +
> +	/*
> +	 * This handles the cornercase that the vq happens to be full when
> +	 * adding a cmd id. Rarely happen in practice.
> +	 */
> +	while (!vq->num_free)
> +		virtqueue_get_buf(vq, &unused);

I dislike this busy-waiting. It's a hint after all -
why not just retry later - hopefully after getting an
interrupt?

Alternatively, stop adding more entries when we have a single
ring entry left, making sure we have space for the command.

> +
> +	err = virtqueue_add_outbuf(vq, &sg, 1, vq, GFP_KERNEL);
> +	/*
> +	 * This is expected to never fail, because there is always an
> +	 * entry available on the vq.
> +	 */
> +	BUG_ON(err);
> +	virtqueue_kick(vq);
> +}
> +
>  /*
>   * While most virtqueues communicate guest-initiated requests to the hypervisor,
>   * the stats queue operates in reverse.  The driver initializes the virtqueue
> @@ -316,17 +393,6 @@ static void stats_handle_request(struct virtio_balloon *vb)
>  	virtqueue_kick(vq);
>  }
>  
> -static void virtballoon_changed(struct virtio_device *vdev)
> -{
> -	struct virtio_balloon *vb = vdev->priv;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&vb->stop_update_lock, flags);
> -	if (!vb->stop_update)
> -		queue_work(system_freezable_wq, &vb->update_balloon_size_work);
> -	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
> -}
> -
>  static inline s64 towards_target(struct virtio_balloon *vb)
>  {
>  	s64 target;
> @@ -343,6 +409,36 @@ static inline s64 towards_target(struct virtio_balloon *vb)
>  	return target - vb->num_pages;
>  }
>  
> +static void virtballoon_changed(struct virtio_device *vdev)
> +{
> +	struct virtio_balloon *vb = vdev->priv;
> +	unsigned long flags;
> +	__u32 cmd_id;
> +	s64 diff = towards_target(vb);
> +
> +	if (diff) {
> +		spin_lock_irqsave(&vb->stop_update_lock, flags);
> +		if (!vb->stop_update)

Why do you ignore stop_update for freeze?
This means new wq entries can be added during remove
causing use after free issues.

> +			queue_work(system_freezable_wq,
> +				   &vb->update_balloon_size_work);
> +		spin_unlock_irqrestore(&vb->stop_update_lock, flags);
> +	}
> +
> +	virtio_cread(vb->vdev, struct virtio_balloon_config,
> +		     free_page_report_cmd_id, &cmd_id);

You want virtio_cread_feature, don't access the new field
if the feature has not been negotiated.


> +	if (cmd_id == VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID) {
> +		WRITE_ONCE(vb->report_free_page, false);
> +	} else if (cmd_id != vb->start_cmd_id) {
> +		/*
> +		 * Host requests to start the reporting by sending a new cmd
> +		 * id.
> +		 */
> +		WRITE_ONCE(vb->report_free_page, true);

I don't know why we bother with WRITE_ONCE here.  The point of
report_free_page being used lockless is that that it's not a big deal if
it's wrong occasionally, right?



> +		vb->start_cmd_id = cmd_id;
> +		queue_work(vb->balloon_wq, &vb->report_free_page_work);

It seems that if a command was already queued (with a different id),
this will result in new command id being sent to host twice, which will
likely confuse the host.



> +	}
> +}
> +
>  static void update_balloon_size(struct virtio_balloon *vb)
>  {
>  	u32 actual = vb->num_pages;
> @@ -417,40 +513,113 @@ static void update_balloon_size_func(struct work_struct *work)
>  
>  static int init_vqs(struct virtio_balloon *vb)
>  {
> -	struct virtqueue *vqs[3];
> -	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
> -	static const char * const names[] = { "inflate", "deflate", "stats" };
> -	int err, nvqs;
> +	struct virtqueue **vqs;
> +	vq_callback_t **callbacks;
> +	const char **names;
> +	struct scatterlist sg;
> +	int i, nvqs, err = -ENOMEM;
> +
> +	/* Inflateq and deflateq are used unconditionally */
> +	nvqs = 2;
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ))
> +		nvqs++;
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
> +		nvqs++;
> +
> +	/* Allocate space for find_vqs parameters */
> +	vqs = kcalloc(nvqs, sizeof(*vqs), GFP_KERNEL);
> +	if (!vqs)
> +		goto err_vq;
> +	callbacks = kmalloc_array(nvqs, sizeof(*callbacks), GFP_KERNEL);
> +	if (!callbacks)
> +		goto err_callback;
> +	names = kmalloc_array(nvqs, sizeof(*names), GFP_KERNEL);
> +	if (!names)
> +		goto err_names;

Why not just keep these 3 arrays on stack? they aren't large.

> +
> +	callbacks[0] = balloon_ack;
> +	names[0] = "inflate";
> +	callbacks[1] = balloon_ack;
> +	names[1] = "deflate";
> +
> +	i = 2;
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
> +		callbacks[i] = stats_request;
> +		names[i] = "stats";
> +		i++;
> +	}
>  
> -	/*
> -	 * We expect two virtqueues: inflate and deflate, and
> -	 * optionally stat.
> -	 */
> -	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
> -	err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL);
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
> +		callbacks[i] = NULL;
> +		names[i] = "free_page_vq";
> +	}
> +
> +	err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names,
> +					 NULL, NULL);
>  	if (err)
> -		return err;
> +		goto err_find;
>  
>  	vb->inflate_vq = vqs[0];
>  	vb->deflate_vq = vqs[1];
> +	i = 2;
>  	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
> -		struct scatterlist sg;
> -		unsigned int num_stats;
> -		vb->stats_vq = vqs[2];
> -
> +		vb->stats_vq = vqs[i++];
>  		/*
>  		 * Prime this virtqueue with one buffer so the hypervisor can
>  		 * use it to signal us later (it can't be broken yet!).
>  		 */
> -		num_stats = update_balloon_stats(vb);
> -
> -		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
> +		sg_init_one(&sg, vb->stats, sizeof(vb->stats));
>  		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
> -		    < 0)
> -			BUG();
> +		    < 0) {
> +			dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
> +				 __func__);
> +			goto err_find;
> +		}
>  		virtqueue_kick(vb->stats_vq);
>  	}
> +
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
> +		vb->free_page_vq = vqs[i];
> +
> +	kfree(names);
> +	kfree(callbacks);
> +	kfree(vqs);
>  	return 0;
> +
> +err_find:
> +	kfree(names);
> +err_names:
> +	kfree(callbacks);
> +err_callback:
> +	kfree(vqs);
> +err_vq:
> +	return err;
> +}
> +
> +static bool virtio_balloon_send_free_pages(void *opaque, unsigned long pfn,
> +					   unsigned long nr_pages)
> +{
> +	struct virtio_balloon *vb = (struct virtio_balloon *)opaque;
> +	uint32_t len = nr_pages << PAGE_SHIFT;
> +
> +	if (!READ_ONCE(vb->report_free_page))
> +		return false;
> +
> +	batch_free_page_sg(vb->free_page_vq, pfn, len);
> +
> +	return true;
> +}
> +
> +static void report_free_page_func(struct work_struct *work)
> +{
> +	struct virtio_balloon *vb;
> +
> +	vb = container_of(work, struct virtio_balloon, report_free_page_work);
> +	/* Start by sending the obtained cmd id to the host with an outbuf */
> +	send_cmd_id(vb->free_page_vq, &vb->start_cmd_id);
> +	walk_free_mem_block(vb, 0, &virtio_balloon_send_free_pages);
> +	/* End by sending the stop id to the host with an outbuf */
> +	send_cmd_id(vb->free_page_vq, &vb->stop_cmd_id);
>  }
>  
>  #ifdef CONFIG_BALLOON_COMPACTION
> @@ -566,6 +735,13 @@ static int virtballoon_probe(struct virtio_device *vdev)
>  	if (err)
>  		goto out_free_vb;
>  
> +	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
> +		vb->balloon_wq = alloc_workqueue("balloon-wq",
> +					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);

No destroy_workqueue in sight, will likely leak a wq in remove.



> +		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
> +		vb->stop_cmd_id = VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID;
> +	}
> +
>  	vb->nb.notifier_call = virtballoon_oom_notify;
>  	vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
>  	err = register_oom_notifier(&vb->nb);
> @@ -630,6 +806,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
>  	spin_unlock_irq(&vb->stop_update_lock);
>  	cancel_work_sync(&vb->update_balloon_size_work);
>  	cancel_work_sync(&vb->update_balloon_stats_work);
> +	cancel_work_sync(&vb->report_free_page_work);
>  
>  	remove_common(vb);
>  #ifdef CONFIG_BALLOON_COMPACTION
> @@ -682,6 +859,7 @@ static unsigned int features[] = {
>  	VIRTIO_BALLOON_F_MUST_TELL_HOST,
>  	VIRTIO_BALLOON_F_STATS_VQ,
>  	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
> +	VIRTIO_BALLOON_F_FREE_PAGE_VQ,
>  };
>  
>  static struct virtio_driver virtio_balloon_driver = {
> diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
> index 343d7dd..55e2456 100644
> --- a/include/uapi/linux/virtio_balloon.h
> +++ b/include/uapi/linux/virtio_balloon.h
> @@ -34,15 +34,19 @@
>  #define VIRTIO_BALLOON_F_MUST_TELL_HOST	0 /* Tell before reclaiming pages */
>  #define VIRTIO_BALLOON_F_STATS_VQ	1 /* Memory Stats virtqueue */
>  #define VIRTIO_BALLOON_F_DEFLATE_ON_OOM	2 /* Deflate balloon on OOM */
> +#define VIRTIO_BALLOON_F_FREE_PAGE_VQ	3 /* VQ to report free pages */
>  
>  /* Size of a PFN in the balloon interface. */
>  #define VIRTIO_BALLOON_PFN_SHIFT 12
>  
> +#define VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID		0
>  struct virtio_balloon_config {
>  	/* Number of pages host wants Guest to give up. */
>  	__u32 num_pages;
>  	/* Number of pages we've actually got in balloon. */
>  	__u32 actual;
> +	/* Free page report command id, readonly by guest */
> +	__u32 free_page_report_cmd_id;
>  };
>  
>  #define VIRTIO_BALLOON_S_SWAP_IN  0   /* Amount of memory swapped in */
> -- 
> 2.7.4
Tetsuo Handa Jan. 18, 2018, 1:30 p.m. UTC | #6
On 2018/01/18 1:44, Michael S. Tsirkin wrote:
>> +static void add_one_sg(struct virtqueue *vq, unsigned long pfn, uint32_t len)
>> +{
>> +	struct scatterlist sg;
>> +	unsigned int unused;
>> +	int err;
>> +
>> +	sg_init_table(&sg, 1);
>> +	sg_set_page(&sg, pfn_to_page(pfn), len, 0);
>> +
>> +	/* Detach all the used buffers from the vq */
>> +	while (virtqueue_get_buf(vq, &unused))
>> +		;
>> +
>> +	/*
>> +	 * Since this is an optimization feature, losing a couple of free
>> +	 * pages to report isn't important.
>> We simply resturn
> 
> return
> 
>> without adding
>> +	 * the page if the vq is full. We are adding one entry each time,
>> +	 * which essentially results in no memory allocation, so the
>> +	 * GFP_KERNEL flag below can be ignored.
>> +	 */
>> +	if (vq->num_free) {
>> +		err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
> 
> Should we kick here? At least when ring is close to
> being full. Kick at half way full?
> Otherwise it's unlikely ring will
> ever be cleaned until we finish the scan.

Since this add_one_sg() is called between spin_lock_irqsave(&zone->lock, flags)
and spin_unlock_irqrestore(&zone->lock, flags), it is not permitted to sleep.
And walk_free_mem_block() is not ready to handle resume.

By the way, specifying GFP_KERNEL here is confusing even though it is never used.
walk_free_mem_block() says:

  * The callback itself must not sleep or perform any operations which would
  * require any memory allocations directly (not even GFP_NOWAIT/GFP_ATOMIC)
  * or via any lock dependency. 

> 
>> +		/*
>> +		 * This is expected to never fail, because there is always an
>> +		 * entry available on the vq.
>> +		 */
>> +		BUG_ON(err);
>> +	}
>> +}
Michael S. Tsirkin Jan. 18, 2018, 7:09 p.m. UTC | #7
On Thu, Jan 18, 2018 at 10:30:18PM +0900, Tetsuo Handa wrote:
> On 2018/01/18 1:44, Michael S. Tsirkin wrote:
> >> +static void add_one_sg(struct virtqueue *vq, unsigned long pfn, uint32_t len)
> >> +{
> >> +	struct scatterlist sg;
> >> +	unsigned int unused;
> >> +	int err;
> >> +
> >> +	sg_init_table(&sg, 1);
> >> +	sg_set_page(&sg, pfn_to_page(pfn), len, 0);
> >> +
> >> +	/* Detach all the used buffers from the vq */
> >> +	while (virtqueue_get_buf(vq, &unused))
> >> +		;
> >> +
> >> +	/*
> >> +	 * Since this is an optimization feature, losing a couple of free
> >> +	 * pages to report isn't important.
> >> We simply resturn
> > 
> > return
> > 
> >> without adding
> >> +	 * the page if the vq is full. We are adding one entry each time,
> >> +	 * which essentially results in no memory allocation, so the
> >> +	 * GFP_KERNEL flag below can be ignored.
> >> +	 */
> >> +	if (vq->num_free) {
> >> +		err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
> > 
> > Should we kick here? At least when ring is close to
> > being full. Kick at half way full?
> > Otherwise it's unlikely ring will
> > ever be cleaned until we finish the scan.
> 
> Since this add_one_sg() is called between spin_lock_irqsave(&zone->lock, flags)
> and spin_unlock_irqrestore(&zone->lock, flags), it is not permitted to sleep.

kick takes a while sometimes but it doesn't sleep.

> And walk_free_mem_block() is not ready to handle resume.
> 
> By the way, specifying GFP_KERNEL here is confusing even though it is never used.
> walk_free_mem_block() says:
> 
>   * The callback itself must not sleep or perform any operations which would
>   * require any memory allocations directly (not even GFP_NOWAIT/GFP_ATOMIC)
>   * or via any lock dependency. 

Yea, GFP_ATOMIC would do just as well. But I think any allocation
on this path would be problematic.

How about a flag to make all allocations fail?

E.g. 

#define GFP_FORBIDDEN (___GFP_DMA | ___GFP_HIGHMEM)

Still this is not a blocker, we can worry about this later.


> > 
> >> +		/*
> >> +		 * This is expected to never fail, because there is always an
> >> +		 * entry available on the vq.
> >> +		 */
> >> +		BUG_ON(err);
> >> +	}
> >> +}
Tetsuo Handa Jan. 18, 2018, 9:11 p.m. UTC | #8
Michael S. Tsirkin wrote:
> On Thu, Jan 18, 2018 at 10:30:18PM +0900, Tetsuo Handa wrote:
> > On 2018/01/18 1:44, Michael S. Tsirkin wrote:
> > >> +static void add_one_sg(struct virtqueue *vq, unsigned long pfn, uint32_t len)
> > >> +{
> > >> +	struct scatterlist sg;
> > >> +	unsigned int unused;
> > >> +	int err;
> > >> +
> > >> +	sg_init_table(&sg, 1);
> > >> +	sg_set_page(&sg, pfn_to_page(pfn), len, 0);
> > >> +
> > >> +	/* Detach all the used buffers from the vq */
> > >> +	while (virtqueue_get_buf(vq, &unused))
> > >> +		;
> > >> +
> > >> +	/*
> > >> +	 * Since this is an optimization feature, losing a couple of free
> > >> +	 * pages to report isn't important.
> > >> We simply resturn
> > > 
> > > return
> > > 
> > >> without adding
> > >> +	 * the page if the vq is full. We are adding one entry each time,
> > >> +	 * which essentially results in no memory allocation, so the
> > >> +	 * GFP_KERNEL flag below can be ignored.
> > >> +	 */
> > >> +	if (vq->num_free) {
> > >> +		err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
> > > 
> > > Should we kick here? At least when ring is close to
> > > being full. Kick at half way full?
> > > Otherwise it's unlikely ring will
> > > ever be cleaned until we finish the scan.
> > 
> > Since this add_one_sg() is called between spin_lock_irqsave(&zone->lock, flags)
> > and spin_unlock_irqrestore(&zone->lock, flags), it is not permitted to sleep.
> 
> kick takes a while sometimes but it doesn't sleep.

I don't know about virtio. But the purpose of kicking here is to wait for pending data
to be flushed in order to increase vq->num_free, isn't it? Then, doesn't waiting for
pending data to be flushed involve sleeping? If yes, we can wait for completion of kick
but we can't wait for completion of flush. Is pending data flushed without sleep?

> 
> > And walk_free_mem_block() is not ready to handle resume.
> > 
> > By the way, specifying GFP_KERNEL here is confusing even though it is never used.
> > walk_free_mem_block() says:
> > 
> >   * The callback itself must not sleep or perform any operations which would
> >   * require any memory allocations directly (not even GFP_NOWAIT/GFP_ATOMIC)
> >   * or via any lock dependency. 
> 
> Yea, GFP_ATOMIC would do just as well. But I think any allocation
> on this path would be problematic.
> 
> How about a flag to make all allocations fail?
> 
> E.g. 
> 
> #define GFP_FORBIDDEN (___GFP_DMA | ___GFP_HIGHMEM)
> 
> Still this is not a blocker, we can worry about this later.
> 
> 
> > > 
> > >> +		/*
> > >> +		 * This is expected to never fail, because there is always an
> > >> +		 * entry available on the vq.
> > >> +		 */
> > >> +		BUG_ON(err);
> > >> +	}
> > >> +}
>
Michael S. Tsirkin Jan. 18, 2018, 10:32 p.m. UTC | #9
On Fri, Jan 19, 2018 at 06:11:31AM +0900, Tetsuo Handa wrote:
> Michael S. Tsirkin wrote:
> > On Thu, Jan 18, 2018 at 10:30:18PM +0900, Tetsuo Handa wrote:
> > > On 2018/01/18 1:44, Michael S. Tsirkin wrote:
> > > >> +static void add_one_sg(struct virtqueue *vq, unsigned long pfn, uint32_t len)
> > > >> +{
> > > >> +	struct scatterlist sg;
> > > >> +	unsigned int unused;
> > > >> +	int err;
> > > >> +
> > > >> +	sg_init_table(&sg, 1);
> > > >> +	sg_set_page(&sg, pfn_to_page(pfn), len, 0);
> > > >> +
> > > >> +	/* Detach all the used buffers from the vq */
> > > >> +	while (virtqueue_get_buf(vq, &unused))
> > > >> +		;
> > > >> +
> > > >> +	/*
> > > >> +	 * Since this is an optimization feature, losing a couple of free
> > > >> +	 * pages to report isn't important.
> > > >> We simply resturn
> > > > 
> > > > return
> > > > 
> > > >> without adding
> > > >> +	 * the page if the vq is full. We are adding one entry each time,
> > > >> +	 * which essentially results in no memory allocation, so the
> > > >> +	 * GFP_KERNEL flag below can be ignored.
> > > >> +	 */
> > > >> +	if (vq->num_free) {
> > > >> +		err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
> > > > 
> > > > Should we kick here? At least when ring is close to
> > > > being full. Kick at half way full?
> > > > Otherwise it's unlikely ring will
> > > > ever be cleaned until we finish the scan.
> > > 
> > > Since this add_one_sg() is called between spin_lock_irqsave(&zone->lock, flags)
> > > and spin_unlock_irqrestore(&zone->lock, flags), it is not permitted to sleep.
> > 
> > kick takes a while sometimes but it doesn't sleep.
> 
> I don't know about virtio. But the purpose of kicking here is to wait for pending data
> to be flushed in order to increase vq->num_free, isn't it?

It isn't. It's to wake up device out of sleep to make it start
processing the pending data. If device isn't asleep, it's a nop.

> Then, doesn't waiting for
> pending data to be flushed involve sleeping? If yes, we can wait for completion of kick
> but we can't wait for completion of flush. Is pending data flushed without sleep?
> 
> > 
> > > And walk_free_mem_block() is not ready to handle resume.
> > > 
> > > By the way, specifying GFP_KERNEL here is confusing even though it is never used.
> > > walk_free_mem_block() says:
> > > 
> > >   * The callback itself must not sleep or perform any operations which would
> > >   * require any memory allocations directly (not even GFP_NOWAIT/GFP_ATOMIC)
> > >   * or via any lock dependency. 
> > 
> > Yea, GFP_ATOMIC would do just as well. But I think any allocation
> > on this path would be problematic.
> > 
> > How about a flag to make all allocations fail?
> > 
> > E.g. 
> > 
> > #define GFP_FORBIDDEN (___GFP_DMA | ___GFP_HIGHMEM)
> > 
> > Still this is not a blocker, we can worry about this later.
> > 
> > 
> > > > 
> > > >> +		/*
> > > >> +		 * This is expected to never fail, because there is always an
> > > >> +		 * entry available on the vq.
> > > >> +		 */
> > > >> +		BUG_ON(err);
> > > >> +	}
> > > >> +}
> >
Wang, Wei W Jan. 19, 2018, 3:44 a.m. UTC | #10
On 01/18/2018 12:44 AM, Michael S. Tsirkin wrote:
> On Wed, Jan 17, 2018 at 01:10:11PM +0800, Wei Wang wrote:
>

>
>> +{
>> +	struct scatterlist sg;
>> +	unsigned int unused;
>> +	int err;
>> +
>> +	sg_init_one(&sg, addr, sizeof(uint32_t));
> This passes a guest-endian value to host. This is a problem:
> should always pass LE values.

I think the endianness is handled when virtqueue_add_outbuf():

desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);

right?

>
>> +
>> +	/*
>> +	 * This handles the cornercase that the vq happens to be full when
>> +	 * adding a cmd id. Rarely happen in practice.
>> +	 */
>> +	while (!vq->num_free)
>> +		virtqueue_get_buf(vq, &unused);
> I dislike this busy-waiting. It's a hint after all -
> why not just retry later - hopefully after getting an
> interrupt?
>
> Alternatively, stop adding more entries when we have a single
> ring entry left, making sure we have space for the command.

I think the second one looks good. Thanks.

>> +			queue_work(system_freezable_wq,
>> +				   &vb->update_balloon_size_work);
>> +		spin_unlock_irqrestore(&vb->stop_update_lock, flags);
>> +	}
>> +
>> +	virtio_cread(vb->vdev, struct virtio_balloon_config,
>> +		     free_page_report_cmd_id, &cmd_id);
> You want virtio_cread_feature, don't access the new field
> if the feature has not been negotiated.

Right. We probably need to put all the following cmd id related things 
under the feature check,

How about

if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
     virtio_cread(..);
     if (cmd_id == VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID) {
     ....
}


>
>
>> +	if (cmd_id == VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID) {
>> +		WRITE_ONCE(vb->report_free_page, false);
>> +	} else if (cmd_id != vb->start_cmd_id) {
>> +		/*
>> +		 * Host requests to start the reporting by sending a new cmd
>> +		 * id.
>> +		 */
>> +		WRITE_ONCE(vb->report_free_page, true);
> I don't know why we bother with WRITE_ONCE here.  The point of
> report_free_page being used lockless is that that it's not a big deal if
> it's wrong occasionally, right?

Actually the main reason is that "vb->report_free_page" is a value 
shared by two threads:
Written by the config_change here, and read by the worker thread that 
reports the free pages.

Alternatively, we could let the two sides access to the shared variable 
with "volatile" pointers.


>
>
>
>> +		vb->start_cmd_id = cmd_id;
>> +		queue_work(vb->balloon_wq, &vb->report_free_page_work);
> It seems that if a command was already queued (with a different id),
> this will result in new command id being sent to host twice, which will
> likely confuse the host.

I think that case won't happen, because
- the host sends a cmd id to the guest via the config, while the guest 
acks back the received cmd id via the virtqueue;
- the guest acks back a cmd id only when a new cmd id is received from 
the host; that is the above check:

     if (cmd_id != vb->start_cmd_id) { --> the driver queues the 
reporting work only when a new cmd id is received
                         /*
                          * Host requests to start the reporting by 
sending a
                          * new cmd id.
                          */
                         WRITE_ONCE(vb->report_free_page, true);
                         vb->start_cmd_id = cmd_id;
                         queue_work(vb->balloon_wq, 
&vb->report_free_page_work);
     }

So the same cmd id wouldn't queue the reporting work twice.


>
>
>
>> +	}
>> +}
>> +
>>   static void update_balloon_size(struct virtio_balloon *vb)
>>   {
>>   	u32 actual = vb->num_pages;
>> @@ -417,40 +513,113 @@ static void update_balloon_size_func(struct work_struct *work)
>>   
>>   static int init_vqs(struct virtio_balloon *vb)
>>   {
>> -	struct virtqueue *vqs[3];
>> -	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
>> -	static const char * const names[] = { "inflate", "deflate", "stats" };
>> -	int err, nvqs;
>> +	struct virtqueue **vqs;
>> +	vq_callback_t **callbacks;
>> +	const char **names;
>> +	struct scatterlist sg;
>> +	int i, nvqs, err = -ENOMEM;
>> +
>> +	/* Inflateq and deflateq are used unconditionally */
>> +	nvqs = 2;
>> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ))
>> +		nvqs++;
>> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
>> +		nvqs++;
>> +
>> +	/* Allocate space for find_vqs parameters */
>> +	vqs = kcalloc(nvqs, sizeof(*vqs), GFP_KERNEL);
>> +	if (!vqs)
>> +		goto err_vq;
>> +	callbacks = kmalloc_array(nvqs, sizeof(*callbacks), GFP_KERNEL);
>> +	if (!callbacks)
>> +		goto err_callback;
>> +	names = kmalloc_array(nvqs, sizeof(*names), GFP_KERNEL);
>> +	if (!names)
>> +		goto err_names;
> Why not just keep these 3 arrays on stack? they aren't large.

Sounds good. Here is the new implementation:

static int init_vqs(struct virtio_balloon *vb)
{
         struct virtqueue *vqs[4];
         vq_callback_t *callbacks[4];
         const char *names[4];
         struct scatterlist sg;
         int ret;


         /*
          * Inflateq and deflateq are used unconditionally. stats_vq and
          * free_page_vq uses names[2] and names[3], respectively. The 
names[]
          * will be NULL if the related feature is not enabled, which will
          * cause no allocation for the corresponding virtqueue in find_vqs.
          */
         callbacks[0] = balloon_ack;
         names[0] = "inflate";
         callbacks[1] = balloon_ack;
         names[1] = "deflate";
         names[2] = NULL;
         names[3] = NULL;

         if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
                 names[2] = "stats";
                 callbacks[2] = stats_request;
         }
         if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
                 names[3] = "free_page_vq";
                 callbacks[3] = NULL;
         }

         ret = vb->vdev->config->find_vqs(vb->vdev, 4, vqs, callbacks, 
names,
                                          NULL, NULL);
         if (ret)
                 return ret;

         vb->inflate_vq = vqs[0];
         vb->deflate_vq = vqs[1];

         if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
                 vb->stats_vq = vqs[2];
                 /*
                  * Prime this virtqueue with one buffer so the 
hypervisor can
                  * use it to signal us later (it can't be broken yet!).
                  */
                 sg_init_one(&sg, vb->stats, sizeof(vb->stats));
                 ret = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
                                            GFP_KERNEL);
                 if (ret) {
                         dev_warn(&vb->vdev->dev, "%s: add stat_vq 
failed\n",
                                  __func__);
                         return ret;
                 }
                 virtqueue_kick(vb->stats_vq);
         }

         if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
                 vb->free_page_vq = vqs[3];

         return 0;
}


Btw, the QEMU side doesn't have an option to disable STATS_VQ currently; 
we may need to add that later.

Best,
Wei
Wang, Wei W Jan. 19, 2018, 6:24 a.m. UTC | #11
On 01/18/2018 12:44 AM, Michael S. Tsirkin wrote:
> On Wed, Jan 17, 2018 at 01:10:11PM +0800, Wei Wang wrote:
>>   
>> +static void virtballoon_changed(struct virtio_device *vdev)
>> +{
>> +	struct virtio_balloon *vb = vdev->priv;
>> +	unsigned long flags;
>> +	__u32 cmd_id;
>> +	s64 diff = towards_target(vb);
>> +
>> +	if (diff) {
>> +		spin_lock_irqsave(&vb->stop_update_lock, flags);
>> +		if (!vb->stop_update)
> Why do you ignore stop_update for freeze?
> This means new wq entries can be added during remove
> causing use after free issues.

I think stop_update isn't needed, because the lock has already been 
handled internally by the APIs. A similar example is 
mem_cgroup_css_free() in "mm/memcontrol.c", where there are no such locks used 
for cancel_work_sync(&memcg->high_work).

Best,
Wei
Michael S. Tsirkin Jan. 19, 2018, 12:39 p.m. UTC | #12
On Fri, Jan 19, 2018 at 11:44:21AM +0800, Wei Wang wrote:
> On 01/18/2018 12:44 AM, Michael S. Tsirkin wrote:
> > On Wed, Jan 17, 2018 at 01:10:11PM +0800, Wei Wang wrote:
> > 
> 
> > 
> > > +{
> > > +	struct scatterlist sg;
> > > +	unsigned int unused;
> > > +	int err;
> > > +
> > > +	sg_init_one(&sg, addr, sizeof(uint32_t));
> > This passes a guest-endian value to host. This is a problem:
> > should always pass LE values.
> 
> I think the endianness is handled when virtqueue_add_outbuf():
> 
> desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
> 
> right?


No - that handles the address, not the value you pass in.


> > 
> > > +
> > > +	/*
> > > +	 * This handles the cornercase that the vq happens to be full when
> > > +	 * adding a cmd id. Rarely happen in practice.
> > > +	 */
> > > +	while (!vq->num_free)
> > > +		virtqueue_get_buf(vq, &unused);
> > I dislike this busy-waiting. It's a hint after all -
> > why not just retry later - hopefully after getting an
> > interrupt?
> > 
> > Alternatively, stop adding more entries when we have a single
> > ring entry left, making sure we have space for the command.
> 
> I think the second one looks good. Thanks.
> 
> > > +			queue_work(system_freezable_wq,
> > > +				   &vb->update_balloon_size_work);
> > > +		spin_unlock_irqrestore(&vb->stop_update_lock, flags);
> > > +	}
> > > +
> > > +	virtio_cread(vb->vdev, struct virtio_balloon_config,
> > > +		     free_page_report_cmd_id, &cmd_id);
> > You want virtio_cread_feature, don't access the new field
> > if the feature has not been negotiated.
> 
> Right. We probably need to put all the following cmd id related things under
> the feature check,
> 
> How about
> 
> if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
>     virtio_cread(..);
>     if (cmd_id == VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID) {
>     ....
> }
> 
that's ok too.

> > 
> > 
> > > +	if (cmd_id == VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID) {
> > > +		WRITE_ONCE(vb->report_free_page, false);
> > > +	} else if (cmd_id != vb->start_cmd_id) {
> > > +		/*
> > > +		 * Host requests to start the reporting by sending a new cmd
> > > +		 * id.
> > > +		 */
> > > +		WRITE_ONCE(vb->report_free_page, true);
> > I don't know why we bother with WRITE_ONCE here.  The point of
> > report_free_page being used lockless is that that it's not a big deal if
> > it's wrong occasionally, right?
> 
> Actually the main reason is that "vb->report_free_page" is a value shared by
> two threads:
> Written by the config_change here, and read by the worker thread that
> reports the free pages.

Right but what's wrong if it's read or written twice and not once?

> Alternatively, we could let the two sides access to the shared variable with
> "volatile" pointers.
> 
> 
> > 
> > 
> > 
> > > +		vb->start_cmd_id = cmd_id;
> > > +		queue_work(vb->balloon_wq, &vb->report_free_page_work);
> > It seems that if a command was already queued (with a different id),
> > this will result in new command id being sent to host twice, which will
> > likely confuse the host.
> 
> I think that case won't happen, because
> - the host sends a cmd id to the guest via the config, while the guest acks
> back the received cmd id via the virtqueue;
> - the guest ack back a cmd id only when a new cmd id is received from the
> host, that is the above check:
> 
>     if (cmd_id != vb->start_cmd_id) { --> the driver only queues the
> reporting work only when a new cmd id is received
>                         /*
>                          * Host requests to start the reporting by sending a
>                          * new cmd id.
>                          */
>                         WRITE_ONCE(vb->report_free_page, true);
>                         vb->start_cmd_id = cmd_id;
>                         queue_work(vb->balloon_wq,
> &vb->report_free_page_work);
>     }
> 
> So the same cmd id wouldn't queue the reporting work twice.
> 

Like this:

		vb->start_cmd_id = cmd_id;
		queue_work(vb->balloon_wq, &vb->report_free_page_work);

command id changes

		vb->start_cmd_id = cmd_id;

work executes

		queue_work(vb->balloon_wq, &vb->report_free_page_work);

work executes again


> > 
> > 
> > 
> > > +	}
> > > +}
> > > +
> > >   static void update_balloon_size(struct virtio_balloon *vb)
> > >   {
> > >   	u32 actual = vb->num_pages;
> > > @@ -417,40 +513,113 @@ static void update_balloon_size_func(struct work_struct *work)
> > >   static int init_vqs(struct virtio_balloon *vb)
> > >   {
> > > -	struct virtqueue *vqs[3];
> > > -	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
> > > -	static const char * const names[] = { "inflate", "deflate", "stats" };
> > > -	int err, nvqs;
> > > +	struct virtqueue **vqs;
> > > +	vq_callback_t **callbacks;
> > > +	const char **names;
> > > +	struct scatterlist sg;
> > > +	int i, nvqs, err = -ENOMEM;
> > > +
> > > +	/* Inflateq and deflateq are used unconditionally */
> > > +	nvqs = 2;
> > > +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ))
> > > +		nvqs++;
> > > +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
> > > +		nvqs++;
> > > +
> > > +	/* Allocate space for find_vqs parameters */
> > > +	vqs = kcalloc(nvqs, sizeof(*vqs), GFP_KERNEL);
> > > +	if (!vqs)
> > > +		goto err_vq;
> > > +	callbacks = kmalloc_array(nvqs, sizeof(*callbacks), GFP_KERNEL);
> > > +	if (!callbacks)
> > > +		goto err_callback;
> > > +	names = kmalloc_array(nvqs, sizeof(*names), GFP_KERNEL);
> > > +	if (!names)
> > > +		goto err_names;
> > Why not just keep these 3 arrays on stack? they aren't large.
> 
> Sounds good. Here is the new implementation:
> 
> static int init_vqs(struct virtio_balloon *vb)
> {
>         struct virtqueue *vqs[4];
>         vq_callback_t *callbacks[4];
>         const char *names[4];
>         struct scatterlist sg;
>         int ret;
> 
> 
>         /*
>          * Inflateq and deflateq are used unconditionally. stats_vq and
>          * free_page_vq uses names[2] and names[3], respectively. The
> names[]
>          * will be NULL if the related feature is not enabled, which will
>          * cause no allocation for the corresponding virtqueue in find_vqs.
>          */
>         callbacks[0] = balloon_ack;
>         names[0] = "inflate";
>         callbacks[1] = balloon_ack;
>         names[1] = "deflate";
>         names[2] = NULL;
>         names[3] = NULL;
> 
>         if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
>                 names[2] = "stats";
>                 callbacks[2] = stats_request;
>         }
>         if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
>                 names[3] = "free_page_vq";
>                 callbacks[3] = NULL;
>         }
> 
>         ret = vb->vdev->config->find_vqs(vb->vdev, 4, vqs, callbacks, names,
>                                          NULL, NULL);
>         if (ret)
>                 return ret;
> 
>         vb->inflate_vq = vqs[0];
>         vb->deflate_vq = vqs[1];
> 
>         if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
>                 vb->stats_vq = vqs[2];
>                 /*
>                  * Prime this virtqueue with one buffer so the hypervisor
> can
>                  * use it to signal us later (it can't be broken yet!).
>                  */
>                 sg_init_one(&sg, vb->stats, sizeof(vb->stats));
>                 ret = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
>                                            GFP_KERNEL);
>                 if (ret) {
>                         dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
>                                  __func__);
>                         return ret;
>                 }
>                 virtqueue_kick(vb->stats_vq);
>         }
> 
>         if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
>                 vb->free_page_vq = vqs[3];
> 
>         return 0;
> }
> 
> 
> Btw, the QEMU side doesn't have an option to disable STATS_VQ currently, we
> may need to add that later.
> 
> Best,
> Wei

why not
Tetsuo Handa Jan. 20, 2018, 2:23 p.m. UTC | #13
Michael S. Tsirkin wrote:
> > > > >> +	 * the page if the vq is full. We are adding one entry each time,
> > > > >> +	 * which essentially results in no memory allocation, so the
> > > > >> +	 * GFP_KERNEL flag below can be ignored.
> > > > >> +	 */
> > > > >> +	if (vq->num_free) {
> > > > >> +		err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
> > > > > 
> > > > > Should we kick here? At least when ring is close to
> > > > > being full. Kick at half way full?
> > > > > Otherwise it's unlikely ring will
> > > > > ever be cleaned until we finish the scan.
> > > > 
> > > > Since this add_one_sg() is called between spin_lock_irqsave(&zone->lock, flags)
> > > > and spin_unlock_irqrestore(&zone->lock, flags), it is not permitted to sleep.
> > > 
> > > kick takes a while sometimes but it doesn't sleep.
> > 
> > I don't know about virtio. But the purpose of kicking here is to wait for pending data
> > to be flushed in order to increase vq->num_free, isn't it?
> 
> It isn't. It's to wake up device out of sleep to make it start
> processing the pending data. If device isn't asleep, it's a nop.

If vq->num_free == 0, we need to wait until vq->num_free > 0 if we want to allow
virtqueue_add_inbuf() to succeed. When will vq->num_free++ be called?

You said virtqueue_kick() is a no-op if the device is not asleep.
Then, there will be no guarantee that we can make vq->num_free > 0
by calling virtqueue_kick(). Are you saying that

	virtqueue_kick(vq);
	while (!vq->num_free)
		virtqueue_get_buf(vq, &unused);
	err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
	BUG_ON(err);

sequence from IRQ disabled atomic context is safe? If not, what is
the point of calling virtqueue_kick() when the ring is close to being
(half way) full? We can't guarantee that all data is sent to QEMU after all.



Also, why does the cmd id matter? If VIRTIO_BALLOON_F_FREE_PAGE_VQ does not
guarantee the atomicity, I don't see the point of communicating the cmd id
between the QEMU and the guest kernel. Just an EOF marker should be enough.
I do want to see changes for the QEMU side in order to review changes for
the guest kernel side.
Wang, Wei W Jan. 22, 2018, 11:25 a.m. UTC | #14
On 01/19/2018 08:39 PM, Michael S. Tsirkin wrote:
> On Fri, Jan 19, 2018 at 11:44:21AM +0800, Wei Wang wrote:
>> On 01/18/2018 12:44 AM, Michael S. Tsirkin wrote:
>>> On Wed, Jan 17, 2018 at 01:10:11PM +0800, Wei Wang wrote:
>>>
>>>> +		vb->start_cmd_id = cmd_id;
>>>> +		queue_work(vb->balloon_wq, &vb->report_free_page_work);
>>> It seems that if a command was already queued (with a different id),
>>> this will result in new command id being sent to host twice, which will
>>> likely confuse the host.
>> I think that case won't happen, because
>> - the host sends a cmd id to the guest via the config, while the guest acks
>> back the received cmd id via the virtqueue;
>> - the guest ack back a cmd id only when a new cmd id is received from the
>> host, that is the above check:
>>
>>      if (cmd_id != vb->start_cmd_id) { --> the driver only queues the
>> reporting work only when a new cmd id is received
>>                          /*
>>                           * Host requests to start the reporting by sending a
>>                           * new cmd id.
>>                           */
>>                          WRITE_ONCE(vb->report_free_page, true);
>>                          vb->start_cmd_id = cmd_id;
>>                          queue_work(vb->balloon_wq,
>> &vb->report_free_page_work);
>>      }
>>
>> So the same cmd id wouldn't queue the reporting work twice.
>>
> Like this:
>
> 		vb->start_cmd_id = cmd_id;
> 		queue_work(vb->balloon_wq, &vb->report_free_page_work);
>
> command id changes
>
> 		vb->start_cmd_id = cmd_id;
>
> work executes
>
> 		queue_work(vb->balloon_wq, &vb->report_free_page_work);
>
> work executes again
>

If we think about the whole working flow, I think this case couldn't happen:

1) device send cmd_id=1 to driver;
2) driver receives cmd_id=1 in the config and acks cmd_id=1 to the 
device via the vq;
3) device receives cmd_id=1;
4) device wants to stop the reporting by sending cmd_id=STOP;
5) driver receives cmd_id=STOP from the config, and acks cmd_id=STOP to 
the device via the vq;
6) device sends cmd_id=2 to driver;
...

cmd_id=2 won't come right after cmd_id=1; there will be a STOP cmd in between 
them (STOP won't queue the work).

How about defining the correct device behavior in the spec:
The device Should NOT send a second cmd id to the driver until a STOP 
cmd ack for the previous cmd id has been received from the guest.


Best,
Wei
Wang, Wei W Jan. 24, 2018, 3:18 a.m. UTC | #15
On 01/22/2018 07:25 PM, Wei Wang wrote:
> On 01/19/2018 08:39 PM, Michael S. Tsirkin wrote:
>> On Fri, Jan 19, 2018 at 11:44:21AM +0800, Wei Wang wrote:
>>> On 01/18/2018 12:44 AM, Michael S. Tsirkin wrote:
>>>> On Wed, Jan 17, 2018 at 01:10:11PM +0800, Wei Wang wrote:
>>>>
>>>>> +        vb->start_cmd_id = cmd_id;
>>>>> +        queue_work(vb->balloon_wq, &vb->report_free_page_work);
>>>> It seems that if a command was already queued (with a different id),
>>>> this will result in new command id being sent to host twice, which 
>>>> will
>>>> likely confuse the host.
>>> I think that case won't happen, because
>>> - the host sends a cmd id to the guest via the config, while the 
>>> guest acks
>>> back the received cmd id via the virtqueue;
>>> - the guest ack back a cmd id only when a new cmd id is received 
>>> from the
>>> host, that is the above check:
>>>
>>>      if (cmd_id != vb->start_cmd_id) { --> the driver only queues the
>>> reporting work only when a new cmd id is received
>>>                          /*
>>>                           * Host requests to start the reporting by 
>>> sending a
>>>                           * new cmd id.
>>>                           */
>>>                          WRITE_ONCE(vb->report_free_page, true);
>>>                          vb->start_cmd_id = cmd_id;
>>>                          queue_work(vb->balloon_wq,
>>> &vb->report_free_page_work);
>>>      }
>>>
>>> So the same cmd id wouldn't queue the reporting work twice.
>>>
>> Like this:
>>
>>         vb->start_cmd_id = cmd_id;
>>         queue_work(vb->balloon_wq, &vb->report_free_page_work);
>>
>> command id changes
>>
>>         vb->start_cmd_id = cmd_id;
>>
>> work executes
>>
>>         queue_work(vb->balloon_wq, &vb->report_free_page_work);
>>
>> work executes again
>>
>
> If we think about the whole working flow, I think this case couldn't 
> happen:
>
> 1) device send cmd_id=1 to driver;
> 2) driver receives cmd_id=1 in the config and acks cmd_id=1 to the 
> device via the vq;
> 3) device revives cmd_id=1;
> 4) device wants to stop the reporting by sending cmd_id=STOP;
> 5) driver receives cmd_id=STOP from the config, and acks cmd_id=STOP 
> to the device via the vq;
> 6) device sends cmd_id=2 to driver;
> ...
>
> cmd_id=2 won't come after cmd_id=1, there will be a STOP cmd in 
> between them (STOP won't queue the work).
>
> How about defining the correct device behavior in the spec:
> The device Should NOT send a second cmd id to the driver until a STOP 
> cmd ack for the previous cmd id has been received from the guest.


Thanks for the comments, and I adopted most of them in the newly posted 
v23 patches. The above discussion is the one that I haven't included. If 
you could still see issues in the above analysis, please let me know. 
Thanks.

Best,
Wei
Michael S. Tsirkin Jan. 24, 2018, 4:29 a.m. UTC | #16
On Mon, Jan 22, 2018 at 07:25:45PM +0800, Wei Wang wrote:
> On 01/19/2018 08:39 PM, Michael S. Tsirkin wrote:
> > On Fri, Jan 19, 2018 at 11:44:21AM +0800, Wei Wang wrote:
> > > On 01/18/2018 12:44 AM, Michael S. Tsirkin wrote:
> > > > On Wed, Jan 17, 2018 at 01:10:11PM +0800, Wei Wang wrote:
> > > > 
> > > > > +		vb->start_cmd_id = cmd_id;
> > > > > +		queue_work(vb->balloon_wq, &vb->report_free_page_work);
> > > > It seems that if a command was already queued (with a different id),
> > > > this will result in new command id being sent to host twice, which will
> > > > likely confuse the host.
> > > I think that case won't happen, because
> > > - the host sends a cmd id to the guest via the config, while the guest acks
> > > back the received cmd id via the virtqueue;
> > > - the guest ack back a cmd id only when a new cmd id is received from the
> > > host, that is the above check:
> > > 
> > >      if (cmd_id != vb->start_cmd_id) { --> the driver only queues the
> > > reporting work only when a new cmd id is received
> > >                          /*
> > >                           * Host requests to start the reporting by sending a
> > >                           * new cmd id.
> > >                           */
> > >                          WRITE_ONCE(vb->report_free_page, true);
> > >                          vb->start_cmd_id = cmd_id;
> > >                          queue_work(vb->balloon_wq,
> > > &vb->report_free_page_work);
> > >      }
> > > 
> > > So the same cmd id wouldn't queue the reporting work twice.
> > > 
> > Like this:
> > 
> > 		vb->start_cmd_id = cmd_id;
> > 		queue_work(vb->balloon_wq, &vb->report_free_page_work);
> > 
> > command id changes
> > 
> > 		vb->start_cmd_id = cmd_id;
> > 
> > work executes
> > 
> > 		queue_work(vb->balloon_wq, &vb->report_free_page_work);
> > 
> > work executes again
> > 
> 
> If we think about the whole working flow, I think this case couldn't happen:
> 
> 1) device send cmd_id=1 to driver;
> 2) driver receives cmd_id=1 in the config and acks cmd_id=1 to the device
> via the vq;
> 3) device revives cmd_id=1;
> 4) device wants to stop the reporting by sending cmd_id=STOP;
> 5) driver receives cmd_id=STOP from the config, and acks cmd_id=STOP to the
> device via the vq;
> 6) device sends cmd_id=2 to driver;
> ...
> 
> cmd_id=2 won't come after cmd_id=1, there will be a STOP cmd in between them
> (STOP won't queue the work).
> 
> How about defining the correct device behavior in the spec:
> The device Should NOT send a second cmd id to the driver until a STOP cmd
> ack for the previous cmd id has been received from the guest.
> 
> 
> Best,
> Wei

I think we should just fix races in the driver rather than introduce
random restrictions in the device.

If device wants to start a new sequence, it should be able to
do just that without a complicated back and forth with several
roundtrips through the driver.
Michael S. Tsirkin Jan. 24, 2018, 4:31 a.m. UTC | #17
On Wed, Jan 24, 2018 at 11:18:40AM +0800, Wei Wang wrote:
> On 01/22/2018 07:25 PM, Wei Wang wrote:
> > On 01/19/2018 08:39 PM, Michael S. Tsirkin wrote:
> > > On Fri, Jan 19, 2018 at 11:44:21AM +0800, Wei Wang wrote:
> > > > On 01/18/2018 12:44 AM, Michael S. Tsirkin wrote:
> > > > > On Wed, Jan 17, 2018 at 01:10:11PM +0800, Wei Wang wrote:
> > > > > 
> > > > > > +        vb->start_cmd_id = cmd_id;
> > > > > > +        queue_work(vb->balloon_wq, &vb->report_free_page_work);
> > > > > It seems that if a command was already queued (with a different id),
> > > > > this will result in new command id being sent to host twice,
> > > > > which will
> > > > > likely confuse the host.
> > > > I think that case won't happen, because
> > > > - the host sends a cmd id to the guest via the config, while the
> > > > guest acks
> > > > back the received cmd id via the virtqueue;
> > > > - the guest ack back a cmd id only when a new cmd id is received
> > > > from the
> > > > host, that is the above check:
> > > > 
> > > >      if (cmd_id != vb->start_cmd_id) { --> the driver only queues the
> > > > reporting work only when a new cmd id is received
> > > >                          /*
> > > >                           * Host requests to start the reporting
> > > > by sending a
> > > >                           * new cmd id.
> > > >                           */
> > > >                          WRITE_ONCE(vb->report_free_page, true);
> > > >                          vb->start_cmd_id = cmd_id;
> > > >                          queue_work(vb->balloon_wq,
> > > > &vb->report_free_page_work);
> > > >      }
> > > > 
> > > > So the same cmd id wouldn't queue the reporting work twice.
> > > > 
> > > Like this:
> > > 
> > >         vb->start_cmd_id = cmd_id;
> > >         queue_work(vb->balloon_wq, &vb->report_free_page_work);
> > > 
> > > command id changes
> > > 
> > >         vb->start_cmd_id = cmd_id;
> > > 
> > > work executes
> > > 
> > >         queue_work(vb->balloon_wq, &vb->report_free_page_work);
> > > 
> > > work executes again
> > > 
> > 
> > If we think about the whole working flow, I think this case couldn't
> > happen:
> > 
> > 1) device send cmd_id=1 to driver;
> > 2) driver receives cmd_id=1 in the config and acks cmd_id=1 to the
> > device via the vq;
> > 3) device revives cmd_id=1;
> > 4) device wants to stop the reporting by sending cmd_id=STOP;
> > 5) driver receives cmd_id=STOP from the config, and acks cmd_id=STOP to
> > the device via the vq;
> > 6) device sends cmd_id=2 to driver;
> > ...
> > 
> > cmd_id=2 won't come after cmd_id=1, there will be a STOP cmd in between
> > them (STOP won't queue the work).
> > 
> > How about defining the correct device behavior in the spec:
> > The device Should NOT send a second cmd id to the driver until a STOP
> > cmd ack for the previous cmd id has been received from the guest.
> 
> 
> Thanks for the comments, and I adopted most of them in the new posted v23
> patches. The above discussion is the one that I haven't included. If you
> could still see issues in the above analysis, please let me know. Thanks.
> 
> Best,
> Wei
> 
> 
>

Yes, I think you should just fix the race in the driver.
Wang, Wei W Jan. 24, 2018, 11:28 a.m. UTC | #18
On 01/24/2018 12:29 PM, Michael S. Tsirkin wrote:
> On Mon, Jan 22, 2018 at 07:25:45PM +0800, Wei Wang wrote:
>> On 01/19/2018 08:39 PM, Michael S. Tsirkin wrote:
>>> On Fri, Jan 19, 2018 at 11:44:21AM +0800, Wei Wang wrote:
>>>> On 01/18/2018 12:44 AM, Michael S. Tsirkin wrote:
>>>>> On Wed, Jan 17, 2018 at 01:10:11PM +0800, Wei Wang wrote:
>>>>>
>>>>>> +		vb->start_cmd_id = cmd_id;
>>>>>> +		queue_work(vb->balloon_wq, &vb->report_free_page_work);
>>>>> It seems that if a command was already queued (with a different id),
>>>>> this will result in new command id being sent to host twice, which will
>>>>> likely confuse the host.
>>>> I think that case won't happen, because
>>>> - the host sends a cmd id to the guest via the config, while the guest acks
>>>> back the received cmd id via the virtqueue;
>>>> - the guest ack back a cmd id only when a new cmd id is received from the
>>>> host, that is the above check:
>>>>
>>>>       if (cmd_id != vb->start_cmd_id) { --> the driver only queues the
>>>> reporting work only when a new cmd id is received
>>>>                           /*
>>>>                            * Host requests to start the reporting by sending a
>>>>                            * new cmd id.
>>>>                            */
>>>>                           WRITE_ONCE(vb->report_free_page, true);
>>>>                           vb->start_cmd_id = cmd_id;
>>>>                           queue_work(vb->balloon_wq,
>>>> &vb->report_free_page_work);
>>>>       }
>>>>
>>>> So the same cmd id wouldn't queue the reporting work twice.
>>>>
>>> Like this:
>>>
>>> 		vb->start_cmd_id = cmd_id;
>>> 		queue_work(vb->balloon_wq, &vb->report_free_page_work);
>>>
>>> command id changes
>>>
>>> 		vb->start_cmd_id = cmd_id;
>>>
>>> work executes
>>>
>>> 		queue_work(vb->balloon_wq, &vb->report_free_page_work);
>>>
>>> work executes again
>>>
>> If we think about the whole working flow, I think this case couldn't happen:
>>
>> 1) device send cmd_id=1 to driver;
>> 2) driver receives cmd_id=1 in the config and acks cmd_id=1 to the device
>> via the vq;
>> 3) device revives cmd_id=1;
>> 4) device wants to stop the reporting by sending cmd_id=STOP;
>> 5) driver receives cmd_id=STOP from the config, and acks cmd_id=STOP to the
>> device via the vq;
>> 6) device sends cmd_id=2 to driver;
>> ...
>>
>> cmd_id=2 won't come after cmd_id=1, there will be a STOP cmd in between them
>> (STOP won't queue the work).
>>
>> How about defining the correct device behavior in the spec:
>> The device Should NOT send a second cmd id to the driver until a STOP cmd
>> ack for the previous cmd id has been received from the guest.
>>
>>
>> Best,
>> Wei
> I think we should just fix races in the driver rather than introduce
> random restrictions in the device.
>
> If device wants to start a new sequence, it should be able to
> do just that without a complicated back and forth with several
> roundtrips through the driver.
>

OK, I've fixed it in the new version, v24. Please have a check there. 
Thanks.
(Other changes based on the comments on v23 have also been included)

Best,
Wei
diff mbox

Patch

diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index a1fb52c..b9561a5 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -53,7 +53,12 @@  static struct vfsmount *balloon_mnt;
 
 struct virtio_balloon {
 	struct virtio_device *vdev;
-	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
+	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
+
+	/* Balloon's own wq for cpu-intensive work items */
+	struct workqueue_struct *balloon_wq;
+	/* The free page reporting work item submitted to the balloon wq */
+	struct work_struct report_free_page_work;
 
 	/* The balloon servicing is delegated to a freezable workqueue. */
 	struct work_struct update_balloon_stats_work;
@@ -63,6 +68,13 @@  struct virtio_balloon {
 	spinlock_t stop_update_lock;
 	bool stop_update;
 
+	/* Start to report free pages */
+	bool report_free_page;
+	/* Stores the cmd id given by host to start the free page reporting */
+	uint32_t start_cmd_id;
+	/* Stores STOP_ID as a sign to tell host that the reporting is done */
+	uint32_t stop_cmd_id;
+
 	/* Waiting for host to ack the pages we released. */
 	wait_queue_head_t acked;
 
@@ -281,6 +293,71 @@  static unsigned int update_balloon_stats(struct virtio_balloon *vb)
 	return idx;
 }
 
+static void add_one_sg(struct virtqueue *vq, unsigned long pfn, uint32_t len)
+{
+	struct scatterlist sg;
+	unsigned int unused;
+	int err;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, pfn_to_page(pfn), len, 0);
+
+	/* Detach all the used buffers from the vq */
+	while (virtqueue_get_buf(vq, &unused))
+		;
+
+	/*
+	 * Since this is an optimization feature, losing a couple of free
+	 * pages to report isn't important. We simply resturn without adding
+	 * the page if the vq is full. We are adding one entry each time,
+	 * which essentially results in no memory allocation, so the
+	 * GFP_KERNEL flag below can be ignored.
+	 */
+	if (vq->num_free) {
+		err = virtqueue_add_inbuf(vq, &sg, 1, vq, GFP_KERNEL);
+		/*
+		 * This is expected to never fail, because there is always an
+		 * entry available on the vq.
+		 */
+		BUG_ON(err);
+	}
+}
+
+static void batch_free_page_sg(struct virtqueue *vq,
+			       unsigned long pfn,
+			       uint32_t len)
+{
+	add_one_sg(vq, pfn, len);
+
+	/* Batch till the vq is full */
+	if (!vq->num_free)
+		virtqueue_kick(vq);
+}
+
+static void send_cmd_id(struct virtqueue *vq, void *addr)
+{
+	struct scatterlist sg;
+	unsigned int unused;
+	int err;
+
+	sg_init_one(&sg, addr, sizeof(uint32_t));
+
+	/*
+	 * This handles the cornercase that the vq happens to be full when
+	 * adding a cmd id. Rarely happen in practice.
+	 */
+	while (!vq->num_free)
+		virtqueue_get_buf(vq, &unused);
+
+	err = virtqueue_add_outbuf(vq, &sg, 1, vq, GFP_KERNEL);
+	/*
+	 * This is expected to never fail, because there is always an
+	 * entry available on the vq.
+	 */
+	BUG_ON(err);
+	virtqueue_kick(vq);
+}
+
 /*
  * While most virtqueues communicate guest-initiated requests to the hypervisor,
  * the stats queue operates in reverse.  The driver initializes the virtqueue
@@ -316,17 +393,6 @@  static void stats_handle_request(struct virtio_balloon *vb)
 	virtqueue_kick(vq);
 }
 
-static void virtballoon_changed(struct virtio_device *vdev)
-{
-	struct virtio_balloon *vb = vdev->priv;
-	unsigned long flags;
-
-	spin_lock_irqsave(&vb->stop_update_lock, flags);
-	if (!vb->stop_update)
-		queue_work(system_freezable_wq, &vb->update_balloon_size_work);
-	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
-}
-
 static inline s64 towards_target(struct virtio_balloon *vb)
 {
 	s64 target;
@@ -343,6 +409,36 @@  static inline s64 towards_target(struct virtio_balloon *vb)
 	return target - vb->num_pages;
 }
 
+static void virtballoon_changed(struct virtio_device *vdev)
+{
+	struct virtio_balloon *vb = vdev->priv;
+	unsigned long flags;
+	__u32 cmd_id;
+	s64 diff = towards_target(vb);
+
+	if (diff) {
+		spin_lock_irqsave(&vb->stop_update_lock, flags);
+		if (!vb->stop_update)
+			queue_work(system_freezable_wq,
+				   &vb->update_balloon_size_work);
+		spin_unlock_irqrestore(&vb->stop_update_lock, flags);
+	}
+
+	virtio_cread(vb->vdev, struct virtio_balloon_config,
+		     free_page_report_cmd_id, &cmd_id);
+	if (cmd_id == VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID) {
+		WRITE_ONCE(vb->report_free_page, false);
+	} else if (cmd_id != vb->start_cmd_id) {
+		/*
+		 * Host requests to start the reporting by sending a new cmd
+		 * id.
+		 */
+		WRITE_ONCE(vb->report_free_page, true);
+		vb->start_cmd_id = cmd_id;
+		queue_work(vb->balloon_wq, &vb->report_free_page_work);
+	}
+}
+
 static void update_balloon_size(struct virtio_balloon *vb)
 {
 	u32 actual = vb->num_pages;
@@ -417,40 +513,113 @@  static void update_balloon_size_func(struct work_struct *work)
 
 static int init_vqs(struct virtio_balloon *vb)
 {
-	struct virtqueue *vqs[3];
-	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
-	static const char * const names[] = { "inflate", "deflate", "stats" };
-	int err, nvqs;
+	struct virtqueue **vqs;
+	vq_callback_t **callbacks;
+	const char **names;
+	struct scatterlist sg;
+	int i, nvqs, err = -ENOMEM;
+
+	/* Inflateq and deflateq are used unconditionally */
+	nvqs = 2;
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ))
+		nvqs++;
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
+		nvqs++;
+
+	/* Allocate space for find_vqs parameters */
+	vqs = kcalloc(nvqs, sizeof(*vqs), GFP_KERNEL);
+	if (!vqs)
+		goto err_vq;
+	callbacks = kmalloc_array(nvqs, sizeof(*callbacks), GFP_KERNEL);
+	if (!callbacks)
+		goto err_callback;
+	names = kmalloc_array(nvqs, sizeof(*names), GFP_KERNEL);
+	if (!names)
+		goto err_names;
+
+	callbacks[0] = balloon_ack;
+	names[0] = "inflate";
+	callbacks[1] = balloon_ack;
+	names[1] = "deflate";
+
+	i = 2;
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
+		callbacks[i] = stats_request;
+		names[i] = "stats";
+		i++;
+	}
 
-	/*
-	 * We expect two virtqueues: inflate and deflate, and
-	 * optionally stat.
-	 */
-	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
-	err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL);
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
+		callbacks[i] = NULL;
+		names[i] = "free_page_vq";
+	}
+
+	err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names,
+					 NULL, NULL);
 	if (err)
-		return err;
+		goto err_find;
 
 	vb->inflate_vq = vqs[0];
 	vb->deflate_vq = vqs[1];
+	i = 2;
 	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
-		struct scatterlist sg;
-		unsigned int num_stats;
-		vb->stats_vq = vqs[2];
-
+		vb->stats_vq = vqs[i++];
 		/*
 		 * Prime this virtqueue with one buffer so the hypervisor can
 		 * use it to signal us later (it can't be broken yet!).
 		 */
-		num_stats = update_balloon_stats(vb);
-
-		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
+		sg_init_one(&sg, vb->stats, sizeof(vb->stats));
 		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
-		    < 0)
-			BUG();
+		    < 0) {
+			dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
+				 __func__);
+			goto err_find;
+		}
 		virtqueue_kick(vb->stats_vq);
 	}
+
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ))
+		vb->free_page_vq = vqs[i];
+
+	kfree(names);
+	kfree(callbacks);
+	kfree(vqs);
 	return 0;
+
+err_find:
+	kfree(names);
+err_names:
+	kfree(callbacks);
+err_callback:
+	kfree(vqs);
+err_vq:
+	return err;
+}
+
+static bool virtio_balloon_send_free_pages(void *opaque, unsigned long pfn,
+					   unsigned long nr_pages)
+{
+	struct virtio_balloon *vb = (struct virtio_balloon *)opaque;
+	uint32_t len = nr_pages << PAGE_SHIFT;
+
+	if (!READ_ONCE(vb->report_free_page))
+		return false;
+
+	batch_free_page_sg(vb->free_page_vq, pfn, len);
+
+	return true;
+}
+
+static void report_free_page_func(struct work_struct *work)
+{
+	struct virtio_balloon *vb;
+
+	vb = container_of(work, struct virtio_balloon, report_free_page_work);
+	/* Start by sending the obtained cmd id to the host with an outbuf */
+	send_cmd_id(vb->free_page_vq, &vb->start_cmd_id);
+	walk_free_mem_block(vb, 0, &virtio_balloon_send_free_pages);
+	/* End by sending the stop id to the host with an outbuf */
+	send_cmd_id(vb->free_page_vq, &vb->stop_cmd_id);
 }
 
 #ifdef CONFIG_BALLOON_COMPACTION
@@ -566,6 +735,13 @@  static int virtballoon_probe(struct virtio_device *vdev)
 	if (err)
 		goto out_free_vb;
 
+	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_VQ)) {
+		vb->balloon_wq = alloc_workqueue("balloon-wq",
+					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
+		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
+		vb->stop_cmd_id = VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID;
+	}
+
 	vb->nb.notifier_call = virtballoon_oom_notify;
 	vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
 	err = register_oom_notifier(&vb->nb);
@@ -630,6 +806,7 @@  static void virtballoon_remove(struct virtio_device *vdev)
 	spin_unlock_irq(&vb->stop_update_lock);
 	cancel_work_sync(&vb->update_balloon_size_work);
 	cancel_work_sync(&vb->update_balloon_stats_work);
+	cancel_work_sync(&vb->report_free_page_work);
 
 	remove_common(vb);
 #ifdef CONFIG_BALLOON_COMPACTION
@@ -682,6 +859,7 @@  static unsigned int features[] = {
 	VIRTIO_BALLOON_F_MUST_TELL_HOST,
 	VIRTIO_BALLOON_F_STATS_VQ,
 	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
+	VIRTIO_BALLOON_F_FREE_PAGE_VQ,
 };
 
 static struct virtio_driver virtio_balloon_driver = {
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 343d7dd..55e2456 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -34,15 +34,19 @@ 
 #define VIRTIO_BALLOON_F_MUST_TELL_HOST	0 /* Tell before reclaiming pages */
 #define VIRTIO_BALLOON_F_STATS_VQ	1 /* Memory Stats virtqueue */
 #define VIRTIO_BALLOON_F_DEFLATE_ON_OOM	2 /* Deflate balloon on OOM */
+#define VIRTIO_BALLOON_F_FREE_PAGE_VQ	3 /* VQ to report free pages */
 
 /* Size of a PFN in the balloon interface. */
 #define VIRTIO_BALLOON_PFN_SHIFT 12
 
+#define VIRTIO_BALLOON_FREE_PAGE_REPORT_STOP_ID		0
 struct virtio_balloon_config {
 	/* Number of pages host wants Guest to give up. */
 	__u32 num_pages;
 	/* Number of pages we've actually got in balloon. */
 	__u32 actual;
+	/* Free page report command id, readonly by guest */
+	__u32 free_page_report_cmd_id;
 };
 
 #define VIRTIO_BALLOON_S_SWAP_IN  0   /* Amount of memory swapped in */