diff mbox series

[v9,14/27] gunyah: vm_mgr: Add ioctls to support basic non-proxy VM boot

Message ID 20230120224627.4053418-15-quic_eberman@quicinc.com (mailing list archive)
State Not Applicable
Headers show
Series Drivers for gunyah hypervisor | expand

Commit Message

Elliot Berman Jan. 20, 2023, 10:46 p.m. UTC
Add remaining ioctls to support non-proxy VM boot:

 - Gunyah Resource Manager uses the VM's devicetree to configure the
   virtual machine. The location of the devicetree in the guest's
   virtual memory can be declared via the SET_DTB_CONFIG ioctl.
 - Trigger start of the virtual machine with VM_START ioctl.
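A minimal userspace sketch of the intended usage (the /dev/gunyah node and
GH_CREATE_VM ioctl come from earlier patches in this series; the addresses
and sizes here are purely illustrative):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/gunyah.h>

static int launch_vm(void)
{
	int gunyah_fd, vm_fd;
	struct gh_vm_dtb_config dtb_config = {
		.gpa  = 0x81000000,	/* guest address where the DTB was loaded */
		.size = 0x20000,	/* page-aligned by the driver */
	};

	gunyah_fd = open("/dev/gunyah", O_RDWR);
	if (gunyah_fd < 0)
		return -1;

	vm_fd = ioctl(gunyah_fd, GH_CREATE_VM, 0);
	if (vm_fd < 0)
		return -1;

	/* ... GH_VM_SET_USER_MEM_REGION calls to lend/share guest memory ... */

	if (ioctl(vm_fd, GH_VM_SET_DTB_CONFIG, &dtb_config) < 0)
		return -1;

	return ioctl(vm_fd, GH_VM_START, 0);
}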

Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
---
 drivers/virt/gunyah/vm_mgr.c    | 110 ++++++++++++++++++++++++++++++++
 drivers/virt/gunyah/vm_mgr.h    |   9 +++
 drivers/virt/gunyah/vm_mgr_mm.c |  24 +++++++
 include/uapi/linux/gunyah.h     |   8 +++
 4 files changed, 151 insertions(+)

Comments

Srivatsa Vaddagiri Jan. 30, 2023, 8:53 a.m. UTC | #1
* Elliot Berman <quic_eberman@quicinc.com> [2023-01-20 14:46:13]:

> +static int gh_vm_start(struct gunyah_vm *ghvm)
> +{
> +	struct gunyah_vm_memory_mapping *mapping;
> +	u64 dtb_offset;
> +	u32 mem_handle;
> +	int ret;
> +
> +	down_write(&ghvm->status_lock);
> +	if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
> +		up_write(&ghvm->status_lock);
> +		return 0;

return -EINVAL in this case.

Additionally, check if it's already GH_RM_VM_STATUS_READY and return 0 in that
case.
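
In other words, something like this at the top of gh_vm_start() (a sketch
using the fields and enum values from this patch):

	down_write(&ghvm->status_lock);
	if (ghvm->vm_status == GH_RM_VM_STATUS_READY) {
		/* Already started: nothing to do. */
		up_write(&ghvm->status_lock);
		return 0;
	}
	if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
		/* e.g. a previous start attempt failed or the VM exited. */
		up_write(&ghvm->status_lock);
		return -EINVAL;
	}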

[snip]


> +	mem_handle = mapping->parcel.mem_handle;
> +	dtb_offset = ghvm->dtb_config.gpa - mapping->guest_phys_addr;
> +
> +	ret = gh_rm_vm_configure(ghvm->rm, ghvm->vmid, ghvm->auth, mem_handle,
> +				0, 0, dtb_offset, ghvm->dtb_config.size);

Default value of auth is 0 (GH_RM_VM_AUTH_NONE). Is that what you wanted here?
Perhaps initialize default value of auth to be GH_RM_VM_AUTH_QCOM_PIL_ELF?
Elliot Berman Jan. 30, 2023, 9:44 p.m. UTC | #2
On 1/30/2023 12:53 AM, Srivatsa Vaddagiri wrote:
> * Elliot Berman <quic_eberman@quicinc.com> [2023-01-20 14:46:13]:
> 
>> +static int gh_vm_start(struct gunyah_vm *ghvm)
>> +{
>> +	struct gunyah_vm_memory_mapping *mapping;
>> +	u64 dtb_offset;
>> +	u32 mem_handle;
>> +	int ret;
>> +
>> +	down_write(&ghvm->status_lock);
>> +	if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
>> +		up_write(&ghvm->status_lock);
>> +		return 0;
> 
> return -EINVAL in this case.
> 
> Additionally, check if it's already GH_RM_VM_STATUS_READY and return 0 in that
> case.
> 
> [snip]
> 

Caller can use gh_vm_ensure_started for this behavior. I'll move this to 
be used in the GH_VM_RUN ioctl as well.

> 
>> +	mem_handle = mapping->parcel.mem_handle;
>> +	dtb_offset = ghvm->dtb_config.gpa - mapping->guest_phys_addr;
>> +
>> +	ret = gh_rm_vm_configure(ghvm->rm, ghvm->vmid, ghvm->auth, mem_handle,
>> +				0, 0, dtb_offset, ghvm->dtb_config.size);
> 
> Default value of auth is 0 (GH_RM_VM_AUTH_NONE). Is that what you wanted here?
> Perhaps initialize default value of auth to be GH_RM_VM_AUTH_QCOM_PIL_ELF?
> 

Yes, the default VM auth is GH_RM_VM_AUTH_NONE.
Elliot Berman Jan. 30, 2023, 9:45 p.m. UTC | #3
On 1/30/2023 1:44 PM, Elliot Berman wrote:
> 
> 
> On 1/30/2023 12:53 AM, Srivatsa Vaddagiri wrote:
>> * Elliot Berman <quic_eberman@quicinc.com> [2023-01-20 14:46:13]:
>>
>>> +static int gh_vm_start(struct gunyah_vm *ghvm)
>>> +{
>>> +    struct gunyah_vm_memory_mapping *mapping;
>>> +    u64 dtb_offset;
>>> +    u32 mem_handle;
>>> +    int ret;
>>> +
>>> +    down_write(&ghvm->status_lock);
>>> +    if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
>>> +        up_write(&ghvm->status_lock);
>>> +        return 0;
>>
>> return -EINVAL in this case.
>>
>> Additionally, check if it's already GH_RM_VM_STATUS_READY and return 0
>> in that case.
>>
>> [snip]
>>
> 
> Caller can use gh_vm_ensure_started for this behavior. I'll move this to 
> be used in the GH_VM_RUN ioctl as well.
                 *GH_VM_START
> 
>>
>>> +    mem_handle = mapping->parcel.mem_handle;
>>> +    dtb_offset = ghvm->dtb_config.gpa - mapping->guest_phys_addr;
>>> +
>>> +    ret = gh_rm_vm_configure(ghvm->rm, ghvm->vmid, ghvm->auth, 
>>> mem_handle,
>>> +                0, 0, dtb_offset, ghvm->dtb_config.size);
>>
>> Default value of auth is 0 (GH_RM_VM_AUTH_NONE). Is that what you 
>> wanted here?
>> Perhaps initialize default value of auth to be 
>> GH_RM_VM_AUTH_QCOM_PIL_ELF?
>>
> 
> Yes, the default VM auth is GH_RM_VM_AUTH_NONE.
Srinivas Kandagatla Feb. 7, 2023, 11:36 a.m. UTC | #4
On 20/01/2023 22:46, Elliot Berman wrote:
> Add remaining ioctls to support non-proxy VM boot:
> 
>   - Gunyah Resource Manager uses the VM's devicetree to configure the
>     virtual machine. The location of the devicetree in the guest's
>     virtual memory can be declared via the SET_DTB_CONFIG ioctl.
>   - Trigger start of the virtual machine with VM_START ioctl.
> 
> Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
> Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
> Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
> ---
>   drivers/virt/gunyah/vm_mgr.c    | 110 ++++++++++++++++++++++++++++++++
>   drivers/virt/gunyah/vm_mgr.h    |   9 +++
>   drivers/virt/gunyah/vm_mgr_mm.c |  24 +++++++
>   include/uapi/linux/gunyah.h     |   8 +++
>   4 files changed, 151 insertions(+)
> 
> diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
> index b847fde63333..48bd3f06fb6c 100644
> --- a/drivers/virt/gunyah/vm_mgr.c
> +++ b/drivers/virt/gunyah/vm_mgr.c
> @@ -9,6 +9,7 @@
>   #include <linux/file.h>
>   #include <linux/gunyah_rsc_mgr.h>
>   #include <linux/miscdevice.h>
> +#include <linux/mm.h>
>   #include <linux/module.h>
>   
>   #include <uapi/linux/gunyah.h>
> @@ -37,10 +38,98 @@ static __must_check struct gunyah_vm *gunyah_vm_alloc(struct gh_rm_rpc *rm)
>   
>   	mutex_init(&ghvm->mm_lock);
>   	INIT_LIST_HEAD(&ghvm->memory_mappings);
> +	init_rwsem(&ghvm->status_lock);
>   

Using a read-write semaphore is really not going to make any difference in
this particular case. We have just one reader (gh_vm_ensure_started) and it
mostly makes a synchronous call to the writer (vm_start).

>   	return ghvm;
>   }
>   
> +static int gh_vm_start(struct gunyah_vm *ghvm)
> +{
> +	struct gunyah_vm_memory_mapping *mapping;
> +	u64 dtb_offset;
> +	u32 mem_handle;
> +	int ret;
> +
> +	down_write(&ghvm->status_lock);
> +	if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
> +		up_write(&ghvm->status_lock);
> +		return 0;
> +	}
> +
> +	list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
> +		switch (mapping->share_type) {
> +		case VM_MEM_LEND:
> +			ret = gh_rm_mem_lend(ghvm->rm, &mapping->parcel);
> +			break;
> +		case VM_MEM_SHARE:
> +			ret = gh_rm_mem_share(ghvm->rm, &mapping->parcel);
> +			break;
> +		}

> +		if (ret > 0)
> +			ret = -EINVAL;

Why are we converting the error codes? AFAIU both gh_rm_mem_lend and
gh_rm_mem_share return valid error codes.

> +		if (ret) {
> +			pr_warn("Failed to %s parcel %d: %d\n",
> +				mapping->share_type == VM_MEM_LEND ? "lend" : "share",
> +				mapping->parcel.label,
> +				ret);
> +			goto err;
> +		}
> +	}
> +
> +	mapping = gh_vm_mem_mapping_find_mapping(ghvm, ghvm->dtb_config.gpa, ghvm->dtb_config.size);
> +	if (!mapping) {
> +		pr_warn("Failed to find the memory_handle for DTB\n");
> +		ret = -EINVAL;
> +		goto err;
> +	}
> +
> +	mem_handle = mapping->parcel.mem_handle;
> +	dtb_offset = ghvm->dtb_config.gpa - mapping->guest_phys_addr;
> +
> +	ret = gh_rm_vm_configure(ghvm->rm, ghvm->vmid, ghvm->auth, mem_handle,
> +				0, 0, dtb_offset, ghvm->dtb_config.size);
> +	if (ret) {
> +		pr_warn("Failed to configure VM: %d\n", ret);
> +		goto err;
> +	}
> +
> +	ret = gh_rm_vm_init(ghvm->rm, ghvm->vmid);
> +	if (ret) {
> +		pr_warn("Failed to initialize VM: %d\n", ret);
> +		goto err;
> +	}
> +
> +	ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
> +	if (ret) {
> +		pr_warn("Failed to start VM: %d\n", ret);
> +		goto err;
> +	}
> +
> +	ghvm->vm_status = GH_RM_VM_STATUS_READY;
> +
> +	up_write(&ghvm->status_lock);
> +	return ret;
> +err:
> +	ghvm->vm_status = GH_RM_VM_STATUS_INIT_FAILED;
> +	up_write(&ghvm->status_lock);
> +	return ret;
> +}
> +
> +static void gh_vm_stop(struct gunyah_vm *ghvm)
> +{
> +	int ret;
> +
> +	down_write(&ghvm->status_lock);
> +	if (ghvm->vm_status == GH_RM_VM_STATUS_READY) {
> +		ret = gh_rm_vm_stop(ghvm->rm, ghvm->vmid);
> +		if (ret)
> +			pr_warn("Failed to stop VM: %d\n", ret);
> +	}
> +
> +	ghvm->vm_status = GH_RM_VM_STATUS_EXITED;
> +	up_write(&ghvm->status_lock);
> +}
> +
>   static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
>   {
>   	struct gunyah_vm *ghvm = filp->private_data;
> @@ -84,6 +173,25 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
>   		}
>   		break;
>   	}
> +	case GH_VM_SET_DTB_CONFIG: {
> +		struct gh_vm_dtb_config dtb_config;
> +
> +		r = -EFAULT;
> +		if (copy_from_user(&dtb_config, argp, sizeof(dtb_config)))
> +			break;
> +
Same feedback as on other patches about setting error codes.
> +		dtb_config.size = PAGE_ALIGN(dtb_config.size);
> +		ghvm->dtb_config = dtb_config;
> +
> +		r = 0;
> +		break;
> +	}
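
For reference, a sketch of how this case could look with the error code set
at the point of failure rather than pre-assigned (assuming that is the
feedback being referred to):

	case GH_VM_SET_DTB_CONFIG: {
		struct gh_vm_dtb_config dtb_config;

		if (copy_from_user(&dtb_config, argp, sizeof(dtb_config))) {
			r = -EFAULT;
			break;
		}

		dtb_config.size = PAGE_ALIGN(dtb_config.size);
		ghvm->dtb_config = dtb_config;

		r = 0;
		break;
	}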
> +	case GH_VM_START: {
> +		r = gh_vm_start(ghvm);
> +		if (r)
> +			r = -EINVAL;
> +		break;
> +	}
>   	default:
>   		r = -ENOTTY;
>   		break;
> @@ -97,6 +205,8 @@ static int gh_vm_release(struct inode *inode, struct file *filp)
>   	struct gunyah_vm *ghvm = filp->private_data;
>   	struct gunyah_vm_memory_mapping *mapping, *tmp;
>   
> +	gh_vm_stop(ghvm);
> +
>   	list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
>   		gh_vm_mem_mapping_reclaim(ghvm, mapping);
>   		kfree(mapping);
> diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
> index 6b38bf780f76..5c02fb305893 100644
> --- a/drivers/virt/gunyah/vm_mgr.h
> +++ b/drivers/virt/gunyah/vm_mgr.h
> @@ -10,6 +10,7 @@
>   #include <linux/list.h>
>   #include <linux/miscdevice.h>
>   #include <linux/mutex.h>
> +#include <linux/rwsem.h>
>   
>   #include <uapi/linux/gunyah.h>
>   
> @@ -34,6 +35,12 @@ struct gunyah_vm {
>   	u16 vmid;
>   	struct gh_rm *rm;
>   
> +	enum gh_rm_vm_auth_mechanism auth;
> +	struct gh_vm_dtb_config dtb_config;
> +
> +	enum gh_rm_vm_status vm_status;
> +	struct rw_semaphore status_lock;
> +
>   	struct mutex mm_lock;
>   	struct list_head memory_mappings;
>   };
> @@ -42,5 +49,7 @@ struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_alloc(struct gunyah_vm *ghvm,
>   							struct gh_userspace_memory_region *region);
>   void gh_vm_mem_mapping_reclaim(struct gunyah_vm *ghvm, struct gunyah_vm_memory_mapping *mapping);
>   struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find(struct gunyah_vm *ghvm, u32 label);
> +struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find_mapping(struct gunyah_vm *ghvm,
> +								u64 gpa, u32 size);
>   
>   #endif
> diff --git a/drivers/virt/gunyah/vm_mgr_mm.c b/drivers/virt/gunyah/vm_mgr_mm.c
> index f2dbdb4ee8ab..7fcb9f8a29bf 100644
> --- a/drivers/virt/gunyah/vm_mgr_mm.c
> +++ b/drivers/virt/gunyah/vm_mgr_mm.c
> @@ -53,6 +53,30 @@ void gh_vm_mem_mapping_reclaim(struct gunyah_vm *ghvm, struct gunyah_vm_memory_m
>   	mutex_unlock(&ghvm->mm_lock);
>   }
>   
> +struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find_mapping(struct gunyah_vm *ghvm,
> +								u64 gpa, u32 size)
> +{
> +	struct gunyah_vm_memory_mapping *mapping = NULL;
> +	int ret;
> +
> +	ret = mutex_lock_interruptible(&ghvm->mm_lock);
> +	if (ret)
> +		return ERR_PTR(ret);
> +
> +	list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
> +		if (gpa >= mapping->guest_phys_addr &&
> +			(gpa + size <= mapping->guest_phys_addr +
> +			(mapping->npages << PAGE_SHIFT))) {
> +			goto unlock;
> +		}
> +	}
> +
> +	mapping = NULL;
> +unlock:
> +	mutex_unlock(&ghvm->mm_lock);
> +	return mapping;
> +}
> +
>   struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find(struct gunyah_vm *ghvm, u32 label)
>   {
>   	struct gunyah_vm_memory_mapping *mapping;
> diff --git a/include/uapi/linux/gunyah.h b/include/uapi/linux/gunyah.h
> index 574f33b198d0..36359ad2175e 100644
> --- a/include/uapi/linux/gunyah.h
> +++ b/include/uapi/linux/gunyah.h
> @@ -42,4 +42,12 @@ struct gh_userspace_memory_region {
>   #define GH_VM_SET_USER_MEM_REGION	_IOW(GH_IOCTL_TYPE, 0x1, \
>   						struct gh_userspace_memory_region)
>   
> +struct gh_vm_dtb_config {
> +	__u64 gpa;

need kernel-doc, what is gpa?

> +	__u64 size;
> +};
> +#define GH_VM_SET_DTB_CONFIG	_IOW(GH_IOCTL_TYPE, 0x2, struct gh_vm_dtb_config)
> +
> +#define GH_VM_START		_IO(GH_IOCTL_TYPE, 0x3)
> +
>   #endif
Elliot Berman Feb. 8, 2023, 9:04 p.m. UTC | #5
On 2/7/2023 3:36 AM, Srinivas Kandagatla wrote:
> 
> 
> On 20/01/2023 22:46, Elliot Berman wrote:
>> Add remaining ioctls to support non-proxy VM boot:
>>
>>   - Gunyah Resource Manager uses the VM's devicetree to configure the
>>     virtual machine. The location of the devicetree in the guest's
>>     virtual memory can be declared via the SET_DTB_CONFIG ioctl.
>>   - Trigger start of the virtual machine with VM_START ioctl.
>>
>> Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
>> Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
>> Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
>> ---
>>   drivers/virt/gunyah/vm_mgr.c    | 110 ++++++++++++++++++++++++++++++++
>>   drivers/virt/gunyah/vm_mgr.h    |   9 +++
>>   drivers/virt/gunyah/vm_mgr_mm.c |  24 +++++++
>>   include/uapi/linux/gunyah.h     |   8 +++
>>   4 files changed, 151 insertions(+)
>>
>> diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
>> index b847fde63333..48bd3f06fb6c 100644
>> --- a/drivers/virt/gunyah/vm_mgr.c
>> +++ b/drivers/virt/gunyah/vm_mgr.c
>> @@ -9,6 +9,7 @@
>>   #include <linux/file.h>
>>   #include <linux/gunyah_rsc_mgr.h>
>>   #include <linux/miscdevice.h>
>> +#include <linux/mm.h>
>>   #include <linux/module.h>
>>   #include <uapi/linux/gunyah.h>
>> @@ -37,10 +38,98 @@ static __must_check struct gunyah_vm 
>> *gunyah_vm_alloc(struct gh_rm_rpc *rm)
>>       mutex_init(&ghvm->mm_lock);
>>       INIT_LIST_HEAD(&ghvm->memory_mappings);
>> +    init_rwsem(&ghvm->status_lock);
> 
> Using a read-write semaphore is really not going to make any difference in
> this particular case. We have just one reader (gh_vm_ensure_started) and it
> mostly makes a synchronous call to the writer (vm_start).
> 

When launching multiple vCPUs, the threads might be racing to ensure the
VM is started. The typical case is that the VM is already running, and we
would have bad performance if all the vCPUs needed to sequentially check
that the VM is indeed running before they're scheduled. An rwsem allows all
the threads to check whether the VM is running simultaneously, and only one
thread starts the VM if it wasn't running.
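
A sketch of how that might look (gh_vm_ensure_started() is only referenced
in this discussion and is not part of this patch, so its exact shape here is
an assumption):

/*
 * Sketch only: vCPU threads take status_lock for read to handle the common
 * "already running" case concurrently; only a thread that finds the VM
 * unstarted falls back to gh_vm_start(), which takes the lock for write.
 */
static int gh_vm_ensure_started(struct gunyah_vm *ghvm)
{
	int ret;

	ret = down_read_interruptible(&ghvm->status_lock);
	if (ret)
		return ret;

	if (ghvm->vm_status == GH_RM_VM_STATUS_NO_STATE) {
		up_read(&ghvm->status_lock);
		/* gh_vm_start() re-checks the status under the write lock. */
		ret = gh_vm_start(ghvm);
		if (ret)
			return ret;
		ret = down_read_interruptible(&ghvm->status_lock);
		if (ret)
			return ret;
	}

	ret = ghvm->vm_status == GH_RM_VM_STATUS_READY ? 0 : -EINVAL;
	up_read(&ghvm->status_lock);
	return ret;
}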

>>       return ghvm;
>>   }
>> +static int gh_vm_start(struct gunyah_vm *ghvm)
>> +{
>> +    struct gunyah_vm_memory_mapping *mapping;
>> +    u64 dtb_offset;
>> +    u32 mem_handle;
>> +    int ret;
>> +
>> +    down_write(&ghvm->status_lock);
>> +    if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
>> +        up_write(&ghvm->status_lock);
>> +        return 0;
>> +    }
>> +
>> +    list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
>> +        switch (mapping->share_type) {
>> +        case VM_MEM_LEND:
>> +            ret = gh_rm_mem_lend(ghvm->rm, &mapping->parcel);
>> +            break;
>> +        case VM_MEM_SHARE:
>> +            ret = gh_rm_mem_share(ghvm->rm, &mapping->parcel);
>> +            break;
>> +        }
> 
>> +        if (ret > 0)
>> +            ret = -EINVAL;
> 
> Why are we converting the error codes? AFAIU both gh_rm_mem_lend and
> gh_rm_mem_share return valid error codes.
> 

Removed.

>> +        if (ret) {
>> +            pr_warn("Failed to %s parcel %d: %d\n",
>> +                mapping->share_type == VM_MEM_LEND ? "lend" : "share",
>> +                mapping->parcel.label,
>> +                ret);
>> +            goto err;
>> +        }
>> +    }
>> +
>> +    mapping = gh_vm_mem_mapping_find_mapping(ghvm, 
>> ghvm->dtb_config.gpa, ghvm->dtb_config.size);
>> +    if (!mapping) {
>> +        pr_warn("Failed to find the memory_handle for DTB\n");
>> +        ret = -EINVAL;
>> +        goto err;
>> +    }
>> +
>> +    mem_handle = mapping->parcel.mem_handle;
>> +    dtb_offset = ghvm->dtb_config.gpa - mapping->guest_phys_addr;
>> +
>> +    ret = gh_rm_vm_configure(ghvm->rm, ghvm->vmid, ghvm->auth, 
>> mem_handle,
>> +                0, 0, dtb_offset, ghvm->dtb_config.size);
>> +    if (ret) {
>> +        pr_warn("Failed to configure VM: %d\n", ret);
>> +        goto err;
>> +    }
>> +
>> +    ret = gh_rm_vm_init(ghvm->rm, ghvm->vmid);
>> +    if (ret) {
>> +        pr_warn("Failed to initialize VM: %d\n", ret);
>> +        goto err;
>> +    }
>> +
>> +    ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
>> +    if (ret) {
>> +        pr_warn("Failed to start VM: %d\n", ret);
>> +        goto err;
>> +    }
>> +
>> +    ghvm->vm_status = GH_RM_VM_STATUS_READY;
>> +
>> +    up_write(&ghvm->status_lock);
>> +    return ret;
>> +err:
>> +    ghvm->vm_status = GH_RM_VM_STATUS_INIT_FAILED;
>> +    up_write(&ghvm->status_lock);
>> +    return ret;
>> +}
>> +
>> +static void gh_vm_stop(struct gunyah_vm *ghvm)
>> +{
>> +    int ret;
>> +
>> +    down_write(&ghvm->status_lock);
>> +    if (ghvm->vm_status == GH_RM_VM_STATUS_READY) {
>> +        ret = gh_rm_vm_stop(ghvm->rm, ghvm->vmid);
>> +        if (ret)
>> +            pr_warn("Failed to stop VM: %d\n", ret);
>> +    }
>> +
>> +    ghvm->vm_status = GH_RM_VM_STATUS_EXITED;
>> +    up_write(&ghvm->status_lock);
>> +}
>> +
>>   static long gh_vm_ioctl(struct file *filp, unsigned int cmd, 
>> unsigned long arg)
>>   {
>>       struct gunyah_vm *ghvm = filp->private_data;
>> @@ -84,6 +173,25 @@ static long gh_vm_ioctl(struct file *filp, 
>> unsigned int cmd, unsigned long arg)
>>           }
>>           break;
>>       }
>> +    case GH_VM_SET_DTB_CONFIG: {
>> +        struct gh_vm_dtb_config dtb_config;
>> +
>> +        r = -EFAULT;
>> +        if (copy_from_user(&dtb_config, argp, sizeof(dtb_config)))
>> +            break;
>> +
> Same feedback as on other patches about setting error codes.
>> +        dtb_config.size = PAGE_ALIGN(dtb_config.size);
>> +        ghvm->dtb_config = dtb_config;
>> +
>> +        r = 0;
>> +        break;
>> +    }
>> +    case GH_VM_START: {
>> +        r = gh_vm_start(ghvm);
>> +        if (r)
>> +            r = -EINVAL;
>> +        break;
>> +    }
>>       default:
>>           r = -ENOTTY;
>>           break;
>> @@ -97,6 +205,8 @@ static int gh_vm_release(struct inode *inode, 
>> struct file *filp)
>>       struct gunyah_vm *ghvm = filp->private_data;
>>       struct gunyah_vm_memory_mapping *mapping, *tmp;
>> +    gh_vm_stop(ghvm);
>> +
>>       list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, 
>> list) {
>>           gh_vm_mem_mapping_reclaim(ghvm, mapping);
>>           kfree(mapping);
>> diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
>> index 6b38bf780f76..5c02fb305893 100644
>> --- a/drivers/virt/gunyah/vm_mgr.h
>> +++ b/drivers/virt/gunyah/vm_mgr.h
>> @@ -10,6 +10,7 @@
>>   #include <linux/list.h>
>>   #include <linux/miscdevice.h>
>>   #include <linux/mutex.h>
>> +#include <linux/rwsem.h>
>>   #include <uapi/linux/gunyah.h>
>> @@ -34,6 +35,12 @@ struct gunyah_vm {
>>       u16 vmid;
>>       struct gh_rm *rm;
>> +    enum gh_rm_vm_auth_mechanism auth;
>> +    struct gh_vm_dtb_config dtb_config;
>> +
>> +    enum gh_rm_vm_status vm_status;
>> +    struct rw_semaphore status_lock;
>> +
>>       struct mutex mm_lock;
>>       struct list_head memory_mappings;
>>   };
>> @@ -42,5 +49,7 @@ struct gunyah_vm_memory_mapping 
>> *gh_vm_mem_mapping_alloc(struct gunyah_vm *ghvm,
>>                               struct gh_userspace_memory_region *region);
>>   void gh_vm_mem_mapping_reclaim(struct gunyah_vm *ghvm, struct 
>> gunyah_vm_memory_mapping *mapping);
>>   struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find(struct 
>> gunyah_vm *ghvm, u32 label);
>> +struct gunyah_vm_memory_mapping 
>> *gh_vm_mem_mapping_find_mapping(struct gunyah_vm *ghvm,
>> +                                u64 gpa, u32 size);
>>   #endif
>> diff --git a/drivers/virt/gunyah/vm_mgr_mm.c 
>> b/drivers/virt/gunyah/vm_mgr_mm.c
>> index f2dbdb4ee8ab..7fcb9f8a29bf 100644
>> --- a/drivers/virt/gunyah/vm_mgr_mm.c
>> +++ b/drivers/virt/gunyah/vm_mgr_mm.c
>> @@ -53,6 +53,30 @@ void gh_vm_mem_mapping_reclaim(struct gunyah_vm 
>> *ghvm, struct gunyah_vm_memory_m
>>       mutex_unlock(&ghvm->mm_lock);
>>   }
>> +struct gunyah_vm_memory_mapping 
>> *gh_vm_mem_mapping_find_mapping(struct gunyah_vm *ghvm,
>> +                                u64 gpa, u32 size)
>> +{
>> +    struct gunyah_vm_memory_mapping *mapping = NULL;
>> +    int ret;
>> +
>> +    ret = mutex_lock_interruptible(&ghvm->mm_lock);
>> +    if (ret)
>> +        return ERR_PTR(ret);
>> +
>> +    list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
>> +        if (gpa >= mapping->guest_phys_addr &&
>> +            (gpa + size <= mapping->guest_phys_addr +
>> +            (mapping->npages << PAGE_SHIFT))) {
>> +            goto unlock;
>> +        }
>> +    }
>> +
>> +    mapping = NULL;
>> +unlock:
>> +    mutex_unlock(&ghvm->mm_lock);
>> +    return mapping;
>> +}
>> +
>>   struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find(struct 
>> gunyah_vm *ghvm, u32 label)
>>   {
>>       struct gunyah_vm_memory_mapping *mapping;
>> diff --git a/include/uapi/linux/gunyah.h b/include/uapi/linux/gunyah.h
>> index 574f33b198d0..36359ad2175e 100644
>> --- a/include/uapi/linux/gunyah.h
>> +++ b/include/uapi/linux/gunyah.h
>> @@ -42,4 +42,12 @@ struct gh_userspace_memory_region {
>>   #define GH_VM_SET_USER_MEM_REGION    _IOW(GH_IOCTL_TYPE, 0x1, \
>>                           struct gh_userspace_memory_region)
>> +struct gh_vm_dtb_config {
>> +    __u64 gpa;
> 
> need kernel-doc, what is gpa?
> 

Added. It's the address of the VM's devicetree in guest memory.
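
Something along these lines, as a sketch (the exact wording added in the
next revision may differ):

/**
 * struct gh_vm_dtb_config - Set the location of the VM's devicetree blob
 * @gpa: Guest physical address where the devicetree has been loaded
 * @size: Size of the devicetree in guest memory (page-aligned by the driver)
 */
struct gh_vm_dtb_config {
	__u64 gpa;
	__u64 size;
};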

>> +    __u64 size;
>> +};
>> +#define GH_VM_SET_DTB_CONFIG    _IOW(GH_IOCTL_TYPE, 0x2, struct 
>> gh_vm_dtb_config)
>> +
>> +#define GH_VM_START        _IO(GH_IOCTL_TYPE, 0x3)
>> +
>>   #endif
diff mbox series

Patch

diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c
index b847fde63333..48bd3f06fb6c 100644
--- a/drivers/virt/gunyah/vm_mgr.c
+++ b/drivers/virt/gunyah/vm_mgr.c
@@ -9,6 +9,7 @@ 
 #include <linux/file.h>
 #include <linux/gunyah_rsc_mgr.h>
 #include <linux/miscdevice.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 
 #include <uapi/linux/gunyah.h>
@@ -37,10 +38,98 @@  static __must_check struct gunyah_vm *gunyah_vm_alloc(struct gh_rm_rpc *rm)
 
 	mutex_init(&ghvm->mm_lock);
 	INIT_LIST_HEAD(&ghvm->memory_mappings);
+	init_rwsem(&ghvm->status_lock);
 
 	return ghvm;
 }
 
+static int gh_vm_start(struct gunyah_vm *ghvm)
+{
+	struct gunyah_vm_memory_mapping *mapping;
+	u64 dtb_offset;
+	u32 mem_handle;
+	int ret;
+
+	down_write(&ghvm->status_lock);
+	if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
+		up_write(&ghvm->status_lock);
+		return 0;
+	}
+
+	list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
+		switch (mapping->share_type) {
+		case VM_MEM_LEND:
+			ret = gh_rm_mem_lend(ghvm->rm, &mapping->parcel);
+			break;
+		case VM_MEM_SHARE:
+			ret = gh_rm_mem_share(ghvm->rm, &mapping->parcel);
+			break;
+		}
+		if (ret > 0)
+			ret = -EINVAL;
+		if (ret) {
+			pr_warn("Failed to %s parcel %d: %d\n",
+				mapping->share_type == VM_MEM_LEND ? "lend" : "share",
+				mapping->parcel.label,
+				ret);
+			goto err;
+		}
+	}
+
+	mapping = gh_vm_mem_mapping_find_mapping(ghvm, ghvm->dtb_config.gpa, ghvm->dtb_config.size);
+	if (!mapping) {
+		pr_warn("Failed to find the memory_handle for DTB\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mem_handle = mapping->parcel.mem_handle;
+	dtb_offset = ghvm->dtb_config.gpa - mapping->guest_phys_addr;
+
+	ret = gh_rm_vm_configure(ghvm->rm, ghvm->vmid, ghvm->auth, mem_handle,
+				0, 0, dtb_offset, ghvm->dtb_config.size);
+	if (ret) {
+		pr_warn("Failed to configure VM: %d\n", ret);
+		goto err;
+	}
+
+	ret = gh_rm_vm_init(ghvm->rm, ghvm->vmid);
+	if (ret) {
+		pr_warn("Failed to initialize VM: %d\n", ret);
+		goto err;
+	}
+
+	ret = gh_rm_vm_start(ghvm->rm, ghvm->vmid);
+	if (ret) {
+		pr_warn("Failed to start VM: %d\n", ret);
+		goto err;
+	}
+
+	ghvm->vm_status = GH_RM_VM_STATUS_READY;
+
+	up_write(&ghvm->status_lock);
+	return ret;
+err:
+	ghvm->vm_status = GH_RM_VM_STATUS_INIT_FAILED;
+	up_write(&ghvm->status_lock);
+	return ret;
+}
+
+static void gh_vm_stop(struct gunyah_vm *ghvm)
+{
+	int ret;
+
+	down_write(&ghvm->status_lock);
+	if (ghvm->vm_status == GH_RM_VM_STATUS_READY) {
+		ret = gh_rm_vm_stop(ghvm->rm, ghvm->vmid);
+		if (ret)
+			pr_warn("Failed to stop VM: %d\n", ret);
+	}
+
+	ghvm->vm_status = GH_RM_VM_STATUS_EXITED;
+	up_write(&ghvm->status_lock);
+}
+
 static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	struct gunyah_vm *ghvm = filp->private_data;
@@ -84,6 +173,25 @@  static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	}
+	case GH_VM_SET_DTB_CONFIG: {
+		struct gh_vm_dtb_config dtb_config;
+
+		r = -EFAULT;
+		if (copy_from_user(&dtb_config, argp, sizeof(dtb_config)))
+			break;
+
+		dtb_config.size = PAGE_ALIGN(dtb_config.size);
+		ghvm->dtb_config = dtb_config;
+
+		r = 0;
+		break;
+	}
+	case GH_VM_START: {
+		r = gh_vm_start(ghvm);
+		if (r)
+			r = -EINVAL;
+		break;
+	}
 	default:
 		r = -ENOTTY;
 		break;
@@ -97,6 +205,8 @@  static int gh_vm_release(struct inode *inode, struct file *filp)
 	struct gunyah_vm *ghvm = filp->private_data;
 	struct gunyah_vm_memory_mapping *mapping, *tmp;
 
+	gh_vm_stop(ghvm);
+
 	list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
 		gh_vm_mem_mapping_reclaim(ghvm, mapping);
 		kfree(mapping);
diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h
index 6b38bf780f76..5c02fb305893 100644
--- a/drivers/virt/gunyah/vm_mgr.h
+++ b/drivers/virt/gunyah/vm_mgr.h
@@ -10,6 +10,7 @@ 
 #include <linux/list.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 
 #include <uapi/linux/gunyah.h>
 
@@ -34,6 +35,12 @@  struct gunyah_vm {
 	u16 vmid;
 	struct gh_rm *rm;
 
+	enum gh_rm_vm_auth_mechanism auth;
+	struct gh_vm_dtb_config dtb_config;
+
+	enum gh_rm_vm_status vm_status;
+	struct rw_semaphore status_lock;
+
 	struct mutex mm_lock;
 	struct list_head memory_mappings;
 };
@@ -42,5 +49,7 @@  struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_alloc(struct gunyah_vm *ghvm,
 							struct gh_userspace_memory_region *region);
 void gh_vm_mem_mapping_reclaim(struct gunyah_vm *ghvm, struct gunyah_vm_memory_mapping *mapping);
 struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find(struct gunyah_vm *ghvm, u32 label);
+struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find_mapping(struct gunyah_vm *ghvm,
+								u64 gpa, u32 size);
 
 #endif
diff --git a/drivers/virt/gunyah/vm_mgr_mm.c b/drivers/virt/gunyah/vm_mgr_mm.c
index f2dbdb4ee8ab..7fcb9f8a29bf 100644
--- a/drivers/virt/gunyah/vm_mgr_mm.c
+++ b/drivers/virt/gunyah/vm_mgr_mm.c
@@ -53,6 +53,30 @@  void gh_vm_mem_mapping_reclaim(struct gunyah_vm *ghvm, struct gunyah_vm_memory_m
 	mutex_unlock(&ghvm->mm_lock);
 }
 
+struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find_mapping(struct gunyah_vm *ghvm,
+								u64 gpa, u32 size)
+{
+	struct gunyah_vm_memory_mapping *mapping = NULL;
+	int ret;
+
+	ret = mutex_lock_interruptible(&ghvm->mm_lock);
+	if (ret)
+		return ERR_PTR(ret);
+
+	list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
+		if (gpa >= mapping->guest_phys_addr &&
+			(gpa + size <= mapping->guest_phys_addr +
+			(mapping->npages << PAGE_SHIFT))) {
+			goto unlock;
+		}
+	}
+
+	mapping = NULL;
+unlock:
+	mutex_unlock(&ghvm->mm_lock);
+	return mapping;
+}
+
 struct gunyah_vm_memory_mapping *gh_vm_mem_mapping_find(struct gunyah_vm *ghvm, u32 label)
 {
 	struct gunyah_vm_memory_mapping *mapping;
diff --git a/include/uapi/linux/gunyah.h b/include/uapi/linux/gunyah.h
index 574f33b198d0..36359ad2175e 100644
--- a/include/uapi/linux/gunyah.h
+++ b/include/uapi/linux/gunyah.h
@@ -42,4 +42,12 @@  struct gh_userspace_memory_region {
 #define GH_VM_SET_USER_MEM_REGION	_IOW(GH_IOCTL_TYPE, 0x1, \
 						struct gh_userspace_memory_region)
 
+struct gh_vm_dtb_config {
+	__u64 gpa;
+	__u64 size;
+};
+#define GH_VM_SET_DTB_CONFIG	_IOW(GH_IOCTL_TYPE, 0x2, struct gh_vm_dtb_config)
+
+#define GH_VM_START		_IO(GH_IOCTL_TYPE, 0x3)
+
 #endif