
[4/7] KVM: selftests: Add helpers to consolidate open coded list operations

Message ID 20200320205546.2396-5-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: Fix memslot use-after-free bug

Commit Message

Sean Christopherson March 20, 2020, 8:55 p.m. UTC
Add helpers for the KVM selftests' variant of a linked list to replace a
variety of open-coded adds, deletes, and iterators.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 tools/testing/selftests/kvm/lib/kvm_util.c | 68 ++++++++++++----------
 1 file changed, 37 insertions(+), 31 deletions(-)
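
As a quick illustration (not part of the patch), the new macros work on
any struct that has its own "next"/"prev" pointers plus an external head
pointer; "struct node" below is a hypothetical stand-in for "struct vcpu"
or "struct userspace_mem_region":

#include <stdio.h>
#include <stdlib.h>

/* Same definitions as in the patch. */
#define kvm_list_add(head, new)		\
do {					\
	if (head)			\
		head->prev = new;	\
	new->next = head;		\
	head = new;			\
} while (0)

#define kvm_list_del(head, del)			\
do {						\
	if (del->next)				\
		del->next->prev = del->prev;	\
	if (del->prev)				\
		del->prev->next = del->next;	\
	else					\
		head = del->next;		\
} while (0)

#define kvm_list_for_each(head, iter)		\
	for (iter = head; iter; iter = iter->next)

/* Hypothetical stand-in for struct vcpu / struct userspace_mem_region. */
struct node {
	int id;
	struct node *next, *prev;
};

int main(void)
{
	struct node *head = NULL, *iter;
	struct node *a = calloc(1, sizeof(*a));
	struct node *b = calloc(1, sizeof(*b));

	a->id = 0;
	b->id = 1;

	/*
	 * Push onto the head.  Note that kvm_list_add() relies on the new
	 * node's prev pointer already being NULL, which holds here (and in
	 * kvm_util.c) because nodes are allocated with calloc().
	 */
	kvm_list_add(head, a);			/* list: a */
	kvm_list_add(head, b);			/* list: b -> a */

	kvm_list_for_each(head, iter)
		printf("node %d\n", iter->id);	/* prints 1, then 0 */

	kvm_list_del(head, b);			/* unlinks the head; list: a */
	free(b);
	free(a);
	return 0;
}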

Comments

Peter Xu March 20, 2020, 10:47 p.m. UTC | #1
On Fri, Mar 20, 2020 at 01:55:43PM -0700, Sean Christopherson wrote:
> Add helpers for the KVM selftests' variant of a linked list to replace a
> variety of open-coded adds, deletes, and iterators.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  tools/testing/selftests/kvm/lib/kvm_util.c | 68 ++++++++++++----------
>  1 file changed, 37 insertions(+), 31 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
> index 9a783c20dd26..d7b74f465570 100644
> --- a/tools/testing/selftests/kvm/lib/kvm_util.c
> +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
> @@ -19,6 +19,27 @@
>  #define KVM_UTIL_PGS_PER_HUGEPG 512
>  #define KVM_UTIL_MIN_PFN	2
>  
> +#define kvm_list_add(head, new)		\
> +do {					\
> +	if (head)			\
> +		head->prev = new;	\
> +	new->next = head;		\
> +	head = new;			\
> +} while (0)
> +
> +#define kvm_list_del(head, del)			\
> +do {						\
> +	if (del->next)				\
> +		del->next->prev = del->prev;	\
> +	if (del->prev)				\
> +		del->prev->next = del->next;	\
> +	else					\
> +		head = del->next;		\
> +} while (0)
> +
> +#define kvm_list_for_each(head, iter)		\
> +	for (iter = head; iter; iter = iter->next)
> +

I'm not sure whether we should switch to a common list, e.g.,
tools/include/linux/list.h, since we're reworking these after all...
Even if the local helpers are preferred, maybe move them to a header so
the kvm selftests can use them outside this file, and for structs other
than "vcpu", in the future?

Thanks,
Paolo Bonzini March 24, 2020, 11:28 a.m. UTC | #2
On 20/03/20 23:47, Peter Xu wrote:
> I'm not sure whether we should switch to a common list, e.g.,
> tools/include/linux/list.h, since we're reworking these after all...
> Even if the local helpers are preferred, maybe move them to a header so
> the kvm selftests can use them outside this file, and for structs other
> than "vcpu", in the future?

Yes, we should.

Paolo
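
For reference, a rough sketch of what the suggested rework could look like
with tools/include/linux/list.h, using the vcpu list as an example. This is
hypothetical and not part of the series: it assumes "struct vcpu" gains an
embedded "struct list_head" and that "struct kvm_vm" replaces "vcpu_head"
with a list head initialized via INIT_LIST_HEAD() at VM creation.

#include <stdint.h>
#include <linux/list.h>

struct vcpu {
	uint32_t id;
	struct list_head list;		/* replaces the open-coded next/prev */
};

struct kvm_vm {
	struct list_head vcpus;		/* replaces vcpu_head */
};

/* Adding, as in vm_vcpu_add(): */
static void vm_vcpu_add_sketch(struct kvm_vm *vm, struct vcpu *vcpu)
{
	list_add(&vcpu->list, &vm->vcpus);
}

/*
 * Removing, as in vm_vcpu_rm().  No head-pointer special case: the head
 * is a sentinel embedded in struct kvm_vm, not a pointer to the first
 * entry, so list_del() never needs to update it.
 */
static void vm_vcpu_rm_sketch(struct kvm_vm *vm, struct vcpu *vcpu)
{
	list_del(&vcpu->list);
}

/* Iterating, as in vcpu_find(): */
static struct vcpu *vcpu_find_sketch(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpup;

	list_for_each_entry(vcpup, &vm->vcpus, list) {
		if (vcpup->id == vcpuid)
			return vcpup;
	}
	return NULL;
}

The conversion touches every struct definition and call site, but it drops
the local macros and the head-pointer special case in deletion entirely.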

Patch

diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 9a783c20dd26..d7b74f465570 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -19,6 +19,27 @@ 
 #define KVM_UTIL_PGS_PER_HUGEPG 512
 #define KVM_UTIL_MIN_PFN	2
 
+#define kvm_list_add(head, new)		\
+do {					\
+	if (head)			\
+		head->prev = new;	\
+	new->next = head;		\
+	head = new;			\
+} while (0)
+
+#define kvm_list_del(head, del)			\
+do {						\
+	if (del->next)				\
+		del->next->prev = del->prev;	\
+	if (del->prev)				\
+		del->prev->next = del->next;	\
+	else					\
+		head = del->next;		\
+} while (0)
+
+#define kvm_list_for_each(head, iter)		\
+	for (iter = head; iter; iter = iter->next)
+
 /* Aligns x up to the next multiple of size. Size must be a power of 2. */
 static void *align(void *x, size_t size)
 {
@@ -258,8 +279,7 @@  void kvm_vm_restart(struct kvm_vm *vmp, int perm)
 	if (vmp->has_irqchip)
 		vm_create_irqchip(vmp);
 
-	for (region = vmp->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vmp->userspace_mem_region_head, region) {
 		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
 		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
 			    "  rc: %i errno: %i\n"
@@ -319,8 +339,7 @@  userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
 {
 	struct userspace_mem_region *region;
 
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		uint64_t existing_start = region->region.guest_phys_addr;
 		uint64_t existing_end = region->region.guest_phys_addr
 			+ region->region.memory_size - 1;
@@ -380,7 +399,7 @@  struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
 {
 	struct vcpu *vcpup;
 
-	for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) {
+	kvm_list_for_each(vm->vcpu_head, vcpup) {
 		if (vcpup->id == vcpuid)
 			return vcpup;
 	}
@@ -412,12 +431,7 @@  static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
 	TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
 		"errno: %i", ret, errno);
 
-	if (vcpu->next)
-		vcpu->next->prev = vcpu->prev;
-	if (vcpu->prev)
-		vcpu->prev->next = vcpu->next;
-	else
-		vm->vcpu_head = vcpu->next;
+	kvm_list_del(vm->vcpu_head, vcpu);
 	free(vcpu);
 }
 
@@ -452,13 +466,14 @@  void kvm_vm_free(struct kvm_vm *vmp)
 		struct userspace_mem_region *region
 			= vmp->userspace_mem_region_head;
 
+		kvm_list_del(vmp->userspace_mem_region_head, region);
+
 		region->region.memory_size = 0;
 		ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION,
 			&region->region);
 		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
 			"rc: %i errno: %i", ret, errno);
 
-		vmp->userspace_mem_region_head = region->next;
 		sparsebit_free(&region->unused_phy_pages);
 		ret = munmap(region->mmap_start, region->mmap_size);
 		TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i",
@@ -611,8 +626,7 @@  void vm_userspace_mem_region_add(struct kvm_vm *vm,
 			(uint64_t) region->region.memory_size);
 
 	/* Confirm no region with the requested slot already exists. */
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		if (region->region.slot == slot)
 			break;
 	}
@@ -685,10 +699,7 @@  void vm_userspace_mem_region_add(struct kvm_vm *vm,
 		guest_paddr, (uint64_t) region->region.memory_size);
 
 	/* Add to linked-list of memory regions. */
-	if (vm->userspace_mem_region_head)
-		vm->userspace_mem_region_head->prev = region;
-	region->next = vm->userspace_mem_region_head;
-	vm->userspace_mem_region_head = region;
+	kvm_list_add(vm->userspace_mem_region_head, region);
 }
 
 /*
@@ -711,8 +722,7 @@  memslot2region(struct kvm_vm *vm, uint32_t memslot)
 {
 	struct userspace_mem_region *region;
 
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		if (region->region.slot == memslot)
 			break;
 	}
@@ -862,10 +872,7 @@  void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
 		"vcpu id: %u errno: %i", vcpuid, errno);
 
 	/* Add to linked-list of VCPUs. */
-	if (vm->vcpu_head)
-		vm->vcpu_head->prev = vcpu;
-	vcpu->next = vm->vcpu_head;
-	vm->vcpu_head = vcpu;
+	kvm_list_add(vm->vcpu_head, vcpu);
 }
 
 /*
@@ -1058,8 +1065,8 @@  void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 {
 	struct userspace_mem_region *region;
-	for (region = vm->userspace_mem_region_head; region;
-	     region = region->next) {
+
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		if ((gpa >= region->region.guest_phys_addr)
 			&& (gpa <= (region->region.guest_phys_addr
 				+ region->region.memory_size - 1)))
@@ -1091,8 +1098,8 @@  void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
 {
 	struct userspace_mem_region *region;
-	for (region = vm->userspace_mem_region_head; region;
-	     region = region->next) {
+
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		if ((hva >= region->host_mem)
 			&& (hva <= (region->host_mem
 				+ region->region.memory_size - 1)))
@@ -1519,8 +1526,7 @@  void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
 	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
 	fprintf(stream, "%*sMem Regions:\n", indent, "");
-	for (region = vm->userspace_mem_region_head; region;
-		region = region->next) {
+	kvm_list_for_each(vm->userspace_mem_region_head, region) {
 		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
 			"host_virt: %p\n", indent + 2, "",
 			(uint64_t) region->region.guest_phys_addr,
@@ -1539,7 +1545,7 @@  void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 		virt_dump(stream, vm, indent + 4);
 	}
 	fprintf(stream, "%*sVCPUs:\n", indent, "");
-	for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
+	kvm_list_for_each(vm->vcpu_head, vcpu)
 		vcpu_dump(stream, vm, vcpu->id, indent + 2);
 }