[kvm-unit-tests,RESEND,v3,6/8] x86: nSVM: Correct indentation for svm.c

Message ID 20220425114417.151540-7-manali.shukla@amd.com (mailing list archive)
State New, archived
Series Move npt test cases and NPT code improvements

Commit Message

Manali Shukla April 25, 2022, 11:44 a.m. UTC
Used the ./scripts/Lindent script from the Linux kernel source tree to correct
the indentation in svm.c.

No functional changes intended.

Signed-off-by: Manali Shukla <manali.shukla@amd.com>
---
 x86/svm.c | 225 +++++++++++++++++++++++++++---------------------------
 1 file changed, 111 insertions(+), 114 deletions(-)
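
For context, a minimal sketch of how a reformat like this is typically
produced (the kernel checkout path below is hypothetical; Lindent is the
kernel's thin wrapper around GNU indent with kernel-style settings, and its
output generally still needs manual review before posting):

    # Run the kernel's Lindent wrapper on the kvm-unit-tests file,
    # then inspect the resulting diff before committing.
    ~/linux/scripts/Lindent x86/svm.c
    git diff -- x86/svm.c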

Patch

diff --git a/x86/svm.c b/x86/svm.c
index e66c801..081a167 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -23,26 +23,26 @@  struct vmcb *vmcb;
 
 u64 *npt_get_pte(u64 address)
 {
-        return get_pte(npt_get_pml4e(), (void*)address);
+	return get_pte(npt_get_pml4e(), (void *)address);
 }
 
 u64 *npt_get_pde(u64 address)
 {
-    struct pte_search search;
-    search = find_pte_level(npt_get_pml4e(), (void*)address, 2);
-    return search.pte;
+	struct pte_search search;
+	search = find_pte_level(npt_get_pml4e(), (void *)address, 2);
+	return search.pte;
 }
 
 u64 *npt_get_pdpe(u64 address)
 {
-    struct pte_search search;
-    search = find_pte_level(npt_get_pml4e(), (void*)address, 3);
-    return search.pte;
+	struct pte_search search;
+	search = find_pte_level(npt_get_pml4e(), (void *)address, 3);
+	return search.pte;
 }
 
 u64 *npt_get_pml4e(void)
 {
-    return pml4e;
+	return pml4e;
 }
 
 bool smp_supported(void)
@@ -52,7 +52,7 @@  bool smp_supported(void)
 
 bool default_supported(void)
 {
-    return true;
+	return true;
 }
 
 bool vgif_supported(void)
@@ -62,25 +62,24 @@  bool vgif_supported(void)
 
 bool lbrv_supported(void)
 {
-    return this_cpu_has(X86_FEATURE_LBRV);
+	return this_cpu_has(X86_FEATURE_LBRV);
 }
 
 bool tsc_scale_supported(void)
 {
-    return this_cpu_has(X86_FEATURE_TSCRATEMSR);
+	return this_cpu_has(X86_FEATURE_TSCRATEMSR);
 }
 
 bool pause_filter_supported(void)
 {
-    return this_cpu_has(X86_FEATURE_PAUSEFILTER);
+	return this_cpu_has(X86_FEATURE_PAUSEFILTER);
 }
 
 bool pause_threshold_supported(void)
 {
-    return this_cpu_has(X86_FEATURE_PFTHRESHOLD);
+	return this_cpu_has(X86_FEATURE_PFTHRESHOLD);
 }
 
-
 void default_prepare(struct svm_test *test)
 {
 	vmcb_ident(vmcb);
@@ -92,7 +91,7 @@  void default_prepare_gif_clear(struct svm_test *test)
 
 bool default_finished(struct svm_test *test)
 {
-	return true; /* one vmexit */
+	return true;		/* one vmexit */
 }
 
 bool npt_supported(void)
@@ -121,7 +120,7 @@  void inc_test_stage(struct svm_test *test)
 }
 
 static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
-                         u64 base, u32 limit, u32 attr)
+			 u64 base, u32 limit, u32 attr)
 {
 	seg->selector = selector;
 	seg->attrib = attr;
@@ -131,7 +130,7 @@  static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
 
 inline void vmmcall(void)
 {
-	asm volatile ("vmmcall" : : : "memory");
+	asm volatile ("vmmcall":::"memory");
 }
 
 static test_guest_func guest_main;
@@ -165,15 +164,17 @@  void vmcb_ident(struct vmcb *vmcb)
 	struct descriptor_table_ptr desc_table_ptr;
 
 	memset(vmcb, 0, sizeof(*vmcb));
-	asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory");
+	asm volatile ("vmsave %0"::"a" (vmcb_phys):"memory");
 	vmcb_set_seg(&save->es, read_es(), 0, -1U, data_seg_attr);
 	vmcb_set_seg(&save->cs, read_cs(), 0, -1U, code_seg_attr);
 	vmcb_set_seg(&save->ss, read_ss(), 0, -1U, data_seg_attr);
 	vmcb_set_seg(&save->ds, read_ds(), 0, -1U, data_seg_attr);
 	sgdt(&desc_table_ptr);
-	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
+	vmcb_set_seg(&save->gdtr, 0, desc_table_ptr.base, desc_table_ptr.limit,
+		     0);
 	sidt(&desc_table_ptr);
-	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit, 0);
+	vmcb_set_seg(&save->idtr, 0, desc_table_ptr.base, desc_table_ptr.limit,
+		     0);
 	ctrl->asid = 1;
 	save->cpl = 0;
 	save->efer = rdmsr(MSR_EFER);
@@ -186,14 +187,13 @@  void vmcb_ident(struct vmcb *vmcb)
 	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
 	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
 	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
-			  (1ULL << INTERCEPT_VMMCALL) |
-			  (1ULL << INTERCEPT_SHUTDOWN);
+	    (1ULL << INTERCEPT_VMMCALL) | (1ULL << INTERCEPT_SHUTDOWN);
 	ctrl->iopm_base_pa = virt_to_phys(io_bitmap);
 	ctrl->msrpm_base_pa = virt_to_phys(msr_bitmap);
 
 	if (npt_supported()) {
 		ctrl->nested_ctl = 1;
-		ctrl->nested_cr3 = (u64)pml4e;
+		ctrl->nested_cr3 = (u64) pml4e;
 		ctrl->tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
 	}
 }
@@ -207,32 +207,29 @@  struct regs get_regs(void)
 
 // rax handled specially below
 
-
 struct svm_test *v2_test;
 
-
 u64 guest_stack[10000];
 
 int __svm_vmrun(u64 rip)
 {
-	vmcb->save.rip = (ulong)rip;
-	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
-	regs.rdi = (ulong)v2_test;
+	vmcb->save.rip = (ulong) rip;
+	vmcb->save.rsp = (ulong) (guest_stack + ARRAY_SIZE(guest_stack));
+	regs.rdi = (ulong) v2_test;
 
-	asm volatile (
-		ASM_PRE_VMRUN_CMD
-                "vmrun %%rax\n\t"               \
-		ASM_POST_VMRUN_CMD
-		:
-		: "a" (virt_to_phys(vmcb))
-		: "memory", "r15");
+	asm volatile (ASM_PRE_VMRUN_CMD
+			  "vmrun %%rax\n\t" \
+			  ASM_POST_VMRUN_CMD
+			  :
+			  :"a"(virt_to_phys(vmcb))
+			  :"memory", "r15");
 
 	return (vmcb->control.exit_code);
 }
 
 int svm_vmrun(void)
 {
-	return __svm_vmrun((u64)test_thunk);
+	return __svm_vmrun((u64) test_thunk);
 }
 
 extern u8 vmrun_rip;
@@ -246,40 +243,38 @@  static noinline void test_run(struct svm_test *test)
 
 	test->prepare(test);
 	guest_main = test->guest_func;
-	vmcb->save.rip = (ulong)test_thunk;
-	vmcb->save.rsp = (ulong)(guest_stack + ARRAY_SIZE(guest_stack));
-	regs.rdi = (ulong)test;
+	vmcb->save.rip = (ulong) test_thunk;
+	vmcb->save.rsp = (ulong) (guest_stack + ARRAY_SIZE(guest_stack));
+	regs.rdi = (ulong) test;
 	do {
 		struct svm_test *the_test = test;
 		u64 the_vmcb = vmcb_phys;
-		asm volatile (
-			"clgi;\n\t" // semi-colon needed for LLVM compatibility
-			"sti \n\t"
-			"call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
-			"mov %[vmcb_phys], %%rax \n\t"
-			ASM_PRE_VMRUN_CMD
-			".global vmrun_rip\n\t"		\
-			"vmrun_rip: vmrun %%rax\n\t"    \
-			ASM_POST_VMRUN_CMD
-			"cli \n\t"
-			"stgi"
-			: // inputs clobbered by the guest:
-			"=D" (the_test),            // first argument register
-			"=b" (the_vmcb)             // callee save register!
-			: [test] "0" (the_test),
-			[vmcb_phys] "1"(the_vmcb),
-			[PREPARE_GIF_CLEAR] "i" (offsetof(struct svm_test, prepare_gif_clear))
-			: "rax", "rcx", "rdx", "rsi",
-			"r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15",
-			"memory");
+		asm volatile ("clgi;\n\t"	// semi-colon needed for LLVM compatibility
+			      "sti \n\t"
+			      "call *%c[PREPARE_GIF_CLEAR](%[test]) \n \t"
+			      "mov %[vmcb_phys], %%rax \n\t"
+			      ASM_PRE_VMRUN_CMD
+			      ".global vmrun_rip\n\t"       \
+			      "vmrun_rip: vmrun %%rax\n\t"  \
+			      ASM_POST_VMRUN_CMD "cli \n\t"
+			      "stgi"
+			      :	// inputs clobbered by the guest:
+			      "=D"(the_test),	// first argument register
+			      "=b"(the_vmcb)	// callee save register!
+			      :[test] "0"(the_test),
+			      [vmcb_phys] "1"(the_vmcb),
+			      [PREPARE_GIF_CLEAR]
+			      "i"(offsetof(struct svm_test, prepare_gif_clear))
+			      :"rax", "rcx", "rdx", "rsi", "r8", "r9", "r10",
+			      "r11", "r12", "r13", "r14", "r15", "memory");
 		++test->exits;
 	} while (!test->finished(test));
 	irq_enable();
 
 	report(test->succeeded(test), "%s", test->name);
 
-        if (test->on_vcpu)
-	    test->on_vcpu_done = true;
+	if (test->on_vcpu)
+		test->on_vcpu_done = true;
 }
 
 static void set_additional_vcpu_msr(void *msr_efer)
@@ -287,18 +282,19 @@  static void set_additional_vcpu_msr(void *msr_efer)
 	void *hsave = alloc_page();
 
 	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
-	wrmsr(MSR_EFER, (ulong)msr_efer | EFER_SVME);
+	wrmsr(MSR_EFER, (ulong) msr_efer | EFER_SVME);
 }
 
-void setup_npt(void) {
-    u64 end_of_memory;
-    pml4e = alloc_page();
+void setup_npt(void)
+{
+	u64 end_of_memory;
+	pml4e = alloc_page();
 
-    end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
-    if (end_of_memory < (1ul << 32))
-        end_of_memory = (1ul << 32);
+	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
+	if (end_of_memory < (1ul << 32))
+		end_of_memory = (1ul << 32);
 
-    setup_mmu_range(pml4e, 0, end_of_memory, true);
+	setup_mmu_range(pml4e, 0, end_of_memory, true);
 }
 
 static void setup_svm(void)
@@ -309,63 +305,64 @@  static void setup_svm(void)
 	wrmsr(MSR_VM_HSAVE_PA, virt_to_phys(hsave));
 	wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
 
-	io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);
+	io_bitmap = (void *)ALIGN((ulong) io_bitmap_area, PAGE_SIZE);
 
-	msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);
+	msr_bitmap = (void *)ALIGN((ulong) msr_bitmap_area, PAGE_SIZE);
 
 	if (!npt_supported())
 		return;
 
 	for (i = 1; i < cpu_count(); i++)
-		on_cpu(i, (void *)set_additional_vcpu_msr, (void *)rdmsr(MSR_EFER));
+		on_cpu(i, (void *)set_additional_vcpu_msr,
+		       (void *)rdmsr(MSR_EFER));
 
 	printf("NPT detected - running all tests with NPT enabled\n");
 
 	/*
-	* Nested paging supported - Build a nested page table
-	* Build the page-table bottom-up and map everything with 4k
-	* pages to get enough granularity for the NPT unit-tests.
-	*/
+	 * Nested paging supported - Build a nested page table
+	 * Build the page-table bottom-up and map everything with 4k
+	 * pages to get enough granularity for the NPT unit-tests.
+	 */
 
-  setup_npt();
+	setup_npt();
 }
 
 int matched;
 
-static bool
-test_wanted(const char *name, char *filters[], int filter_count)
-{
-        int i;
-        bool positive = false;
-        bool match = false;
-        char clean_name[strlen(name) + 1];
-        char *c;
-        const char *n;
-
-        /* Replace spaces with underscores. */
-        n = name;
-        c = &clean_name[0];
-        do *c++ = (*n == ' ') ? '_' : *n;
-        while (*n++);
-
-        for (i = 0; i < filter_count; i++) {
-                const char *filter = filters[i];
-
-                if (filter[0] == '-') {
-                        if (simple_glob(clean_name, filter + 1))
-                                return false;
-                } else {
-                        positive = true;
-                        match |= simple_glob(clean_name, filter);
-                }
-        }
-
-        if (!positive || match) {
-                matched++;
-                return true;
-        } else {
-                return false;
-        }
+static bool test_wanted(const char *name, char *filters[], int filter_count)
+{
+	int i;
+	bool positive = false;
+	bool match = false;
+	char clean_name[strlen(name) + 1];
+	char *c;
+	const char *n;
+
+	/* Replace spaces with underscores. */
+	n = name;
+	c = &clean_name[0];
+	do
+		*c++ = (*n == ' ') ? '_' : *n;
+	while (*n++);
+
+	for (i = 0; i < filter_count; i++) {
+		const char *filter = filters[i];
+
+		if (filter[0] == '-') {
+			if (simple_glob(clean_name, filter + 1))
+				return false;
+		} else {
+			positive = true;
+			match |= simple_glob(clean_name, filter);
+		}
+	}
+
+	if (!positive || match) {
+		matched++;
+		return true;
+	} else {
+		return false;
+	}
 }
 
 int run_svm_tests(int ac, char **av)
@@ -393,11 +390,11 @@  int run_svm_tests(int ac, char **av)
 			if (svm_tests[i].on_vcpu) {
 				if (cpu_count() <= svm_tests[i].on_vcpu)
 					continue;
-				on_cpu_async(svm_tests[i].on_vcpu, (void *)test_run, &svm_tests[i]);
+				on_cpu_async(svm_tests[i].on_vcpu,
+					     (void *)test_run, &svm_tests[i]);
 				while (!svm_tests[i].on_vcpu_done)
 					cpu_relax();
-			}
-			else
+			} else
 				test_run(&svm_tests[i]);
 		} else {
 			vmcb_ident(vmcb);