@@ -31,6 +31,33 @@ struct kvm_ext {
int code;
};
+struct kvm {
+ struct kvm_arch arch;
+ int sys_fd; /* For system ioctls(), i.e. /dev/kvm */
+ int vm_fd; /* For VM ioctls() */
+ timer_t timerid; /* Posix timer for interrupts */
+
+ int nrcpus; /* Number of cpus to run */
+
+ u32 mem_slots; /* for KVM_SET_USER_MEMORY_REGION */
+
+ u64 ram_size;
+ void *ram_start;
+ u64 ram_pagesize;
+
+ bool nmi_disabled;
+
+ bool single_step;
+
+ const char *vmlinux;
+ struct disk_image **disks;
+ int nr_disks;
+
+ char *name;
+
+ int vm_state;
+};
+
void kvm__set_dir(const char *fmt, ...);
const char *kvm__get_dir(void);
@@ -174,7 +174,7 @@ struct cpu_info *find_cpu_info(struct kvm *kvm)
{
struct cpu_info *info;
unsigned int i;
- u32 pvr = kvm->pvr;
+ u32 pvr = kvm->arch.pvr;
for (info = NULL, i = 0; i < ARRAY_SIZE(host_pvr_info); i++) {
if ((pvr & host_pvr_info[i].pvr_mask) == host_pvr_info[i].pvr) {
@@ -43,36 +43,14 @@
struct spapr_phb;
-struct kvm {
- int sys_fd; /* For system ioctls(), i.e. /dev/kvm */
- int vm_fd; /* For VM ioctls() */
- timer_t timerid; /* Posix timer for interrupts */
-
- int nrcpus; /* Number of cpus to run */
-
- u32 mem_slots; /* for KVM_SET_USER_MEMORY_REGION */
-
- u64 ram_size;
- void *ram_start;
- u64 ram_pagesize;
-
+struct kvm_arch {
u64 sdr1;
u32 pvr;
-
- bool nmi_disabled;
-
- bool single_step;
-
- const char *vmlinux;
- struct disk_image **disks;
- int nr_disks;
unsigned long rtas_gra;
unsigned long rtas_size;
unsigned long fdt_gra;
unsigned long initrd_gra;
unsigned long initrd_size;
- char *name;
- int vm_state;
struct icp_state *icp;
struct spapr_phb *phb;
};
@@ -59,7 +59,7 @@ int irq__init(struct kvm *kvm)
* are numbered 0..nrcpus. This may not really be true,
* but it is OK currently.
*/
- kvm->icp = xics_system_init(XICS_IRQS, kvm->nrcpus);
+ kvm->arch.icp = xics_system_init(XICS_IRQS, kvm->nrcpus);
return 0;
}
@@ -115,7 +115,7 @@ static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
if (vcpu->cpu_id == 0) {
r->pc = KERNEL_START_ADDR;
- r->gpr[3] = vcpu->kvm->fdt_gra;
+ r->gpr[3] = vcpu->kvm->arch.fdt_gra;
r->gpr[5] = 0;
} else {
r->pc = KERNEL_SECONDARY_START_ADDR;
@@ -142,8 +142,8 @@ static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
die("KVM_GET_SREGS failed");
- sregs.u.s.sdr1 = vcpu->kvm->sdr1;
- sregs.pvr = vcpu->kvm->pvr;
+ sregs.u.s.sdr1 = vcpu->kvm->arch.sdr1;
+ sregs.pvr = vcpu->kvm->arch.pvr;
if (ioctl(vcpu->vcpu_fd, KVM_SET_SREGS, &sregs) < 0)
die("KVM_SET_SREGS failed");
@@ -108,18 +108,18 @@ void kvm__arch_init(struct kvm *kvm, const char *hugetlbfs_path, u64 ram_size)
kvm->ram_size, errno);
/* FDT goes at top of memory, RTAS just below */
- kvm->fdt_gra = kvm->ram_size - FDT_MAX_SIZE;
+ kvm->arch.fdt_gra = kvm->ram_size - FDT_MAX_SIZE;
/* FIXME: Not all PPC systems have RTAS */
- kvm->rtas_gra = kvm->fdt_gra - RTAS_MAX_SIZE;
+ kvm->arch.rtas_gra = kvm->arch.fdt_gra - RTAS_MAX_SIZE;
madvise(kvm->ram_start, kvm->ram_size, MADV_MERGEABLE);
/* FIXME: SPAPR-PR specific; allocate a guest HPT. */
if (posix_memalign((void **)&hpt, (1<<HPT_ORDER), (1<<HPT_ORDER)))
die("Can't allocate %d bytes for HPT\n", (1<<HPT_ORDER));
- kvm->sdr1 = ((hpt + 0x3ffffULL) & ~0x3ffffULL) | (HPT_ORDER-18);
+ kvm->arch.sdr1 = ((hpt + 0x3ffffULL) & ~0x3ffffULL) | (HPT_ORDER-18);
- kvm->pvr = mfpvr();
+ kvm->arch.pvr = mfpvr();
/* FIXME: This is book3s-specific */
cap_ppc_rma = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_RMA);
@@ -193,10 +193,10 @@ int load_flat_binary(struct kvm *kvm, int fd_kernel, int fd_initrd, const char *
pr_info("Loaded initrd to 0x%x (%ld bytes)",
INITRD_LOAD_ADDR, p-i_start);
- kvm->initrd_gra = INITRD_LOAD_ADDR;
- kvm->initrd_size = p-i_start;
+ kvm->arch.initrd_gra = INITRD_LOAD_ADDR;
+ kvm->arch.initrd_size = p-i_start;
} else {
- kvm->initrd_size = 0;
+ kvm->arch.initrd_size = 0;
}
strncpy(kern_cmdline, kernel_cmdline, 2048);
kern_cmdline[2047] = '\0';
@@ -301,8 +301,8 @@ static void setup_fdt(struct kvm *kvm)
struct fdt_prop segment_page_sizes;
u32 segment_sizes_1T[] = {0x1c, 0x28, 0xffffffff, 0xffffffff};
- /* Generate an appropriate DT at kvm->fdt_gra */
- void *fdt_dest = guest_flat_to_host(kvm, kvm->fdt_gra);
+ /* Generate an appropriate DT at kvm->arch.fdt_gra */
+ void *fdt_dest = guest_flat_to_host(kvm, kvm->arch.fdt_gra);
void *fdt = staging_fdt;
_FDT(fdt_create(fdt, FDT_MAX_SIZE));
@@ -320,9 +320,9 @@ static void setup_fdt(struct kvm *kvm)
/* This is what the kernel uses to switch 'We're an LPAR'! */
_FDT(fdt_property(fdt, "ibm,hypertas-functions", hypertas_prop_kvm,
sizeof(hypertas_prop_kvm)));
- _FDT(fdt_property_cell(fdt, "linux,rtas-base", kvm->rtas_gra));
- _FDT(fdt_property_cell(fdt, "linux,rtas-entry", kvm->rtas_gra));
- _FDT(fdt_property_cell(fdt, "rtas-size", kvm->rtas_size));
+ _FDT(fdt_property_cell(fdt, "linux,rtas-base", kvm->arch.rtas_gra));
+ _FDT(fdt_property_cell(fdt, "linux,rtas-entry", kvm->arch.rtas_gra));
+ _FDT(fdt_property_cell(fdt, "rtas-size", kvm->arch.rtas_size));
/* Now add properties for all RTAS tokens: */
if (spapr_rtas_fdt_setup(kvm, fdt))
die("Couldn't create RTAS FDT properties\n");
@@ -334,10 +334,10 @@ static void setup_fdt(struct kvm *kvm)
/* cmdline */
_FDT(fdt_property_string(fdt, "bootargs", kern_cmdline));
/* Initrd */
- if (kvm->initrd_size != 0) {
- uint32_t ird_st_prop = cpu_to_be32(kvm->initrd_gra);
- uint32_t ird_end_prop = cpu_to_be32(kvm->initrd_gra +
- kvm->initrd_size);
+ if (kvm->arch.initrd_size != 0) {
+ uint32_t ird_st_prop = cpu_to_be32(kvm->arch.initrd_gra);
+ uint32_t ird_end_prop = cpu_to_be32(kvm->arch.initrd_gra +
+ kvm->arch.initrd_size);
_FDT(fdt_property(fdt, "linux,initrd-start",
&ird_st_prop, sizeof(ird_st_prop)));
_FDT(fdt_property(fdt, "linux,initrd-end",
@@ -384,7 +384,7 @@ static void setup_fdt(struct kvm *kvm)
_FDT(fdt_property_string(fdt, "device_type", "cpu"));
_FDT(fdt_property_cell(fdt, "reg", i));
- _FDT(fdt_property_cell(fdt, "cpu-version", kvm->pvr));
+ _FDT(fdt_property_cell(fdt, "cpu-version", kvm->arch.pvr));
_FDT(fdt_property_cell(fdt, "dcache-block-size", cpu_info->d_bsize));
_FDT(fdt_property_cell(fdt, "icache-block-size", cpu_info->i_bsize));
@@ -484,7 +484,7 @@ static void setup_fdt(struct kvm *kvm)
if (spapr_populate_pci_devices(kvm, PHANDLE_XICP, fdt_dest))
die("Fail populating PCI device nodes");
- _FDT(fdt_add_mem_rsv(fdt_dest, kvm->rtas_gra, kvm->rtas_size));
+ _FDT(fdt_add_mem_rsv(fdt_dest, kvm->arch.rtas_gra, kvm->arch.rtas_size));
_FDT(fdt_pack(fdt_dest));
free(segment_page_sizes.value);
@@ -503,17 +503,17 @@ int kvm__arch_setup_firmware(struct kvm *kvm)
* c: 44 00 00 22 sc 1
* 10: 4e 80 00 20 blr
*/
- uint32_t *rtas = guest_flat_to_host(kvm, kvm->rtas_gra);
+ uint32_t *rtas = guest_flat_to_host(kvm, kvm->arch.rtas_gra);
rtas[0] = 0x7c641b78;
rtas[1] = 0x3c600000;
rtas[2] = 0x6063f000;
rtas[3] = 0x44000022;
rtas[4] = 0x4e800020;
- kvm->rtas_size = 20;
+ kvm->arch.rtas_size = 20;
pr_info("Set up %ld bytes of RTAS at 0x%lx\n",
- kvm->rtas_size, kvm->rtas_gra);
+ kvm->arch.rtas_size, kvm->arch.rtas_gra);
/* Load SLOF */
@@ -220,7 +220,7 @@ void spapr_create_phb(struct kvm *kvm,
phb.io_addr = io_win_addr;
phb.io_size = io_win_size;
- kvm->phb = &phb;
+ kvm->arch.phb = &phb;
}
static uint32_t bar_to_ss(unsigned long bar)
@@ -302,7 +302,7 @@ static target_ulong h_cppr(struct kvm_cpu *vcpu,
target_ulong cppr = args[0];
xics_dprintf("h_cppr(%lx)\n", cppr);
- icp_set_cppr(vcpu->kvm->icp, vcpu->cpu_id, cppr);
+ icp_set_cppr(vcpu->kvm->arch.icp, vcpu->cpu_id, cppr);
return H_SUCCESS;
}
@@ -313,18 +313,18 @@ static target_ulong h_ipi(struct kvm_cpu *vcpu,
target_ulong mfrr = args[1];
xics_dprintf("h_ipi(%lx, %lx)\n", server, mfrr);
- if (server >= vcpu->kvm->icp->nr_servers) {
+ if (server >= vcpu->kvm->arch.icp->nr_servers) {
return H_PARAMETER;
}
- icp_set_mfrr(vcpu->kvm->icp, server, mfrr);
+ icp_set_mfrr(vcpu->kvm->arch.icp, server, mfrr);
return H_SUCCESS;
}
static target_ulong h_xirr(struct kvm_cpu *vcpu,
target_ulong opcode, target_ulong *args)
{
- uint32_t xirr = icp_accept(vcpu->kvm->icp->ss + vcpu->cpu_id);
+ uint32_t xirr = icp_accept(vcpu->kvm->arch.icp->ss + vcpu->cpu_id);
xics_dprintf("h_xirr() = %x\n", xirr);
args[0] = xirr;
@@ -337,7 +337,7 @@ static target_ulong h_eoi(struct kvm_cpu *vcpu,
target_ulong xirr = args[0];
xics_dprintf("h_eoi(%lx)\n", xirr);
- icp_eoi(vcpu->kvm->icp, vcpu->cpu_id, xirr);
+ icp_eoi(vcpu->kvm->arch.icp, vcpu->cpu_id, xirr);
return H_SUCCESS;
}
@@ -345,7 +345,7 @@ static void rtas_set_xive(struct kvm_cpu *vcpu, uint32_t token,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
- struct ics_state *ics = vcpu->kvm->icp->ics;
+ struct ics_state *ics = vcpu->kvm->arch.icp->ics;
uint32_t nr, server, priority;
if ((nargs != 3) || (nret != 1)) {
@@ -373,7 +373,7 @@ static void rtas_get_xive(struct kvm_cpu *vcpu, uint32_t token,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
- struct ics_state *ics = vcpu->kvm->icp->ics;
+ struct ics_state *ics = vcpu->kvm->arch.icp->ics;
uint32_t nr;
if ((nargs != 1) || (nret != 3)) {
@@ -397,7 +397,7 @@ static void rtas_int_off(struct kvm_cpu *vcpu, uint32_t token,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
- struct ics_state *ics = vcpu->kvm->icp->ics;
+ struct ics_state *ics = vcpu->kvm->arch.icp->ics;
uint32_t nr;
if ((nargs != 1) || (nret != 1)) {
@@ -421,7 +421,7 @@ static void rtas_int_on(struct kvm_cpu *vcpu, uint32_t token,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
- struct ics_state *ics = vcpu->kvm->icp->ics;
+ struct ics_state *ics = vcpu->kvm->arch.icp->ics;
uint32_t nr;
if ((nargs != 1) || (nret != 1)) {
@@ -443,8 +443,8 @@ static void rtas_int_on(struct kvm_cpu *vcpu, uint32_t token,
void xics_cpu_register(struct kvm_cpu *vcpu)
{
- if (vcpu->cpu_id < vcpu->kvm->icp->nr_servers)
- vcpu->kvm->icp->ss[vcpu->cpu_id].cpu = vcpu;
+ if (vcpu->cpu_id < vcpu->kvm->arch.icp->nr_servers)
+ vcpu->kvm->arch.icp->ss[vcpu->cpu_id].cpu = vcpu;
else
die("Setting invalid server for cpuid %ld\n", vcpu->cpu_id);
}
@@ -510,5 +510,5 @@ void kvm__irq_line(struct kvm *kvm, int irq, int level)
* kvm_cpu__irq(vcpu, POWER7_EXT_IRQ, 1)
*/
xics_dprintf("Raising IRQ %d -> %d\n", irq, level);
- ics_set_irq_msi(kvm->icp->ics, irq - kvm->icp->ics->offset, level);
+ ics_set_irq_msi(kvm->arch.icp->ics, irq - kvm->arch.icp->ics->offset, level);
}
@@ -48,7 +48,7 @@ static void setup_irq_handler(struct kvm *kvm, struct irq_handler *handler)
DIE_IF((handler->address - MB_BIOS_BEGIN) > 0xffffUL);
- interrupt_table__set(&kvm->interrupt_table, &intr_desc, handler->irq);
+ interrupt_table__set(&kvm->arch.interrupt_table, &intr_desc, handler->irq);
}
/**
@@ -163,12 +163,12 @@ void setup_bios(struct kvm *kvm)
.segment = REAL_SEGMENT(MB_BIOS_BEGIN),
.offset = address - MB_BIOS_BEGIN,
};
- interrupt_table__setup(&kvm->interrupt_table, &intr_desc);
+ interrupt_table__setup(&kvm->arch.interrupt_table, &intr_desc);
for (i = 0; i < ARRAY_SIZE(bios_irq_handlers); i++)
setup_irq_handler(kvm, &bios_irq_handlers[i]);
/* we almost done */
p = guest_flat_to_host(kvm, 0);
- interrupt_table__copy(&kvm->interrupt_table, p, REAL_INTR_SIZE);
+ interrupt_table__copy(&kvm->arch.interrupt_table, p, REAL_INTR_SIZE);
}
@@ -33,9 +33,9 @@ bool kvm__load_firmware(struct kvm *kvm, const char *firmware_filename)
while ((nr = read(fd, p, st.st_size)) > 0)
p += nr;
- kvm->boot_selector = BIOS_SELECTOR;
- kvm->boot_ip = BIOS_IP;
- kvm->boot_sp = BIOS_SP;
+ kvm->arch.boot_selector = BIOS_SELECTOR;
+ kvm->arch.boot_ip = BIOS_IP;
+ kvm->arch.boot_sp = BIOS_SP;
return true;
}
@@ -23,36 +23,12 @@
#define KVM_PCI_MMIO_AREA (KVM_MMIO_START + 0x1000000)
#define KVM_VIRTIO_MMIO_AREA (KVM_MMIO_START + 0x2000000)
-struct kvm {
- int sys_fd; /* For system ioctls(), i.e. /dev/kvm */
- int vm_fd; /* For VM ioctls() */
- timer_t timerid; /* Posix timer for interrupts */
-
- int nrcpus; /* Number of cpus to run */
-
- u32 mem_slots; /* for KVM_SET_USER_MEMORY_REGION */
-
- u64 ram_size;
- void *ram_start;
- u64 ram_pagesize;
-
- bool nmi_disabled;
-
- bool single_step;
-
+struct kvm_arch {
u16 boot_selector;
u16 boot_ip;
u16 boot_sp;
struct interrupt_table interrupt_table;
-
- const char *vmlinux;
- struct disk_image **disks;
- int nr_disks;
-
- char *name;
-
- int vm_state;
};
static inline void *guest_flat_to_host(struct kvm *kvm, unsigned long offset); /* In kvm.h */
@@ -182,9 +182,9 @@ static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
/* We start the guest in 16-bit real mode */
.rflags = 0x0000000000000002ULL,
- .rip = vcpu->kvm->boot_ip,
- .rsp = vcpu->kvm->boot_sp,
- .rbp = vcpu->kvm->boot_sp,
+ .rip = vcpu->kvm->arch.boot_ip,
+ .rsp = vcpu->kvm->arch.boot_sp,
+ .rbp = vcpu->kvm->arch.boot_sp,
};
if (vcpu->regs.rip > USHRT_MAX)
@@ -199,18 +199,18 @@ static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
die_perror("KVM_GET_SREGS failed");
- vcpu->sregs.cs.selector = vcpu->kvm->boot_selector;
- vcpu->sregs.cs.base = selector_to_base(vcpu->kvm->boot_selector);
- vcpu->sregs.ss.selector = vcpu->kvm->boot_selector;
- vcpu->sregs.ss.base = selector_to_base(vcpu->kvm->boot_selector);
- vcpu->sregs.ds.selector = vcpu->kvm->boot_selector;
- vcpu->sregs.ds.base = selector_to_base(vcpu->kvm->boot_selector);
- vcpu->sregs.es.selector = vcpu->kvm->boot_selector;
- vcpu->sregs.es.base = selector_to_base(vcpu->kvm->boot_selector);
- vcpu->sregs.fs.selector = vcpu->kvm->boot_selector;
- vcpu->sregs.fs.base = selector_to_base(vcpu->kvm->boot_selector);
- vcpu->sregs.gs.selector = vcpu->kvm->boot_selector;
- vcpu->sregs.gs.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.cs.selector = vcpu->kvm->arch.boot_selector;
+ vcpu->sregs.cs.base = selector_to_base(vcpu->kvm->arch.boot_selector);
+ vcpu->sregs.ss.selector = vcpu->kvm->arch.boot_selector;
+ vcpu->sregs.ss.base = selector_to_base(vcpu->kvm->arch.boot_selector);
+ vcpu->sregs.ds.selector = vcpu->kvm->arch.boot_selector;
+ vcpu->sregs.ds.base = selector_to_base(vcpu->kvm->arch.boot_selector);
+ vcpu->sregs.es.selector = vcpu->kvm->arch.boot_selector;
+ vcpu->sregs.es.base = selector_to_base(vcpu->kvm->arch.boot_selector);
+ vcpu->sregs.fs.selector = vcpu->kvm->arch.boot_selector;
+ vcpu->sregs.fs.base = selector_to_base(vcpu->kvm->arch.boot_selector);
+ vcpu->sregs.gs.selector = vcpu->kvm->arch.boot_selector;
+ vcpu->sregs.gs.base = selector_to_base(vcpu->kvm->arch.boot_selector);
if (ioctl(vcpu->vcpu_fd, KVM_SET_SREGS, &vcpu->sregs) < 0)
die_perror("KVM_SET_SREGS failed");
@@ -219,9 +219,9 @@ int load_flat_binary(struct kvm *kvm, int fd_kernel, int fd_initrd, const char *
while ((nr = read(fd_kernel, p, 65536)) > 0)
p += nr;
- kvm->boot_selector = BOOT_LOADER_SELECTOR;
- kvm->boot_ip = BOOT_LOADER_IP;
- kvm->boot_sp = BOOT_LOADER_SP;
+ kvm->arch.boot_selector = BOOT_LOADER_SELECTOR;
+ kvm->arch.boot_ip = BOOT_LOADER_IP;
+ kvm->arch.boot_sp = BOOT_LOADER_SP;
return true;
}
@@ -322,13 +322,13 @@ bool load_bzimage(struct kvm *kvm, int fd_kernel,
kern_boot->hdr.ramdisk_size = initrd_stat.st_size;
}
- kvm->boot_selector = BOOT_LOADER_SELECTOR;
+ kvm->arch.boot_selector = BOOT_LOADER_SELECTOR;
/*
* The real-mode setup code starts at offset 0x200 of a bzImage. See
* Documentation/x86/boot.txt for details.
*/
- kvm->boot_ip = BOOT_LOADER_IP + 0x200;
- kvm->boot_sp = BOOT_LOADER_SP;
+ kvm->arch.boot_ip = BOOT_LOADER_IP + 0x200;
+ kvm->arch.boot_sp = BOOT_LOADER_SP;
return true;
}
Move all the non-arch-specific members into a generic struct, and the
arch-specific members into an arch-specific kvm_arch. This prevents
code duplication across different archs.

Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
---
 tools/kvm/include/kvm/kvm.h              | 27 ++++++++++++++++++++
 tools/kvm/powerpc/cpu_info.c             |  2 +-
 tools/kvm/powerpc/include/kvm/kvm-arch.h | 24 +-----------------
 tools/kvm/powerpc/irq.c                  |  2 +-
 tools/kvm/powerpc/kvm-cpu.c              |  6 ++---
 tools/kvm/powerpc/kvm.c                  | 42 ++++++++++++++++----------------
 tools/kvm/powerpc/spapr_pci.c            |  2 +-
 tools/kvm/powerpc/xics.c                 | 24 +++++++++---------
 tools/kvm/x86/bios.c                     |  6 ++---
 tools/kvm/x86/boot.c                     |  6 ++---
 tools/kvm/x86/include/kvm/kvm-arch.h     | 26 +-------------------
 tools/kvm/x86/kvm-cpu.c                  | 30 +++++++++++------------
 tools/kvm/x86/kvm.c                      | 12 ++++-----
 13 files changed, 95 insertions(+), 114 deletions(-)
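For readers who want the shape of the change without walking every hunk, here is a minimal standalone sketch of the layout after this patch. It is not taken verbatim from the kvm tool headers: the field subset is illustrative (x86-style boot fields on the arch side, a few generic fields on the shared side), and the typedefs and dummy interrupt_table exist only to make the example compile on its own. The point it demonstrates is the one the diff makes everywhere: struct kvm becomes generic and embeds a per-arch struct kvm_arch, so call sites go from kvm->boot_ip (or kvm->pvr, kvm->icp, ...) to kvm->arch.boot_ip.

```c
/* Sketch only: reduced field set, local typedefs for self-containment. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned short u16;
typedef unsigned long long u64;

/* Stand-in for the real x86 interrupt table type. */
struct interrupt_table { int dummy; };

/* Arch-specific state, defined per arch in <arch>/include/kvm/kvm-arch.h. */
struct kvm_arch {
	u16 boot_selector;
	u16 boot_ip;
	u16 boot_sp;
	struct interrupt_table interrupt_table;
};

/* Generic state, now shared in include/kvm/kvm.h, embeds the arch part. */
struct kvm {
	struct kvm_arch arch;
	int sys_fd;     /* For system ioctls(), i.e. /dev/kvm */
	int vm_fd;      /* For VM ioctls() */
	int nrcpus;     /* Number of cpus to run */
	u64 ram_size;
	void *ram_start;
	bool single_step;
};

int main(void)
{
	struct kvm kvm = { .arch = { .boot_ip = 0x200 } };

	/* Call sites change from kvm->boot_ip to kvm->arch.boot_ip. */
	printf("boot_ip = 0x%x\n", kvm.arch.boot_ip);
	return 0;
}
```

The design choice is the usual one for multi-arch code: generic code only touches the shared members, arch code owns everything behind kvm->arch, and adding a new architecture means supplying a kvm-arch.h rather than duplicating the whole struct.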