@@ -70,7 +70,7 @@ static struct serial8250_device devices[] = {
static int sysrq_pending;
-static void serial8250__sysrq(struct kvm *self, struct serial8250_device *dev)
+static void serial8250__sysrq(struct kvm *kvm, struct serial8250_device *dev)
{
switch (sysrq_pending) {
case SYSRQ_PENDING_BREAK:
@@ -87,7 +87,7 @@ static void serial8250__sysrq(struct kvm *self, struct serial8250_device *dev)
}
}
-static void serial8250__receive(struct kvm *self, struct serial8250_device *dev)
+static void serial8250__receive(struct kvm *kvm, struct serial8250_device *dev)
{
int c;
@@ -95,7 +95,7 @@ static void serial8250__receive(struct kvm *self, struct serial8250_device *dev)
return;
if (sysrq_pending) {
- serial8250__sysrq(self, dev);
+ serial8250__sysrq(kvm, dev);
return;
}
@@ -114,13 +114,13 @@ static void serial8250__receive(struct kvm *self, struct serial8250_device *dev)
/*
* Interrupts are injected for ttyS0 only.
*/
-void serial8250__inject_interrupt(struct kvm *self)
+void serial8250__inject_interrupt(struct kvm *kvm)
{
struct serial8250_device *dev = &devices[0];
mutex_lock(&dev->mutex);
- serial8250__receive(self, dev);
+ serial8250__receive(kvm, dev);
if (dev->ier & UART_IER_RDI && dev->lsr & UART_LSR_DR)
dev->iir = UART_IIR_RDI;
@@ -130,14 +130,14 @@ void serial8250__inject_interrupt(struct kvm *self)
dev->iir = UART_IIR_NO_INT;
if (dev->iir != UART_IIR_NO_INT) {
- kvm__irq_line(self, dev->irq, 0);
- kvm__irq_line(self, dev->irq, 1);
+ kvm__irq_line(kvm, dev->irq, 0);
+ kvm__irq_line(kvm, dev->irq, 1);
}
mutex_unlock(&dev->mutex);
}
-void serial8250__inject_sysrq(struct kvm *self)
+void serial8250__inject_sysrq(struct kvm *kvm)
{
sysrq_pending = SYSRQ_PENDING_BREAK;
}
@@ -155,7 +155,7 @@ static struct serial8250_device *find_device(u16 port)
return NULL;
}
-static bool serial8250_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool serial8250_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
struct serial8250_device *dev;
u16 offset;
@@ -243,7 +243,7 @@ out_unlock:
return ret;
}
-static bool serial8250_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool serial8250_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
struct serial8250_device *dev;
u16 offset;
@@ -32,19 +32,19 @@ static void filter_cpuid(struct kvm_cpuid2 *kvm_cpuid)
}
}
-void kvm_cpu__setup_cpuid(struct kvm_cpu *self)
+void kvm_cpu__setup_cpuid(struct kvm_cpu *vcpu)
{
struct kvm_cpuid2 *kvm_cpuid;
kvm_cpuid = calloc(1, sizeof(*kvm_cpuid) + MAX_KVM_CPUID_ENTRIES * sizeof(*kvm_cpuid->entries));
kvm_cpuid->nent = MAX_KVM_CPUID_ENTRIES;
- if (ioctl(self->kvm->sys_fd, KVM_GET_SUPPORTED_CPUID, kvm_cpuid) < 0)
+ if (ioctl(vcpu->kvm->sys_fd, KVM_GET_SUPPORTED_CPUID, kvm_cpuid) < 0)
die_perror("KVM_GET_SUPPORTED_CPUID failed");
filter_cpuid(kvm_cpuid);
- if (ioctl(self->vcpu_fd, KVM_SET_CPUID2, kvm_cpuid) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_CPUID2, kvm_cpuid) < 0)
die_perror("KVM_SET_CPUID2 failed");
free(kvm_cpuid);
@@ -19,74 +19,74 @@
struct disk_image *disk_image__new(int fd, u64 size, struct disk_image_operations *ops)
{
- struct disk_image *self;
+ struct disk_image *disk;
- self = malloc(sizeof *self);
- if (!self)
+ disk = malloc(sizeof *disk);
+ if (!disk)
return NULL;
- self->fd = fd;
- self->size = size;
- self->ops = ops;
- return self;
+ disk->fd = fd;
+ disk->size = size;
+ disk->ops = ops;
+ return disk;
}
struct disk_image *disk_image__new_readonly(int fd, u64 size, struct disk_image_operations *ops)
{
- struct disk_image *self;
+ struct disk_image *disk;
- self = disk_image__new(fd, size, ops);
- if (!self)
+ disk = disk_image__new(fd, size, ops);
+ if (!disk)
return NULL;
- self->priv = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
- if (self->priv == MAP_FAILED)
+ disk->priv = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
+ if (disk->priv == MAP_FAILED)
die("mmap() failed");
- return self;
+ return disk;
}
-static ssize_t raw_image__read_sector_iov(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount)
+static ssize_t raw_image__read_sector_iov(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount)
{
u64 offset = sector << SECTOR_SHIFT;
- return preadv_in_full(self->fd, iov, iovcount, offset);
+ return preadv_in_full(disk->fd, iov, iovcount, offset);
}
-static ssize_t raw_image__write_sector_iov(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount)
+static ssize_t raw_image__write_sector_iov(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount)
{
u64 offset = sector << SECTOR_SHIFT;
- return pwritev_in_full(self->fd, iov, iovcount, offset);
+ return pwritev_in_full(disk->fd, iov, iovcount, offset);
}
-static int raw_image__read_sector_ro_mmap(struct disk_image *self, u64 sector, void *dst, u32 dst_len)
+static int raw_image__read_sector_ro_mmap(struct disk_image *disk, u64 sector, void *dst, u32 dst_len)
{
u64 offset = sector << SECTOR_SHIFT;
- if (offset + dst_len > self->size)
+ if (offset + dst_len > disk->size)
return -1;
- memcpy(dst, self->priv + offset, dst_len);
+ memcpy(dst, disk->priv + offset, dst_len);
return 0;
}
-static int raw_image__write_sector_ro_mmap(struct disk_image *self, u64 sector, void *src, u32 src_len)
+static int raw_image__write_sector_ro_mmap(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
u64 offset = sector << SECTOR_SHIFT;
- if (offset + src_len > self->size)
+ if (offset + src_len > disk->size)
return -1;
- memcpy(self->priv + offset, src, src_len);
+ memcpy(disk->priv + offset, src, src_len);
return 0;
}
-static void raw_image__close_ro_mmap(struct disk_image *self)
+static void raw_image__close_ro_mmap(struct disk_image *disk)
{
- if (self->priv != MAP_FAILED)
- munmap(self->priv, self->size);
+ if (disk->priv != MAP_FAILED)
+ munmap(disk->priv, disk->size);
}
static struct disk_image_operations raw_image_ops = {
@@ -130,7 +130,7 @@ static struct disk_image *blkdev__probe(const char *filename, struct stat *st)
struct disk_image *disk_image__open(const char *filename, bool readonly)
{
- struct disk_image *self;
+ struct disk_image *disk;
struct stat st;
int fd;
@@ -144,13 +144,13 @@ struct disk_image *disk_image__open(const char *filename, bool readonly)
if (fd < 0)
return NULL;
- self = qcow_probe(fd);
- if (self)
- return self;
+ disk = qcow_probe(fd);
+ if (disk)
+ return disk;
- self = raw_image__probe(fd, &st, readonly);
- if (self)
- return self;
+ disk = raw_image__probe(fd, &st, readonly);
+ if (disk)
+ return disk;
if (close(fd) < 0)
warning("close() failed");
@@ -158,17 +158,17 @@ struct disk_image *disk_image__open(const char *filename, bool readonly)
return NULL;
}
-void disk_image__close(struct disk_image *self)
+void disk_image__close(struct disk_image *disk)
{
/* If there was no disk image then there's nothing to do: */
- if (!self)
+ if (!disk)
return;
- if (self->ops->close)
- self->ops->close(self);
+ if (disk->ops->close)
+ disk->ops->close(disk);
- if (close(self->fd) < 0)
+ if (close(disk->fd) < 0)
warning("close() failed");
- free(self);
+ free(disk);
}
@@ -11,11 +11,11 @@
struct disk_image;
struct disk_image_operations {
- int (*read_sector)(struct disk_image *self, u64 sector, void *dst, u32 dst_len);
- int (*write_sector)(struct disk_image *self, u64 sector, void *src, u32 src_len);
- ssize_t (*read_sector_iov)(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount);
- ssize_t (*write_sector_iov)(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount);
- void (*close)(struct disk_image *self);
+ int (*read_sector)(struct disk_image *disk, u64 sector, void *dst, u32 dst_len);
+ int (*write_sector)(struct disk_image *disk, u64 sector, void *src, u32 src_len);
+ ssize_t (*read_sector_iov)(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount);
+ ssize_t (*write_sector_iov)(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount);
+ void (*close)(struct disk_image *disk);
};
struct disk_image {
@@ -28,25 +28,25 @@ struct disk_image {
struct disk_image *disk_image__open(const char *filename, bool readonly);
struct disk_image *disk_image__new(int fd, u64 size, struct disk_image_operations *ops);
struct disk_image *disk_image__new_readonly(int fd, u64 size, struct disk_image_operations *ops);
-void disk_image__close(struct disk_image *self);
+void disk_image__close(struct disk_image *disk);
-static inline int disk_image__read_sector(struct disk_image *self, u64 sector, void *dst, u32 dst_len)
+static inline int disk_image__read_sector(struct disk_image *disk, u64 sector, void *dst, u32 dst_len)
{
- return self->ops->read_sector(self, sector, dst, dst_len);
+ return disk->ops->read_sector(disk, sector, dst, dst_len);
}
-static inline int disk_image__write_sector(struct disk_image *self, u64 sector, void *src, u32 src_len)
+static inline int disk_image__write_sector(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
- return self->ops->write_sector(self, sector, src, src_len);
+ return disk->ops->write_sector(disk, sector, src, src_len);
}
-static inline ssize_t disk_image__read_sector_iov(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount)
+static inline ssize_t disk_image__read_sector_iov(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount)
{
- if (self->ops->read_sector_iov)
- return self->ops->read_sector_iov(self, sector, iov, iovcount);
+ if (disk->ops->read_sector_iov)
+ return disk->ops->read_sector_iov(disk, sector, iov, iovcount);
while (iovcount--) {
- self->ops->read_sector(self, sector, iov->iov_base, iov->iov_len);
+ disk->ops->read_sector(disk, sector, iov->iov_base, iov->iov_len);
sector += iov->iov_len >> SECTOR_SHIFT;
iov++;
}
@@ -54,13 +54,13 @@ static inline ssize_t disk_image__read_sector_iov(struct disk_image *self, u64 s
return sector << SECTOR_SHIFT;
}
-static inline ssize_t disk_image__write_sector_iov(struct disk_image *self, u64 sector, const struct iovec *iov, int iovcount)
+static inline ssize_t disk_image__write_sector_iov(struct disk_image *disk, u64 sector, const struct iovec *iov, int iovcount)
{
- if (self->ops->write_sector_iov)
- return self->ops->write_sector_iov(self, sector, iov, iovcount);
+ if (disk->ops->write_sector_iov)
+ return disk->ops->write_sector_iov(disk, sector, iov, iovcount);
while (iovcount--) {
- self->ops->write_sector(self, sector, iov->iov_base, iov->iov_len);
+ disk->ops->write_sector(disk, sector, iov->iov_base, iov->iov_len);
sector += iov->iov_len >> SECTOR_SHIFT;
iov++;
}
@@ -19,8 +19,8 @@ struct interrupt_table {
struct real_intr_desc entries[REAL_INTR_VECTORS];
};
-void interrupt_table__copy(struct interrupt_table *self, void *dst, unsigned int size);
-void interrupt_table__setup(struct interrupt_table *self, struct real_intr_desc *entry);
-void interrupt_table__set(struct interrupt_table *self, struct real_intr_desc *entry, unsigned int num);
+void interrupt_table__copy(struct interrupt_table *itable, void *dst, unsigned int size);
+void interrupt_table__setup(struct interrupt_table *itable, struct real_intr_desc *entry);
+void interrupt_table__set(struct interrupt_table *itable, struct real_intr_desc *entry, unsigned int num);
#endif /* KVM__INTERRUPT_H */
@@ -19,8 +19,8 @@
struct kvm;
struct ioport_operations {
- bool (*io_in)(struct kvm *self, u16 port, void *data, int size, u32 count);
- bool (*io_out)(struct kvm *self, u16 port, void *data, int size, u32 count);
+ bool (*io_in)(struct kvm *kvm, u16 port, void *data, int size, u32 count);
+ bool (*io_out)(struct kvm *kvm, u16 port, void *data, int size, u32 count);
};
void ioport__setup_legacy(void);
@@ -24,15 +24,15 @@ struct kvm_cpu {
};
struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id);
-void kvm_cpu__delete(struct kvm_cpu *self);
-void kvm_cpu__reset_vcpu(struct kvm_cpu *self);
-void kvm_cpu__setup_cpuid(struct kvm_cpu *self);
-void kvm_cpu__enable_singlestep(struct kvm_cpu *self);
-void kvm_cpu__run(struct kvm_cpu *self);
+void kvm_cpu__delete(struct kvm_cpu *vcpu);
+void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu);
+void kvm_cpu__setup_cpuid(struct kvm_cpu *vcpu);
+void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu);
+void kvm_cpu__run(struct kvm_cpu *vcpu);
int kvm_cpu__start(struct kvm_cpu *cpu);
-void kvm_cpu__show_code(struct kvm_cpu *self);
-void kvm_cpu__show_registers(struct kvm_cpu *self);
-void kvm_cpu__show_page_tables(struct kvm_cpu *self);
+void kvm_cpu__show_code(struct kvm_cpu *vcpu);
+void kvm_cpu__show_registers(struct kvm_cpu *vcpu);
+void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu);
#endif /* KVM__KVM_CPU_H */
@@ -31,28 +31,28 @@ struct kvm {
};
struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size);
-int kvm__max_cpus(struct kvm *self);
-void kvm__init_ram(struct kvm *self);
-void kvm__delete(struct kvm *self);
+int kvm__max_cpus(struct kvm *kvm);
+void kvm__init_ram(struct kvm *kvm);
+void kvm__delete(struct kvm *kvm);
bool kvm__load_kernel(struct kvm *kvm, const char *kernel_filename,
const char *initrd_filename, const char *kernel_cmdline);
-void kvm__setup_bios(struct kvm *self);
-void kvm__start_timer(struct kvm *self);
-void kvm__stop_timer(struct kvm *self);
-void kvm__irq_line(struct kvm *self, int irq, int level);
-bool kvm__emulate_io(struct kvm *self, u16 port, void *data, int direction, int size, u32 count);
-bool kvm__emulate_mmio(struct kvm *self, u64 phys_addr, u8 *data, u32 len, u8 is_write);
+void kvm__setup_bios(struct kvm *kvm);
+void kvm__start_timer(struct kvm *kvm);
+void kvm__stop_timer(struct kvm *kvm);
+void kvm__irq_line(struct kvm *kvm, int irq, int level);
+bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count);
+bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write);
/*
* Debugging
*/
-void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size);
+void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size);
extern const char *kvm_exit_reasons[];
-static inline bool host_ptr_in_ram(struct kvm *self, void *p)
+static inline bool host_ptr_in_ram(struct kvm *kvm, void *p)
{
- return self->ram_start <= p && p < (self->ram_start + self->ram_size);
+ return kvm->ram_start <= p && p < (kvm->ram_start + kvm->ram_size);
}
static inline u32 segment_to_flat(u16 selector, u16 offset)
@@ -60,16 +60,16 @@ static inline u32 segment_to_flat(u16 selector, u16 offset)
return ((u32)selector << 4) + (u32) offset;
}
-static inline void *guest_flat_to_host(struct kvm *self, unsigned long offset)
+static inline void *guest_flat_to_host(struct kvm *kvm, unsigned long offset)
{
- return self->ram_start + offset;
+ return kvm->ram_start + offset;
}
-static inline void *guest_real_to_host(struct kvm *self, u16 selector, u16 offset)
+static inline void *guest_real_to_host(struct kvm *kvm, u16 selector, u16 offset)
{
unsigned long flat = segment_to_flat(selector, offset);
- return guest_flat_to_host(self, flat);
+ return guest_flat_to_host(kvm, flat);
}
#endif /* KVM__KVM_H */
@@ -5,6 +5,6 @@
struct kvm;
-void virtio_blk__init(struct kvm *self, struct disk_image *disk);
+void virtio_blk__init(struct kvm *kvm, struct disk_image *disk);
#endif /* KVM__BLK_VIRTIO_H */
@@ -3,7 +3,7 @@
struct kvm;
-void virtio_console__init(struct kvm *self);
-void virtio_console__inject_interrupt(struct kvm *self);
+void virtio_console__init(struct kvm *kvm);
+void virtio_console__inject_interrupt(struct kvm *kvm);
#endif /* KVM__CONSOLE_VIRTIO_H */
@@ -4,7 +4,7 @@
struct kvm;
struct virtio_net_parameters {
- struct kvm *self;
+ struct kvm *kvm;
const char *host_ip;
char guest_mac[6];
const char *script;
@@ -4,24 +4,24 @@
#include <string.h>
-void interrupt_table__copy(struct interrupt_table *self, void *dst, unsigned int size)
+void interrupt_table__copy(struct interrupt_table *itable, void *dst, unsigned int size)
{
- if (size < sizeof(self->entries))
+ if (size < sizeof(itable->entries))
die("An attempt to overwrite host memory");
- memcpy(dst, self->entries, sizeof(self->entries));
+ memcpy(dst, itable->entries, sizeof(itable->entries));
}
-void interrupt_table__setup(struct interrupt_table *self, struct real_intr_desc *entry)
+void interrupt_table__setup(struct interrupt_table *itable, struct real_intr_desc *entry)
{
unsigned int i;
for (i = 0; i < REAL_INTR_VECTORS; i++)
- self->entries[i] = *entry;
+ itable->entries[i] = *entry;
}
-void interrupt_table__set(struct interrupt_table *self, struct real_intr_desc *entry, unsigned int num)
+void interrupt_table__set(struct interrupt_table *itable, struct real_intr_desc *entry, unsigned int num)
{
if (num < REAL_INTR_VECTORS)
- self->entries[num] = *entry;
+ itable->entries[num] = *entry;
}
@@ -13,7 +13,7 @@
bool ioport_debug;
-static bool debug_io_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool debug_io_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
exit(EXIT_SUCCESS);
}
@@ -22,12 +22,12 @@ static struct ioport_operations debug_ops = {
.io_out = debug_io_out,
};
-static bool dummy_io_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool dummy_io_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
return true;
}
-static bool dummy_io_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool dummy_io_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
return true;
}
@@ -64,7 +64,7 @@ static void ioport_error(u16 port, void *data, int direction, int size, u32 coun
fprintf(stderr, "IO error: %s port=%x, size=%d, count=%u\n", to_direction(direction), port, size, count);
}
-bool kvm__emulate_io(struct kvm *self, u16 port, void *data, int direction, int size, u32 count)
+bool kvm__emulate_io(struct kvm *kvm, u16 port, void *data, int direction, int size, u32 count)
{
struct ioport_operations *ops = ioport_ops[port];
bool ret;
@@ -76,14 +76,14 @@ bool kvm__emulate_io(struct kvm *self, u16 port, void *data, int direction, int
if (!ops->io_in)
goto error;
- ret = ops->io_in(self, port, data, size, count);
+ ret = ops->io_in(kvm, port, data, size, count);
if (!ret)
goto error;
} else {
if (!ops->io_out)
goto error;
- ret = ops->io_out(self, port, data, size, count);
+ ret = ops->io_out(kvm, port, data, size, count);
if (!ret)
goto error;
}
@@ -12,12 +12,12 @@
#include <errno.h>
#include <stdio.h>
-static inline bool is_in_protected_mode(struct kvm_cpu *self)
+static inline bool is_in_protected_mode(struct kvm_cpu *vcpu)
{
- return self->sregs.cr0 & 0x01;
+ return vcpu->sregs.cr0 & 0x01;
}
-static inline u64 ip_to_flat(struct kvm_cpu *self, u64 ip)
+static inline u64 ip_to_flat(struct kvm_cpu *vcpu, u64 ip)
{
u64 cs;
@@ -25,10 +25,10 @@ static inline u64 ip_to_flat(struct kvm_cpu *self, u64 ip)
* NOTE! We should take code segment base address into account here.
* Luckily it's usually zero because Linux uses flat memory model.
*/
- if (is_in_protected_mode(self))
+ if (is_in_protected_mode(vcpu))
return ip;
- cs = self->sregs.cs.selector;
+ cs = vcpu->sregs.cs.selector;
return ip + (cs << 4);
}
@@ -43,159 +43,159 @@ static inline u32 selector_to_base(u16 selector)
static struct kvm_cpu *kvm_cpu__new(struct kvm *kvm)
{
- struct kvm_cpu *self;
+ struct kvm_cpu *vcpu;
- self = calloc(1, sizeof *self);
- if (!self)
+ vcpu = calloc(1, sizeof *vcpu);
+ if (!vcpu)
return NULL;
- self->kvm = kvm;
+ vcpu->kvm = kvm;
- return self;
+ return vcpu;
}
-void kvm_cpu__delete(struct kvm_cpu *self)
+void kvm_cpu__delete(struct kvm_cpu *vcpu)
{
- if (self->msrs)
- free(self->msrs);
+ if (vcpu->msrs)
+ free(vcpu->msrs);
- free(self);
+ free(vcpu);
}
struct kvm_cpu *kvm_cpu__init(struct kvm *kvm, unsigned long cpu_id)
{
- struct kvm_cpu *self;
+ struct kvm_cpu *vcpu;
int mmap_size;
- self = kvm_cpu__new(kvm);
- if (!self)
+ vcpu = kvm_cpu__new(kvm);
+ if (!vcpu)
return NULL;
- self->cpu_id = cpu_id;
+ vcpu->cpu_id = cpu_id;
- self->vcpu_fd = ioctl(self->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
- if (self->vcpu_fd < 0)
+ vcpu->vcpu_fd = ioctl(vcpu->kvm->vm_fd, KVM_CREATE_VCPU, cpu_id);
+ if (vcpu->vcpu_fd < 0)
die_perror("KVM_CREATE_VCPU ioctl");
- mmap_size = ioctl(self->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
+ mmap_size = ioctl(vcpu->kvm->sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0)
die_perror("KVM_GET_VCPU_MMAP_SIZE ioctl");
- self->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
- if (self->kvm_run == MAP_FAILED)
+ vcpu->kvm_run = mmap(NULL, mmap_size, PROT_READ|PROT_WRITE, MAP_SHARED, vcpu->vcpu_fd, 0);
+ if (vcpu->kvm_run == MAP_FAILED)
die("unable to mmap vcpu fd");
- return self;
+ return vcpu;
}
-void kvm_cpu__enable_singlestep(struct kvm_cpu *self)
+void kvm_cpu__enable_singlestep(struct kvm_cpu *vcpu)
{
struct kvm_guest_debug debug = {
.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
};
- if (ioctl(self->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_GUEST_DEBUG, &debug) < 0)
warning("KVM_SET_GUEST_DEBUG failed");
}
static struct kvm_msrs *kvm_msrs__new(size_t nmsrs)
{
- struct kvm_msrs *self = calloc(1, sizeof(*self) + (sizeof(struct kvm_msr_entry) * nmsrs));
+ struct kvm_msrs *vcpu = calloc(1, sizeof(*vcpu) + (sizeof(struct kvm_msr_entry) * nmsrs));
- if (!self)
+ if (!vcpu)
die("out of memory");
- return self;
+ return vcpu;
}
#define KVM_MSR_ENTRY(_index, _data) \
(struct kvm_msr_entry) { .index = _index, .data = _data }
-static void kvm_cpu__setup_msrs(struct kvm_cpu *self)
+static void kvm_cpu__setup_msrs(struct kvm_cpu *vcpu)
{
unsigned long ndx = 0;
- self->msrs = kvm_msrs__new(100);
+ vcpu->msrs = kvm_msrs__new(100);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_CS, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_ESP, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_SYSENTER_EIP, 0x0);
#ifdef CONFIG_X86_64
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_STAR, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_CSTAR, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_KERNEL_GS_BASE, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_SYSCALL_MASK, 0x0);
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_LSTAR, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_STAR, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_CSTAR, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_KERNEL_GS_BASE, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_SYSCALL_MASK, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_LSTAR, 0x0);
#endif
- self->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TSC, 0x0);
+ vcpu->msrs->entries[ndx++] = KVM_MSR_ENTRY(MSR_IA32_TSC, 0x0);
- self->msrs->nmsrs = ndx;
+ vcpu->msrs->nmsrs = ndx;
- if (ioctl(self->vcpu_fd, KVM_SET_MSRS, self->msrs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_MSRS, vcpu->msrs) < 0)
die_perror("KVM_SET_MSRS failed");
}
-static void kvm_cpu__setup_fpu(struct kvm_cpu *self)
+static void kvm_cpu__setup_fpu(struct kvm_cpu *vcpu)
{
- self->fpu = (struct kvm_fpu) {
+ vcpu->fpu = (struct kvm_fpu) {
.fcw = 0x37f,
.mxcsr = 0x1f80,
};
- if (ioctl(self->vcpu_fd, KVM_SET_FPU, &self->fpu) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_FPU, &vcpu->fpu) < 0)
die_perror("KVM_SET_FPU failed");
}
-static void kvm_cpu__setup_regs(struct kvm_cpu *self)
+static void kvm_cpu__setup_regs(struct kvm_cpu *vcpu)
{
- self->regs = (struct kvm_regs) {
+ vcpu->regs = (struct kvm_regs) {
/* We start the guest in 16-bit real mode */
.rflags = 0x0000000000000002ULL,
- .rip = self->kvm->boot_ip,
- .rsp = self->kvm->boot_sp,
- .rbp = self->kvm->boot_sp,
+ .rip = vcpu->kvm->boot_ip,
+ .rsp = vcpu->kvm->boot_sp,
+ .rbp = vcpu->kvm->boot_sp,
};
- if (self->regs.rip > USHRT_MAX)
- die("ip 0x%llx is too high for real mode", (u64) self->regs.rip);
+ if (vcpu->regs.rip > USHRT_MAX)
+ die("ip 0x%llx is too high for real mode", (u64) vcpu->regs.rip);
- if (ioctl(self->vcpu_fd, KVM_SET_REGS, &self->regs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_REGS, &vcpu->regs) < 0)
die_perror("KVM_SET_REGS failed");
}
-static void kvm_cpu__setup_sregs(struct kvm_cpu *self)
+static void kvm_cpu__setup_sregs(struct kvm_cpu *vcpu)
{
- if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
die_perror("KVM_GET_SREGS failed");
- self->sregs.cs.selector = self->kvm->boot_selector;
- self->sregs.cs.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.ss.selector = self->kvm->boot_selector;
- self->sregs.ss.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.ds.selector = self->kvm->boot_selector;
- self->sregs.ds.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.es.selector = self->kvm->boot_selector;
- self->sregs.es.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.fs.selector = self->kvm->boot_selector;
- self->sregs.fs.base = selector_to_base(self->kvm->boot_selector);
- self->sregs.gs.selector = self->kvm->boot_selector;
- self->sregs.gs.base = selector_to_base(self->kvm->boot_selector);
-
- if (ioctl(self->vcpu_fd, KVM_SET_SREGS, &self->sregs) < 0)
+ vcpu->sregs.cs.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.cs.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.ss.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.ss.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.ds.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.ds.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.es.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.es.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.fs.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.fs.base = selector_to_base(vcpu->kvm->boot_selector);
+ vcpu->sregs.gs.selector = vcpu->kvm->boot_selector;
+ vcpu->sregs.gs.base = selector_to_base(vcpu->kvm->boot_selector);
+
+ if (ioctl(vcpu->vcpu_fd, KVM_SET_SREGS, &vcpu->sregs) < 0)
die_perror("KVM_SET_SREGS failed");
}
/**
* kvm_cpu__reset_vcpu - reset virtual CPU to a known state
*/
-void kvm_cpu__reset_vcpu(struct kvm_cpu *self)
+void kvm_cpu__reset_vcpu(struct kvm_cpu *vcpu)
{
- kvm_cpu__setup_sregs(self);
- kvm_cpu__setup_regs(self);
- kvm_cpu__setup_fpu(self);
- kvm_cpu__setup_msrs(self);
+ kvm_cpu__setup_sregs(vcpu);
+ kvm_cpu__setup_regs(vcpu);
+ kvm_cpu__setup_fpu(vcpu);
+ kvm_cpu__setup_msrs(vcpu);
}
static void print_dtable(const char *name, struct kvm_dtable *dtable)
@@ -211,7 +211,7 @@ static void print_segment(const char *name, struct kvm_segment *seg)
(u8) seg->type, seg->present, seg->dpl, seg->db, seg->s, seg->l, seg->g, seg->avl);
}
-void kvm_cpu__show_registers(struct kvm_cpu *self)
+void kvm_cpu__show_registers(struct kvm_cpu *vcpu)
{
unsigned long cr0, cr2, cr3;
unsigned long cr4, cr8;
@@ -226,7 +226,7 @@ void kvm_cpu__show_registers(struct kvm_cpu *self)
struct kvm_regs regs;
int i;
- if (ioctl(self->vcpu_fd, KVM_GET_REGS, ®s) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, ®s) < 0)
die("KVM_GET_REGS failed");
rflags = regs.rflags;
@@ -247,7 +247,7 @@ void kvm_cpu__show_registers(struct kvm_cpu *self)
printf(" r10: %016lx r11: %016lx r12: %016lx\n", r10, r11, r12);
printf(" r13: %016lx r14: %016lx r15: %016lx\n", r13, r14, r15);
- if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
die("KVM_GET_REGS failed");
cr0 = sregs.cr0; cr2 = sregs.cr2; cr3 = sregs.cr3;
@@ -273,7 +273,7 @@ void kvm_cpu__show_registers(struct kvm_cpu *self)
printf( " -----\n");
printf(" efer: %016llx apic base: %016llx nmi: %s\n",
(u64) sregs.efer, (u64) sregs.apic_base,
- (self->kvm->nmi_disabled ? "disabled" : "enabled"));
+ (vcpu->kvm->nmi_disabled ? "disabled" : "enabled"));
printf("\n Interrupt bitmap:\n");
printf( " -----------------\n");
@@ -282,7 +282,7 @@ void kvm_cpu__show_registers(struct kvm_cpu *self)
printf("\n");
}
-void kvm_cpu__show_code(struct kvm_cpu *self)
+void kvm_cpu__show_code(struct kvm_cpu *vcpu)
{
unsigned int code_bytes = 64;
unsigned int code_prologue = code_bytes * 43 / 64;
@@ -291,24 +291,24 @@ void kvm_cpu__show_code(struct kvm_cpu *self)
unsigned int i;
u8 *ip;
- if (ioctl(self->vcpu_fd, KVM_GET_REGS, &self->regs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_REGS, &vcpu->regs) < 0)
die("KVM_GET_REGS failed");
- if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
die("KVM_GET_SREGS failed");
- ip = guest_flat_to_host(self->kvm, ip_to_flat(self, self->regs.rip) - code_prologue);
+ ip = guest_flat_to_host(vcpu->kvm, ip_to_flat(vcpu, vcpu->regs.rip) - code_prologue);
printf("\n Code:\n");
printf( " -----\n");
for (i = 0; i < code_len; i++, ip++) {
- if (!host_ptr_in_ram(self->kvm, ip))
+ if (!host_ptr_in_ram(vcpu->kvm, ip))
break;
c = *ip;
- if (ip == guest_flat_to_host(self->kvm, ip_to_flat(self, self->regs.rip)))
+ if (ip == guest_flat_to_host(vcpu->kvm, ip_to_flat(vcpu, vcpu->regs.rip)))
printf(" <%02x>", c);
else
printf(" %02x", c);
@@ -318,36 +318,36 @@ void kvm_cpu__show_code(struct kvm_cpu *self)
printf("\n Stack:\n");
printf( " ------\n");
- kvm__dump_mem(self->kvm, self->regs.rsp, 32);
+ kvm__dump_mem(vcpu->kvm, vcpu->regs.rsp, 32);
}
-void kvm_cpu__show_page_tables(struct kvm_cpu *self)
+void kvm_cpu__show_page_tables(struct kvm_cpu *vcpu)
{
u64 *pte1;
u64 *pte2;
u64 *pte3;
u64 *pte4;
- if (!is_in_protected_mode(self))
+ if (!is_in_protected_mode(vcpu))
return;
- if (ioctl(self->vcpu_fd, KVM_GET_SREGS, &self->sregs) < 0)
+ if (ioctl(vcpu->vcpu_fd, KVM_GET_SREGS, &vcpu->sregs) < 0)
die("KVM_GET_SREGS failed");
- pte4 = guest_flat_to_host(self->kvm, self->sregs.cr3);
- if (!host_ptr_in_ram(self->kvm, pte4))
+ pte4 = guest_flat_to_host(vcpu->kvm, vcpu->sregs.cr3);
+ if (!host_ptr_in_ram(vcpu->kvm, pte4))
return;
- pte3 = guest_flat_to_host(self->kvm, (*pte4 & ~0xfff));
- if (!host_ptr_in_ram(self->kvm, pte3))
+ pte3 = guest_flat_to_host(vcpu->kvm, (*pte4 & ~0xfff));
+ if (!host_ptr_in_ram(vcpu->kvm, pte3))
return;
- pte2 = guest_flat_to_host(self->kvm, (*pte3 & ~0xfff));
- if (!host_ptr_in_ram(self->kvm, pte2))
+ pte2 = guest_flat_to_host(vcpu->kvm, (*pte3 & ~0xfff));
+ if (!host_ptr_in_ram(vcpu->kvm, pte2))
return;
- pte1 = guest_flat_to_host(self->kvm, (*pte2 & ~0xfff));
- if (!host_ptr_in_ram(self->kvm, pte1))
+ pte1 = guest_flat_to_host(vcpu->kvm, (*pte2 & ~0xfff));
+ if (!host_ptr_in_ram(vcpu->kvm, pte1))
return;
printf("Page Tables:\n");
@@ -361,11 +361,11 @@ void kvm_cpu__show_page_tables(struct kvm_cpu *self)
*pte4, *pte3, *pte2, *pte1);
}
-void kvm_cpu__run(struct kvm_cpu *self)
+void kvm_cpu__run(struct kvm_cpu *vcpu)
{
int err;
- err = ioctl(self->vcpu_fd, KVM_RUN, 0);
+ err = ioctl(vcpu->vcpu_fd, KVM_RUN, 0);
if (err && (errno != EINTR && errno != EAGAIN))
die_perror("KVM_RUN failed");
}
@@ -496,7 +496,7 @@ int kvm_cmd_run(int argc, const char **argv, const char *prefix)
if (!strncmp(network, "virtio", 6)) {
net_params = (struct virtio_net_parameters) {
.host_ip = host_ip_addr,
- .self = kvm,
+ .kvm = kvm,
.script = script
};
sscanf(guest_mac, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
@@ -67,23 +67,23 @@ struct {
{ DEFINE_KVM_EXT(KVM_CAP_EXT_CPUID) },
};
-static bool kvm__supports_extension(struct kvm *self, unsigned int extension)
+static bool kvm__supports_extension(struct kvm *kvm, unsigned int extension)
{
int ret;
- ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, extension);
+ ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, extension);
if (ret < 0)
return false;
return ret;
}
-static int kvm__check_extensions(struct kvm *self)
+static int kvm__check_extensions(struct kvm *kvm)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(kvm_req_ext); i++) {
- if (!kvm__supports_extension(self, kvm_req_ext[i].code)) {
+ if (!kvm__supports_extension(kvm, kvm_req_ext[i].code)) {
error("Unsuppored KVM extension detected: %s",
kvm_req_ext[i].name);
return (int)-i;
@@ -95,20 +95,20 @@ static int kvm__check_extensions(struct kvm *self)
static struct kvm *kvm__new(void)
{
- struct kvm *self = calloc(1, sizeof *self);
+ struct kvm *kvm = calloc(1, sizeof *kvm);
- if (!self)
+ if (!kvm)
die("out of memory");
- return self;
+ return kvm;
}
-void kvm__delete(struct kvm *self)
+void kvm__delete(struct kvm *kvm)
{
- kvm__stop_timer(self);
+ kvm__stop_timer(kvm);
- munmap(self->ram_start, self->ram_size);
- free(self);
+ munmap(kvm->ram_start, kvm->ram_size);
+ free(kvm);
}
static bool kvm__cpu_supports_vm(void)
@@ -179,43 +179,43 @@ static void kvm_register_mem_slot(struct kvm *kvm, u32 slot, u64 guest_phys, u64
* a gap between 0xe0000000 and 0x100000000 in the guest virtual mem space.
*/
-void kvm__init_ram(struct kvm *self)
+void kvm__init_ram(struct kvm *kvm)
{
u64 phys_start, phys_size;
void *host_mem;
- if (self->ram_size < KVM_32BIT_GAP_START) {
+ if (kvm->ram_size < KVM_32BIT_GAP_START) {
/* Use a single block of RAM for 32bit RAM */
phys_start = 0;
- phys_size = self->ram_size;
- host_mem = self->ram_start;
+ phys_size = kvm->ram_size;
+ host_mem = kvm->ram_start;
- kvm_register_mem_slot(self, 0, 0, self->ram_size, self->ram_start);
+ kvm_register_mem_slot(kvm, 0, 0, kvm->ram_size, kvm->ram_start);
} else {
/* First RAM range from zero to the PCI gap: */
phys_start = 0;
phys_size = KVM_32BIT_GAP_START;
- host_mem = self->ram_start;
+ host_mem = kvm->ram_start;
- kvm_register_mem_slot(self, 0, phys_start, phys_size, host_mem);
+ kvm_register_mem_slot(kvm, 0, phys_start, phys_size, host_mem);
/* Second RAM range from 4GB to the end of RAM: */
phys_start = 0x100000000ULL;
- phys_size = self->ram_size - phys_size;
- host_mem = self->ram_start + phys_start;
+ phys_size = kvm->ram_size - phys_size;
+ host_mem = kvm->ram_start + phys_start;
- kvm_register_mem_slot(self, 1, phys_start, phys_size, host_mem);
+ kvm_register_mem_slot(kvm, 1, phys_start, phys_size, host_mem);
}
}
-int kvm__max_cpus(struct kvm *self)
+int kvm__max_cpus(struct kvm *kvm)
{
int ret;
- ret = ioctl(self->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
+ ret = ioctl(kvm->sys_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
if (ret < 0)
die_perror("KVM_CAP_NR_VCPUS");
@@ -225,16 +225,16 @@ int kvm__max_cpus(struct kvm *self)
struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
{
struct kvm_pit_config pit_config = { .flags = 0, };
- struct kvm *self;
+ struct kvm *kvm;
int ret;
if (!kvm__cpu_supports_vm())
die("Your CPU does not support hardware virtualization");
- self = kvm__new();
+ kvm = kvm__new();
- self->sys_fd = open(kvm_dev, O_RDWR);
- if (self->sys_fd < 0) {
+ kvm->sys_fd = open(kvm_dev, O_RDWR);
+ if (kvm->sys_fd < 0) {
if (errno == ENOENT)
die("'%s' not found. Please make sure your kernel has CONFIG_KVM enabled and that the KVM modules are loaded.", kvm_dev);
if (errno == ENODEV)
@@ -245,47 +245,47 @@ struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
exit(1);
}
- ret = ioctl(self->sys_fd, KVM_GET_API_VERSION, 0);
+ ret = ioctl(kvm->sys_fd, KVM_GET_API_VERSION, 0);
if (ret != KVM_API_VERSION)
die_perror("KVM_API_VERSION ioctl");
- self->vm_fd = ioctl(self->sys_fd, KVM_CREATE_VM, 0);
- if (self->vm_fd < 0)
+ kvm->vm_fd = ioctl(kvm->sys_fd, KVM_CREATE_VM, 0);
+ if (kvm->vm_fd < 0)
die_perror("KVM_CREATE_VM ioctl");
- if (kvm__check_extensions(self))
+ if (kvm__check_extensions(kvm))
die("A required KVM extention is not supported by OS");
- ret = ioctl(self->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
+ ret = ioctl(kvm->vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000);
if (ret < 0)
die_perror("KVM_SET_TSS_ADDR ioctl");
- ret = ioctl(self->vm_fd, KVM_CREATE_PIT2, &pit_config);
+ ret = ioctl(kvm->vm_fd, KVM_CREATE_PIT2, &pit_config);
if (ret < 0)
die_perror("KVM_CREATE_PIT2 ioctl");
- self->ram_size = ram_size;
+ kvm->ram_size = ram_size;
- if (self->ram_size < KVM_32BIT_GAP_START) {
- self->ram_start = mmap(NULL, ram_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+ if (kvm->ram_size < KVM_32BIT_GAP_START) {
+ kvm->ram_start = mmap(NULL, ram_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
} else {
- self->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
- if (self->ram_start != MAP_FAILED) {
+ kvm->ram_start = mmap(NULL, ram_size + KVM_32BIT_GAP_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+ if (kvm->ram_start != MAP_FAILED) {
/*
* We mprotect the gap (see kvm__init_ram() for details) PROT_NONE so that
* if we accidently write to it, we will know.
*/
- mprotect(self->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
+ mprotect(kvm->ram_start + KVM_32BIT_GAP_START, KVM_32BIT_GAP_SIZE, PROT_NONE);
}
}
- if (self->ram_start == MAP_FAILED)
+ if (kvm->ram_start == MAP_FAILED)
die("out of memory");
- ret = ioctl(self->vm_fd, KVM_CREATE_IRQCHIP);
+ ret = ioctl(kvm->vm_fd, KVM_CREATE_IRQCHIP);
if (ret < 0)
die_perror("KVM_CREATE_IRQCHIP ioctl");
- return self;
+ return kvm;
}
#define BOOT_LOADER_SELECTOR 0x1000
@@ -296,7 +296,7 @@ struct kvm *kvm__init(const char *kvm_dev, unsigned long ram_size)
#define BOOT_PROTOCOL_REQUIRED 0x206
#define LOAD_HIGH 0x01
-static int load_flat_binary(struct kvm *self, int fd)
+static int load_flat_binary(struct kvm *kvm, int fd)
{
void *p;
int nr;
@@ -304,21 +304,21 @@ static int load_flat_binary(struct kvm *self, int fd)
if (lseek(fd, 0, SEEK_SET) < 0)
die_perror("lseek");
- p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
+ p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
while ((nr = read(fd, p, 65536)) > 0)
p += nr;
- self->boot_selector = BOOT_LOADER_SELECTOR;
- self->boot_ip = BOOT_LOADER_IP;
- self->boot_sp = BOOT_LOADER_SP;
+ kvm->boot_selector = BOOT_LOADER_SELECTOR;
+ kvm->boot_ip = BOOT_LOADER_IP;
+ kvm->boot_sp = BOOT_LOADER_SP;
return true;
}
static const char *BZIMAGE_MAGIC = "HdrS";
-static bool load_bzimage(struct kvm *self, int fd_kernel,
+static bool load_bzimage(struct kvm *kvm, int fd_kernel,
int fd_initrd, const char *kernel_cmdline)
{
struct boot_params *kern_boot;
@@ -354,19 +354,19 @@ static bool load_bzimage(struct kvm *self, int fd_kernel,
setup_sects = boot.hdr.setup_sects + 1;
setup_size = setup_sects << 9;
- p = guest_real_to_host(self, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
+ p = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, BOOT_LOADER_IP);
/* copy setup.bin to mem*/
if (read(fd_kernel, p, setup_size) != setup_size)
die_perror("read");
/* copy vmlinux.bin to BZ_KERNEL_START*/
- p = guest_flat_to_host(self, BZ_KERNEL_START);
+ p = guest_flat_to_host(kvm, BZ_KERNEL_START);
while ((nr = read(fd_kernel, p, 65536)) > 0)
p += nr;
- p = guest_flat_to_host(self, BOOT_CMDLINE_OFFSET);
+ p = guest_flat_to_host(kvm, BOOT_CMDLINE_OFFSET);
if (kernel_cmdline) {
cmdline_size = strlen(kernel_cmdline) + 1;
if (cmdline_size > boot.hdr.cmdline_size)
@@ -376,7 +376,7 @@ static bool load_bzimage(struct kvm *self, int fd_kernel,
memcpy(p, kernel_cmdline, cmdline_size - 1);
}
- kern_boot = guest_real_to_host(self, BOOT_LOADER_SELECTOR, 0x00);
+ kern_boot = guest_real_to_host(kvm, BOOT_LOADER_SELECTOR, 0x00);
kern_boot->hdr.cmd_line_ptr = BOOT_CMDLINE_OFFSET;
kern_boot->hdr.type_of_loader = 0xff;
@@ -397,12 +397,12 @@ static bool load_bzimage(struct kvm *self, int fd_kernel,
for (;;) {
if (addr < BZ_KERNEL_START)
die("Not enough memory for initrd");
- else if (addr < (self->ram_size - initrd_stat.st_size))
+ else if (addr < (kvm->ram_size - initrd_stat.st_size))
break;
addr -= 0x100000;
}
- p = guest_flat_to_host(self, addr);
+ p = guest_flat_to_host(kvm, addr);
nr = read(fd_initrd, p, initrd_stat.st_size);
if (nr != initrd_stat.st_size)
die("Failed to read initrd");
@@ -411,13 +411,13 @@ static bool load_bzimage(struct kvm *self, int fd_kernel,
kern_boot->hdr.ramdisk_size = initrd_stat.st_size;
}
- self->boot_selector = BOOT_LOADER_SELECTOR;
+ kvm->boot_selector = BOOT_LOADER_SELECTOR;
/*
* The real-mode setup code starts at offset 0x200 of a bzImage. See
* Documentation/x86/boot.txt for details.
*/
- self->boot_ip = BOOT_LOADER_IP + 0x200;
- self->boot_sp = BOOT_LOADER_SP;
+ kvm->boot_ip = BOOT_LOADER_IP + 0x200;
+ kvm->boot_sp = BOOT_LOADER_SP;
return true;
}
@@ -464,20 +464,20 @@ found_kernel:
/**
* kvm__setup_bios - inject BIOS into guest system memory
- * @self - guest system descriptor
+ * @kvm - guest system descriptor
*
* This function is a main routine where we poke guest memory
* and install BIOS there.
*/
-void kvm__setup_bios(struct kvm *self)
+void kvm__setup_bios(struct kvm *kvm)
{
/* standart minimal configuration */
- setup_bios(self);
+ setup_bios(kvm);
/* FIXME: SMP, ACPI and friends here */
/* MP table */
- mptable_setup(self, self->nrcpus);
+ mptable_setup(kvm, kvm->nrcpus);
}
#define TIMER_INTERVAL_NS 1000000 /* 1 msec */
@@ -487,7 +487,7 @@ void kvm__setup_bios(struct kvm *self)
* userspace hypervisor into the guest at periodical intervals. Please note
* that clock interrupt, for example, is not handled here.
*/
-void kvm__start_timer(struct kvm *self)
+void kvm__start_timer(struct kvm *kvm)
{
struct itimerspec its;
struct sigevent sev;
@@ -497,7 +497,7 @@ void kvm__start_timer(struct kvm *self)
sev.sigev_notify = SIGEV_SIGNAL;
sev.sigev_signo = SIGALRM;
- if (timer_create(CLOCK_REALTIME, &sev, &self->timerid) < 0)
+ if (timer_create(CLOCK_REALTIME, &sev, &kvm->timerid) < 0)
die("timer_create()");
its.it_value.tv_sec = TIMER_INTERVAL_NS / 1000000000;
@@ -505,20 +505,20 @@ void kvm__start_timer(struct kvm *self)
its.it_interval.tv_sec = its.it_value.tv_sec;
its.it_interval.tv_nsec = its.it_value.tv_nsec;
- if (timer_settime(self->timerid, 0, &its, NULL) < 0)
+ if (timer_settime(kvm->timerid, 0, &its, NULL) < 0)
die("timer_settime()");
}
-void kvm__stop_timer(struct kvm *self)
+void kvm__stop_timer(struct kvm *kvm)
{
- if (self->timerid)
- if (timer_delete(self->timerid) < 0)
+ if (kvm->timerid)
+ if (timer_delete(kvm->timerid) < 0)
die("timer_delete()");
- self->timerid = 0;
+ kvm->timerid = 0;
}
-void kvm__irq_line(struct kvm *self, int irq, int level)
+void kvm__irq_line(struct kvm *kvm, int irq, int level)
{
struct kvm_irq_level irq_level;
@@ -529,11 +529,11 @@ void kvm__irq_line(struct kvm *self, int irq, int level)
.level = level,
};
- if (ioctl(self->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
+ if (ioctl(kvm->vm_fd, KVM_IRQ_LINE, &irq_level) < 0)
die_perror("KVM_IRQ_LINE failed");
}
-void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
+void kvm__dump_mem(struct kvm *kvm, unsigned long addr, unsigned long size)
{
unsigned char *p;
unsigned long n;
@@ -542,10 +542,10 @@ void kvm__dump_mem(struct kvm *self, unsigned long addr, unsigned long size)
if (!size)
return;
- p = guest_flat_to_host(self, addr);
+ p = guest_flat_to_host(kvm, addr);
for (n = 0; n < size; n += 8) {
- if (!host_ptr_in_ram(self, p + n))
+ if (!host_ptr_in_ram(kvm, p + n))
break;
printf(" 0x%08lx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
@@ -11,7 +11,7 @@ static const char *to_direction(u8 is_write)
return "read";
}
-bool kvm__emulate_mmio(struct kvm *self, u64 phys_addr, u8 *data, u32 len, u8 is_write)
+bool kvm__emulate_mmio(struct kvm *kvm, u64 phys_addr, u8 *data, u32 len, u8 is_write)
{
fprintf(stderr, "Warning: Ignoring MMIO %s at %016llx (length %u)\n",
to_direction(is_write), phys_addr, len);
@@ -260,7 +260,7 @@ void mptable_setup(struct kvm *kvm, unsigned int ncpus)
/*
* We will copy the whole table, no need to separate
- * floating structure and table itself.
+ * floating structure and table itself.
*/
size = (unsigned long)mpf_intel + sizeof(*mpf_intel) - (unsigned long)mpc_table;
@@ -21,7 +21,7 @@ static void *pci_config_address_ptr(u16 port)
return base + offset;
}
-static bool pci_config_address_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool pci_config_address_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
void *p = pci_config_address_ptr(port);
@@ -30,7 +30,7 @@ static bool pci_config_address_out(struct kvm *self, u16 port, void *data, int s
return true;
}
-static bool pci_config_address_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool pci_config_address_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
void *p = pci_config_address_ptr(port);
@@ -44,7 +44,7 @@ static struct ioport_operations pci_config_address_ops = {
.io_out = pci_config_address_out,
};
-static bool pci_config_data_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool pci_config_data_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
return true;
}
@@ -67,7 +67,7 @@ static bool pci_device_exists(u8 bus_number, u8 device_number, u8 function_numbe
return dev != NULL;
}
-static bool pci_config_data_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool pci_config_data_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
unsigned long start;
u8 dev_num;
@@ -101,10 +101,10 @@ out_error:
goto out;
}
-static int qcow1_read_sector(struct disk_image *self, u64 sector,
+static int qcow1_read_sector(struct disk_image *disk, u64 sector,
void *dst, u32 dst_len)
{
- struct qcow *q = self->priv;
+ struct qcow *q = disk->priv;
struct qcow_header *header = q->header;
char *buf = dst;
u64 offset;
@@ -130,19 +130,19 @@ out_error:
return -1;
}
-static int qcow1_write_sector(struct disk_image *self, u64 sector, void *src, u32 src_len)
+static int qcow1_write_sector(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
return -1;
}
-static void qcow1_disk_close(struct disk_image *self)
+static void qcow1_disk_close(struct disk_image *disk)
{
struct qcow *q;
- if (!self)
+ if (!disk)
return;
- q = self->priv;
+ q = disk->priv;
free(q->table.l1_table);
free(q->header);
@@ -19,7 +19,7 @@ static inline unsigned char bin2bcd(unsigned val)
return ((val / 10) << 4) + val % 10;
}
-static bool cmos_ram_data_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool cmos_ram_data_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
struct tm *tm;
time_t ti;
@@ -52,7 +52,7 @@ static bool cmos_ram_data_in(struct kvm *self, u16 port, void *data, int size, u
return true;
}
-static bool cmos_ram_data_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool cmos_ram_data_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
return true;
}
@@ -62,13 +62,13 @@ static struct ioport_operations cmos_ram_data_ioport_ops = {
.io_in = cmos_ram_data_in,
};
-static bool cmos_ram_index_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool cmos_ram_index_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
u8 value;
value = ioport__read8(data);
- self->nmi_disabled = value & (1UL << 7);
+ kvm->nmi_disabled = value & (1UL << 7);
cmos_index = value & ~(1UL << 7);
@@ -69,7 +69,7 @@ static void virtio_blk_port2dev(u16 port, u16 base, u16 size, u16 *dev_idx, u16
*offset = port - (base + *dev_idx * size);
}
-static bool virtio_blk_pci_io_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool virtio_blk_pci_io_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
struct blk_dev *bdev;
u16 offset, dev_idx;
@@ -103,7 +103,7 @@ static bool virtio_blk_pci_io_in(struct kvm *self, u16 port, void *data, int siz
break;
case VIRTIO_PCI_ISR:
ioport__write8(data, bdev->isr);
- kvm__irq_line(self, bdev->pci_hdr.irq_line, VIRTIO_IRQ_LOW);
+ kvm__irq_line(kvm, bdev->pci_hdr.irq_line, VIRTIO_IRQ_LOW);
bdev->isr = VIRTIO_IRQ_LOW;
break;
case VIRTIO_MSI_CONFIG_VECTOR:
@@ -119,7 +119,7 @@ static bool virtio_blk_pci_io_in(struct kvm *self, u16 port, void *data, int siz
return ret;
}
-static bool virtio_blk_do_io_request(struct kvm *self,
+static bool virtio_blk_do_io_request(struct kvm *kvm,
struct blk_dev *bdev,
struct virt_queue *queue)
{
@@ -129,7 +129,7 @@ static bool virtio_blk_do_io_request(struct kvm *self,
u16 out, in, head;
u8 *status;
- head = virt_queue__get_iov(queue, iov, &out, &in, self);
+ head = virt_queue__get_iov(queue, iov, &out, &in, kvm);
/* head */
req = iov[0].iov_base;
@@ -171,7 +171,7 @@ static void virtio_blk_do_io(struct kvm *kvm, void *param)
virt_queue__trigger_irq(vq, bdev->pci_hdr.irq_line, &bdev->isr, kvm);
}
-static bool virtio_blk_pci_io_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool virtio_blk_pci_io_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
struct blk_dev *bdev;
u16 offset, dev_idx;
@@ -196,7 +196,7 @@ static bool virtio_blk_pci_io_out(struct kvm *self, u16 port, void *data, int si
queue = &bdev->vqs[bdev->queue_selector];
queue->pfn = ioport__read32(data);
- p = guest_pfn_to_host(self, queue->pfn);
+ p = guest_pfn_to_host(kvm, queue->pfn);
vring_init(&queue->vring, VIRTIO_BLK_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
@@ -205,7 +205,7 @@ static bool virtio_blk_pci_io_out(struct kvm *self, u16 port, void *data, int si
.bdev = bdev,
};
- job->job_id = thread_pool__add_job(self, virtio_blk_do_io, job);
+ job->job_id = thread_pool__add_job(kvm, virtio_blk_do_io, job);
break;
}
@@ -255,7 +255,7 @@ static int virtio_blk_find_empty_dev(void)
return -1;
}
-void virtio_blk__init(struct kvm *self, struct disk_image *disk)
+void virtio_blk__init(struct kvm *kvm, struct disk_image *disk)
{
u16 blk_dev_base_addr;
u8 dev, pin, line;
@@ -69,7 +69,7 @@ static struct con_dev cdev = {
/*
* Interrupts are injected for hvc0 only.
*/
-static void virtio_console__inject_interrupt_callback(struct kvm *self, void *param)
+static void virtio_console__inject_interrupt_callback(struct kvm *kvm, void *param)
{
struct iovec iov[VIRTIO_CONSOLE_QUEUE_SIZE];
struct virt_queue *vq;
@@ -82,16 +82,16 @@ static void virtio_console__inject_interrupt_callback(struct kvm *self, void *pa
vq = param;
if (term_readable(CONSOLE_VIRTIO) && virt_queue__available(vq)) {
- head = virt_queue__get_iov(vq, iov, &out, &in, self);
+ head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
len = term_getc_iov(CONSOLE_VIRTIO, iov, in);
virt_queue__set_used_elem(vq, head, len);
- virt_queue__trigger_irq(vq, virtio_console_pci_device.irq_line, &cdev.isr, self);
+ virt_queue__trigger_irq(vq, virtio_console_pci_device.irq_line, &cdev.isr, kvm);
}
mutex_unlock(&cdev.mutex);
}
-void virtio_console__inject_interrupt(struct kvm *self)
+void virtio_console__inject_interrupt(struct kvm *kvm)
{
thread_pool__do_job(cdev.jobs[VIRTIO_CONSOLE_RX_QUEUE]);
}
@@ -111,7 +111,7 @@ static bool virtio_console_pci_io_device_specific_in(void *data, unsigned long o
return true;
}
-static bool virtio_console_pci_io_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool virtio_console_pci_io_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
unsigned long offset = port - IOPORT_VIRTIO_CONSOLE;
bool ret = true;
@@ -140,7 +140,7 @@ static bool virtio_console_pci_io_in(struct kvm *self, u16 port, void *data, int
break;
case VIRTIO_PCI_ISR:
ioport__write8(data, cdev.isr);
- kvm__irq_line(self, virtio_console_pci_device.irq_line, VIRTIO_IRQ_LOW);
+ kvm__irq_line(kvm, virtio_console_pci_device.irq_line, VIRTIO_IRQ_LOW);
cdev.isr = VIRTIO_IRQ_LOW;
break;
case VIRTIO_MSI_CONFIG_VECTOR:
@@ -155,7 +155,7 @@ static bool virtio_console_pci_io_in(struct kvm *self, u16 port, void *data, int
return ret;
}
-static void virtio_console_handle_callback(struct kvm *self, void *param)
+static void virtio_console_handle_callback(struct kvm *kvm, void *param)
{
struct iovec iov[VIRTIO_CONSOLE_QUEUE_SIZE];
struct virt_queue *vq;
@@ -172,14 +172,14 @@ static void virtio_console_handle_callback(struct kvm *self, void *param)
*/
while (virt_queue__available(vq)) {
- head = virt_queue__get_iov(vq, iov, &out, &in, self);
+ head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
len = term_putc_iov(CONSOLE_VIRTIO, iov, out);
virt_queue__set_used_elem(vq, head, len);
}
}
-static bool virtio_console_pci_io_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool virtio_console_pci_io_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
unsigned long offset = port - IOPORT_VIRTIO_CONSOLE;
bool ret = true;
@@ -198,14 +198,14 @@ static bool virtio_console_pci_io_out(struct kvm *self, u16 port, void *data, in
queue = &cdev.vqs[cdev.queue_selector];
queue->pfn = ioport__read32(data);
- p = guest_pfn_to_host(self, queue->pfn);
+ p = guest_pfn_to_host(kvm, queue->pfn);
vring_init(&queue->vring, VIRTIO_CONSOLE_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
if (cdev.queue_selector == VIRTIO_CONSOLE_TX_QUEUE)
- cdev.jobs[cdev.queue_selector] = thread_pool__add_job(self, virtio_console_handle_callback, queue);
+ cdev.jobs[cdev.queue_selector] = thread_pool__add_job(kvm, virtio_console_handle_callback, queue);
else if (cdev.queue_selector == VIRTIO_CONSOLE_RX_QUEUE)
- cdev.jobs[cdev.queue_selector] = thread_pool__add_job(self, virtio_console__inject_interrupt_callback, queue);
+ cdev.jobs[cdev.queue_selector] = thread_pool__add_job(kvm, virtio_console__inject_interrupt_callback, queue);
break;
}
@@ -240,7 +240,7 @@ static struct ioport_operations virtio_console_io_ops = {
.io_out = virtio_console_pci_io_out,
};
-void virtio_console__init(struct kvm *self)
+void virtio_console__init(struct kvm *kvm)
{
u8 dev, line, pin;
@@ -82,12 +82,12 @@ static void *virtio_net_rx_thread(void *p)
{
struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
struct virt_queue *vq;
- struct kvm *self;
+ struct kvm *kvm;
u16 out, in;
u16 head;
int len;
- self = p;
+ kvm = p;
vq = &net_device.vqs[VIRTIO_NET_RX_QUEUE];
while (1) {
@@ -97,12 +97,12 @@ static void *virtio_net_rx_thread(void *p)
mutex_unlock(&net_device.io_rx_mutex);
while (virt_queue__available(vq)) {
- head = virt_queue__get_iov(vq, iov, &out, &in, self);
+ head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
len = readv(net_device.tap_fd, iov, in);
virt_queue__set_used_elem(vq, head, len);
/* We should interrupt guest right now, otherwise latency is huge. */
- virt_queue__trigger_irq(vq, virtio_net_pci_device.irq_line, &net_device.isr, self);
+ virt_queue__trigger_irq(vq, virtio_net_pci_device.irq_line, &net_device.isr, kvm);
}
}
@@ -116,12 +116,12 @@ static void *virtio_net_tx_thread(void *p)
{
struct iovec iov[VIRTIO_NET_QUEUE_SIZE];
struct virt_queue *vq;
- struct kvm *self;
+ struct kvm *kvm;
u16 out, in;
u16 head;
int len;
- self = p;
+ kvm = p;
vq = &net_device.vqs[VIRTIO_NET_TX_QUEUE];
while (1) {
@@ -131,12 +131,12 @@ static void *virtio_net_tx_thread(void *p)
mutex_unlock(&net_device.io_tx_mutex);
while (virt_queue__available(vq)) {
- head = virt_queue__get_iov(vq, iov, &out, &in, self);
+ head = virt_queue__get_iov(vq, iov, &out, &in, kvm);
len = writev(net_device.tap_fd, iov, out);
virt_queue__set_used_elem(vq, head, len);
}
- virt_queue__trigger_irq(vq, virtio_net_pci_device.irq_line, &net_device.isr, self);
+ virt_queue__trigger_irq(vq, virtio_net_pci_device.irq_line, &net_device.isr, kvm);
}
@@ -161,7 +161,7 @@ static bool virtio_net_pci_io_device_specific_in(void *data, unsigned long offse
return true;
}
-static bool virtio_net_pci_io_in(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool virtio_net_pci_io_in(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
unsigned long offset = port - IOPORT_VIRTIO_NET;
bool ret = true;
@@ -190,7 +190,7 @@ static bool virtio_net_pci_io_in(struct kvm *self, u16 port, void *data, int siz
break;
case VIRTIO_PCI_ISR:
ioport__write8(data, net_device.isr);
- kvm__irq_line(self, virtio_net_pci_device.irq_line, VIRTIO_IRQ_LOW);
+ kvm__irq_line(kvm, virtio_net_pci_device.irq_line, VIRTIO_IRQ_LOW);
net_device.isr = VIRTIO_IRQ_LOW;
break;
case VIRTIO_MSI_CONFIG_VECTOR:
@@ -205,7 +205,7 @@ static bool virtio_net_pci_io_in(struct kvm *self, u16 port, void *data, int siz
return ret;
}
-static void virtio_net_handle_callback(struct kvm *self, u16 queue_index)
+static void virtio_net_handle_callback(struct kvm *kvm, u16 queue_index)
{
switch (queue_index) {
case VIRTIO_NET_TX_QUEUE: {
@@ -225,7 +225,7 @@ static void virtio_net_handle_callback(struct kvm *self, u16 queue_index)
}
}
-static bool virtio_net_pci_io_out(struct kvm *self, u16 port, void *data, int size, u32 count)
+static bool virtio_net_pci_io_out(struct kvm *kvm, u16 port, void *data, int size, u32 count)
{
unsigned long offset = port - IOPORT_VIRTIO_NET;
bool ret = true;
@@ -244,7 +244,7 @@ static bool virtio_net_pci_io_out(struct kvm *self, u16 port, void *data, int si
queue = &net_device.vqs[net_device.queue_selector];
queue->pfn = ioport__read32(data);
- p = guest_pfn_to_host(self, queue->pfn);
+ p = guest_pfn_to_host(kvm, queue->pfn);
vring_init(&queue->vring, VIRTIO_NET_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
@@ -256,7 +256,7 @@ static bool virtio_net_pci_io_out(struct kvm *self, u16 port, void *data, int si
case VIRTIO_PCI_QUEUE_NOTIFY: {
u16 queue_index;
queue_index = ioport__read16(data);
- virtio_net_handle_callback(self, queue_index);
+ virtio_net_handle_callback(kvm, queue_index);
break;
}
case VIRTIO_PCI_STATUS:
@@ -367,7 +367,7 @@ fail:
return 0;
}
-static void virtio_net__io_thread_init(struct kvm *self)
+static void virtio_net__io_thread_init(struct kvm *kvm)
{
pthread_mutex_init(&net_device.io_rx_mutex, NULL);
pthread_cond_init(&net_device.io_tx_cond, NULL);
@@ -375,8 +375,8 @@ static void virtio_net__io_thread_init(struct kvm *self)
pthread_mutex_init(&net_device.io_rx_mutex, NULL);
pthread_cond_init(&net_device.io_tx_cond, NULL);
- pthread_create(&net_device.io_rx_thread, NULL, virtio_net_rx_thread, (void *)self);
- pthread_create(&net_device.io_tx_thread, NULL, virtio_net_tx_thread, (void *)self);
+ pthread_create(&net_device.io_rx_thread, NULL, virtio_net_rx_thread, (void *)kvm);
+ pthread_create(&net_device.io_tx_thread, NULL, virtio_net_tx_thread, (void *)kvm);
}
void virtio_net__init(const struct virtio_net_parameters *params)
@@ -392,6 +392,6 @@ void virtio_net__init(const struct virtio_net_parameters *params)
pci__register(&virtio_net_pci_device, dev);
ioport__register(IOPORT_VIRTIO_NET, &virtio_net_io_ops, IOPORT_VIRTIO_NET_SIZE);
- virtio_net__io_thread_init(params->self);
+ virtio_net__io_thread_init(params->kvm);
}
}
Give proper names to vars named 'self'. Signed-off-by: Sasha Levin <levinsasha928@gmail.com> --- tools/kvm/8250-serial.c | 20 ++-- tools/kvm/cpuid.c | 6 +- tools/kvm/disk-image.c | 78 +++++++------- tools/kvm/include/kvm/disk-image.h | 36 +++--- tools/kvm/include/kvm/interrupt.h | 6 +- tools/kvm/include/kvm/ioport.h | 4 +- tools/kvm/include/kvm/kvm-cpu.h | 16 ++-- tools/kvm/include/kvm/kvm.h | 32 +++--- tools/kvm/include/kvm/virtio-blk.h | 2 +- tools/kvm/include/kvm/virtio-console.h | 4 +- tools/kvm/include/kvm/virtio-net.h | 2 +- tools/kvm/interrupt.c | 14 +- tools/kvm/ioport.c | 12 +- tools/kvm/kvm-cpu.c | 196 ++++++++++++++++---------------- tools/kvm/kvm-run.c | 2 +- tools/kvm/kvm.c | 146 ++++++++++++------------ tools/kvm/mmio.c | 2 +- tools/kvm/mptable.c | 2 +- tools/kvm/pci.c | 8 +- tools/kvm/qcow.c | 12 +- tools/kvm/rtc.c | 8 +- tools/kvm/virtio/blk.c | 16 ++-- tools/kvm/virtio/console.c | 26 ++-- tools/kvm/virtio/net.c | 36 +++--- 24 files changed, 343 insertions(+), 343 deletions(-)