diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -32,7 +32,7 @@
static struct workqueue_struct *irqfd_cleanup_wq;
-bool __attribute__((weak))
+bool __weak
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
return true;
@@ -179,7 +179,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
-int __attribute__((weak)) kvm_arch_set_irq_inatomic(
+int __weak kvm_arch_set_irq_inatomic(
struct kvm_kernel_irq_routing_entry *irq,
struct kvm *kvm, int irq_source_id,
int level,
@@ -192,19 +192,20 @@ int __attribute__((weak)) kvm_arch_set_irq_inatomic(
* Called with wqh->lock held and interrupts disabled
*/
static int
-irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
+irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{
struct kvm_kernel_irqfd *irqfd =
container_of(wait, struct kvm_kernel_irqfd, wait);
__poll_t flags = key_to_poll(key);
struct kvm_kernel_irq_routing_entry irq;
struct kvm *kvm = irqfd->kvm;
- unsigned seq;
+ unsigned int seq;
int idx;
int ret = 0;
if (flags & EPOLLIN) {
u64 cnt;
+
eventfd_ctx_do_read(irqfd->eventfd, &cnt);
idx = srcu_read_lock(&kvm->irq_srcu);
@@ -275,24 +276,24 @@ static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
}
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
-void __attribute__((weak)) kvm_arch_irq_bypass_stop(
+void __weak kvm_arch_irq_bypass_stop(
struct irq_bypass_consumer *cons)
{
}
-void __attribute__((weak)) kvm_arch_irq_bypass_start(
+void __weak kvm_arch_irq_bypass_start(
struct irq_bypass_consumer *cons)
{
}
-int __attribute__((weak)) kvm_arch_update_irqfd_routing(
+int __weak kvm_arch_update_irqfd_routing(
struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set)
{
return 0;
}
-bool __attribute__((weak)) kvm_arch_irqfd_route_changed(
+bool __weak kvm_arch_irqfd_route_changed(
struct kvm_kernel_irq_routing_entry *old,
struct kvm_kernel_irq_routing_entry *new)
{
@@ -456,7 +457,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
return ret;
}
-bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
+bool kvm_irq_has_notifier(struct kvm *kvm, unsigned int irqchip, unsigned int pin)
{
struct kvm_irq_ack_notifier *kian;
int gsi, idx;
@@ -487,7 +488,7 @@ void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
kian->irq_acked(kian);
}
-void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
+void kvm_notify_acked_irq(struct kvm *kvm, unsigned int irqchip, unsigned int pin)
{
int gsi, idx;
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -32,6 +32,7 @@ static int __kvm_gmem_prepare_folio(struct kvm *kvm, struct kvm_memory_slot *slo
kvm_pfn_t pfn = folio_file_pfn(folio, index);
gfn_t gfn = slot->base_gfn + index - slot->gmem.pgoff;
int rc = kvm_arch_gmem_prepare(kvm, gfn, pfn, folio_order(folio));
+
if (rc) {
pr_warn_ratelimited("gmem: Failed to prepare folio for index %lx GFN %llx PFN %llx error %d.\n",
index, gfn, pfn, rc);
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -37,7 +37,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
return n;
}
-int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned int irqchip, unsigned int pin)
{
struct kvm_irq_routing_table *irq_rt;
@@ -85,6 +85,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
while (i--) {
int r;
+
r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
line_status);
if (r < 0)
@@ -121,6 +122,7 @@ void kvm_free_irq_routing(struct kvm *kvm)
/* Called only during vm destruction. Nobody can use the pointer
at this stage */
struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing);
+
free_irq_routing_table(rt);
}
@@ -156,7 +158,7 @@ static int setup_routing_entry(struct kvm *kvm,
return 0;
}
-void __attribute__((weak)) kvm_arch_irq_routing_update(struct kvm *kvm)
+void __weak kvm_arch_irq_routing_update(struct kvm *kvm)
{
}
@@ -167,8 +169,8 @@ bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm)
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *ue,
- unsigned nr,
- unsigned flags)
+ unsigned int nr,
+ unsigned int flags)
{
struct kvm_irq_routing_table *new, *old;
struct kvm_kernel_irq_routing_entry *e;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -50,7 +50,7 @@
#include <linux/kthread.h>
#include <linux/suspend.h>
-#include <asm/processor.h>
+#include <linux/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>
@@ -185,7 +185,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(vcpu_put);
/* TODO: merge with kvm_arch_vcpu_should_kick */
-static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
+static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned int req)
{
int mode = kvm_vcpu_exiting_guest_mode(vcpu);
@@ -440,7 +440,7 @@ void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
}
#endif
-static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
+static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned int id)
{
mutex_init(&vcpu->mutex);
vcpu->cpu = -1;
@@ -1661,6 +1661,7 @@ static void kvm_commit_memory_region(struct kvm *kvm,
if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES) {
int change = (new_flags & KVM_MEM_LOG_DIRTY_PAGES) ? 1 : -1;
+
atomic_set(&kvm->nr_memslots_dirty_logging,
atomic_read(&kvm->nr_memslots_dirty_logging) + change);
}
@@ -2296,7 +2297,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
if (log->first_page > memslot->npages ||
log->num_pages > memslot->npages - log->first_page ||
(log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
- return -EINVAL;
+ return -EINVAL;
kvm_arch_sync_dirty_log(kvm, memslot);
@@ -2311,6 +2312,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
i++, offset += BITS_PER_LONG) {
unsigned long mask = *dirty_bitmap_buffer++;
atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
+
if (!mask)
continue;
@@ -2875,6 +2877,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
* not call the fault handler, so do it here.
*/
bool unlocked = false;
+
r = fixup_user_fault(current->mm, kfp->hva,
(write_fault ? FAULT_FLAG_WRITE : 0),
&unlocked);
@@ -3172,7 +3175,7 @@ int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned l
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
- void *data, int offset, unsigned long len)
+ void *data, int offset, unsigned long len)
{
int r;
unsigned long addr;
@@ -3205,7 +3208,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
/* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
static int __kvm_write_guest_page(struct kvm *kvm,
struct kvm_memory_slot *memslot, gfn_t gfn,
- const void *data, int offset, int len)
+ const void *data, int offset, int len)
{
int r;
unsigned long addr;
@@ -3263,7 +3266,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
EXPORT_SYMBOL_GPL(kvm_write_guest);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
- unsigned long len)
+ unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
@@ -3328,6 +3331,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
+
return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
@@ -3429,7 +3433,7 @@ EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty_in_slot(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
- gfn_t gfn)
+ gfn_t gfn)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
@@ -3752,7 +3756,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
*/
if (kvm_arch_vcpu_should_kick(vcpu)) {
cpu = READ_ONCE(vcpu->cpu);
- if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
+ if (cpu != me && (unsigned int)cpu < nr_cpu_ids && cpu_online(cpu))
smp_send_reschedule(cpu);
}
out:
@@ -4310,6 +4314,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
switch (ioctl) {
case KVM_RUN: {
struct pid *oldpid;
+
r = -EINVAL;
if (arg)
goto out;
@@ -4859,7 +4864,7 @@ static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
return cleared;
}
-int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+int __weak kvm_vm_ioctl_enable_cap(struct kvm *kvm,
struct kvm_enable_cap *cap)
{
return -EINVAL;
@@ -5941,8 +5946,8 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
/*
* The debugfs files are a reference to the kvm struct which
- * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe
- * avoids the race between open and the removal of the debugfs directory.
+ * is still valid when kvm_destroy_vm is called. kvm_get_kvm_safe
+ * avoids the race between open and the removal of the debugfs directory.
*/
if (!kvm_get_kvm_safe(stat_data->kvm))
return -ENOENT;
@@ -6062,7 +6067,7 @@ static const struct file_operations stat_fops_per_vm = {
static int vm_stat_get(void *_offset, u64 *val)
{
- unsigned offset = (long)_offset;
+ unsigned int offset = (long)_offset;
struct kvm *kvm;
u64 tmp_val;
@@ -6078,7 +6083,7 @@ static int vm_stat_get(void *_offset, u64 *val)
static int vm_stat_clear(void *_offset, u64 val)
{
- unsigned offset = (long)_offset;
+ unsigned int offset = (long)_offset;
struct kvm *kvm;
if (val)
@@ -6098,7 +6103,7 @@ DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
static int vcpu_stat_get(void *_offset, u64 *val)
{
- unsigned offset = (long)_offset;
+ unsigned int offset = (long)_offset;
struct kvm *kvm;
u64 tmp_val;
@@ -6114,7 +6119,7 @@ static int vcpu_stat_get(void *_offset, u64 *val)
static int vcpu_stat_clear(void *_offset, u64 val)
{
- unsigned offset = (long)_offset;
+ unsigned int offset = (long)_offset;
struct kvm *kvm;
if (val)
@@ -6274,7 +6279,7 @@ EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
*/
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
- return &kvm_running_vcpu;
+ return &kvm_running_vcpu;
}
#ifdef CONFIG_GUEST_PERF_EVENTS
@@ -6321,7 +6326,7 @@ void kvm_unregister_perf_callbacks(void)
}
#endif
-int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
+int kvm_init(unsigned int vcpu_size, unsigned int vcpu_align, struct module *module)
{
int r;
int cpu;
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -190,6 +190,7 @@ static int kvm_vfio_file_del(struct kvm_device *dev, unsigned int fd)
{
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_file *kvf;
+
CLASS(fd, f)(fd);
int ret;
Fixing checkpatch warnings and errors in KVM.

Signed-off-by: Daniel Hejduk <danielhejduk@disroot.org>
---
 virt/kvm/eventfd.c     | 21 +++++++++++----------
 virt/kvm/guest_memfd.c |  1 +
 virt/kvm/irqchip.c     | 10 ++++++----
 virt/kvm/kvm_main.c    | 41 +++++++++++++++++++++++------------------
 virt/kvm/vfio.c        |  1 +
 5 files changed, 42 insertions(+), 32 deletions(-)
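
Note for reviewers unfamiliar with the idiom: __weak is the kernel's macro (defined in include/linux/compiler_attributes.h) for __attribute__((__weak__)), so the conversions above are purely cosmetic, as is spelling out "unsigned int" for bare "unsigned"; checkpatch simply prefers the shorter, consistent forms. Below is a minimal userspace sketch of the weak-default / strong-override pattern the macro expresses, with a made-up arch_hook() (not a KVM API) and the attribute spelled out so it builds with plain GCC or Clang:

/* weak_demo.c - sketch only; arch_hook() is a hypothetical example. */
#include <stdio.h>

/* What the kernel's __weak macro expands to. */
#define __weak __attribute__((__weak__))

/* Weak default: the linker keeps it only if no strong definition exists. */
int __weak arch_hook(void)
{
	return 0;
}

int main(void)
{
	/* Prints 0 here; a strong arch_hook() in another object file would win. */
	printf("arch_hook() = %d\n", arch_hook());
	return 0;
}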