Message ID | 20250227230631.303431-3-kbusch@meta.com (mailing list archive)
---|---
State | New, archived
Series | [PATCHv3,1/2] vhost: return task creation error instead of NULL
On Thu, Feb 27, 2025 at 03:06:31PM -0800, Keith Busch wrote:
> From: Sean Christopherson <seanjc@google.com>
>
> A VMM may send a signal to its threads while they've entered KVM_RUN. If
> that thread happens to be trying to make the huge page recovery vhost
> task, then it fails with -ERESTARTNOINTR. We need to retry if that
> happens, so call_once needs to be retryable. Make call_once complete
> only if what it called was successful.
>
> [implemented the kvm user side]
> Signed-off-by: Keith Busch <kbusch@kernel.org>

...

> diff --git a/include/linux/call_once.h b/include/linux/call_once.h
> index 6261aa0b3fb00..ddcfd91493eaa 100644
> --- a/include/linux/call_once.h
> +++ b/include/linux/call_once.h
> @@ -26,20 +26,26 @@ do {						\
>  	__once_init((once), #once, &__key);		\
>  } while (0)
>
> -static inline void call_once(struct once *once, void (*cb)(struct once *))
> +static inline int call_once(struct once *once, int (*cb)(struct once *))
>  {
> +        int r;
> +
>  	/* Pairs with atomic_set_release() below. */
>  	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
> -		return;
> +                return 0;
>
>  	guard(mutex)(&once->lock);
>  	WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
>  	if (atomic_read(&once->state) != ONCE_NOT_STARTED)
> -		return;
> +                return -EINVAL;

Hi Keith,

A minor nit from my side:

As you are changing this line, and it seems like there will be another
revision of this series anyway, please consider updating the indentation
to use tabs.

>
>  	atomic_set(&once->state, ONCE_RUNNING);
> -	cb(once);
> -	atomic_set_release(&once->state, ONCE_COMPLETED);
> +        r = cb(once);
> +        if (r)
> +                atomic_set(&once->state, ONCE_NOT_STARTED);
> +        else
> +                atomic_set_release(&once->state, ONCE_COMPLETED);
> +        return r;
>  }
>
>  #endif /* _LINUX_CALL_ONCE_H */
> --
> 2.43.5
>
>
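The retry contract described in the commit message is easy to exercise outside the kernel. Below is a minimal userspace sketch of the same state machine, not the kernel's <linux/call_once.h>: it uses C11 atomics and a pthread mutex, every name (mock_once, mock_call_once, flaky_cb) is invented for the example, and -EINTR stands in for -ERESTARTNOINTR, which has no userspace errno value. A failing callback rolls the state back to NOT_STARTED so the next call retries; success publishes COMPLETED with a release store.

```c
/*
 * Userspace sketch of the retryable call_once() contract from the patch
 * above. Mock names throughout; -EINTR stands in for -ERESTARTNOINTR.
 * Build: cc -pthread demo.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { MOCK_NOT_STARTED, MOCK_RUNNING, MOCK_COMPLETED };

struct mock_once {
	atomic_int state;
	pthread_mutex_t lock;
};

static int mock_call_once(struct mock_once *once, int (*cb)(struct mock_once *))
{
	int r;

	/* Lock-free fast path; pairs with the release store below. */
	if (atomic_load_explicit(&once->state, memory_order_acquire) == MOCK_COMPLETED)
		return 0;

	pthread_mutex_lock(&once->lock);
	if (atomic_load_explicit(&once->state, memory_order_relaxed) != MOCK_NOT_STARTED) {
		pthread_mutex_unlock(&once->lock);
		return -EINVAL;
	}

	atomic_store_explicit(&once->state, MOCK_RUNNING, memory_order_relaxed);
	r = cb(once);
	if (r)
		/* Failure: roll back so a later call retries the callback. */
		atomic_store_explicit(&once->state, MOCK_NOT_STARTED, memory_order_relaxed);
	else
		/* Success: publish completion to lock-free readers. */
		atomic_store_explicit(&once->state, MOCK_COMPLETED, memory_order_release);
	pthread_mutex_unlock(&once->lock);
	return r;
}

/* Fails on the first attempt, like vhost_task_create() interrupted by a signal. */
static int attempts;
static int flaky_cb(struct mock_once *once)
{
	(void)once;
	return ++attempts == 1 ? -EINTR : 0;
}

int main(void)
{
	struct mock_once once = { MOCK_NOT_STARTED, PTHREAD_MUTEX_INITIALIZER };

	printf("first:  %d\n", mock_call_once(&once, flaky_cb)); /* -EINTR: rolled back */
	printf("second: %d\n", mock_call_once(&once, flaky_cb)); /* 0: retried, done */
	printf("third:  %d\n", mock_call_once(&once, flaky_cb)); /* 0: fast path */
	return 0;
}
```

This mirrors why kvm_mmu_post_init_vm() can simply return call_once()'s result in the patch below: the -ERESTARTNOINTR escapes to userspace, and the retried KVM_RUN re-enters call_once() from a clean NOT_STARTED state.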
On Tue, Mar 04, 2025 at 03:59:22PM +0000, Simon Horman wrote:
> A minor nit from my side:
>
> As you are changing this line, and it seems like there will be another
> revision of this series anyway, please consider updating the indentation to
> use tabs.

The patch is already applied to upstream and stable, and I think Paolo
must have taken care of the formatting because it looks good in the
commit now:

https://web.git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=916b7f42b3b3b539a71c204a9b49fdc4ca92cd82
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 18ca1ea6dc240..8160870398b90 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
 	return true;
 }
 
-static void kvm_mmu_start_lpage_recovery(struct once *once)
+static int kvm_mmu_start_lpage_recovery(struct once *once)
 {
 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
 	struct kvm *kvm = container_of(ka, struct kvm, arch);
@@ -7472,12 +7472,13 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
 				      kvm, "kvm-nx-lpage-recovery");
 
 	if (IS_ERR(nx_thread))
-		return;
+		return PTR_ERR(nx_thread);
 
 	vhost_task_start(nx_thread);
 
 	/* Make the task visible only once it is fully started. */
 	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
+	return 0;
 }
 
 int kvm_mmu_post_init_vm(struct kvm *kvm)
@@ -7485,10 +7486,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm)
 	if (nx_hugepage_mitigation_hard_disabled)
 		return 0;
 
-	call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
-	if (!kvm->arch.nx_huge_page_recovery_thread)
-		return -ENOMEM;
-	return 0;
+	return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery);
 }
 
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
diff --git a/include/linux/call_once.h b/include/linux/call_once.h
index 6261aa0b3fb00..ddcfd91493eaa 100644
--- a/include/linux/call_once.h
+++ b/include/linux/call_once.h
@@ -26,20 +26,26 @@ do {						\
 	__once_init((once), #once, &__key);		\
 } while (0)
 
-static inline void call_once(struct once *once, void (*cb)(struct once *))
+static inline int call_once(struct once *once, int (*cb)(struct once *))
 {
+        int r;
+
 	/* Pairs with atomic_set_release() below. */
 	if (atomic_read_acquire(&once->state) == ONCE_COMPLETED)
-		return;
+                return 0;
 
 	guard(mutex)(&once->lock);
 	WARN_ON(atomic_read(&once->state) == ONCE_RUNNING);
 	if (atomic_read(&once->state) != ONCE_NOT_STARTED)
-		return;
+                return -EINVAL;
 
 	atomic_set(&once->state, ONCE_RUNNING);
-	cb(once);
-	atomic_set_release(&once->state, ONCE_COMPLETED);
+        r = cb(once);
+        if (r)
+                atomic_set(&once->state, ONCE_NOT_STARTED);
+        else
+                atomic_set_release(&once->state, ONCE_COMPLETED);
+        return r;
 }
 
 #endif /* _LINUX_CALL_ONCE_H */
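A footnote on the "Pairs with atomic_set_release() below" comment in the hunk above: the fast path returns without taking the mutex, so the release store at completion is what guarantees that a reader observing ONCE_COMPLETED also observes every write the callback made. The following is a small userspace illustration of that acquire/release pairing, with invented names (payload, init_thread, fast_path_reader); it is not kernel code.

```c
/*
 * Acquire/release pairing behind call_once()'s lock-free fast path.
 * Illustrative userspace code; names are invented. Build: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;       /* stands in for the state the callback initializes */
static atomic_int state;  /* 0 = NOT_STARTED, 2 = COMPLETED */

static void *init_thread(void *arg)
{
	(void)arg;
	payload = 42;  /* the callback's "work" */
	/* Release: orders the payload write before the completion flag. */
	atomic_store_explicit(&state, 2, memory_order_release);
	return NULL;
}

static void *fast_path_reader(void *arg)
{
	(void)arg;
	/* Acquire: seeing COMPLETED guarantees the payload write is visible. */
	while (atomic_load_explicit(&state, memory_order_acquire) != 2)
		;  /* spin for the demo; real callers would fall back to the lock */
	printf("payload = %d\n", payload);  /* always prints 42 */
	return NULL;
}

int main(void)
{
	pthread_t r, w;

	pthread_create(&r, NULL, fast_path_reader, NULL);
	pthread_create(&w, NULL, init_thread, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	return 0;
}
```

This also suggests why the failure path can use a plain atomic_set(): a failed attempt is never published to fast-path readers, and anyone who still sees NOT_STARTED takes the mutex, which provides the necessary ordering on its own.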