Skip to content

Commit 4a9f3a9

Browse files
sean-jc authored and gregkh committed
x86/kvm: Alloc dummy async #PF token outside of raw spinlock
commit 0547758 upstream. Drop the raw spinlock in kvm_async_pf_task_wake() before allocating the dummy async #PF token; the allocator is preemptible on PREEMPT_RT kernels and must not be called from truly atomic contexts. Opportunistically document why it's ok to loop on allocation failure, i.e. why the function won't get stuck in an infinite loop. Reported-by: Yajun Deng <yajun.deng@linux.dev> Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 4c4a11c commit 4a9f3a9

File tree

1 file changed

+27
-14
lines changed

1 file changed

+27
-14
lines changed

arch/x86/kernel/kvm.c

Lines changed: 27 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -188,7 +188,7 @@ void kvm_async_pf_task_wake(u32 token)
188188
{
189189
u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
190190
struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
191-
struct kvm_task_sleep_node *n;
191+
struct kvm_task_sleep_node *n, *dummy = NULL;
192192

193193
if (token == ~0) {
194194
apf_task_wake_all();
@@ -200,28 +200,41 @@ void kvm_async_pf_task_wake(u32 token)
200200
n = _find_apf_task(b, token);
201201
if (!n) {
202202
/*
203-
* async PF was not yet handled.
204-
* Add dummy entry for the token.
203+
* Async #PF not yet handled, add a dummy entry for the token.
204+
* Allocating the token must be down outside of the raw lock
205+
* as the allocator is preemptible on PREEMPT_RT kernels.
205206
*/
206-
n = kzalloc(sizeof(*n), GFP_ATOMIC);
207-
if (!n) {
207+
if (!dummy) {
208+
raw_spin_unlock(&b->lock);
209+
dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
210+
208211
/*
209-
* Allocation failed! Busy wait while other cpu
210-
* handles async PF.
212+
* Continue looping on allocation failure, eventually
213+
* the async #PF will be handled and allocating a new
214+
* node will be unnecessary.
215+
*/
216+
if (!dummy)
217+
cpu_relax();
218+
219+
/*
220+
* Recheck for async #PF completion before enqueueing
221+
* the dummy token to avoid duplicate list entries.
211222
*/
212-
raw_spin_unlock(&b->lock);
213-
cpu_relax();
214223
goto again;
215224
}
216-
n->token = token;
217-
n->cpu = smp_processor_id();
218-
init_swait_queue_head(&n->wq);
219-
hlist_add_head(&n->link, &b->list);
225+
dummy->token = token;
226+
dummy->cpu = smp_processor_id();
227+
init_swait_queue_head(&dummy->wq);
228+
hlist_add_head(&dummy->link, &b->list);
229+
dummy = NULL;
220230
} else {
221231
apf_task_wake_one(n);
222232
}
223233
raw_spin_unlock(&b->lock);
224-
return;
234+
235+
/* A dummy token might be allocated and ultimately not used. */
236+
if (dummy)
237+
kfree(dummy);
225238
}
226239
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
227240

0 commit comments

Comments
 (0)