Likely through copy-and-paste, all three instances of guest MCE
processing jumped to the wrong place when MCEs are temporarily masked
(because one is currently being processed by the guest): they branched
to test_guest_events, which is where the NMI processing code correctly
jumps, and thereby skipped the pending-NMI check. A nested, unmasked
NMI should get delivered immediately, however, so the masked-MCE case
must branch to the NMI check instead.
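
To make the control-flow issue concrete, here is a rough C sketch of
the intended check ordering (illustrative only, not Xen code; the
struct, its fields and TRAP_MCE_BIT are hypothetical stand-ins for the
VCPU_* offsets and VCPU_TRAP_MCE used in the assembly below):

    /* Sketch of the exit-to-guest checks. The bug was equivalent to
     * jumping straight to "deliver ordinary guest events" in the
     * masked-MCE case, skipping the NMI check. */
    struct vcpu_sketch {
        unsigned char mce_pending;
        unsigned char nmi_pending;
        unsigned char async_exception_mask;   /* one bit per trap type */
    };

    #define TRAP_MCE_BIT (1 << 1)             /* stand-in for 1 << VCPU_TRAP_MCE */

    static void exit_to_guest_checks(struct vcpu_sketch *v)
    {
        if (v->mce_pending) {
            if (v->async_exception_mask & TRAP_MCE_BIT) {
                /* MCE masked: must still fall through to the NMI check
                 * (the fixed "jnz .Ltest_guest_nmi"), not skip it
                 * (the old, wrong "jnz test_guest_events"). */
                goto test_guest_nmi;
            }
            v->mce_pending = 0;
            /* bounce the machine check to the guest */
            return;
        }
    test_guest_nmi:
        if (v->nmi_pending) {
            /* bounce the NMI to the guest */
            return;
        }
        /* deliver ordinary guest events */
    }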
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Keir Fraser <keir@xen.org>
         jnz  process_softirqs
         testb $1,VCPU_mce_pending(%ebx)
         jnz  process_mce
+.Ltest_guest_nmi:
         testb $1,VCPU_nmi_pending(%ebx)
         jnz  process_nmi
 test_guest_events:
 /* %ebx: struct vcpu */
 process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%ebx)
-        jnz  test_guest_events
+        jnz  .Ltest_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%ebx)
         call set_guest_machinecheck_trapbounce
         jnz  compat_process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz  compat_process_mce
+.Lcompat_test_guest_nmi:
         testb $1,VCPU_nmi_pending(%rbx)
         jnz  compat_process_nmi
 compat_test_guest_events:
 /* %rbx: struct vcpu */
 compat_process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
-        jnz  compat_test_guest_events
+        jnz  .Lcompat_test_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce
         jnz  process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz  process_mce
+.Ltest_guest_nmi:
         testb $1,VCPU_nmi_pending(%rbx)
         jnz  process_nmi
 test_guest_events:
 /* %rbx: struct vcpu */
 process_mce:
         testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
-        jnz  test_guest_events
+        jnz  .Ltest_guest_nmi
         sti
         movb $0,VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce