x86/HVM: always intercept #AC and #DB

Since both are benign exceptions, and both can be raised as part of
exception delivery itself, intercepting them is required to prevent a
guest from locking up a CPU: once in such a delivery loop, no further
VM exits would occur (modelled by the sketch following the two
scenarios below).
The specific scenarios:
1) #AC may be raised during exception delivery if a 32-bit guest
installs a ring-3 handler for the exception and the stack is
misaligned.
This is CVE-2015-5307 / XSA-156.
Reported-by: Benjamin Serebrin <serebrin@google.com>
2) #DB may be raised during exception delivery when a breakpoint has
been placed on a data structure involved in delivering the exception.
This can result in an endless loop when a 64-bit guest uses a non-zero
IST for the vector 1 IDT entry; even without IST in use, the time it
takes until a contributory fault gets raised (the exact outcome
depending on the handler) may be quite long.
This is CVE-2015-8104 / XSA-156.
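
To see why these loops never terminate on their own: a second exception
raised while delivering a first one escalates to #DF only for certain
class combinations, and benign/benign is not among them. Below is a
minimal standalone sketch (illustration only, not Xen code) hard-coding
the SDM's double-fault class rules:

#include <stdio.h>

enum ex_class { BENIGN, CONTRIBUTORY, PAGE_FAULT };

static enum ex_class classify(int vector)
{
    switch ( vector )
    {
    case 0:  /* #DE */
    case 10: /* #TS */
    case 11: /* #NP */
    case 12: /* #SS */
    case 13: /* #GP */
        return CONTRIBUTORY;
    case 14: /* #PF */
        return PAGE_FAULT;
    default: /* #DB (1), #AC (17), and the other benign vectors */
        return BENIGN;
    }
}

/* 1 if raising 'second' while delivering 'first' yields #DF. */
static int causes_double_fault(int first, int second)
{
    enum ex_class f = classify(first), s = classify(second);

    if ( f == CONTRIBUTORY && s == CONTRIBUTORY )
        return 1;
    if ( f == PAGE_FAULT && (s == CONTRIBUTORY || s == PAGE_FAULT) )
        return 1;
    return 0; /* benign combinations are simply delivered in turn */
}

int main(void)
{
    /* #DB raised while delivering #DB: no #DF, delivery restarts,
     * and without an intercept the CPU never exits to Xen. */
    printf("#DB during #DB -> #DF? %d\n", causes_double_fault(1, 1));
    /* Likewise for #AC during #AC delivery. */
    printf("#AC during #AC -> #DF? %d\n", causes_double_fault(17, 17));
    return 0;
}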
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit: bd2239d9fa975a1ee5bcd27c218ae042cd0a57bc
master date: 2015-11-10 12:03:08 +0100
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
         unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
{
uint32_t intercepts = vmcb_get_exception_intercepts(vmcb);
- uint32_t mask = (1U << TRAP_debug) | (1U << TRAP_int3);
+
v->arch.hvm_vcpu.debug_state_latch = debug_state;
vmcb_set_exception_intercepts(
- vmcb, debug_state ? (intercepts | mask) : (intercepts & ~mask));
+ vmcb, debug_state ? (intercepts | (1U << TRAP_int3))
+ : (intercepts & ~(1U << TRAP_int3)));
}
if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
case VMEXIT_EXCEPTION_DB:
if ( !v->domain->debugger_attached )
- goto unexpected_exit_type;
- domain_pause_for_debugger();
+ hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
+ else
+ domain_pause_for_debugger();
break;
case VMEXIT_EXCEPTION_BP:
break;
}
+ case VMEXIT_EXCEPTION_AC:
+ HVMTRACE_1D(TRAP, TRAP_alignment_check);
+ hvm_inject_hw_exception(TRAP_alignment_check, vmcb->exitinfo1);
+ break;
+
case VMEXIT_EXCEPTION_UD:
svm_vmexit_ud_intercept(regs);
break;
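
Since #DB is now intercepted even when no debugger is attached, the
exit handler can no longer treat VMEXIT_EXCEPTION_DB as unexpected; it
has to reflect the exception back into the guest. A minimal sketch of
the resulting policy, with hypothetical stubs standing in for Xen's
hvm_inject_hw_exception() and domain_pause_for_debugger():

#include <stdio.h>

/* Hypothetical stand-ins for the real Xen helpers. */
static void inject_hw_exception(int vector)
{
    printf("re-inject vector %d into the guest\n", vector);
}

static void pause_for_debugger(void)
{
    printf("pause the domain for the debugger\n");
}

/* The shape now shared by the SVM and VMX exit handlers: each loop
 * iteration takes a VM exit, so the hypervisor stays in control and
 * the vCPU remains schedulable instead of spinning in guest context. */
static void on_debug_exit(int vector, int debugger_attached)
{
    if ( !debugger_attached )
        inject_hw_exception(vector);
    else
        pause_for_debugger();
}

int main(void)
{
    on_debug_exit(1, 0); /* #DB, no debugger: reflect to guest */
    on_debug_exit(1, 1); /* #DB, debugger attached: pause domain */
    return 0;
}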
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
 void vmx_update_debug_state(struct vcpu *v)
{
- unsigned long mask;
-
ASSERT(v == current);
- mask = 1u << TRAP_int3;
- if ( !cpu_has_monitor_trap_flag )
- mask |= 1u << TRAP_debug;
-
if ( v->arch.hvm_vcpu.debug_state_latch )
- v->arch.hvm_vmx.exception_bitmap |= mask;
+ v->arch.hvm_vmx.exception_bitmap |= 1U << TRAP_int3;
else
- v->arch.hvm_vmx.exception_bitmap &= ~mask;
+ v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_int3);
vmx_update_exception_bitmap(v);
}
exit_qualification = __vmread(EXIT_QUALIFICATION);
HVMTRACE_1D(TRAP_DEBUG, exit_qualification);
write_debugreg(6, exit_qualification | 0xffff0ff0);
- if ( !v->domain->debugger_attached || cpu_has_monitor_trap_flag )
- goto exit_and_crash;
- domain_pause_for_debugger();
+ if ( !v->domain->debugger_attached )
+ hvm_inject_hw_exception(vector, HVM_DELIVER_NO_ERROR_CODE);
+ else
+ domain_pause_for_debugger();
break;
case TRAP_int3:
{
hvm_inject_page_fault(regs->error_code, exit_qualification);
break;
+ case TRAP_alignment_check:
+ HVMTRACE_1D(TRAP, vector);
+ hvm_inject_hw_exception(vector,
+ __vmread(VM_EXIT_INTR_ERROR_CODE));
+ break;
case TRAP_nmi:
if ( (intr_info & INTR_INFO_INTR_TYPE_MASK) !=
(X86_EVENTTYPE_NMI << 8) )
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
 })
/* These exceptions must always be intercepted. */
-#define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
+#define HVM_TRAP_MASK ((1U << TRAP_debug) | \
+ (1U << TRAP_invalid_op) | \
+ (1U << TRAP_alignment_check) | \
+ (1U << TRAP_machine_check))
/*
* x86 event types. This enumeration is valid for:
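
As a usage illustration, here is a self-contained sketch (the TRAP_*
values are the architectural vector numbers, redefined so the snippet
builds on its own) of how the widened mask combines with the
debugger-controlled #BP toggle from the earlier hunks:

#include <assert.h>
#include <stdint.h>

#define TRAP_debug            1
#define TRAP_int3             3
#define TRAP_invalid_op       6
#define TRAP_alignment_check 17
#define TRAP_machine_check   18

#define HVM_TRAP_MASK ((1U << TRAP_debug) |           \
                       (1U << TRAP_invalid_op) |      \
                       (1U << TRAP_alignment_check) | \
                       (1U << TRAP_machine_check))

int main(void)
{
    /* Baseline bitmap as established at VMCB/VMCS construction time. */
    uint32_t intercepts = HVM_TRAP_MASK;
    int debug_state;

    for ( debug_state = 0; debug_state <= 1; debug_state++ )
    {
        /* The per-vCPU debug latch now toggles #BP (int3) only... */
        uint32_t cur = debug_state ? (intercepts |  (1U << TRAP_int3))
                                   : (intercepts & ~(1U << TRAP_int3));

        /* ...while #DB and #AC interception can never be switched off,
         * which is exactly what closes XSA-156. */
        assert(cur & (1U << TRAP_debug));
        assert(cur & (1U << TRAP_alignment_check));
    }
    return 0;
}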