xenbits.xensource.com Git - xen.git/commitdiff
x86/HVM: clear upper halves of GPRs upon entry from 32-bit code
author: Jan Beulich <jbeulich@suse.com>
Wed, 27 Mar 2024 17:31:38 +0000 (17:31 +0000)
committer: Andrew Cooper <andrew.cooper3@citrix.com>
Fri, 5 Apr 2024 14:44:25 +0000 (15:44 +0100)
Hypercalls in particular can be the subject of continuations, and logic
there checks updated state against incoming register values. If the
guest manufactured a suitable argument register with a non-zero upper
half before entering compatibility mode and issuing a hypercall from
there, checks in hypercall_xlat_continuation() might trip.

Since for HVM we want to also be sure to not hit a corner case in the
emulator, initiate the clipping right from the top of
{svm,vmx}_vmexit_handler(). Also rename the invoked function, as it no
longer does only invalidation of fields.

Note that architecturally the upper halves of registers are undefined
after a switch between compatibility and 64-bit mode (either direction).
Hence once having entered compatibility mode, the guest can't assume
the upper half of any register to retain its value.

This is part of XSA-454 / CVE-2023-46842.

Fixes: b8a7efe8528a ("Enable compatibility mode operation for HYPERVISOR_memory_op")
Reported-by: Manuel Andreas <manuel.andreas@tum.de>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
(cherry picked from commit 6a98383b0877bb66ebfe189da43bf81abe3d7909)

xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/include/asm-x86/hvm/hvm.h

index ba17bfe97a05de140115cd40e53fb1aef10e3a04..0cc5e07f461f72c515b1f2b5cf164201d774b3af 100644 (file)
@@ -2592,7 +2592,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     regs->rsp = vmcb->rsp;
     regs->rflags = vmcb->rflags;
 
-    hvm_invalidate_regs_fields(regs);
+    hvm_sanitize_regs_fields(
+        regs, !(vmcb_get_efer(vmcb) & EFER_LMA) || !(vmcb->cs.l));
 
     if ( paging_mode_hap(v->domain) )
         v->arch.hvm.guest_cr[3] = v->arch.hvm.hw_cr[3] = vmcb_get_cr3(vmcb);
index 54023a92587c1c088b4e7f6b12633d667cb16053..8bb4a6e042d782425c40e139ac90685c4faa5971 100644 (file)
@@ -3932,6 +3932,7 @@ static int vmx_handle_apic_write(void)
 void vmx_vmexit_handler(struct cpu_user_regs *regs)
 {
     unsigned long exit_qualification, exit_reason, idtv_info, intr_info = 0;
+    unsigned long cs_ar_bytes = 0;
     unsigned int vector = 0, mode;
     struct vcpu *v = current;
     struct domain *currd = v->domain;
@@ -3940,7 +3941,10 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
     __vmread(GUEST_RSP,    &regs->rsp);
     __vmread(GUEST_RFLAGS, &regs->rflags);
 
-    hvm_invalidate_regs_fields(regs);
+    if ( hvm_long_mode_active(v) )
+        __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
+
+    hvm_sanitize_regs_fields(regs, !(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE));
 
     if ( paging_mode_hap(v->domain) )
     {
index b8dc44555059157ef7babcc07304edf542b9e371..8e7a6b922d54adc36e49c2499a04f97b23b86584 100644 (file)
@@ -564,8 +564,24 @@ static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
             ? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0);
 }
 
-static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
+static inline void hvm_sanitize_regs_fields(struct cpu_user_regs *regs,
+                                            bool compat)
 {
+    if ( compat )
+    {
+        /* Clear GPR upper halves, to counteract guests playing games. */
+        regs->rbp = (uint32_t)regs->ebp;
+        regs->rbx = (uint32_t)regs->ebx;
+        regs->rax = (uint32_t)regs->eax;
+        regs->rcx = (uint32_t)regs->ecx;
+        regs->rdx = (uint32_t)regs->edx;
+        regs->rsi = (uint32_t)regs->esi;
+        regs->rdi = (uint32_t)regs->edi;
+        regs->rip = (uint32_t)regs->eip;
+        regs->rflags = (uint32_t)regs->eflags;
+        regs->rsp = (uint32_t)regs->esp;
+    }
+
 #ifndef NDEBUG
     regs->error_code = 0xbeef;
     regs->entry_vector = 0xbeef;