c/s 0f1cb96e "x86 hvm: Allow cross-vendor migration" caused HVM domains to
unconditionally intercept #UD exceptions. While cross-vendor migration is
cool as a demo, it is extremely niche.
Intercepting #UD allows userspace code in a multi-vcpu guest to execute
arbitrary instructions in the x86 emulator by having one thread execute a ud2a
instruction, and having a second thread rewrite the instruction before the
emulator performs an instruction fetch.
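
For illustration, the race has roughly the following shape in guest userspace
(a hypothetical sketch with invented names; run on bare metal it merely takes
and survives SIGILL, since the interesting behaviour only arises when a
hypervisor intercepts #UD and re-fetches the bytes in its emulator):

    #include <pthread.h>
    #include <setjmp.h>
    #include <signal.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    static unsigned char *site;          /* writable + executable page */
    static sigjmp_buf env;

    static void on_sigill(int sig)
    {
        (void)sig;
        siglongjmp(env, 1);              /* ud2a executed natively */
    }

    static void *rewriter(void *arg)
    {
        (void)arg;
        for ( ;; )
        {
            /* Flip between ud2a and nop;nop under the other thread,
             * aiming for the window between the #UD trap and the
             * emulator's instruction fetch.  An aligned 16-bit store
             * avoids torn intermediate encodings. */
            *(volatile unsigned short *)site = 0x0b0f;   /* ud2a */
            *(volatile unsigned short *)site = 0x9090;   /* nop; nop */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        int i;

        site = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if ( site == MAP_FAILED )
            return EXIT_FAILURE;

        site[0] = 0x0f; site[1] = 0x0b;          /* ud2a */
        site[2] = 0xc3;                          /* ret */

        signal(SIGILL, on_sigill);
        pthread_create(&t, NULL, rewriter, NULL);

        for ( i = 0; i < 100000; i++ )
        {
            if ( sigsetjmp(env, 1) == 0 )
                ((void (*)(void))site)();        /* execute the racing site */
        }

        return 0;                                /* exiting kills both threads */
    }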
XSAs 105, 106 and 110 are all examples where guest userspace can use bugs in
the x86 emulator to compromise security of the domain, either by privilege
escalation or causing a crash.
c/s 2d67a7a4 "x86: synchronize PCI config space access decoding"
introduced (amongst other things) a per-domain vendor, based on the guest's
cpuid policy.
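
For reference, that vendor is derived from the guest's CPUID leaf 0 policy,
whose 12-byte vendor string is the concatenation of ebx, edx and ecx.  A
simplified stand-in for what get_cpu_vendor(..., gcv_guest) computes (names
here are illustrative, not the Xen implementation):

    #include <stdint.h>
    #include <string.h>

    enum vendor { VENDOR_UNKNOWN, VENDOR_INTEL, VENDOR_AMD };

    static enum vendor vendor_from_leaf0(uint32_t ebx, uint32_t ecx,
                                         uint32_t edx)
    {
        char str[13];

        /* Vendor string order is ebx, edx, ecx, e.g. "GenuineIntel". */
        memcpy(str + 0, &ebx, 4);
        memcpy(str + 4, &edx, 4);
        memcpy(str + 8, &ecx, 4);
        str[12] = '\0';

        if ( !strcmp(str, "GenuineIntel") )
            return VENDOR_INTEL;
        if ( !strcmp(str, "AuthenticAMD") )
            return VENDOR_AMD;
        return VENDOR_UNKNOWN;
    }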
Use the per-guest vendor to enable #UD interception only when a domain is
configured for a vendor different to the current hardware. (#UD interception
is also enabled if hvm_fep is specified on the Xen command line. This is a
debug-only option whose entire purpose is for testing the x86 emulator.)
As a result, the overwhelming majority of usecases now have #UD interception
disabled, removing an attack surface for malicious guest userspace.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
.ecx = ctl->ecx
}
};
+ int old_vendor = d->arch.x86_vendor;
d->arch.x86_vendor = get_cpu_vendor(vendor_id.str, gcv_guest);
+
+ if ( is_hvm_domain(d) && (d->arch.x86_vendor != old_vendor) )
+ {
+ struct vcpu *v;
+
+ for_each_vcpu( d, v )
+ hvm_update_guest_vendor(v);
+ }
+
break;
}
xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
cpuid_input_t *cpuid, *unused = NULL;
+ if ( d == currd ) /* no domain_pause() */
+ {
+ ret = -EINVAL;
+ break;
+ }
+
for ( i = 0; i < MAX_CPUID_INPUT; i++ )
{
cpuid = &d->arch.cpuids[i];
break;
}
+ domain_pause(d);
+
if ( i < MAX_CPUID_INPUT )
*cpuid = *ctl;
else if ( unused )
if ( !ret )
update_domain_cpuid_info(d, ctl);
+ domain_unpause(d);
break;
}
static bool_t __initdata opt_hap_enabled = 1;
boolean_param("hap", opt_hap_enabled);
-#ifndef NDEBUG
+#ifndef opt_hvm_fep
/* Permit use of the Forced Emulation Prefix in HVM guests */
-static bool_t opt_hvm_fep;
+bool_t opt_hvm_fep;
boolean_param("hvm_fep", opt_hvm_fep);
-#else
-#define opt_hvm_fep 0
#endif
/* Xen command-line option to enable altp2m */
hvm_set_guest_tsc(v, 0);
}
+ hvm_update_guest_vendor(v);
+
return 0;
fail7:
vmcb_set_efer(vmcb, new_efer);
}
+static void svm_update_guest_vendor(struct vcpu *v)
+{
+ struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
+ struct vmcb_struct *vmcb = arch_svm->vmcb;
+ u32 bitmap = vmcb_get_exception_intercepts(vmcb);
+
+ if ( opt_hvm_fep ||
+ (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+ bitmap |= (1U << TRAP_invalid_op);
+ else
+ bitmap &= ~(1U << TRAP_invalid_op);
+
+ vmcb_set_exception_intercepts(vmcb, bitmap);
+}
+
static void svm_sync_vmcb(struct vcpu *v)
{
struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
.get_shadow_gs_base = svm_get_shadow_gs_base,
.update_guest_cr = svm_update_guest_cr,
.update_guest_efer = svm_update_guest_efer,
+ .update_guest_vendor = svm_update_guest_vendor,
.set_guest_pat = svm_set_guest_pat,
.get_guest_pat = svm_get_guest_pat,
.set_tsc_offset = svm_set_tsc_offset,
static void vmx_install_vlapic_mapping(struct vcpu *v);
static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
static void vmx_update_guest_efer(struct vcpu *v);
+static void vmx_update_guest_vendor(struct vcpu *v);
static void vmx_cpuid_intercept(
unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
__vmwrite(EXCEPTION_BITMAP, bitmap);
}
+static void vmx_update_guest_vendor(struct vcpu *v)
+{
+ if ( opt_hvm_fep ||
+ (v->domain->arch.x86_vendor != boot_cpu_data.x86_vendor) )
+ v->arch.hvm_vmx.exception_bitmap |= (1U << TRAP_invalid_op);
+ else
+ v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_invalid_op);
+
+ vmx_vmcs_enter(v);
+ vmx_update_exception_bitmap(v);
+ vmx_vmcs_exit(v);
+}
+
static int vmx_guest_x86_mode(struct vcpu *v)
{
unsigned long cs_ar_bytes;
.update_host_cr3 = vmx_update_host_cr3,
.update_guest_cr = vmx_update_guest_cr,
.update_guest_efer = vmx_update_guest_efer,
+ .update_guest_vendor = vmx_update_guest_vendor,
.set_guest_pat = vmx_set_guest_pat,
.get_guest_pat = vmx_get_guest_pat,
.set_tsc_offset = vmx_set_tsc_offset,
#include <public/hvm/ioreq.h>
#include <xen/mm.h>
+#ifndef NDEBUG
+/* Permit use of the Forced Emulation Prefix in HVM guests */
+extern bool_t opt_hvm_fep;
+#else
+#define opt_hvm_fep 0
+#endif
+
/* Interrupt acknowledgement sources. */
enum hvm_intsrc {
hvm_intsrc_none,
void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
void (*update_guest_efer)(struct vcpu *v);
+ void (*update_guest_vendor)(struct vcpu *v);
+
int (*get_guest_pat)(struct vcpu *v, u64 *);
int (*set_guest_pat)(struct vcpu *v, u64);
hvm_funcs.update_guest_efer(v);
}
+static inline void hvm_update_guest_vendor(struct vcpu *v)
+{
+ hvm_funcs.update_guest_vendor(v);
+}
+
/*
* Called to ensure than all guest-specific mappings in a tagged TLB are
* flushed; does *not* flush Xen's TLB entries, and on processors without a
/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << TRAP_debug) | \
- (1U << TRAP_invalid_op) | \
(1U << TRAP_alignment_check) | \
(1U << TRAP_machine_check))