From 89c423a170de2fef08445ea9151bcfa15c45b217 Mon Sep 17 00:00:00 2001
From: Andrew Cooper
Date: Mon, 26 Sep 2016 14:28:21 +0000
Subject: [PATCH] x86/svm: Drop the set_segment_register() macro

Replace its sole users with a single piece of inline assembly which is more
flexible about its register constraints, rather than forcing the use of %ax.

While editing this area, reflow the comment to remove trailing whitespace and
use fewer lines.

No functional change.

Signed-off-by: Andrew Cooper
Reviewed-by: Jan Beulich
Reviewed-by: Boris Ostrovsky
---
 xen/arch/x86/hvm/svm/svm.c | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 679e61586f..0ed3e73395 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -69,9 +69,6 @@ u32 svm_feature_flags;
 /* Indicates whether guests may use EFER.LMSLE. */
 bool_t cpu_has_lmsl;
 
-#define set_segment_register(name, value) \
-    asm volatile ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
-
 static void svm_update_guest_efer(struct vcpu *);
 
 static struct hvm_function_table svm_function_table;
@@ -1023,15 +1020,12 @@ static void svm_ctxt_switch_to(struct vcpu *v)
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     int cpu = smp_processor_id();
 
-    /*
-     * This is required, because VMRUN does consistency check
-     * and some of the DOM0 selectors are pointing to
-     * invalid GDT locations, and cause AMD processors
-     * to shutdown.
+    /*
+     * This is required, because VMRUN does consistency check and some of the
+     * DOM0 selectors are pointing to invalid GDT locations, and cause AMD
+     * processors to shutdown.
      */
-    set_segment_register(ds, 0);
-    set_segment_register(es, 0);
-    set_segment_register(ss, 0);
+    asm volatile ("mov %0, %%ds; mov %0, %%es; mov %0, %%ss;" :: "r" (0));
 
     /*
      * Cannot use ISTs for NMI/#MC/#DF while we are running with the guest TR.
-- 
2.39.5
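
For illustration only (not part of the patch): a minimal standalone sketch of
the register-constraint difference the commit message describes. The "a"
constraint used by the old macro pins the operand to %ax, whereas an "r"
constraint lets the compiler pick any suitable general purpose register. The
helper names below are hypothetical.

    /* Old style: "a" forces @sel into %ax before the move to %ds. */
    static inline void load_ds_fixed(unsigned short sel)
    {
        asm volatile ( "movw %%ax, %%ds" : : "a" (sel) );
    }

    /* New style: "r" lets the compiler choose the register substituted for %0. */
    static inline void load_ds_flexible(unsigned short sel)
    {
        asm volatile ( "mov %0, %%ds" : : "r" (sel) );
    }

With the "r" form the compiler is free to keep %ax live across the statement,
which is what makes the single combined ds/es/ss sequence in the patch possible
without dictating register allocation.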