direct-io.hg

changeset 12306:9f9f569b0a1d

[VMX] __vmread() and __vmwrite() now BUG on failure, rather than
returning the error to the caller.

None of our vmcs reads/writes should fail. In fact, many callers were
ignoring failures, which can hide bugs (indeed, this hid one in
vmx_restore_dr(), which was incorrectly using __vmread() before
the vmcs was loaded).

__vmread_safe() is used by the vmcs dump function, which is the only
caller that can legitimately fail a vmcs access.
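For reference, a minimal sketch of the resulting calling convention. The
real definitions live in xen/include/asm-x86/hvm/vmx/vmx.h (not shown in
this excerpt) and use Xen's own VMREAD/VMWRITE encodings and BUG()
plumbing; the inline asm below is illustrative only:

    /* Sketch only: the real helpers are in asm-x86/hvm/vmx/vmx.h. */
    static inline unsigned long __vmread_safe(unsigned long field, int *error)
    {
        unsigned long value;
        unsigned char fail;

        /* VMREAD sets CF or ZF on failure; 'setna' captures either case. */
        asm volatile ( "vmread %[fld], %[val]; setna %[err]"
                       : [val] "=rm" (value), [err] "=qm" (fail)
                       : [fld] "r" (field)
                       : "cc" );
        *error = fail;
        return value;
    }

    static inline unsigned long __vmread(unsigned long field)
    {
        int rc;
        unsigned long value = __vmread_safe(field, &rc);
        BUG_ON(rc);                 /* No caller may legitimately fail. */
        return value;
    }

    static inline void __vmwrite(unsigned long field, unsigned long value)
    {
        unsigned char fail;

        asm volatile ( "vmwrite %[val], %[fld]; setna %[err]"
                       : [err] "=qm" (fail)
                       : [val] "rm" (value), [fld] "r" (field)
                       : "cc" );
        BUG_ON(fail);               /* A failed vmcs write is a Xen bug. */
    }

Call sites then read as plain assignments, e.g.
eflags = __vmread(GUEST_RFLAGS); only the vmcs dump path inspects the
error out-parameter of __vmread_safe().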

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Nov 08 16:43:50 2006 +0000 (2006-11-08)
parents 6555ca56d844
children 5e0fb830a53c
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/vmx/io.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmx.h
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Wed Nov 08 15:11:18 2006 +0000
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Wed Nov 08 16:43:50 2006 +0000
     1.3 @@ -424,17 +424,21 @@ static inline int long_mode_do_msr_write
     1.4  
     1.5  static inline void svm_save_dr(struct vcpu *v)
     1.6  {
     1.7 -    if (v->arch.hvm_vcpu.flag_dr_dirty)
     1.8 -    {
     1.9 -        /* clear the DR dirty flag and re-enable intercepts for DR accesses */ 
    1.10 -        v->arch.hvm_vcpu.flag_dr_dirty = 0;
    1.11 -        v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
    1.12 -
    1.13 -        savedebug(&v->arch.guest_context, 0);    
    1.14 -        savedebug(&v->arch.guest_context, 1);    
    1.15 -        savedebug(&v->arch.guest_context, 2);    
    1.16 -        savedebug(&v->arch.guest_context, 3);    
    1.17 -    }
    1.18 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.19 +
    1.20 +    if ( !v->arch.hvm_vcpu.flag_dr_dirty )
    1.21 +        return;
    1.22 +
    1.23 +    /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
    1.24 +    v->arch.hvm_vcpu.flag_dr_dirty = 0;
    1.25 +    v->arch.hvm_svm.vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
    1.26 +
    1.27 +    savedebug(&v->arch.guest_context, 0);
    1.28 +    savedebug(&v->arch.guest_context, 1);
    1.29 +    savedebug(&v->arch.guest_context, 2);
    1.30 +    savedebug(&v->arch.guest_context, 3);
    1.31 +    v->arch.guest_context.debugreg[6] = vmcb->dr6;
    1.32 +    v->arch.guest_context.debugreg[7] = vmcb->dr7;
    1.33  }
    1.34  
    1.35  
    1.36 @@ -444,17 +448,13 @@ static inline void __restore_debug_regis
    1.37      loaddebug(&v->arch.guest_context, 1);
    1.38      loaddebug(&v->arch.guest_context, 2);
    1.39      loaddebug(&v->arch.guest_context, 3);
    1.40 +    /* DR6 and DR7 are loaded from the VMCB. */
    1.41  }
    1.42  
    1.43  
    1.44  static inline void svm_restore_dr(struct vcpu *v)
    1.45  {
    1.46 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.47 -
    1.48 -    if (!vmcb)
    1.49 -        return;
    1.50 -
    1.51 -    if (unlikely(vmcb->dr7 & 0xFF))
    1.52 +    if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
    1.53          __restore_debug_registers(v);
    1.54  }
    1.55  
     2.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Wed Nov 08 15:11:18 2006 +0000
     2.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Wed Nov 08 16:43:50 2006 +0000
     2.3 @@ -91,7 +91,6 @@ static int construct_vmcb(struct vcpu *v
     2.4      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
     2.5      struct vmcb_struct *vmcb = arch_svm->vmcb;
     2.6      segment_attributes_t attrib;
     2.7 -    unsigned long dr7;
     2.8  
     2.9      /* Always flush the TLB on VMRUN. */
    2.10      vmcb->tlb_control = 1;
    2.11 @@ -204,10 +203,6 @@ static int construct_vmcb(struct vcpu *v
    2.12          read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
    2.13      vmcb->cr4 = arch_svm->cpu_shadow_cr4 | SVM_CR4_HOST_MASK;
    2.14  
    2.15 -    /* Guest DR7. */
    2.16 -    __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (dr7));
    2.17 -    vmcb->dr7 = dr7;
    2.18 -
    2.19      shadow_update_paging_modes(v);
    2.20      vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
    2.21  
     3.1 --- a/xen/arch/x86/hvm/vmx/io.c	Wed Nov 08 15:11:18 2006 +0000
     3.2 +++ b/xen/arch/x86/hvm/vmx/io.c	Wed Nov 08 16:43:50 2006 +0000
     3.3 @@ -63,9 +63,7 @@ disable_irq_window(struct vcpu *v)
     3.4  
     3.5  static inline int is_interruptibility_state(void)
     3.6  {
     3.7 -    int  interruptibility;
     3.8 -    __vmread(GUEST_INTERRUPTIBILITY_INFO, &interruptibility);
     3.9 -    return interruptibility;
    3.10 +    return __vmread(GUEST_INTERRUPTIBILITY_INFO);
    3.11  }
    3.12  
    3.13  #ifdef __x86_64__
    3.14 @@ -129,7 +127,7 @@ asmlinkage void vmx_intr_assist(void)
    3.15      }
    3.16  
    3.17      /* This could be moved earlier in the VMX resume sequence. */
    3.18 -    __vmread(IDT_VECTORING_INFO_FIELD, &idtv_info_field);
    3.19 +    idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
    3.20      if (unlikely(idtv_info_field & INTR_INFO_VALID_MASK)) {
    3.21          __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
    3.22  
    3.23 @@ -138,14 +136,12 @@ asmlinkage void vmx_intr_assist(void)
    3.24           * and interrupts. If we get here then delivery of some event caused a
    3.25           * fault, and this always results in defined VM_EXIT_INSTRUCTION_LEN.
    3.26           */
    3.27 -        __vmread(VM_EXIT_INSTRUCTION_LEN, &inst_len); /* Safe */
    3.28 +        inst_len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe */
    3.29          __vmwrite(VM_ENTRY_INSTRUCTION_LEN, inst_len);
    3.30  
    3.31 -        if (unlikely(idtv_info_field & 0x800)) { /* valid error code */
    3.32 -            unsigned long error_code;
    3.33 -            __vmread(IDT_VECTORING_ERROR_CODE, &error_code);
    3.34 -            __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
    3.35 -        }
    3.36 +        if (unlikely(idtv_info_field & 0x800)) /* valid error code */
    3.37 +            __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
    3.38 +                      __vmread(IDT_VECTORING_ERROR_CODE));
    3.39          if (unlikely(has_ext_irq))
    3.40              enable_irq_window(v);
    3.41  
    3.42 @@ -163,7 +159,7 @@ asmlinkage void vmx_intr_assist(void)
    3.43          return;
    3.44      }
    3.45  
    3.46 -    __vmread(GUEST_RFLAGS, &eflags);
    3.47 +    eflags = __vmread(GUEST_RFLAGS);
    3.48      if (irq_masked(eflags)) {
    3.49          enable_irq_window(v);
    3.50          return;
     4.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Nov 08 15:11:18 2006 +0000
     4.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Nov 08 16:43:50 2006 +0000
     4.3 @@ -257,7 +257,7 @@ struct host_execution_env {
     4.4  
     4.5  static void vmx_set_host_env(struct vcpu *v)
     4.6  {
     4.7 -    unsigned int tr, cpu, error = 0;
     4.8 +    unsigned int tr, cpu;
     4.9      struct host_execution_env host_env;
    4.10      struct Xgt_desc_struct desc;
    4.11  
    4.12 @@ -265,93 +265,95 @@ static void vmx_set_host_env(struct vcpu
    4.13      __asm__ __volatile__ ("sidt  (%0) \n" :: "a"(&desc) : "memory");
    4.14      host_env.idtr_limit = desc.size;
    4.15      host_env.idtr_base = desc.address;
    4.16 -    error |= __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
    4.17 +    __vmwrite(HOST_IDTR_BASE, host_env.idtr_base);
    4.18  
    4.19      __asm__ __volatile__ ("sgdt  (%0) \n" :: "a"(&desc) : "memory");
    4.20      host_env.gdtr_limit = desc.size;
    4.21      host_env.gdtr_base = desc.address;
    4.22 -    error |= __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);
    4.23 +    __vmwrite(HOST_GDTR_BASE, host_env.gdtr_base);
    4.24  
    4.25      __asm__ __volatile__ ("str  (%0) \n" :: "a"(&tr) : "memory");
    4.26      host_env.tr_selector = tr;
    4.27      host_env.tr_limit = sizeof(struct tss_struct);
    4.28      host_env.tr_base = (unsigned long) &init_tss[cpu];
    4.29 -    error |= __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    4.30 -    error |= __vmwrite(HOST_TR_BASE, host_env.tr_base);
    4.31 -    error |= __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
    4.32 +    __vmwrite(HOST_TR_SELECTOR, host_env.tr_selector);
    4.33 +    __vmwrite(HOST_TR_BASE, host_env.tr_base);
    4.34 +    __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
    4.35  }
    4.36  
    4.37 -static int construct_vmcs(struct vcpu *v)
    4.38 +static void construct_vmcs(struct vcpu *v)
    4.39  {
    4.40 -    int error = 0;
    4.41 -    unsigned long tmp, cr0, cr4;
    4.42 +    unsigned long cr0, cr4;
    4.43      union vmcs_arbytes arbytes;
    4.44  
    4.45      vmx_vmcs_enter(v);
    4.46  
    4.47      /* VMCS controls. */
    4.48 -    error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
    4.49 -    error |= __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    4.50 -    error |= __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
    4.51 -    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
    4.52 +    __vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_control);
    4.53 +    __vmwrite(VM_EXIT_CONTROLS, vmx_vmexit_control);
    4.54 +    __vmwrite(VM_ENTRY_CONTROLS, vmx_vmentry_control);
    4.55 +    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
    4.56      v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
    4.57  
    4.58      /* Host data selectors. */
    4.59 -    error |= __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    4.60 -    error |= __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
    4.61 -    error |= __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
    4.62 +    __vmwrite(HOST_SS_SELECTOR, __HYPERVISOR_DS);
    4.63 +    __vmwrite(HOST_DS_SELECTOR, __HYPERVISOR_DS);
    4.64 +    __vmwrite(HOST_ES_SELECTOR, __HYPERVISOR_DS);
    4.65  #if defined(__i386__)
    4.66 -    error |= __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
    4.67 -    error |= __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
    4.68 -    error |= __vmwrite(HOST_FS_BASE, 0);
    4.69 -    error |= __vmwrite(HOST_GS_BASE, 0);
    4.70 +    __vmwrite(HOST_FS_SELECTOR, __HYPERVISOR_DS);
    4.71 +    __vmwrite(HOST_GS_SELECTOR, __HYPERVISOR_DS);
    4.72 +    __vmwrite(HOST_FS_BASE, 0);
    4.73 +    __vmwrite(HOST_GS_BASE, 0);
    4.74  #elif defined(__x86_64__)
    4.75 -    rdmsrl(MSR_FS_BASE, tmp); error |= __vmwrite(HOST_FS_BASE, tmp);
    4.76 -    rdmsrl(MSR_GS_BASE, tmp); error |= __vmwrite(HOST_GS_BASE, tmp);
    4.77 +    {
    4.78 +        unsigned long msr;
    4.79 +        rdmsrl(MSR_FS_BASE, msr); __vmwrite(HOST_FS_BASE, msr);
    4.80 +        rdmsrl(MSR_GS_BASE, msr); __vmwrite(HOST_GS_BASE, msr);
    4.81 +    }
    4.82  #endif
    4.83  
    4.84      /* Host control registers. */
    4.85 -    error |= __vmwrite(HOST_CR0, read_cr0());
    4.86 -    error |= __vmwrite(HOST_CR4, read_cr4());
    4.87 +    __vmwrite(HOST_CR0, read_cr0());
    4.88 +    __vmwrite(HOST_CR4, read_cr4());
    4.89  
    4.90      /* Host CS:RIP. */
    4.91 -    error |= __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
    4.92 -    error |= __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
    4.93 +    __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
    4.94 +    __vmwrite(HOST_RIP, (unsigned long)vmx_asm_vmexit_handler);
    4.95  
    4.96      /* MSR intercepts. */
    4.97 -    error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    4.98 -    error |= __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
    4.99 -    error |= __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
   4.100 -    error |= __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
   4.101 -    error |= __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
   4.102 +    __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
   4.103 +    __vmwrite(VM_EXIT_MSR_STORE_ADDR, 0);
   4.104 +    __vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
   4.105 +    __vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
   4.106 +    __vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
   4.107  
   4.108 -    error |= __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
   4.109 +    __vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
   4.110  
   4.111 -    error |= __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
   4.112 -    error |= __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
   4.113 +    __vmwrite(CR0_GUEST_HOST_MASK, ~0UL);
   4.114 +    __vmwrite(CR4_GUEST_HOST_MASK, ~0UL);
   4.115  
   4.116 -    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
   4.117 -    error |= __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
   4.118 +    __vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
   4.119 +    __vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
   4.120  
   4.121 -    error |= __vmwrite(CR3_TARGET_COUNT, 0);
   4.122 +    __vmwrite(CR3_TARGET_COUNT, 0);
   4.123  
   4.124 -    error |= __vmwrite(GUEST_ACTIVITY_STATE, 0);
   4.125 +    __vmwrite(GUEST_ACTIVITY_STATE, 0);
   4.126  
   4.127      /* Guest segment bases. */
   4.128 -    error |= __vmwrite(GUEST_ES_BASE, 0);
   4.129 -    error |= __vmwrite(GUEST_SS_BASE, 0);
   4.130 -    error |= __vmwrite(GUEST_DS_BASE, 0);
   4.131 -    error |= __vmwrite(GUEST_FS_BASE, 0);
   4.132 -    error |= __vmwrite(GUEST_GS_BASE, 0);
   4.133 -    error |= __vmwrite(GUEST_CS_BASE, 0);
   4.134 +    __vmwrite(GUEST_ES_BASE, 0);
   4.135 +    __vmwrite(GUEST_SS_BASE, 0);
   4.136 +    __vmwrite(GUEST_DS_BASE, 0);
   4.137 +    __vmwrite(GUEST_FS_BASE, 0);
   4.138 +    __vmwrite(GUEST_GS_BASE, 0);
   4.139 +    __vmwrite(GUEST_CS_BASE, 0);
   4.140  
   4.141      /* Guest segment limits. */
   4.142 -    error |= __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
   4.143 -    error |= __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.144 -    error |= __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.145 -    error |= __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.146 -    error |= __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.147 -    error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.148 +    __vmwrite(GUEST_ES_LIMIT, GUEST_SEGMENT_LIMIT);
   4.149 +    __vmwrite(GUEST_SS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.150 +    __vmwrite(GUEST_DS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.151 +    __vmwrite(GUEST_FS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.152 +    __vmwrite(GUEST_GS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.153 +    __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT);
   4.154  
   4.155      /* Guest segment AR bytes. */
   4.156      arbytes.bytes = 0;
   4.157 @@ -362,82 +364,77 @@ static int construct_vmcs(struct vcpu *v
   4.158      arbytes.fields.default_ops_size = 1;    /* 32-bit */
   4.159      arbytes.fields.g = 1;
   4.160      arbytes.fields.null_bit = 0;            /* not null */
   4.161 -    error |= __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
   4.162 -    error |= __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
   4.163 -    error |= __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
   4.164 -    error |= __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
   4.165 -    error |= __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
   4.166 +    __vmwrite(GUEST_ES_AR_BYTES, arbytes.bytes);
   4.167 +    __vmwrite(GUEST_SS_AR_BYTES, arbytes.bytes);
   4.168 +    __vmwrite(GUEST_DS_AR_BYTES, arbytes.bytes);
   4.169 +    __vmwrite(GUEST_FS_AR_BYTES, arbytes.bytes);
   4.170 +    __vmwrite(GUEST_GS_AR_BYTES, arbytes.bytes);
   4.171      arbytes.fields.seg_type = 0xb;          /* type = 0xb */
   4.172 -    error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
   4.173 +    __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes);
   4.174  
   4.175      /* Guest GDT. */
   4.176 -    error |= __vmwrite(GUEST_GDTR_BASE, 0);
   4.177 -    error |= __vmwrite(GUEST_GDTR_LIMIT, 0);
   4.178 +    __vmwrite(GUEST_GDTR_BASE, 0);
   4.179 +    __vmwrite(GUEST_GDTR_LIMIT, 0);
   4.180  
   4.181      /* Guest IDT. */
   4.182 -    error |= __vmwrite(GUEST_IDTR_BASE, 0);
   4.183 -    error |= __vmwrite(GUEST_IDTR_LIMIT, 0);
   4.184 +    __vmwrite(GUEST_IDTR_BASE, 0);
   4.185 +    __vmwrite(GUEST_IDTR_LIMIT, 0);
   4.186  
   4.187      /* Guest LDT and TSS. */
   4.188      arbytes.fields.s = 0;                   /* not code or data segement */
   4.189      arbytes.fields.seg_type = 0x2;          /* LTD */
   4.190      arbytes.fields.default_ops_size = 0;    /* 16-bit */
   4.191      arbytes.fields.g = 0;
   4.192 -    error |= __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
   4.193 +    __vmwrite(GUEST_LDTR_AR_BYTES, arbytes.bytes);
   4.194      arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
   4.195 -    error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
   4.196 +    __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
   4.197  
   4.198 -    error |= __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
   4.199 -    __asm__ __volatile__ ("mov %%dr7, %0\n" : "=r" (tmp));
   4.200 -    error |= __vmwrite(GUEST_DR7, tmp);
   4.201 -    error |= __vmwrite(VMCS_LINK_POINTER, ~0UL);
   4.202 +    __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
   4.203 +    __vmwrite(GUEST_DR7, 0);
   4.204 +    __vmwrite(VMCS_LINK_POINTER, ~0UL);
   4.205  #if defined(__i386__)
   4.206 -    error |= __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
   4.207 +    __vmwrite(VMCS_LINK_POINTER_HIGH, ~0UL);
   4.208  #endif
   4.209  
   4.210 -    error |= __vmwrite(EXCEPTION_BITMAP,
   4.211 -                       MONITOR_DEFAULT_EXCEPTION_BITMAP);
   4.212 +    __vmwrite(EXCEPTION_BITMAP, MONITOR_DEFAULT_EXCEPTION_BITMAP);
   4.213  
   4.214      /* Guest CR0. */
   4.215      cr0 = read_cr0();
   4.216      v->arch.hvm_vmx.cpu_cr0 = cr0;
   4.217 -    error |= __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   4.218 +    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
   4.219      v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
   4.220 -    error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   4.221 +    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   4.222  
   4.223      /* Guest CR4. */
   4.224      cr4 = read_cr4();
   4.225 -    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
   4.226 +    __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
   4.227      v->arch.hvm_vmx.cpu_shadow_cr4 =
   4.228          cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
   4.229 -    error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   4.230 +    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   4.231  
   4.232  #ifdef __x86_64__ 
   4.233      /* VLAPIC TPR optimisation. */
   4.234      v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_TPR_SHADOW;
   4.235      v->arch.hvm_vcpu.u.vmx.exec_control &=
   4.236          ~(CPU_BASED_CR8_STORE_EXITING | CPU_BASED_CR8_LOAD_EXITING);
   4.237 -    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
   4.238 -                       v->arch.hvm_vcpu.u.vmx.exec_control);
   4.239 -    error |= __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
   4.240 -                       page_to_maddr(vcpu_vlapic(v)->regs_page));
   4.241 -    error |= __vmwrite(TPR_THRESHOLD, 0);
   4.242 +    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vcpu.u.vmx.exec_control);
   4.243 +    __vmwrite(VIRTUAL_APIC_PAGE_ADDR,
   4.244 +              page_to_maddr(vcpu_vlapic(v)->regs_page));
   4.245 +    __vmwrite(TPR_THRESHOLD, 0);
   4.246  #endif
   4.247  
   4.248 -    error |= __vmwrite(GUEST_LDTR_SELECTOR, 0);
   4.249 -    error |= __vmwrite(GUEST_LDTR_BASE, 0);
   4.250 -    error |= __vmwrite(GUEST_LDTR_LIMIT, 0);
   4.251 +    __vmwrite(GUEST_LDTR_SELECTOR, 0);
   4.252 +    __vmwrite(GUEST_LDTR_BASE, 0);
   4.253 +    __vmwrite(GUEST_LDTR_LIMIT, 0);
   4.254  
   4.255 -    error |= __vmwrite(GUEST_TR_BASE, 0);
   4.256 -    error |= __vmwrite(GUEST_TR_LIMIT, 0xff);
   4.257 +    __vmwrite(GUEST_TR_BASE, 0);
   4.258 +    __vmwrite(GUEST_TR_LIMIT, 0xff);
   4.259  
   4.260      shadow_update_paging_modes(v);
   4.261      __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
   4.262      __vmwrite(HOST_CR3, v->arch.cr3);
   4.263  
   4.264      vmx_vmcs_exit(v);
   4.265 -
   4.266 -    return error;
   4.267  }
   4.268  
   4.269  int vmx_create_vmcs(struct vcpu *v)
   4.270 @@ -446,13 +443,8 @@ int vmx_create_vmcs(struct vcpu *v)
   4.271          return -ENOMEM;
   4.272   
   4.273      __vmx_clear_vmcs(v);
   4.274 -    
   4.275 -    if ( construct_vmcs(v) != 0 )
   4.276 -    {
   4.277 -        vmx_free_vmcs(v->arch.hvm_vmx.vmcs);
   4.278 -        v->arch.hvm_vmx.vmcs = NULL;
   4.279 -        return -EINVAL;
   4.280 -    }
   4.281 +
   4.282 +    construct_vmcs(v);
   4.283  
   4.284      return 0;
   4.285  }
   4.286 @@ -472,16 +464,14 @@ void vmx_destroy_vmcs(struct vcpu *v)
   4.287  
   4.288  void vm_launch_fail(unsigned long eflags)
   4.289  {
   4.290 -    unsigned long error;
   4.291 -    __vmread(VM_INSTRUCTION_ERROR, &error);
   4.292 +    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
   4.293      printk("<vm_launch_fail> error code %lx\n", error);
   4.294      __hvm_bug(guest_cpu_user_regs());
   4.295  }
   4.296  
   4.297  void vm_resume_fail(unsigned long eflags)
   4.298  {
   4.299 -    unsigned long error;
   4.300 -    __vmread(VM_INSTRUCTION_ERROR, &error);
   4.301 +    unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
   4.302      printk("<vm_resume_fail> error code %lx\n", error);
   4.303      __hvm_bug(guest_cpu_user_regs());
   4.304  }
   4.305 @@ -510,7 +500,7 @@ static void print_section(char *header, 
   4.306  {
   4.307      uint32_t addr, j;
   4.308      unsigned long val;
   4.309 -    int code;
   4.310 +    int code, rc;
   4.311      char *fmt[4] = {"0x%04lx ", "0x%016lx ", "0x%08lx ", "0x%016lx "};
   4.312      char *err[4] = {"------ ", "------------------ ", 
   4.313                      "---------- ", "------------------ "};
   4.314 @@ -526,7 +516,8 @@ static void print_section(char *header, 
   4.315          if (!(j&3))
   4.316              printk("\n\t\t0x%08x: ", addr);
   4.317  
   4.318 -        if (!__vmread(addr, &val))
   4.319 +        val = __vmread_safe(addr, &rc);
   4.320 +        if (rc == 0)
   4.321              printk(fmt[code], val);
   4.322          else
   4.323              printk("%s", err[code]);
     5.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Nov 08 15:11:18 2006 +0000
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Nov 08 16:43:50 2006 +0000
     5.3 @@ -154,14 +154,14 @@ static inline int long_mode_do_msr_read(
     5.4              /* XXX should it be GP fault */
     5.5              domain_crash_synchronous();
     5.6  
     5.7 -        __vmread(GUEST_FS_BASE, &msr_content);
     5.8 +        msr_content = __vmread(GUEST_FS_BASE);
     5.9          break;
    5.10  
    5.11      case MSR_GS_BASE:
    5.12          if ( !(vmx_long_mode_enabled(v)) )
    5.13              domain_crash_synchronous();
    5.14  
    5.15 -        __vmread(GUEST_GS_BASE, &msr_content);
    5.16 +        msr_content = __vmread(GUEST_GS_BASE);
    5.17          break;
    5.18  
    5.19      case MSR_SHADOW_GS_BASE:
    5.20 @@ -323,20 +323,20 @@ static inline int long_mode_do_msr_write
    5.21  
    5.22  static inline void vmx_save_dr(struct vcpu *v)
    5.23  {
    5.24 -    if ( v->arch.hvm_vcpu.flag_dr_dirty )
    5.25 -    {
    5.26 -        savedebug(&v->arch.guest_context, 0);
    5.27 -        savedebug(&v->arch.guest_context, 1);
    5.28 -        savedebug(&v->arch.guest_context, 2);
    5.29 -        savedebug(&v->arch.guest_context, 3);
    5.30 -        savedebug(&v->arch.guest_context, 6);
    5.31 -        
    5.32 -        v->arch.hvm_vcpu.flag_dr_dirty = 0;
    5.33 +    if ( !v->arch.hvm_vcpu.flag_dr_dirty )
    5.34 +        return;
    5.35  
    5.36 -        v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
    5.37 -        __vmwrite(CPU_BASED_VM_EXEC_CONTROL,
    5.38 -                  v->arch.hvm_vcpu.u.vmx.exec_control);
    5.39 -    }
    5.40 +    /* Clear the DR dirty flag and re-enable intercepts for DR accesses. */
    5.41 +    v->arch.hvm_vcpu.flag_dr_dirty = 0;
    5.42 +    v->arch.hvm_vcpu.u.vmx.exec_control |= CPU_BASED_MOV_DR_EXITING;
    5.43 +    __vmwrite(CPU_BASED_VM_EXEC_CONTROL, v->arch.hvm_vcpu.u.vmx.exec_control);
    5.44 +
    5.45 +    savedebug(&v->arch.guest_context, 0);
    5.46 +    savedebug(&v->arch.guest_context, 1);
    5.47 +    savedebug(&v->arch.guest_context, 2);
    5.48 +    savedebug(&v->arch.guest_context, 3);
    5.49 +    savedebug(&v->arch.guest_context, 6);
    5.50 +    v->arch.guest_context.debugreg[7] = __vmread(GUEST_DR7);
    5.51  }
    5.52  
    5.53  static inline void __restore_debug_registers(struct vcpu *v)
    5.54 @@ -347,7 +347,7 @@ static inline void __restore_debug_regis
    5.55      loaddebug(&v->arch.guest_context, 3);
    5.56      /* No 4 and 5 */
    5.57      loaddebug(&v->arch.guest_context, 6);
    5.58 -    /* DR7 is loaded from the vmcs. */
    5.59 +    /* DR7 is loaded from the VMCS. */
    5.60  }
    5.61  
    5.62  /*
    5.63 @@ -355,21 +355,13 @@ static inline void __restore_debug_regis
    5.64   * need to be restored if their value is going to affect execution -- i.e.,
    5.65   * if one of the breakpoints is enabled.  So mask out all bits that don't
    5.66   * enable some breakpoint functionality.
    5.67 - *
    5.68 - * This is in part necessary because bit 10 of DR7 is hardwired to 1, so a
    5.69 - * simple if( guest_dr7 ) will always return true.  As long as we're masking,
    5.70 - * we might as well do it right.
    5.71   */
    5.72  #define DR7_ACTIVE_MASK 0xff
    5.73  
    5.74  static inline void vmx_restore_dr(struct vcpu *v)
    5.75  {
    5.76 -    unsigned long guest_dr7;
    5.77 -
    5.78 -    __vmread(GUEST_DR7, &guest_dr7);
    5.79 -
    5.80 -    /* Assumes guest does not have DR access at time of context switch. */
    5.81 -    if ( unlikely(guest_dr7 & DR7_ACTIVE_MASK) )
    5.82 +    /* NB. __vmread() is not usable here, so we cannot read from the VMCS. */
    5.83 +    if ( unlikely(v->arch.guest_context.debugreg[7] & DR7_ACTIVE_MASK) )
    5.84          __restore_debug_registers(v);
    5.85  }
    5.86  
    5.87 @@ -430,22 +422,22 @@ static void vmx_store_cpu_guest_regs(
    5.88  
    5.89      if ( regs != NULL )
    5.90      {
    5.91 -        __vmread(GUEST_RFLAGS, &regs->eflags);
    5.92 -        __vmread(GUEST_SS_SELECTOR, &regs->ss);
    5.93 -        __vmread(GUEST_CS_SELECTOR, &regs->cs);
    5.94 -        __vmread(GUEST_DS_SELECTOR, &regs->ds);
    5.95 -        __vmread(GUEST_ES_SELECTOR, &regs->es);
    5.96 -        __vmread(GUEST_GS_SELECTOR, &regs->gs);
    5.97 -        __vmread(GUEST_FS_SELECTOR, &regs->fs);
    5.98 -        __vmread(GUEST_RIP, &regs->eip);
    5.99 -        __vmread(GUEST_RSP, &regs->esp);
   5.100 +        regs->eflags = __vmread(GUEST_RFLAGS);
   5.101 +        regs->ss = __vmread(GUEST_SS_SELECTOR);
   5.102 +        regs->cs = __vmread(GUEST_CS_SELECTOR);
   5.103 +        regs->ds = __vmread(GUEST_DS_SELECTOR);
   5.104 +        regs->es = __vmread(GUEST_ES_SELECTOR);
   5.105 +        regs->gs = __vmread(GUEST_GS_SELECTOR);
   5.106 +        regs->fs = __vmread(GUEST_FS_SELECTOR);
   5.107 +        regs->eip = __vmread(GUEST_RIP);
   5.108 +        regs->esp = __vmread(GUEST_RSP);
   5.109      }
   5.110  
   5.111      if ( crs != NULL )
   5.112      {
   5.113          crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
   5.114          crs[2] = v->arch.hvm_vmx.cpu_cr2;
   5.115 -        __vmread(GUEST_CR3, &crs[3]);
   5.116 +        crs[3] = __vmread(GUEST_CR3);
   5.117          crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
   5.118      }
   5.119  
   5.120 @@ -466,29 +458,26 @@ static void vmx_store_cpu_guest_regs(
   5.121   */
   5.122  static void fixup_vm86_seg_bases(struct cpu_user_regs *regs)
   5.123  {
   5.124 -    int err = 0;
   5.125      unsigned long base;
   5.126  
   5.127 -    err |= __vmread(GUEST_ES_BASE, &base);
   5.128 +    base = __vmread(GUEST_ES_BASE);
   5.129      if (regs->es << 4 != base)
   5.130 -        err |= __vmwrite(GUEST_ES_BASE, regs->es << 4);
   5.131 -    err |= __vmread(GUEST_CS_BASE, &base);
   5.132 +        __vmwrite(GUEST_ES_BASE, regs->es << 4);
   5.133 +    base = __vmread(GUEST_CS_BASE);
   5.134      if (regs->cs << 4 != base)
   5.135 -        err |= __vmwrite(GUEST_CS_BASE, regs->cs << 4);
   5.136 -    err |= __vmread(GUEST_SS_BASE, &base);
   5.137 +        __vmwrite(GUEST_CS_BASE, regs->cs << 4);
   5.138 +    base = __vmread(GUEST_SS_BASE);
   5.139      if (regs->ss << 4 != base)
   5.140 -        err |= __vmwrite(GUEST_SS_BASE, regs->ss << 4);
   5.141 -    err |= __vmread(GUEST_DS_BASE, &base);
   5.142 +        __vmwrite(GUEST_SS_BASE, regs->ss << 4);
   5.143 +    base = __vmread(GUEST_DS_BASE);
   5.144      if (regs->ds << 4 != base)
   5.145 -        err |= __vmwrite(GUEST_DS_BASE, regs->ds << 4);
   5.146 -    err |= __vmread(GUEST_FS_BASE, &base);
   5.147 +        __vmwrite(GUEST_DS_BASE, regs->ds << 4);
   5.148 +    base = __vmread(GUEST_FS_BASE);
   5.149      if (regs->fs << 4 != base)
   5.150 -        err |= __vmwrite(GUEST_FS_BASE, regs->fs << 4);
   5.151 -    err |= __vmread(GUEST_GS_BASE, &base);
   5.152 +        __vmwrite(GUEST_FS_BASE, regs->fs << 4);
   5.153 +    base = __vmread(GUEST_GS_BASE);
   5.154      if (regs->gs << 4 != base)
   5.155 -        err |= __vmwrite(GUEST_GS_BASE, regs->gs << 4);
   5.156 -
   5.157 -    BUG_ON(err);
   5.158 +        __vmwrite(GUEST_GS_BASE, regs->gs << 4);
   5.159  }
   5.160  
   5.161  static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
   5.162 @@ -605,7 +594,7 @@ static int vmx_realmode(struct vcpu *v)
   5.163  
   5.164      ASSERT(v == current);
   5.165  
   5.166 -    __vmread(GUEST_RFLAGS, &rflags);
   5.167 +    rflags = __vmread(GUEST_RFLAGS);
   5.168      return rflags & X86_EFLAGS_VM;
   5.169  }
   5.170  
   5.171 @@ -615,7 +604,7 @@ static int vmx_guest_x86_mode(struct vcp
   5.172  
   5.173      ASSERT(v == current);
   5.174  
   5.175 -    __vmread(GUEST_CS_AR_BYTES, &cs_ar_bytes);
   5.176 +    cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
   5.177  
   5.178      if ( vmx_long_mode_enabled(v) )
   5.179          return ((cs_ar_bytes & (1u<<13)) ?
   5.180 @@ -735,7 +724,7 @@ int start_vmx(void)
   5.181  static int __get_instruction_length(void)
   5.182  {
   5.183      int len;
   5.184 -    __vmread(VM_EXIT_INSTRUCTION_LEN, &len); /* Safe: callers audited */
   5.185 +    len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
   5.186      if ( (len < 1) || (len > 15) )
   5.187          __hvm_bug(guest_cpu_user_regs());
   5.188      return len;
   5.189 @@ -745,7 +734,7 @@ static void inline __update_guest_eip(un
   5.190  {
   5.191      unsigned long current_eip;
   5.192  
   5.193 -    __vmread(GUEST_RIP, &current_eip);
   5.194 +    current_eip = __vmread(GUEST_RIP);
   5.195      __vmwrite(GUEST_RIP, current_eip + inst_len);
   5.196      __vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
   5.197  }
   5.198 @@ -758,8 +747,8 @@ static int vmx_do_page_fault(unsigned lo
   5.199      {
   5.200          unsigned long eip, cs;
   5.201  
   5.202 -        __vmread(GUEST_CS_BASE, &cs);
   5.203 -        __vmread(GUEST_RIP, &eip);
   5.204 +        cs = __vmread(GUEST_CS_BASE);
   5.205 +        eip = __vmread(GUEST_RIP);
   5.206          HVM_DBG_LOG(DBG_LEVEL_VMMU,
   5.207                      "vmx_do_page_fault = 0x%lx, cs_base=%lx, "
   5.208                      "eip = %lx, error_code = %lx\n",
   5.209 @@ -773,7 +762,7 @@ static int vmx_do_page_fault(unsigned lo
   5.210  #if 0
   5.211      if ( !result )
   5.212      {
   5.213 -        __vmread(GUEST_RIP, &eip);
   5.214 +        eip = __vmread(GUEST_RIP);
   5.215          printk("vmx pgfault to guest va=%lx eip=%lx\n", va, eip);
   5.216      }
   5.217  #endif
   5.218 @@ -805,7 +794,7 @@ static void vmx_do_cpuid(struct cpu_user
   5.219      unsigned long eip;
   5.220      struct vcpu *v = current;
   5.221  
   5.222 -    __vmread(GUEST_RIP, &eip);
   5.223 +    eip = __vmread(GUEST_RIP);
   5.224  
   5.225      HVM_DBG_LOG(DBG_LEVEL_3, "(eax) 0x%08lx, (ebx) 0x%08lx, "
   5.226                  "(ecx) 0x%08lx, (edx) 0x%08lx, (esi) 0x%08lx, (edi) 0x%08lx",
   5.227 @@ -946,7 +935,7 @@ static void vmx_do_invlpg(unsigned long 
   5.228      unsigned long eip;
   5.229      struct vcpu *v = current;
   5.230  
   5.231 -    __vmread(GUEST_RIP, &eip);
   5.232 +    eip = __vmread(GUEST_RIP);
   5.233  
   5.234      HVM_DBG_LOG(DBG_LEVEL_VMMU, "eip=%lx, va=%lx",
   5.235                  eip, va);
   5.236 @@ -969,7 +958,7 @@ static int check_for_null_selector(unsig
   5.237      /* INS can only use ES segment register, and it can't be overridden */
   5.238      if ( dir == IOREQ_READ )
   5.239      {
   5.240 -        __vmread(GUEST_ES_SELECTOR, &sel);
   5.241 +        sel = __vmread(GUEST_ES_SELECTOR);
   5.242          return sel == 0 ? 1 : 0;
   5.243      }
   5.244  
   5.245 @@ -991,25 +980,25 @@ static int check_for_null_selector(unsig
   5.246          case 0x67: /* addr32 */
   5.247              continue;
   5.248          case 0x2e: /* CS */
   5.249 -            __vmread(GUEST_CS_SELECTOR, &sel);
   5.250 +            sel = __vmread(GUEST_CS_SELECTOR);
   5.251              break;
   5.252          case 0x36: /* SS */
   5.253 -            __vmread(GUEST_SS_SELECTOR, &sel);
   5.254 +            sel = __vmread(GUEST_SS_SELECTOR);
   5.255              break;
   5.256          case 0x26: /* ES */
   5.257 -            __vmread(GUEST_ES_SELECTOR, &sel);
   5.258 +            sel = __vmread(GUEST_ES_SELECTOR);
   5.259              break;
   5.260          case 0x64: /* FS */
   5.261 -            __vmread(GUEST_FS_SELECTOR, &sel);
   5.262 +            sel = __vmread(GUEST_FS_SELECTOR);
   5.263              break;
   5.264          case 0x65: /* GS */
   5.265 -            __vmread(GUEST_GS_SELECTOR, &sel);
   5.266 +            sel = __vmread(GUEST_GS_SELECTOR);
   5.267              break;
   5.268          case 0x3e: /* DS */
   5.269              /* FALLTHROUGH */
   5.270          default:
   5.271              /* DS is the default */
   5.272 -            __vmread(GUEST_DS_SELECTOR, &sel);
   5.273 +            sel = __vmread(GUEST_DS_SELECTOR);
   5.274          }
   5.275          return sel == 0 ? 1 : 0;
   5.276      }
   5.277 @@ -1056,7 +1045,7 @@ static void vmx_io_instruction(unsigned 
   5.278          unsigned long addr, count = 1;
   5.279          int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
   5.280  
   5.281 -        __vmread(GUEST_LINEAR_ADDRESS, &addr);
   5.282 +        addr = __vmread(GUEST_LINEAR_ADDRESS);
   5.283  
   5.284          /*
   5.285           * In protected mode, guest linear address is invalid if the
   5.286 @@ -1119,98 +1108,96 @@ static void vmx_io_instruction(unsigned 
   5.287      }
   5.288  }
   5.289  
   5.290 -static int vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
   5.291 +static void vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
   5.292  {
   5.293 -    int error = 0;
   5.294 -
   5.295      /* NB. Skip transition instruction. */
   5.296 -    error |= __vmread(GUEST_RIP, &c->eip);
   5.297 +    c->eip = __vmread(GUEST_RIP);
   5.298      c->eip += __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
   5.299  
   5.300 -    error |= __vmread(GUEST_RSP, &c->esp);
   5.301 -    error |= __vmread(GUEST_RFLAGS, &c->eflags);
   5.302 +    c->esp = __vmread(GUEST_RSP);
   5.303 +    c->eflags = __vmread(GUEST_RFLAGS);
   5.304  
   5.305      c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   5.306      c->cr3 = v->arch.hvm_vmx.cpu_cr3;
   5.307      c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
   5.308  
   5.309 -    error |= __vmread(GUEST_IDTR_LIMIT, &c->idtr_limit);
   5.310 -    error |= __vmread(GUEST_IDTR_BASE, &c->idtr_base);
   5.311 +    c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
   5.312 +    c->idtr_base = __vmread(GUEST_IDTR_BASE);
   5.313  
   5.314 -    error |= __vmread(GUEST_GDTR_LIMIT, &c->gdtr_limit);
   5.315 -    error |= __vmread(GUEST_GDTR_BASE, &c->gdtr_base);
   5.316 +    c->gdtr_limit = __vmread(GUEST_GDTR_LIMIT);
   5.317 +    c->gdtr_base = __vmread(GUEST_GDTR_BASE);
   5.318  
   5.319 -    error |= __vmread(GUEST_CS_SELECTOR, &c->cs_sel);
   5.320 -    error |= __vmread(GUEST_CS_LIMIT, &c->cs_limit);
   5.321 -    error |= __vmread(GUEST_CS_BASE, &c->cs_base);
   5.322 -    error |= __vmread(GUEST_CS_AR_BYTES, &c->cs_arbytes.bytes);
   5.323 +    c->cs_sel = __vmread(GUEST_CS_SELECTOR);
   5.324 +    c->cs_limit = __vmread(GUEST_CS_LIMIT);
   5.325 +    c->cs_base = __vmread(GUEST_CS_BASE);
   5.326 +    c->cs_arbytes.bytes = __vmread(GUEST_CS_AR_BYTES);
   5.327  
   5.328 -    error |= __vmread(GUEST_DS_SELECTOR, &c->ds_sel);
   5.329 -    error |= __vmread(GUEST_DS_LIMIT, &c->ds_limit);
   5.330 -    error |= __vmread(GUEST_DS_BASE, &c->ds_base);
   5.331 -    error |= __vmread(GUEST_DS_AR_BYTES, &c->ds_arbytes.bytes);
   5.332 +    c->ds_sel = __vmread(GUEST_DS_SELECTOR);
   5.333 +    c->ds_limit = __vmread(GUEST_DS_LIMIT);
   5.334 +    c->ds_base = __vmread(GUEST_DS_BASE);
   5.335 +    c->ds_arbytes.bytes = __vmread(GUEST_DS_AR_BYTES);
   5.336  
   5.337 -    error |= __vmread(GUEST_ES_SELECTOR, &c->es_sel);
   5.338 -    error |= __vmread(GUEST_ES_LIMIT, &c->es_limit);
   5.339 -    error |= __vmread(GUEST_ES_BASE, &c->es_base);
   5.340 -    error |= __vmread(GUEST_ES_AR_BYTES, &c->es_arbytes.bytes);
   5.341 +    c->es_sel = __vmread(GUEST_ES_SELECTOR);
   5.342 +    c->es_limit = __vmread(GUEST_ES_LIMIT);
   5.343 +    c->es_base = __vmread(GUEST_ES_BASE);
   5.344 +    c->es_arbytes.bytes = __vmread(GUEST_ES_AR_BYTES);
   5.345  
   5.346 -    error |= __vmread(GUEST_SS_SELECTOR, &c->ss_sel);
   5.347 -    error |= __vmread(GUEST_SS_LIMIT, &c->ss_limit);
   5.348 -    error |= __vmread(GUEST_SS_BASE, &c->ss_base);
   5.349 -    error |= __vmread(GUEST_SS_AR_BYTES, &c->ss_arbytes.bytes);
   5.350 +    c->ss_sel = __vmread(GUEST_SS_SELECTOR);
   5.351 +    c->ss_limit = __vmread(GUEST_SS_LIMIT);
   5.352 +    c->ss_base = __vmread(GUEST_SS_BASE);
   5.353 +    c->ss_arbytes.bytes = __vmread(GUEST_SS_AR_BYTES);
   5.354  
   5.355 -    error |= __vmread(GUEST_FS_SELECTOR, &c->fs_sel);
   5.356 -    error |= __vmread(GUEST_FS_LIMIT, &c->fs_limit);
   5.357 -    error |= __vmread(GUEST_FS_BASE, &c->fs_base);
   5.358 -    error |= __vmread(GUEST_FS_AR_BYTES, &c->fs_arbytes.bytes);
   5.359 +    c->fs_sel = __vmread(GUEST_FS_SELECTOR);
   5.360 +    c->fs_limit = __vmread(GUEST_FS_LIMIT);
   5.361 +    c->fs_base = __vmread(GUEST_FS_BASE);
   5.362 +    c->fs_arbytes.bytes = __vmread(GUEST_FS_AR_BYTES);
   5.363  
   5.364 -    error |= __vmread(GUEST_GS_SELECTOR, &c->gs_sel);
   5.365 -    error |= __vmread(GUEST_GS_LIMIT, &c->gs_limit);
   5.366 -    error |= __vmread(GUEST_GS_BASE, &c->gs_base);
   5.367 -    error |= __vmread(GUEST_GS_AR_BYTES, &c->gs_arbytes.bytes);
   5.368 +    c->gs_sel = __vmread(GUEST_GS_SELECTOR);
   5.369 +    c->gs_limit = __vmread(GUEST_GS_LIMIT);
   5.370 +    c->gs_base = __vmread(GUEST_GS_BASE);
   5.371 +    c->gs_arbytes.bytes = __vmread(GUEST_GS_AR_BYTES);
   5.372  
   5.373 -    error |= __vmread(GUEST_TR_SELECTOR, &c->tr_sel);
   5.374 -    error |= __vmread(GUEST_TR_LIMIT, &c->tr_limit);
   5.375 -    error |= __vmread(GUEST_TR_BASE, &c->tr_base);
   5.376 -    error |= __vmread(GUEST_TR_AR_BYTES, &c->tr_arbytes.bytes);
   5.377 +    c->tr_sel = __vmread(GUEST_TR_SELECTOR);
   5.378 +    c->tr_limit = __vmread(GUEST_TR_LIMIT);
   5.379 +    c->tr_base = __vmread(GUEST_TR_BASE);
   5.380 +    c->tr_arbytes.bytes = __vmread(GUEST_TR_AR_BYTES);
   5.381  
   5.382 -    error |= __vmread(GUEST_LDTR_SELECTOR, &c->ldtr_sel);
   5.383 -    error |= __vmread(GUEST_LDTR_LIMIT, &c->ldtr_limit);
   5.384 -    error |= __vmread(GUEST_LDTR_BASE, &c->ldtr_base);
   5.385 -    error |= __vmread(GUEST_LDTR_AR_BYTES, &c->ldtr_arbytes.bytes);
   5.386 -
   5.387 -    return !error;
   5.388 +    c->ldtr_sel = __vmread(GUEST_LDTR_SELECTOR);
   5.389 +    c->ldtr_limit = __vmread(GUEST_LDTR_LIMIT);
   5.390 +    c->ldtr_base = __vmread(GUEST_LDTR_BASE);
   5.391 +    c->ldtr_arbytes.bytes = __vmread(GUEST_LDTR_AR_BYTES);
   5.392  }
   5.393  
   5.394 -static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
   5.395 +static void vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
   5.396  {
   5.397      unsigned long mfn, old_base_mfn;
   5.398 -    int error = 0;
   5.399  
   5.400 -    error |= __vmwrite(GUEST_RIP, c->eip);
   5.401 -    error |= __vmwrite(GUEST_RSP, c->esp);
   5.402 -    error |= __vmwrite(GUEST_RFLAGS, c->eflags);
   5.403 +    __vmwrite(GUEST_RIP, c->eip);
   5.404 +    __vmwrite(GUEST_RSP, c->esp);
   5.405 +    __vmwrite(GUEST_RFLAGS, c->eflags);
   5.406  
   5.407      v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
   5.408 -    error |= __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   5.409 +    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   5.410  
   5.411 -    if (!vmx_paging_enabled(v))
   5.412 +    if ( !vmx_paging_enabled(v) )
   5.413          goto skip_cr3;
   5.414  
   5.415 -    if (c->cr3 == v->arch.hvm_vmx.cpu_cr3) {
   5.416 +    if ( c->cr3 == v->arch.hvm_vmx.cpu_cr3 )
   5.417 +    {
   5.418          /*
   5.419           * This is simple TLB flush, implying the guest has
   5.420           * removed some translation or changed page attributes.
   5.421           * We simply invalidate the shadow.
   5.422           */
   5.423          mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
   5.424 -        if (mfn != pagetable_get_pfn(v->arch.guest_table)) {
   5.425 +        if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
   5.426 +        {
   5.427              printk("Invalid CR3 value=%x", c->cr3);
   5.428              domain_crash_synchronous();
   5.429 -            return 0;
   5.430          }
   5.431 -    } else {
   5.432 +    }
   5.433 +    else
   5.434 +    {
   5.435          /*
   5.436           * If different, make a shadow. Check if the PDBR is valid
   5.437           * first.
   5.438 @@ -1221,10 +1208,9 @@ static int vmx_world_restore(struct vcpu
   5.439          {
   5.440              printk("Invalid CR3 value=%x", c->cr3);
   5.441              domain_crash_synchronous();
   5.442 -            return 0;
   5.443          }
   5.444 -        if(!get_page(mfn_to_page(mfn), v->domain))
   5.445 -                return 0;
   5.446 +        if ( !get_page(mfn_to_page(mfn), v->domain) )
   5.447 +            domain_crash_synchronous();
   5.448          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   5.449          v->arch.guest_table = pagetable_from_pfn(mfn);
   5.450          if (old_base_mfn)
   5.451 @@ -1236,66 +1222,63 @@ static int vmx_world_restore(struct vcpu
   5.452      }
   5.453  
   5.454   skip_cr3:
   5.455 -
   5.456 -    if (!vmx_paging_enabled(v))
   5.457 +    if ( !vmx_paging_enabled(v) )
   5.458          HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
   5.459      else
   5.460          HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
   5.461  
   5.462 -    error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
   5.463 +    __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
   5.464      v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
   5.465 -    error |= __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   5.466 +    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
   5.467  
   5.468 -    error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
   5.469 -    error |= __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
   5.470 +    __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
   5.471 +    __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
   5.472  
   5.473 -    error |= __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
   5.474 -    error |= __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
   5.475 +    __vmwrite(GUEST_GDTR_LIMIT, c->gdtr_limit);
   5.476 +    __vmwrite(GUEST_GDTR_BASE, c->gdtr_base);
   5.477  
   5.478 -    error |= __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
   5.479 -    error |= __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
   5.480 -    error |= __vmwrite(GUEST_CS_BASE, c->cs_base);
   5.481 -    error |= __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
   5.482 +    __vmwrite(GUEST_CS_SELECTOR, c->cs_sel);
   5.483 +    __vmwrite(GUEST_CS_LIMIT, c->cs_limit);
   5.484 +    __vmwrite(GUEST_CS_BASE, c->cs_base);
   5.485 +    __vmwrite(GUEST_CS_AR_BYTES, c->cs_arbytes.bytes);
   5.486  
   5.487 -    error |= __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
   5.488 -    error |= __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
   5.489 -    error |= __vmwrite(GUEST_DS_BASE, c->ds_base);
   5.490 -    error |= __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
   5.491 +    __vmwrite(GUEST_DS_SELECTOR, c->ds_sel);
   5.492 +    __vmwrite(GUEST_DS_LIMIT, c->ds_limit);
   5.493 +    __vmwrite(GUEST_DS_BASE, c->ds_base);
   5.494 +    __vmwrite(GUEST_DS_AR_BYTES, c->ds_arbytes.bytes);
   5.495  
   5.496 -    error |= __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
   5.497 -    error |= __vmwrite(GUEST_ES_LIMIT, c->es_limit);
   5.498 -    error |= __vmwrite(GUEST_ES_BASE, c->es_base);
   5.499 -    error |= __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
   5.500 +    __vmwrite(GUEST_ES_SELECTOR, c->es_sel);
   5.501 +    __vmwrite(GUEST_ES_LIMIT, c->es_limit);
   5.502 +    __vmwrite(GUEST_ES_BASE, c->es_base);
   5.503 +    __vmwrite(GUEST_ES_AR_BYTES, c->es_arbytes.bytes);
   5.504  
   5.505 -    error |= __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
   5.506 -    error |= __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
   5.507 -    error |= __vmwrite(GUEST_SS_BASE, c->ss_base);
   5.508 -    error |= __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
   5.509 +    __vmwrite(GUEST_SS_SELECTOR, c->ss_sel);
   5.510 +    __vmwrite(GUEST_SS_LIMIT, c->ss_limit);
   5.511 +    __vmwrite(GUEST_SS_BASE, c->ss_base);
   5.512 +    __vmwrite(GUEST_SS_AR_BYTES, c->ss_arbytes.bytes);
   5.513  
   5.514 -    error |= __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
   5.515 -    error |= __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
   5.516 -    error |= __vmwrite(GUEST_FS_BASE, c->fs_base);
   5.517 -    error |= __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
   5.518 +    __vmwrite(GUEST_FS_SELECTOR, c->fs_sel);
   5.519 +    __vmwrite(GUEST_FS_LIMIT, c->fs_limit);
   5.520 +    __vmwrite(GUEST_FS_BASE, c->fs_base);
   5.521 +    __vmwrite(GUEST_FS_AR_BYTES, c->fs_arbytes.bytes);
   5.522  
   5.523 -    error |= __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
   5.524 -    error |= __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
   5.525 -    error |= __vmwrite(GUEST_GS_BASE, c->gs_base);
   5.526 -    error |= __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
   5.527 +    __vmwrite(GUEST_GS_SELECTOR, c->gs_sel);
   5.528 +    __vmwrite(GUEST_GS_LIMIT, c->gs_limit);
   5.529 +    __vmwrite(GUEST_GS_BASE, c->gs_base);
   5.530 +    __vmwrite(GUEST_GS_AR_BYTES, c->gs_arbytes.bytes);
   5.531  
   5.532 -    error |= __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
   5.533 -    error |= __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
   5.534 -    error |= __vmwrite(GUEST_TR_BASE, c->tr_base);
   5.535 -    error |= __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
   5.536 +    __vmwrite(GUEST_TR_SELECTOR, c->tr_sel);
   5.537 +    __vmwrite(GUEST_TR_LIMIT, c->tr_limit);
   5.538 +    __vmwrite(GUEST_TR_BASE, c->tr_base);
   5.539 +    __vmwrite(GUEST_TR_AR_BYTES, c->tr_arbytes.bytes);
   5.540  
   5.541 -    error |= __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
   5.542 -    error |= __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
   5.543 -    error |= __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
   5.544 -    error |= __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
   5.545 +    __vmwrite(GUEST_LDTR_SELECTOR, c->ldtr_sel);
   5.546 +    __vmwrite(GUEST_LDTR_LIMIT, c->ldtr_limit);
   5.547 +    __vmwrite(GUEST_LDTR_BASE, c->ldtr_base);
   5.548 +    __vmwrite(GUEST_LDTR_AR_BYTES, c->ldtr_arbytes.bytes);
   5.549  
   5.550      shadow_update_paging_modes(v);
   5.551      __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
   5.552 -
   5.553 -    return !error;
   5.554  }
   5.555  
   5.556  enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
   5.557 @@ -1325,8 +1308,7 @@ static int vmx_assist(struct vcpu *v, in
   5.558          if (hvm_copy_from_guest_phys(&cp, VMXASSIST_OLD_CONTEXT, sizeof(cp)))
   5.559              goto error;
   5.560          if (cp != 0) {
   5.561 -            if (!vmx_world_save(v, &c))
   5.562 -                goto error;
   5.563 +            vmx_world_save(v, &c);
   5.564              if (hvm_copy_to_guest_phys(cp, &c, sizeof(c)))
   5.565                  goto error;
   5.566          }
   5.567 @@ -1337,8 +1319,7 @@ static int vmx_assist(struct vcpu *v, in
   5.568          if (cp != 0) {
   5.569              if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
   5.570                  goto error;
   5.571 -            if (!vmx_world_restore(v, &c))
   5.572 -                goto error;
   5.573 +            vmx_world_restore(v, &c);
   5.574              v->arch.hvm_vmx.vmxassist_enabled = 1;            
   5.575              return 1;
   5.576          }
   5.577 @@ -1355,8 +1336,7 @@ static int vmx_assist(struct vcpu *v, in
   5.578          if (cp != 0) {
   5.579              if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
   5.580                  goto error;
   5.581 -            if (!vmx_world_restore(v, &c))
   5.582 -                goto error;
   5.583 +            vmx_world_restore(v, &c);
   5.584              v->arch.hvm_vmx.vmxassist_enabled = 0;
   5.585              return 1;
   5.586          }
   5.587 @@ -1428,7 +1408,7 @@ static int vmx_set_cr0(unsigned long val
   5.588                  HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode\n");
   5.589                  v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
   5.590                      |= EFER_LMA;
   5.591 -                __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
   5.592 +                vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   5.593                  vm_entry_value |= VM_ENTRY_IA32E_MODE;
   5.594                  __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   5.595              }
   5.596 @@ -1482,7 +1462,7 @@ static int vmx_set_cr0(unsigned long val
   5.597              {
   5.598                  v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
   5.599                      &= ~EFER_LMA;
   5.600 -                __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
   5.601 +                vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   5.602                  vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
   5.603                  __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   5.604              }
   5.605 @@ -1490,7 +1470,7 @@ static int vmx_set_cr0(unsigned long val
   5.606  
   5.607          if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
   5.608          {
   5.609 -            __vmread(GUEST_RIP, &eip);
   5.610 +            eip = __vmread(GUEST_RIP);
   5.611              HVM_DBG_LOG(DBG_LEVEL_1,
   5.612                          "Transfering control to vmxassist %%eip 0x%lx\n", eip);
   5.613              return 0; /* do not update eip! */
   5.614 @@ -1498,12 +1478,12 @@ static int vmx_set_cr0(unsigned long val
   5.615      }
   5.616      else if ( v->arch.hvm_vmx.vmxassist_enabled )
   5.617      {
   5.618 -        __vmread(GUEST_RIP, &eip);
   5.619 +        eip = __vmread(GUEST_RIP);
   5.620          HVM_DBG_LOG(DBG_LEVEL_1,
   5.621                      "Enabling CR0.PE at %%eip 0x%lx\n", eip);
   5.622          if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
   5.623          {
   5.624 -            __vmread(GUEST_RIP, &eip);
   5.625 +            eip = __vmread(GUEST_RIP);
   5.626              HVM_DBG_LOG(DBG_LEVEL_1,
   5.627                          "Restoring to %%eip 0x%lx\n", eip);
   5.628              return 0; /* do not update eip! */
   5.629 @@ -1515,7 +1495,7 @@ static int vmx_set_cr0(unsigned long val
   5.630          {
   5.631              v->arch.hvm_vmx.msr_content.msr_items[VMX_INDEX_MSR_EFER]
   5.632                &= ~EFER_LMA;
   5.633 -            __vmread(VM_ENTRY_CONTROLS, &vm_entry_value);
   5.634 +            vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   5.635              vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
   5.636              __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   5.637          }
   5.638 @@ -1570,7 +1550,7 @@ static int mov_to_cr(int gp, int cr, str
   5.639      CASE_GET_REG(EDI, edi);
   5.640      CASE_EXTEND_GET_REG;
   5.641      case REG_ESP:
   5.642 -        __vmread(GUEST_RSP, &value);
   5.643 +        value = __vmread(GUEST_RSP);
   5.644          break;
   5.645      default:
   5.646          printk("invalid gp: %d\n", gp);
   5.647 @@ -1821,13 +1801,13 @@ static inline void vmx_do_msr_read(struc
   5.648          msr_content = hvm_get_guest_time(v);
   5.649          break;
   5.650      case MSR_IA32_SYSENTER_CS:
   5.651 -        __vmread(GUEST_SYSENTER_CS, (u32 *)&msr_content);
   5.652 +        msr_content = (u32)__vmread(GUEST_SYSENTER_CS);
   5.653          break;
   5.654      case MSR_IA32_SYSENTER_ESP:
   5.655 -        __vmread(GUEST_SYSENTER_ESP, &msr_content);
   5.656 +        msr_content = __vmread(GUEST_SYSENTER_ESP);
   5.657          break;
   5.658      case MSR_IA32_SYSENTER_EIP:
   5.659 -        __vmread(GUEST_SYSENTER_EIP, &msr_content);
   5.660 +        msr_content = __vmread(GUEST_SYSENTER_EIP);
   5.661          break;
   5.662      case MSR_IA32_APICBASE:
   5.663          msr_content = vcpu_vlapic(v)->apic_base_msr;
   5.664 @@ -1903,14 +1883,13 @@ static inline void vmx_do_msr_write(stru
   5.665  static void vmx_do_hlt(void)
   5.666  {
   5.667      unsigned long rflags;
   5.668 -    __vmread(GUEST_RFLAGS, &rflags);
   5.669 +    rflags = __vmread(GUEST_RFLAGS);
   5.670      hvm_hlt(rflags);
   5.671  }
   5.672  
   5.673  static inline void vmx_do_extint(struct cpu_user_regs *regs)
   5.674  {
   5.675      unsigned int vector;
   5.676 -    int error;
   5.677  
   5.678      asmlinkage void do_IRQ(struct cpu_user_regs *);
   5.679      fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *);
   5.680 @@ -1923,9 +1902,8 @@ static inline void vmx_do_extint(struct 
   5.681      fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
   5.682  #endif
   5.683  
   5.684 -    if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
   5.685 -        && !(vector & INTR_INFO_VALID_MASK))
   5.686 -        __hvm_bug(regs);
   5.687 +    vector = __vmread(VM_EXIT_INTR_INFO);
   5.688 +    BUG_ON(!(vector & INTR_INFO_VALID_MASK));
   5.689  
   5.690      vector &= INTR_INFO_VECTOR_MASK;
   5.691      TRACE_VMEXIT(1, vector);
   5.692 @@ -1964,40 +1942,40 @@ static inline void vmx_do_extint(struct 
   5.693  #if defined (__x86_64__)
   5.694  void store_cpu_user_regs(struct cpu_user_regs *regs)
   5.695  {
   5.696 -    __vmread(GUEST_SS_SELECTOR, &regs->ss);
   5.697 -    __vmread(GUEST_RSP, &regs->rsp);
   5.698 -    __vmread(GUEST_RFLAGS, &regs->rflags);
   5.699 -    __vmread(GUEST_CS_SELECTOR, &regs->cs);
   5.700 -    __vmread(GUEST_DS_SELECTOR, &regs->ds);
   5.701 -    __vmread(GUEST_ES_SELECTOR, &regs->es);
   5.702 -    __vmread(GUEST_RIP, &regs->rip);
   5.703 +    regs->ss = __vmread(GUEST_SS_SELECTOR);
   5.704 +    regs->rsp = __vmread(GUEST_RSP);
   5.705 +    regs->rflags = __vmread(GUEST_RFLAGS);
   5.706 +    regs->cs = __vmread(GUEST_CS_SELECTOR);
   5.707 +    regs->ds = __vmread(GUEST_DS_SELECTOR);
   5.708 +    regs->es = __vmread(GUEST_ES_SELECTOR);
   5.709 +    regs->rip = __vmread(GUEST_RIP);
   5.710  }
   5.711  #elif defined (__i386__)
   5.712  void store_cpu_user_regs(struct cpu_user_regs *regs)
   5.713  {
   5.714 -    __vmread(GUEST_SS_SELECTOR, &regs->ss);
   5.715 -    __vmread(GUEST_RSP, &regs->esp);
   5.716 -    __vmread(GUEST_RFLAGS, &regs->eflags);
   5.717 -    __vmread(GUEST_CS_SELECTOR, &regs->cs);
   5.718 -    __vmread(GUEST_DS_SELECTOR, &regs->ds);
   5.719 -    __vmread(GUEST_ES_SELECTOR, &regs->es);
   5.720 -    __vmread(GUEST_RIP, &regs->eip);
   5.721 +    regs->ss = __vmread(GUEST_SS_SELECTOR);
   5.722 +    regs->esp = __vmread(GUEST_RSP);
   5.723 +    regs->eflags = __vmread(GUEST_RFLAGS);
   5.724 +    regs->cs = __vmread(GUEST_CS_SELECTOR);
   5.725 +    regs->ds = __vmread(GUEST_DS_SELECTOR);
   5.726 +    regs->es = __vmread(GUEST_ES_SELECTOR);
   5.727 +    regs->eip = __vmread(GUEST_RIP);
   5.728  }
   5.729  #endif 
   5.730  
   5.731  #ifdef XEN_DEBUGGER
   5.732  void save_cpu_user_regs(struct cpu_user_regs *regs)
   5.733  {
   5.734 -    __vmread(GUEST_SS_SELECTOR, &regs->xss);
   5.735 -    __vmread(GUEST_RSP, &regs->esp);
   5.736 -    __vmread(GUEST_RFLAGS, &regs->eflags);
   5.737 -    __vmread(GUEST_CS_SELECTOR, &regs->xcs);
   5.738 -    __vmread(GUEST_RIP, &regs->eip);
   5.739 +    regs->xss = __vmread(GUEST_SS_SELECTOR);
   5.740 +    regs->esp = __vmread(GUEST_RSP);
   5.741 +    regs->eflags = __vmread(GUEST_RFLAGS);
   5.742 +    regs->xcs = __vmread(GUEST_CS_SELECTOR);
   5.743 +    regs->eip = __vmread(GUEST_RIP);
   5.744  
   5.745 -    __vmread(GUEST_GS_SELECTOR, &regs->xgs);
   5.746 -    __vmread(GUEST_FS_SELECTOR, &regs->xfs);
   5.747 -    __vmread(GUEST_ES_SELECTOR, &regs->xes);
   5.748 -    __vmread(GUEST_DS_SELECTOR, &regs->xds);
   5.749 +    regs->xgs = __vmread(GUEST_GS_SELECTOR);
   5.750 +    regs->xfs = __vmread(GUEST_FS_SELECTOR);
   5.751 +    regs->xes = __vmread(GUEST_ES_SELECTOR);
   5.752 +    regs->xds = __vmread(GUEST_DS_SELECTOR);
   5.753  }
   5.754  
   5.755  void restore_cpu_user_regs(struct cpu_user_regs *regs)
   5.756 @@ -2019,10 +1997,10 @@ static void vmx_reflect_exception(struct
   5.757  {
   5.758      int error_code, intr_info, vector;
   5.759  
   5.760 -    __vmread(VM_EXIT_INTR_INFO, &intr_info);
   5.761 +    intr_info = __vmread(VM_EXIT_INTR_INFO);
   5.762      vector = intr_info & 0xff;
   5.763      if ( intr_info & INTR_INFO_DELIVER_CODE_MASK )
   5.764 -        __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
   5.765 +        error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
   5.766      else
   5.767          error_code = VMX_DELIVER_NO_ERROR_CODE;
   5.768  
   5.769 @@ -2030,7 +2008,7 @@ static void vmx_reflect_exception(struct
   5.770      {
   5.771          unsigned long rip;
   5.772  
   5.773 -        __vmread(GUEST_RIP, &rip);
   5.774 +        rip = __vmread(GUEST_RIP);
   5.775          HVM_DBG_LOG(DBG_LEVEL_1, "rip = %lx, error_code = %x",
   5.776                      rip, error_code);
   5.777      }
   5.778 @@ -2062,7 +2040,7 @@ asmlinkage void vmx_vmexit_handler(struc
   5.779      unsigned long exit_qualification, inst_len = 0;
   5.780      struct vcpu *v = current;
   5.781  
   5.782 -    __vmread(VM_EXIT_REASON, &exit_reason);
   5.783 +    exit_reason = __vmread(VM_EXIT_REASON);
   5.784  
   5.785      perfc_incra(vmexits, exit_reason);
   5.786  
   5.787 @@ -2078,7 +2056,7 @@ asmlinkage void vmx_vmexit_handler(struc
   5.788      {
   5.789          unsigned int failed_vmentry_reason = exit_reason & 0xFFFF;
   5.790  
   5.791 -        __vmread(EXIT_QUALIFICATION, &exit_qualification);
   5.792 +        exit_qualification = __vmread(EXIT_QUALIFICATION);
   5.793          printk("Failed vm entry (exit reason 0x%x) ", exit_reason);
   5.794          switch ( failed_vmentry_reason ) {
   5.795          case EXIT_REASON_INVALID_GUEST_STATE:
   5.796 @@ -2114,9 +2092,8 @@ asmlinkage void vmx_vmexit_handler(struc
   5.797           */
   5.798          unsigned int intr_info, vector;
   5.799  
   5.800 -        if ( __vmread(VM_EXIT_INTR_INFO, &intr_info) ||
   5.801 -             !(intr_info & INTR_INFO_VALID_MASK) )
   5.802 -            __hvm_bug(regs);
   5.803 +        intr_info = __vmread(VM_EXIT_INTR_INFO);
   5.804 +        BUG_ON(!(intr_info & INTR_INFO_VALID_MASK));
   5.805  
   5.806          vector = intr_info & INTR_INFO_VECTOR_MASK;
   5.807  
   5.808 @@ -2177,8 +2154,8 @@ asmlinkage void vmx_vmexit_handler(struc
   5.809          }
   5.810          case TRAP_page_fault:
   5.811          {
   5.812 -            __vmread(EXIT_QUALIFICATION, &exit_qualification);
   5.813 -            __vmread(VM_EXIT_INTR_ERROR_CODE, &regs->error_code);
   5.814 +            exit_qualification = __vmread(EXIT_QUALIFICATION);
   5.815 +            regs->error_code = __vmread(VM_EXIT_INTR_ERROR_CODE);
   5.816  
   5.817              TRACE_VMEXIT(3, regs->error_code);
   5.818              TRACE_VMEXIT(4, exit_qualification);
   5.819 @@ -2240,7 +2217,7 @@ asmlinkage void vmx_vmexit_handler(struc
   5.820      {
   5.821          inst_len = __get_instruction_length(); /* Safe: INVLPG */
   5.822          __update_guest_eip(inst_len);
   5.823 -        __vmread(EXIT_QUALIFICATION, &exit_qualification);
   5.824 +        exit_qualification = __vmread(EXIT_QUALIFICATION);
   5.825          vmx_do_invlpg(exit_qualification);
   5.826          TRACE_VMEXIT(4, exit_qualification);
   5.827          break;
   5.828 @@ -2254,7 +2231,7 @@ asmlinkage void vmx_vmexit_handler(struc
   5.829      }
   5.830      case EXIT_REASON_CR_ACCESS:
   5.831      {
   5.832 -        __vmread(EXIT_QUALIFICATION, &exit_qualification);
   5.833 +        exit_qualification = __vmread(EXIT_QUALIFICATION);
   5.834          inst_len = __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
   5.835          if ( vmx_cr_access(exit_qualification, regs) )
   5.836              __update_guest_eip(inst_len);
   5.837 @@ -2262,11 +2239,11 @@ asmlinkage void vmx_vmexit_handler(struc
   5.838          break;
   5.839      }
   5.840      case EXIT_REASON_DR_ACCESS:
   5.841 -        __vmread(EXIT_QUALIFICATION, &exit_qualification);
   5.842 +        exit_qualification = __vmread(EXIT_QUALIFICATION);
   5.843          vmx_dr_access(exit_qualification, regs);
   5.844          break;
   5.845      case EXIT_REASON_IO_INSTRUCTION:
   5.846 -        __vmread(EXIT_QUALIFICATION, &exit_qualification);
   5.847 +        exit_qualification = __vmread(EXIT_QUALIFICATION);
   5.848          inst_len = __get_instruction_length(); /* Safe: IN, INS, OUT, OUTS */
   5.849          vmx_io_instruction(exit_qualification, inst_len);
   5.850          TRACE_VMEXIT(4, exit_qualification);
     6.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Wed Nov 08 15:11:18 2006 +0000
     6.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Wed Nov 08 16:43:50 2006 +0000
     6.3 @@ -183,80 +183,55 @@ static inline void __vmpclear(u64 addr)
     6.4                             : "memory");
     6.5  }
     6.6  
     6.7 -#define __vmread(x, ptr) ___vmread((x), (ptr), sizeof(*(ptr)))
     6.8 +static inline unsigned long __vmread(unsigned long field)
     6.9 +{
    6.10 +    unsigned long ecx;
    6.11 +
    6.12 +    __asm__ __volatile__ ( VMREAD_OPCODE
    6.13 +                           MODRM_EAX_ECX
    6.14 +                           /* CF==1 or ZF==1 --> crash (ud2) */
    6.15 +                           "ja 1f ; ud2 ; 1:\n"
    6.16 +                           : "=c" (ecx)
    6.17 +                           : "a" (field)
    6.18 +                           : "memory");
    6.19 +
    6.20 +    return ecx;
    6.21 +}
    6.22  
    6.23 -static always_inline int ___vmread(
    6.24 -    const unsigned long field, void *ptr, const int size)
    6.25 +static inline void __vmwrite(unsigned long field, unsigned long value)
    6.26  {
    6.27 -    unsigned long ecx = 0;
    6.28 -    int rc;
    6.29 +    __asm__ __volatile__ ( VMWRITE_OPCODE
    6.30 +                           MODRM_EAX_ECX
    6.31 +                           /* CF==1 or ZF==1 --> crash (ud2) */
    6.32 +                           "ja 1f ; ud2 ; 1:\n"
    6.33 +                           : 
    6.34 +                           : "a" (field) , "c" (value)
    6.35 +                           : "memory");
    6.36 +}
    6.37 +
    6.38 +static inline unsigned long __vmread_safe(unsigned long field, int *error)
    6.39 +{
    6.40 +    unsigned long ecx;
    6.41  
    6.42      __asm__ __volatile__ ( VMREAD_OPCODE
    6.43                             MODRM_EAX_ECX
    6.44                             /* CF==1 or ZF==1 --> rc = -1 */
    6.45                             "setna %b0 ; neg %0"
    6.46 -                           : "=q" (rc), "=c" (ecx)
    6.47 +                           : "=q" (*error), "=c" (ecx)
    6.48                             : "0" (0), "a" (field)
    6.49                             : "memory");
    6.50  
    6.51 -    switch ( size ) {
    6.52 -    case 1:
    6.53 -        *((u8 *) (ptr)) = ecx;
    6.54 -        break;
    6.55 -    case 2:
    6.56 -        *((u16 *) (ptr)) = ecx;
    6.57 -        break;
    6.58 -    case 4:
    6.59 -        *((u32 *) (ptr)) = ecx;
    6.60 -        break;
    6.61 -    case 8:
    6.62 -        *((u64 *) (ptr)) = ecx;
    6.63 -        break;
    6.64 -    default:
    6.65 -        domain_crash_synchronous();
    6.66 -        break;
    6.67 -    }
    6.68 -
    6.69 -    return rc;
    6.70 +    return ecx;
    6.71  }
    6.72  
    6.73 -static inline int __vmwrite(unsigned long field, unsigned long value)
    6.74 +static inline void __vm_set_bit(unsigned long field, unsigned long mask)
    6.75  {
    6.76 -    int rc;
    6.77 -
    6.78 -    __asm__ __volatile__ ( VMWRITE_OPCODE
    6.79 -                           MODRM_EAX_ECX
    6.80 -                           /* CF==1 or ZF==1 --> rc = -1 */
    6.81 -                           "setna %b0 ; neg %0"
    6.82 -                           : "=q" (rc)
    6.83 -                           : "0" (0), "a" (field) , "c" (value)
    6.84 -                           : "memory");
    6.85 -
    6.86 -    return rc;
    6.87 +    __vmwrite(field, __vmread(field) | mask);
    6.88  }
    6.89  
    6.90 -static inline int __vm_set_bit(unsigned long field, unsigned long mask)
    6.91 +static inline void __vm_clear_bit(unsigned long field, unsigned long mask)
    6.92  {
    6.93 -    unsigned long tmp;
    6.94 -    int err = 0;
    6.95 -
    6.96 -    err |= __vmread(field, &tmp);
    6.97 -    tmp |= mask;
    6.98 -    err |= __vmwrite(field, tmp);
    6.99 -
   6.100 -    return err;
   6.101 -}
   6.102 -
   6.103 -static inline int __vm_clear_bit(unsigned long field, unsigned long mask)
   6.104 -{
   6.105 -    unsigned long tmp;
   6.106 -    int err = 0;
   6.107 -
   6.108 -    err |= __vmread(field, &tmp);
   6.109 -    tmp &= ~mask;
   6.110 -    err |= __vmwrite(field, tmp);
   6.111 -
   6.112 -    return err;
   6.113 +    __vmwrite(field, __vmread(field) & ~mask);
   6.114  }
   6.115  
   6.116  static inline void __vmxoff (void)