ia64/xen-unstable

changeset 6988:b8537442f3d6

Removes redundant/unnecessary __vmread/__vmwrite.

Signed-off-by: Jun Nakajima <jun.nakajima@intel.com>
Signed-off-by: Eddie Dong <eddie.dong@intel.com>
Signed-off-by: Edwin Zhai <edwin.zhai@intel.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Sep 21 10:11:02 2005 +0000 (2005-09-21)
parents 21e7935b2025
children 7a45b8ccef01
files xen/arch/x86/vmx.c xen/arch/x86/vmx_io.c xen/arch/x86/vmx_vmcs.c xen/include/asm-x86/vmx.h xen/include/asm-x86/vmx_vmcs.h
line diff
     1.1 --- a/xen/arch/x86/vmx.c	Wed Sep 21 09:58:15 2005 +0000
     1.2 +++ b/xen/arch/x86/vmx.c	Wed Sep 21 10:11:02 2005 +0000
     1.3 @@ -377,12 +377,13 @@ static void inline __update_guest_eip(un
     1.4  
     1.5  static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
     1.6  {
     1.7 -    unsigned long eip;
     1.8      unsigned long gpa; /* FIXME: PAE */
     1.9      int result;
    1.10  
    1.11 -#if VMX_DEBUG
    1.12 +#if 0 /* keep for debugging */
    1.13      {
    1.14 +        unsigned long eip;
    1.15 +
    1.16          __vmread(GUEST_RIP, &eip);
    1.17          VMX_DBG_LOG(DBG_LEVEL_VMMU, 
    1.18                      "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
    1.19 @@ -429,9 +430,9 @@ static void vmx_do_no_device_fault(void)
    1.20          
    1.21      clts();
    1.22      setup_fpu(current);
    1.23 -    __vmread(CR0_READ_SHADOW, &cr0);
    1.24 +    __vmread_vcpu(CR0_READ_SHADOW, &cr0);
    1.25      if (!(cr0 & X86_CR0_TS)) {
    1.26 -        __vmread(GUEST_CR0, &cr0);
    1.27 +        __vmread_vcpu(GUEST_CR0, &cr0);
    1.28          cr0 &= ~X86_CR0_TS;
    1.29          __vmwrite(GUEST_CR0, cr0);
    1.30      }
    1.31 @@ -1129,9 +1130,7 @@ static int vmx_set_cr0(unsigned long val
    1.32                  __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
    1.33              }
    1.34          }
    1.35 -        __vmread(GUEST_RIP, &eip);
    1.36 -        VMX_DBG_LOG(DBG_LEVEL_1,
    1.37 -                    "Disabling CR0.PE at %%eip 0x%lx\n", eip);
    1.38 +
    1.39          if (vmx_assist(d, VMX_ASSIST_INVOKE)) {
    1.40              set_bit(VMX_CPU_STATE_ASSIST_ENABLED, &d->arch.arch_vmx.cpu_state);
    1.41              __vmread(GUEST_RIP, &eip);
    1.42 @@ -1370,17 +1369,17 @@ static int vmx_cr_access(unsigned long e
    1.43          clts();
    1.44          setup_fpu(current);
    1.45  
    1.46 -        __vmread(GUEST_CR0, &value);
    1.47 +        __vmread_vcpu(GUEST_CR0, &value);
    1.48          value &= ~X86_CR0_TS; /* clear TS */
    1.49          __vmwrite(GUEST_CR0, value);
    1.50  
    1.51 -        __vmread(CR0_READ_SHADOW, &value);
    1.52 +        __vmread_vcpu(CR0_READ_SHADOW, &value);
    1.53          value &= ~X86_CR0_TS; /* clear TS */
    1.54          __vmwrite(CR0_READ_SHADOW, value);
    1.55          break;
    1.56      case TYPE_LMSW:
    1.57          TRACE_VMEXIT(1,TYPE_LMSW);
    1.58 -        __vmread(CR0_READ_SHADOW, &value);
    1.59 +        __vmread_vcpu(CR0_READ_SHADOW, &value);
    1.60          value = (value & ~0xF) |
    1.61              (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
    1.62          return vmx_set_cr0(value);
    1.63 @@ -1456,16 +1455,12 @@ static inline void vmx_do_msr_write(stru
    1.64                  (unsigned long)regs->edx);
    1.65  }
    1.66  
    1.67 +volatile unsigned long do_hlt_count;
    1.68  /*
    1.69   * Need to use this exit to reschedule
    1.70   */
    1.71 -static inline void vmx_vmexit_do_hlt(void)
    1.72 +void vmx_vmexit_do_hlt(void)
    1.73  {
    1.74 -#if VMX_DEBUG
    1.75 -    unsigned long eip;
    1.76 -    __vmread(GUEST_RIP, &eip);
    1.77 -#endif
    1.78 -    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_hlt:eip=%lx", eip);
    1.79      raise_softirq(SCHEDULE_SOFTIRQ);
    1.80  }
    1.81  
    1.82 @@ -1516,13 +1511,9 @@ static inline void vmx_vmexit_do_extint(
    1.83      }
    1.84  }
    1.85  
    1.86 +volatile unsigned long do_mwait_count;
    1.87  static inline void vmx_vmexit_do_mwait(void)
    1.88  {
    1.89 -#if VMX_DEBUG
    1.90 -    unsigned long eip;
    1.91 -    __vmread(GUEST_RIP, &eip);
    1.92 -#endif
    1.93 -    VMX_DBG_LOG(DBG_LEVEL_1, "vmx_vmexit_do_mwait:eip=%lx", eip);
    1.94      raise_softirq(SCHEDULE_SOFTIRQ);
    1.95  }
    1.96  
    1.97 @@ -1631,9 +1622,13 @@ asmlinkage void vmx_vmexit_handler(struc
    1.98          return;
    1.99      }
   1.100  
   1.101 -    __vmread(GUEST_RIP, &eip);
   1.102 -    TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
   1.103 -    TRACE_VMEXIT(0,exit_reason);
   1.104 +#ifdef TRACE_BUFFER
   1.105 +    {
   1.106 +        __vmread(GUEST_RIP, &eip);
   1.107 +        TRACE_3D(TRC_VMX_VMEXIT, v->domain->domain_id, eip, exit_reason);
   1.108 +        TRACE_VMEXIT(0,exit_reason);
   1.109 +    }
   1.110 +#endif
   1.111  
   1.112      switch (exit_reason) {
   1.113      case EXIT_REASON_EXCEPTION_NMI:
     2.1 --- a/xen/arch/x86/vmx_io.c	Wed Sep 21 09:58:15 2005 +0000
     2.2 +++ b/xen/arch/x86/vmx_io.c	Wed Sep 21 10:11:02 2005 +0000
     2.3 @@ -891,7 +891,7 @@ asmlinkage void vmx_intr_assist(void)
     2.4      struct vcpu *v = current;
     2.5  
     2.6      highest_vector = find_highest_pending_irq(v, &intr_type);
     2.7 -    __vmread(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);
     2.8 +    __vmread_vcpu(CPU_BASED_VM_EXEC_CONTROL, &cpu_exec_control);
     2.9  
    2.10      if (highest_vector == -1) {
    2.11          disable_irq_window(cpu_exec_control);
    2.12 @@ -948,14 +948,6 @@ asmlinkage void vmx_intr_assist(void)
    2.13  void vmx_do_resume(struct vcpu *d) 
    2.14  {
    2.15      vmx_stts();
    2.16 -    if ( vmx_paging_enabled(d) )
    2.17 -        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->arch.shadow_table));
    2.18 -    else
    2.19 -        // paging is not enabled in the guest
    2.20 -        __vmwrite(GUEST_CR3, pagetable_get_paddr(d->domain->arch.phys_table));
    2.21 -
    2.22 -    __vmwrite(HOST_CR3, pagetable_get_paddr(d->arch.monitor_table));
    2.23 -    __vmwrite(HOST_RSP, (unsigned long)get_stack_bottom());
    2.24  
    2.25      if (event_pending(d)) {
    2.26          vmx_check_events(d);
     3.1 --- a/xen/arch/x86/vmx_vmcs.c	Wed Sep 21 09:58:15 2005 +0000
     3.2 +++ b/xen/arch/x86/vmx_vmcs.c	Wed Sep 21 10:11:02 2005 +0000
     3.3 @@ -68,9 +68,6 @@ static inline int construct_vmcs_control
     3.4      error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL, 
     3.5                         MONITOR_PIN_BASED_EXEC_CONTROLS);
     3.6  
     3.7 -    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, 
     3.8 -                       MONITOR_CPU_BASED_EXEC_CONTROLS);
     3.9 -
    3.10      error |= __vmwrite(VM_EXIT_CONTROLS, MONITOR_VM_EXIT_CONTROLS);
    3.11  
    3.12      error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
    3.13 @@ -117,12 +114,6 @@ struct host_execution_env {
    3.14      unsigned long fs_base; 
    3.15      unsigned long gs_base; 
    3.16  #endif 
    3.17 -
    3.18 -    /* control registers */
    3.19 -    unsigned long cr3;
    3.20 -    unsigned long cr0;
    3.21 -    unsigned long cr4;
    3.22 -    unsigned long dr7;
    3.23  };
    3.24  
    3.25  #define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
    3.26 @@ -217,9 +208,33 @@ void vmx_do_launch(struct vcpu *v)
    3.27  /* Update CR3, GDT, LDT, TR */
    3.28      unsigned int  error = 0;
    3.29      unsigned long pfn = 0;
    3.30 +    unsigned long cr0, cr4;
    3.31      struct pfn_info *page;
    3.32      struct cpu_user_regs *regs = guest_cpu_user_regs();
    3.33  
    3.34 +    __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (cr0) : );
    3.35 +
    3.36 +    error |= __vmwrite(GUEST_CR0, cr0);
    3.37 +    cr0 &= ~X86_CR0_PG;
    3.38 +    error |= __vmwrite(CR0_READ_SHADOW, cr0);
    3.39 +    error |= __vmwrite(CPU_BASED_VM_EXEC_CONTROL, 
    3.40 +                       MONITOR_CPU_BASED_EXEC_CONTROLS);
    3.41 +
    3.42 +    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (cr4) : );
    3.43 +
    3.44 +#ifdef __x86_64__
    3.45 +    error |= __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
    3.46 +#else
    3.47 +    error |= __vmwrite(GUEST_CR4, cr4);
    3.48 +#endif
    3.49 +
    3.50 +#ifdef __x86_64__
    3.51 +    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
    3.52 +#else
    3.53 +    cr4 &= ~(X86_CR4_PGE | X86_CR4_VMXE);
    3.54 +#endif
    3.55 +    error |= __vmwrite(CR4_READ_SHADOW, cr4);
    3.56 +
    3.57      vmx_stts();
    3.58  
    3.59      page = (struct pfn_info *) alloc_domheap_page(NULL);
    3.60 @@ -254,7 +269,7 @@ construct_init_vmcs_guest(struct cpu_use
    3.61      int error = 0;
    3.62      union vmcs_arbytes arbytes;
    3.63      unsigned long dr7;
    3.64 -    unsigned long eflags, shadow_cr;
    3.65 +    unsigned long eflags;
    3.66  
    3.67      /* MSR */
    3.68      error |= __vmwrite(VM_EXIT_MSR_LOAD_ADDR, 0);
    3.69 @@ -326,27 +341,7 @@ construct_init_vmcs_guest(struct cpu_use
    3.70  
    3.71      arbytes.fields.seg_type = 0xb;          /* 32-bit TSS (busy) */
    3.72      error |= __vmwrite(GUEST_TR_AR_BYTES, arbytes.bytes);
    3.73 -
    3.74 -    error |= __vmwrite(GUEST_CR0, host_env->cr0); /* same CR0 */
    3.75 -
    3.76 -    /* Initally PG, PE are not set*/
    3.77 -    shadow_cr = host_env->cr0;
    3.78 -    shadow_cr &= ~X86_CR0_PG;
    3.79 -    error |= __vmwrite(CR0_READ_SHADOW, shadow_cr);
    3.80      /* CR3 is set in vmx_final_setup_guest */
    3.81 -#ifdef __x86_64__
    3.82 -    error |= __vmwrite(GUEST_CR4, host_env->cr4 & ~X86_CR4_PSE);
    3.83 -#else
    3.84 -    error |= __vmwrite(GUEST_CR4, host_env->cr4);
    3.85 -#endif
    3.86 -    shadow_cr = host_env->cr4;
    3.87 -
    3.88 -#ifdef __x86_64__
    3.89 -    shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
    3.90 -#else
    3.91 -    shadow_cr &= ~(X86_CR4_PGE | X86_CR4_VMXE);
    3.92 -#endif
    3.93 -    error |= __vmwrite(CR4_READ_SHADOW, shadow_cr);
    3.94  
    3.95      error |= __vmwrite(GUEST_ES_BASE, host_env->ds_base);
    3.96      error |= __vmwrite(GUEST_CS_BASE, host_env->cs_base);
    3.97 @@ -403,12 +398,10 @@ static inline int construct_vmcs_host(st
    3.98      host_env->cs_base = 0;
    3.99  
   3.100      __asm__ __volatile__ ("mov %%cr0,%0" : "=r" (crn) : );
   3.101 -    host_env->cr0 = crn;
   3.102      error |= __vmwrite(HOST_CR0, crn); /* same CR0 */
   3.103  
   3.104      /* CR3 is set in vmx_final_setup_hostos */
   3.105      __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) : ); 
   3.106 -    host_env->cr4 = crn;
   3.107      error |= __vmwrite(HOST_CR4, crn);
   3.108  
   3.109      error |= __vmwrite(HOST_RIP, (unsigned long) vmx_asm_vmexit_handler);
     4.1 --- a/xen/include/asm-x86/vmx.h	Wed Sep 21 09:58:15 2005 +0000
     4.2 +++ b/xen/include/asm-x86/vmx.h	Wed Sep 21 10:11:02 2005 +0000
     4.3 @@ -314,6 +314,57 @@ static always_inline int ___vmread (cons
     4.4      return 0;
     4.5  }
     4.6  
     4.7 +
     4.8 +static always_inline void __vmwrite_vcpu(unsigned long field, unsigned long value)
     4.9 +{
    4.10 +    struct vcpu *v = current;
    4.11 +
    4.12 +    switch(field) {
    4.13 +    case CR0_READ_SHADOW:
    4.14 +	v->arch.arch_vmx.cpu_shadow_cr0 = value;
    4.15 +	break;
    4.16 +    case GUEST_CR0:
    4.17 +	v->arch.arch_vmx.cpu_cr0 = value;
    4.18 +	break;
    4.19 +    case CPU_BASED_VM_EXEC_CONTROL:
    4.20 +	v->arch.arch_vmx.cpu_based_exec_control = value;
    4.21 +	break;
    4.22 +    default:
    4.23 +	printk("__vmwrite_vcpu: invalid field %lx\n", field);
    4.24 +	break;
    4.25 +    }
    4.26 +}
    4.27 +
    4.28 +static always_inline void __vmread_vcpu(unsigned long field, unsigned long *value)
    4.29 +{
    4.30 +    struct vcpu *v = current;
    4.31 +
    4.32 +    switch(field) {
    4.33 +    case CR0_READ_SHADOW:
    4.34 +	*value = v->arch.arch_vmx.cpu_shadow_cr0;
    4.35 +	break;
    4.36 +    case GUEST_CR0:
    4.37 +	*value = v->arch.arch_vmx.cpu_cr0;
    4.38 +	break;
    4.39 +    case CPU_BASED_VM_EXEC_CONTROL:
    4.40 +	*value = v->arch.arch_vmx.cpu_based_exec_control;
    4.41 +	break;
    4.42 +    default:
    4.43 +	printk("__vmread_vcpu: invalid field %lx\n", field);
    4.44 +	*value = 0; /* don't read uninitialized memory below; fall back to __vmread */
    4.45 +    }
    4.46 +
    4.47 +   /*
    4.48 +    * __vmwrite() can be used for a non-current vcpu, and it's possible that
    4.49 +    * the cached vcpu field has not been initialized in that case.
    4.50 +    * Fall back to reading the VMCS directly and refresh the cache.
    4.51 +    */
    4.52 +    if (!*value) {
    4.53 +	__vmread(field, value);
    4.54 +	__vmwrite_vcpu(field, *value);
    4.55 +    }
    4.56 +}
    4.57 +
    4.58  static inline int __vmwrite (unsigned long field, unsigned long value)
    4.59  {
    4.60      unsigned long eflags;
    4.61 @@ -326,6 +377,15 @@ static inline int __vmwrite (unsigned lo
    4.62      __save_flags(eflags);
    4.63      if (eflags & X86_EFLAGS_ZF || eflags & X86_EFLAGS_CF)
    4.64          return -1;
    4.65 +
    4.66 +    switch(field) {
    4.67 +    case CR0_READ_SHADOW:
    4.68 +    case GUEST_CR0:
    4.69 +    case CPU_BASED_VM_EXEC_CONTROL:
    4.70 +	__vmwrite_vcpu(field, value);
    4.71 +	break;
    4.72 +    }
    4.73 +
    4.74      return 0;
    4.75  }
    4.76  
    4.77 @@ -379,11 +439,12 @@ static inline void vmx_stts(void)
    4.78  {
    4.79      unsigned long cr0;
    4.80  
    4.81 -    __vmread(GUEST_CR0, &cr0);
    4.82 -    if (!(cr0 & X86_CR0_TS))
    4.83 +    __vmread_vcpu(GUEST_CR0, &cr0);
    4.84 +    if (!(cr0 & X86_CR0_TS)) {
    4.85          __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
    4.86 +    }
    4.87  
    4.88 -    __vmread(CR0_READ_SHADOW, &cr0);
    4.89 +    __vmread_vcpu(CR0_READ_SHADOW, &cr0);
    4.90      if (!(cr0 & X86_CR0_TS))
    4.91         __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
    4.92  }
    4.93 @@ -393,7 +454,7 @@ static inline int vmx_paging_enabled(str
    4.94  {
    4.95      unsigned long cr0;
    4.96  
    4.97 -    __vmread(CR0_READ_SHADOW, &cr0);
    4.98 +    __vmread_vcpu(CR0_READ_SHADOW, &cr0);
    4.99      return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
   4.100  }
   4.101  
     5.1 --- a/xen/include/asm-x86/vmx_vmcs.h	Wed Sep 21 09:58:15 2005 +0000
     5.2 +++ b/xen/include/asm-x86/vmx_vmcs.h	Wed Sep 21 10:11:02 2005 +0000
     5.3 @@ -74,9 +74,12 @@ struct msr_state{
     5.4  struct arch_vmx_struct {
     5.5      struct vmcs_struct      *vmcs;  /* VMCS pointer in virtual */
     5.6      unsigned long           flags;  /* VMCS flags */
     5.7 +    unsigned long           cpu_cr0; /* copy of guest CR0 */
     5.8 +    unsigned long           cpu_shadow_cr0; /* copy of guest read shadow CR0 */
     5.9      unsigned long           cpu_cr2; /* save CR2 */
    5.10      unsigned long           cpu_cr3;
    5.11      unsigned long           cpu_state;
    5.12 +    unsigned long           cpu_based_exec_control;
    5.13      struct msr_state        msr_content;
    5.14      void                   *io_bitmap_a, *io_bitmap_b;
    5.15  };