direct-io.hg

changeset 15499:50c18666d660

SVM: Sync with VMX code changes.
Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Sat Jul 07 11:21:34 2007 +0100 (2007-07-07)
parents 41c8284cfc0c
children 259bb15b2d1e
files xen/arch/x86/hvm/svm/emulate.c xen/arch/x86/hvm/svm/svm.c xen/include/asm-x86/hvm/svm/svm.h
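
In brief, the patch de-inlines several SVM helpers in svm.c, moves the mode predicates (svm_lme_is_set(), svm_long_mode_enabled(), svm_cr4_pae_is_set(), svm_paging_enabled(), svm_pae_enabled(), svm_nx_enabled(), svm_pgbit_test()) out of svm.h into svm.c as file-local static functions with i386 stubs, drops the now-redundant __x86_64__ guards around the EFER.LME/LMA handling, and switches emulate.c to the generic hvm_long_mode_enabled() check. The standalone sketch below is not Xen code: the toy_* names, the struct fields and the return-value convention are invented stand-ins for the per-vCPU shadow EFER/CR0/CR4 state. It only models how these predicates interact on the CR0 write path, mirroring the svm_set_cr0() hunk in the diff below: enabling paging while EFER.LME is set requires CR4.PAE and activates long mode by setting EFER.LMA.

    /* Simplified sketch of the SVM mode predicates; toy types, not Xen's. */
    #include <stdio.h>
    #include <stdint.h>

    #define EFER_LME    (1ULL << 8)   /* long mode enable (guest-requested)   */
    #define EFER_LMA    (1ULL << 10)  /* long mode active (set when CR0.PG=1) */
    #define X86_CR0_PE  (1UL << 0)
    #define X86_CR0_PG  (1UL << 31)
    #define X86_CR4_PAE (1UL << 5)

    struct toy_vcpu {
        uint64_t      shadow_efer;
        unsigned long shadow_cr0, shadow_cr4;
    };

    static int toy_lme_is_set(const struct toy_vcpu *v)
    { return !!(v->shadow_efer & EFER_LME); }

    static int toy_long_mode_enabled(const struct toy_vcpu *v)
    { return !!(v->shadow_efer & EFER_LMA); }

    static int toy_paging_enabled(const struct toy_vcpu *v)
    { return (v->shadow_cr0 & X86_CR0_PE) && (v->shadow_cr0 & X86_CR0_PG); }

    /* Models the CR0 write path: turning on paging with EFER.LME set
     * requires CR4.PAE and activates long mode (EFER.LMA). */
    static int toy_set_cr0(struct toy_vcpu *v, unsigned long value)
    {
        if ( (value & X86_CR0_PG) && toy_lme_is_set(v) )
        {
            if ( !(v->shadow_cr4 & X86_CR4_PAE) )
                return -1;   /* the hypervisor would inject #GP here */
            v->shadow_efer |= EFER_LMA;
        }
        v->shadow_cr0 = value;
        return 0;
    }

    int main(void)
    {
        struct toy_vcpu v = { .shadow_efer = EFER_LME, .shadow_cr4 = X86_CR4_PAE };
        toy_set_cr0(&v, X86_CR0_PE | X86_CR0_PG);
        printf("long mode enabled: %d, paging enabled: %d\n",
               toy_long_mode_enabled(&v), toy_paging_enabled(&v));
        return 0;
    }

Built with any C99 compiler, the sketch prints "long mode enabled: 1, paging enabled: 1", illustrating why the stub versions of svm_lme_is_set()/svm_long_mode_enabled() returning 0 on i386 let the __x86_64__ guards be removed from the common paths.
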
line diff
     1.1 --- a/xen/arch/x86/hvm/svm/emulate.c	Sat Jul 07 11:08:57 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/emulate.c	Sat Jul 07 11:21:34 2007 +0100
     1.3 @@ -201,7 +201,7 @@ unsigned long get_effective_addr_modrm64
     1.4  
     1.5  #if __x86_64__
     1.6          /* 64-bit mode */
     1.7 -        if (vmcb->cs.attr.fields.l && svm_long_mode_enabled(v))
     1.8 +        if (vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v))
     1.9              return vmcb->rip + inst_len + *size + disp;
    1.10  #endif
    1.11          return disp;
    1.12 @@ -327,7 +327,7 @@ unsigned long svm_rip2pointer(struct vcp
    1.13       */
    1.14      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.15      unsigned long p = vmcb->cs.base + vmcb->rip;
    1.16 -    if (!(vmcb->cs.attr.fields.l && svm_long_mode_enabled(v)))
    1.17 +    if (!(vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v)))
    1.18          return (u32)p; /* mask to 32 bits */
    1.19      /* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
    1.20      return p;
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Sat Jul 07 11:08:57 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Sat Jul 07 11:21:34 2007 +0100
     2.3 @@ -69,7 +69,7 @@ static void *root_vmcb[NR_CPUS] __read_m
     2.4  /* hardware assisted paging bits */
     2.5  extern int opt_hap_enabled;
     2.6  
     2.7 -static inline void svm_inject_exception(struct vcpu *v, int trap, 
     2.8 +static void svm_inject_exception(struct vcpu *v, int trap, 
     2.9                                          int ev, int error_code)
    2.10  {
    2.11      eventinj_t event;
    2.12 @@ -98,6 +98,57 @@ static void stop_svm(void)
    2.13      write_efer(read_efer() & ~EFER_SVME);
    2.14  }
    2.15  
    2.16 +#ifdef __x86_64__
    2.17 +
    2.18 +static int svm_lme_is_set(struct vcpu *v)
    2.19 +{
    2.20 +    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
    2.21 +    return guest_efer & EFER_LME;
    2.22 +}
    2.23 +
    2.24 +static int svm_long_mode_enabled(struct vcpu *v)
    2.25 +{
    2.26 +    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
    2.27 +    return guest_efer & EFER_LMA;
    2.28 +}
    2.29 +
    2.30 +#else /* __i386__ */
    2.31 +
    2.32 +static int svm_lme_is_set(struct vcpu *v)
    2.33 +{ return 0; }
    2.34 +static int svm_long_mode_enabled(struct vcpu *v)
    2.35 +{ return 0; }
    2.36 +
    2.37 +#endif
    2.38 +
    2.39 +static int svm_cr4_pae_is_set(struct vcpu *v)
    2.40 +{
    2.41 +    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    2.42 +    return guest_cr4 & X86_CR4_PAE;
    2.43 +}
    2.44 +
    2.45 +static int svm_paging_enabled(struct vcpu *v)
    2.46 +{
    2.47 +    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
    2.48 +    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
    2.49 +}
    2.50 +
    2.51 +static int svm_pae_enabled(struct vcpu *v)
    2.52 +{
    2.53 +    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    2.54 +    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
    2.55 +}
    2.56 +
    2.57 +static int svm_nx_enabled(struct vcpu *v)
    2.58 +{
    2.59 +    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
    2.60 +}
    2.61 +
    2.62 +static int svm_pgbit_test(struct vcpu *v)
    2.63 +{
    2.64 +    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
    2.65 +}
    2.66 +
    2.67  static void svm_store_cpu_guest_regs(
    2.68      struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
    2.69  {
    2.70 @@ -122,7 +173,7 @@ static void svm_store_cpu_guest_regs(
    2.71      }
    2.72  }
    2.73  
    2.74 -static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
    2.75 +static int long_mode_do_msr_write(struct cpu_user_regs *regs)
    2.76  {
    2.77      u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
    2.78      u32 ecx = regs->ecx;
    2.79 @@ -149,7 +200,6 @@ static inline int long_mode_do_msr_write
    2.80              goto gp_fault;
    2.81          }
    2.82  
    2.83 -#ifdef __x86_64__
    2.84          if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
    2.85          {
    2.86              /* EFER.LME transition from 0 to 1. */
    2.87 @@ -170,7 +220,6 @@ static inline int long_mode_do_msr_write
    2.88                  goto gp_fault;
    2.89              }
    2.90          }
    2.91 -#endif /* __x86_64__ */
    2.92  
    2.93          v->arch.hvm_svm.cpu_shadow_efer = msr_content;
    2.94          vmcb->efer = msr_content | EFER_SVME;
    2.95 @@ -204,7 +253,7 @@ static inline int long_mode_do_msr_write
    2.96  #define savedebug(_v,_reg) \
    2.97      asm volatile ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
    2.98  
    2.99 -static inline void svm_save_dr(struct vcpu *v)
   2.100 +static void svm_save_dr(struct vcpu *v)
   2.101  {
   2.102      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.103  
   2.104 @@ -224,7 +273,7 @@ static inline void svm_save_dr(struct vc
   2.105  }
   2.106  
   2.107  
   2.108 -static inline void __restore_debug_registers(struct vcpu *v)
   2.109 +static void __restore_debug_registers(struct vcpu *v)
   2.110  {
   2.111      loaddebug(&v->arch.guest_context, 0);
   2.112      loaddebug(&v->arch.guest_context, 1);
   2.113 @@ -546,7 +595,7 @@ static int svm_load_vmcb_ctxt(struct vcp
   2.114      return 0;
   2.115  }
   2.116  
   2.117 -static inline void svm_restore_dr(struct vcpu *v)
   2.118 +static void svm_restore_dr(struct vcpu *v)
   2.119  {
   2.120      if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
   2.121          __restore_debug_registers(v);
   2.122 @@ -636,11 +685,8 @@ static void svm_sync_vmcb(struct vcpu *v
   2.123  static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
   2.124  {
   2.125      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.126 -    int long_mode = 0;
   2.127 -
   2.128 -#ifdef __x86_64__
   2.129 -    long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
   2.130 -#endif
   2.131 +    int long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
   2.132 +
   2.133      switch ( seg )
   2.134      {
   2.135      case x86_seg_cs: return long_mode ? 0 : vmcb->cs.base;
   2.136 @@ -1123,7 +1169,7 @@ static void svm_vmexit_do_cpuid(struct v
   2.137      __update_guest_eip(vmcb, inst_len);
   2.138  }
   2.139  
   2.140 -static inline unsigned long *get_reg_p(
   2.141 +static unsigned long *get_reg_p(
   2.142      unsigned int gpreg, 
   2.143      struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
   2.144  {
   2.145 @@ -1188,7 +1234,7 @@ static inline unsigned long *get_reg_p(
   2.146  }
   2.147  
   2.148  
   2.149 -static inline unsigned long get_reg(
   2.150 +static unsigned long get_reg(
   2.151      unsigned int gpreg, struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
   2.152  {
   2.153      unsigned long *gp;
   2.154 @@ -1197,7 +1243,7 @@ static inline unsigned long get_reg(
   2.155  }
   2.156  
   2.157  
   2.158 -static inline void set_reg(
   2.159 +static void set_reg(
   2.160      unsigned int gpreg, unsigned long value, 
   2.161      struct cpu_user_regs *regs, struct vmcb_struct *vmcb)
   2.162  {
   2.163 @@ -1300,7 +1346,7 @@ static void svm_get_prefix_info(struct v
   2.164  
   2.165  
   2.166  /* Get the address of INS/OUTS instruction */
   2.167 -static inline int svm_get_io_address(
   2.168 +static int svm_get_io_address(
   2.169      struct vcpu *v, struct cpu_user_regs *regs,
   2.170      unsigned int size, ioio_info_t info,
   2.171      unsigned long *count, unsigned long *addr)
   2.172 @@ -1311,10 +1357,8 @@ static inline int svm_get_io_address(
   2.173      svm_segment_register_t *seg = NULL;
   2.174      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.175  
   2.176 -#ifdef __x86_64__
   2.177 -    /* If we're in long mode, we shouldn't check the segment presence & limit */
   2.178 +    /* If we're in long mode, don't check the segment presence & limit */
   2.179      long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
   2.180 -#endif
   2.181  
   2.182      /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit. 
   2.183       * l field combined with EFER_LMA says whether it's 16 or 64 bit. 
   2.184 @@ -1641,7 +1685,6 @@ static int svm_set_cr0(unsigned long val
   2.185  
   2.186      if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
   2.187      {
   2.188 -#if defined(__x86_64__)
   2.189          if ( svm_lme_is_set(v) )
   2.190          {
   2.191              if ( !svm_cr4_pae_is_set(v) )
   2.192 @@ -1654,7 +1697,6 @@ static int svm_set_cr0(unsigned long val
   2.193              v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
   2.194              vmcb->efer |= EFER_LMA | EFER_LME;
   2.195          }
   2.196 -#endif  /* __x86_64__ */
   2.197  
   2.198          if ( !paging_mode_hap(v->domain) )
   2.199          {
   2.200 @@ -2067,7 +2109,7 @@ static int svm_cr_access(struct vcpu *v,
   2.201      return result;
   2.202  }
   2.203  
   2.204 -static inline void svm_do_msr_access(
   2.205 +static void svm_do_msr_access(
   2.206      struct vcpu *v, struct cpu_user_regs *regs)
   2.207  {
   2.208      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   2.209 @@ -2170,7 +2212,7 @@ static inline void svm_do_msr_access(
   2.210      __update_guest_eip(vmcb, inst_len);
   2.211  }
   2.212  
   2.213 -static inline void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
   2.214 +static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
   2.215  {
   2.216      enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
   2.217  
     3.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Sat Jul 07 11:08:57 2007 +0100
     3.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Sat Jul 07 11:21:34 2007 +0100
     3.3 @@ -30,46 +30,6 @@
     3.4  
     3.5  extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
     3.6  
     3.7 -static inline int svm_long_mode_enabled(struct vcpu *v)
     3.8 -{
     3.9 -    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
    3.10 -    return guest_efer & EFER_LMA;
    3.11 -}
    3.12 -
    3.13 -static inline int svm_lme_is_set(struct vcpu *v)
    3.14 -{
    3.15 -    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
    3.16 -    return guest_efer & EFER_LME;
    3.17 -}
    3.18 -
    3.19 -static inline int svm_cr4_pae_is_set(struct vcpu *v)
    3.20 -{
    3.21 -    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    3.22 -    return guest_cr4 & X86_CR4_PAE;
    3.23 -}
    3.24 -
    3.25 -static inline int svm_paging_enabled(struct vcpu *v)
    3.26 -{
    3.27 -    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
    3.28 -    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
    3.29 -}
    3.30 -
    3.31 -static inline int svm_pae_enabled(struct vcpu *v)
    3.32 -{
    3.33 -    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
    3.34 -    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
    3.35 -}
    3.36 -
    3.37 -static inline int svm_nx_enabled(struct vcpu *v)
    3.38 -{
    3.39 -    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
    3.40 -}
    3.41 -
    3.42 -static inline int svm_pgbit_test(struct vcpu *v)
    3.43 -{
    3.44 -    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
    3.45 -}
    3.46 -
    3.47  #define SVM_REG_EAX (0) 
    3.48  #define SVM_REG_ECX (1) 
    3.49  #define SVM_REG_EDX (2)