direct-io.hg

changeset 11132:d20e1835c24b

Various HVM clean-ups: hoist duplicated per-vendor logic (stts, guest-time/TSC-offset handling, do_resume, post-interrupt-injection bookkeeping) into common hvm_* helpers driven by new hvm_funcs hooks, replace the svm_globals[] per-core array with per-CPU host-save-area and root VMCB data, and add a 'v' keyhandler to dump AMD-V VMCBs.

Signed-off-by: Steven Hand <steven@xensource.com>
author shand@kneesaa.uk.xensource.com
date Tue Aug 15 18:20:03 2006 +0100 (2006-08-15)
parents 5d42f6f0a187
children bb37d167c82e
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/i8254.c xen/arch/x86/hvm/i8259.c xen/arch/x86/hvm/io.c xen/arch/x86/hvm/svm/emulate.c xen/arch/x86/hvm/svm/intr.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/svm/x86_32/exits.S xen/arch/x86/hvm/svm/x86_64/exits.S xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vmx/io.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/x86_32/exits.S xen/arch/x86/hvm/vmx/x86_64/exits.S xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_64/asm-offsets.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/io.h xen/include/asm-x86/hvm/svm/svm.h xen/include/asm-x86/hvm/svm/vmcb.h xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/hvm/vmx/vmx.h xen/include/asm-x86/processor.h xen/include/asm-x86/system.h
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Tue Aug 15 17:03:06 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Tue Aug 15 18:20:03 2006 +0100
     1.3 @@ -199,6 +199,55 @@ void hvm_create_event_channels(struct vc
     1.4      }
     1.5  }
     1.6  
     1.7 +
     1.8 +void hvm_stts(struct vcpu *v)
     1.9 +{
    1.10 +    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
    1.11 +    if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
    1.12 +        return;
    1.13 +    
    1.14 +    hvm_funcs.stts(v);
    1.15 +}
    1.16 +
    1.17 +void hvm_set_guest_time(struct vcpu *v, u64 gtime)
    1.18 +{
    1.19 +    u64 host_tsc;
    1.20 +   
    1.21 +    rdtscll(host_tsc);
    1.22 +    
    1.23 +    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    1.24 +    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
    1.25 +}
    1.26 +
    1.27 +void hvm_do_resume(struct vcpu *v)
    1.28 +{
    1.29 +    ioreq_t *p;
    1.30 +    struct periodic_time *pt =
    1.31 +        &v->domain->arch.hvm_domain.pl_time.periodic_tm;
    1.32 +
    1.33 +    hvm_stts(v);
    1.34 +
    1.35 +    /* pick up the elapsed PIT ticks and re-enable pit_timer */
    1.36 +    if ( pt->enabled && pt->first_injected ) {
    1.37 +        if ( v->arch.hvm_vcpu.guest_time ) {
    1.38 +            hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    1.39 +            v->arch.hvm_vcpu.guest_time = 0;
    1.40 +        }
    1.41 +        pickup_deactive_ticks(pt);
    1.42 +    }
    1.43 +
    1.44 +    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
    1.45 +    wait_on_xen_event_channel(v->arch.hvm.xen_port,
    1.46 +                              p->state != STATE_IOREQ_READY &&
    1.47 +                              p->state != STATE_IOREQ_INPROCESS);
    1.48 +    if ( p->state == STATE_IORESP_READY )
    1.49 +        hvm_io_assist(v);
    1.50 +    if ( p->state != STATE_INVALID ) {
    1.51 +        printf("Weird HVM iorequest state %d.\n", p->state);
    1.52 +        domain_crash(v->domain);
    1.53 +    }
    1.54 +}
    1.55 +
    1.56  void hvm_release_assist_channel(struct vcpu *v)
    1.57  {
    1.58      free_xen_event_channel(v, v->arch.hvm_vcpu.xen_port);
    1.59 @@ -299,8 +348,7 @@ int cpu_get_interrupt(struct vcpu *v, in
    1.60  /*
    1.61   * Copy from/to guest virtual.
    1.62   */
    1.63 -int
    1.64 -hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
    1.65 +int hvm_copy(void *buf, unsigned long vaddr, int size, int dir)
    1.66  {
    1.67      unsigned long mfn;
    1.68      char *addr;
     2.1 --- a/xen/arch/x86/hvm/i8254.c	Tue Aug 15 17:03:06 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/i8254.c	Tue Aug 15 18:20:03 2006 +0100
     2.3 @@ -389,7 +389,7 @@ void pit_init(struct vcpu *v, unsigned l
     2.4      register_portio_handler(PIT_BASE, 4, handle_pit_io);
     2.5      /* register the speaker port */
     2.6      register_portio_handler(0x61, 1, handle_speaker_io);
     2.7 -    ticks_per_sec(v) = cpu_khz * (int64_t)1000; 
     2.8 +    ticks_per_sec(v) = cpu_khz * (int64_t)1000;
     2.9  #ifdef DEBUG_PIT
    2.10      printk("HVM_PIT: guest frequency =%lld\n", (long long)ticks_per_sec(v));
    2.11  #endif
     3.1 --- a/xen/arch/x86/hvm/i8259.c	Tue Aug 15 17:03:06 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/i8259.c	Tue Aug 15 18:20:03 2006 +0100
     3.3 @@ -480,7 +480,6 @@ void pic_init(struct hvm_virpic *s, void
     3.4      s->pics[1].elcr_mask = 0xde;
     3.5      s->irq_request = irq_request;
     3.6      s->irq_request_opaque = irq_request_opaque;
     3.7 -    return; 
     3.8  }
     3.9  
    3.10  void pic_set_alt_irq_func(struct hvm_virpic *s,
    3.11 @@ -568,10 +567,10 @@ static int intercept_elcr_io(ioreq_t *p)
    3.12  }
    3.13  void register_pic_io_hook (void)
    3.14  {
    3.15 -    register_portio_handler(0x20, 2, intercept_pic_io); 
    3.16 -    register_portio_handler(0x4d0, 1, intercept_elcr_io); 
    3.17 -    register_portio_handler(0xa0, 2, intercept_pic_io); 
    3.18 -    register_portio_handler(0x4d1, 1, intercept_elcr_io); 
    3.19 +    register_portio_handler(0x20, 2, intercept_pic_io);
    3.20 +    register_portio_handler(0x4d0, 1, intercept_elcr_io);
    3.21 +    register_portio_handler(0xa0, 2, intercept_pic_io);
    3.22 +    register_portio_handler(0x4d1, 1, intercept_elcr_io);
    3.23  }
    3.24  
    3.25  
     4.1 --- a/xen/arch/x86/hvm/io.c	Tue Aug 15 17:03:06 2006 +0100
     4.2 +++ b/xen/arch/x86/hvm/io.c	Tue Aug 15 18:20:03 2006 +0100
     4.3 @@ -668,6 +668,37 @@ static void hvm_mmio_assist(struct cpu_u
     4.4      }
     4.5  }
     4.6  
     4.7 +void hvm_interrupt_post(struct vcpu *v, int vector, int type)
     4.8 +{
     4.9 +    struct  periodic_time *pt = 
    4.10 +        &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
    4.11 +
    4.12 +    if ( is_pit_irq(v, vector, type) ) {
    4.13 +        if ( !pt->first_injected ) {
    4.14 +            pt->pending_intr_nr = 0;
    4.15 +            pt->last_plt_gtime = hvm_get_guest_time(v);
    4.16 +            pt->scheduled = NOW() + pt->period;
    4.17 +            set_timer(&pt->timer, pt->scheduled);
    4.18 +            pt->first_injected = 1;
    4.19 +        } else {
    4.20 +            pt->pending_intr_nr--;
    4.21 +            pt->last_plt_gtime += pt->period_cycles;
    4.22 +            hvm_set_guest_time(v, pt->last_plt_gtime);
    4.23 +            pit_time_fired(v, pt->priv);
    4.24 +        }
    4.25 +    }
    4.26 +    
    4.27 +    switch(type) {
    4.28 +    case APIC_DM_EXTINT:
    4.29 +        break;
    4.30 +            
    4.31 +    default:
    4.32 +        vlapic_post_injection(v, vector, type);
    4.33 +        break;
    4.34 +    }
    4.35 +}
    4.36 +
    4.37 +
    4.38  void hvm_io_assist(struct vcpu *v)
    4.39  {
    4.40      vcpu_iodata_t *vio;
     5.1 --- a/xen/arch/x86/hvm/svm/emulate.c	Tue Aug 15 17:03:06 2006 +0100
     5.2 +++ b/xen/arch/x86/hvm/svm/emulate.c	Tue Aug 15 18:20:03 2006 +0100
     5.3 @@ -78,7 +78,7 @@ static inline unsigned long DECODE_GPR_V
     5.4      case 0x4:
     5.5          value = (unsigned long)vmcb->rsp;
     5.6      case 0x5:
     5.7 -        value = regs->ebp; 
     5.8 +        value = regs->ebp;
     5.9          break;
    5.10      case 0x6:
    5.11          value = regs->esi;
    5.12 @@ -429,7 +429,7 @@ int __get_instruction_length_from_list(s
    5.13          enum instruction_index *list, unsigned int list_count, 
    5.14          u8 *guest_eip_buf, enum instruction_index *match)
    5.15  {
    5.16 -    unsigned int inst_len = 0; 
    5.17 +    unsigned int inst_len = 0;
    5.18      unsigned int i;
    5.19      unsigned int j;
    5.20      int found = 0;
     6.1 --- a/xen/arch/x86/hvm/svm/intr.c	Tue Aug 15 17:03:06 2006 +0100
     6.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Tue Aug 15 18:20:03 2006 +0100
     6.3 @@ -42,48 +42,6 @@
     6.4   * Most of this code is copied from vmx_io.c and modified 
     6.5   * to be suitable for SVM.
     6.6   */
     6.7 -#define BSP_CPU(v)    (!(v->vcpu_id))
     6.8 -
     6.9 -void svm_set_guest_time(struct vcpu *v, u64 gtime)
    6.10 -{
    6.11 -    u64    host_tsc;
    6.12 -   
    6.13 -    rdtscll(host_tsc);
    6.14 -    
    6.15 -    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    6.16 -    v->arch.hvm_svm.vmcb->tsc_offset = v->arch.hvm_vcpu.cache_tsc_offset;
    6.17 -}
    6.18 -
    6.19 -static inline void
    6.20 -interrupt_post_injection(struct vcpu * v, int vector, int type)
    6.21 -{
    6.22 -    struct  periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
    6.23 -
    6.24 -    if ( is_pit_irq(v, vector, type) ) {
    6.25 -        if ( !pt->first_injected ) {
    6.26 -            pt->pending_intr_nr = 0;
    6.27 -            pt->last_plt_gtime = hvm_get_guest_time(v);
    6.28 -            pt->scheduled = NOW() + pt->period;
    6.29 -            set_timer(&pt->timer, pt->scheduled);
    6.30 -            pt->first_injected = 1;
    6.31 -        } else {
    6.32 -            pt->pending_intr_nr--;
    6.33 -            pt->last_plt_gtime += pt->period_cycles;
    6.34 -            svm_set_guest_time(v, pt->last_plt_gtime);
    6.35 -            pit_time_fired(v, pt->priv);
    6.36 -        }
    6.37 -    }
    6.38 -
    6.39 -    switch(type)
    6.40 -    {
    6.41 -    case APIC_DM_EXTINT:
    6.42 -        break;
    6.43 -
    6.44 -    default:
    6.45 -        vlapic_post_injection(v, vector, type);
    6.46 -        break;
    6.47 -    }
    6.48 -}
    6.49  
    6.50  static inline int svm_inject_extint(struct vcpu *v, int trap, int error_code)
    6.51  {
    6.52 @@ -109,7 +67,7 @@ asmlinkage void svm_intr_assist(void)
    6.53  {
    6.54      struct vcpu *v = current;
    6.55      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    6.56 -    struct hvm_domain *plat=&v->domain->arch.hvm_domain; 
    6.57 +    struct hvm_domain *plat=&v->domain->arch.hvm_domain;
    6.58      struct periodic_time *pt = &plat->pl_time.periodic_tm;
    6.59      struct hvm_virpic *pic= &plat->vpic;
    6.60      int callback_irq;
    6.61 @@ -194,7 +152,7 @@ asmlinkage void svm_intr_assist(void)
    6.62              /* let's inject this interrupt */
    6.63              TRACE_3D(TRC_VMX_INT, v->domain->domain_id, intr_vector, 0);
    6.64              svm_inject_extint(v, intr_vector, VMX_DELIVER_NO_ERROR_CODE);
    6.65 -            interrupt_post_injection(v, intr_vector, intr_type);
    6.66 +            hvm_interrupt_post(v, intr_vector, intr_type);
    6.67              break;
    6.68          case APIC_DM_SMI:
    6.69          case APIC_DM_NMI:
     7.1 --- a/xen/arch/x86/hvm/svm/svm.c	Tue Aug 15 17:03:06 2006 +0100
     7.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Tue Aug 15 18:20:03 2006 +0100
     7.3 @@ -54,8 +54,7 @@
     7.4  #define set_segment_register(name, value)  \
     7.5         __asm__ __volatile__ ( "movw %%ax ,%%" STR(name) "" : : "a" (value) )
     7.6  
     7.7 -/* 
     7.8 - * External functions, etc. We should move these to some suitable header file(s) */
     7.9 +/* External functions. We should move these to some suitable header file(s) */
    7.10  
    7.11  extern void do_nmi(struct cpu_user_regs *, unsigned long);
    7.12  extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
    7.13 @@ -72,12 +71,32 @@ static void svm_relinquish_guest_resourc
    7.14  static int svm_do_vmmcall_reset_to_realmode(struct vcpu *v,
    7.15          struct cpu_user_regs *regs);
    7.16  
    7.17 -
    7.18 -
    7.19 -extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
    7.20 -
    7.21 -/* Host save area and ASID glogal data */
    7.22 -struct svm_percore_globals svm_globals[NR_CPUS];
    7.23 +/* va of hardware host save area     */
    7.24 +static void *hsa[NR_CPUS] __read_mostly;
    7.25 +
    7.26 +/* vmcb used for extended host state */
    7.27 +static void *root_vmcb[NR_CPUS] __read_mostly;
    7.28 +
    7.29 +/* physical address of above for host VMSAVE/VMLOAD */
    7.30 +u64 root_vmcb_pa[NR_CPUS] __read_mostly;
    7.31 +
    7.32 +
    7.33 +/* ASID API */
    7.34 +enum {
    7.35 +    ASID_AVAILABLE = 0,
    7.36 +    ASID_INUSE,
    7.37 +    ASID_RETIRED
    7.38 +};
    7.39 +#define   INITIAL_ASID      0
    7.40 +#define   ASID_MAX          64
    7.41 + 
    7.42 +struct asid_pool {
    7.43 +    spinlock_t asid_lock;
    7.44 +    u32 asid[ASID_MAX];
    7.45 +};
    7.46 +
    7.47 +static DEFINE_PER_CPU(struct asid_pool, asid_pool);
    7.48 +
    7.49  
    7.50  /*
    7.51   * Initializes the POOL of ASID used by the guests per core.
    7.52 @@ -86,25 +105,25 @@ void asidpool_init(int core)
    7.53  {
    7.54      int i;
    7.55  
    7.56 -    spin_lock_init(&svm_globals[core].ASIDpool.asid_lock);
    7.57 +    spin_lock_init(&per_cpu(asid_pool,core).asid_lock);
    7.58  
    7.59      /* Host ASID is always in use */
    7.60 -    svm_globals[core].ASIDpool.asid[INITIAL_ASID] = ASID_INUSE;
    7.61 +    per_cpu(asid_pool,core).asid[INITIAL_ASID] = ASID_INUSE;
    7.62      for ( i = 1; i < ASID_MAX; i++ )
    7.63 -       svm_globals[core].ASIDpool.asid[i] = ASID_AVAILABLE;
    7.64 +       per_cpu(asid_pool,core).asid[i] = ASID_AVAILABLE;
    7.65  }
    7.66  
    7.67  
    7.68  /* internal function to get the next available ASID */
    7.69  static int asidpool_fetch_next(struct vmcb_struct *vmcb, int core)
    7.70  {
    7.71 -    int i;   
    7.72 +    int i;  
    7.73      for ( i = 1; i < ASID_MAX; i++ )
    7.74      {
    7.75 -        if ( svm_globals[core].ASIDpool.asid[i] == ASID_AVAILABLE )
    7.76 +        if ( per_cpu(asid_pool,core).asid[i] == ASID_AVAILABLE )
    7.77          {
    7.78              vmcb->guest_asid = i;
    7.79 -            svm_globals[core].ASIDpool.asid[i] = ASID_INUSE;
    7.80 +            per_cpu(asid_pool,core).asid[i] = ASID_INUSE;
    7.81              return i;
    7.82          }
    7.83      }
    7.84 @@ -125,43 +144,46 @@ static int asidpool_fetch_next(struct vm
    7.85  int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
    7.86                               int oldcore, int newcore )
    7.87  {
    7.88 -    int i; 
    7.89 +    int i;
    7.90      int res = 1;
    7.91      static unsigned long cnt=0;
    7.92  
    7.93 -    spin_lock(&svm_globals[oldcore].ASIDpool.asid_lock);
    7.94 +    spin_lock(&per_cpu(asid_pool,oldcore).asid_lock);
    7.95      if( retire_current && vmcb->guest_asid ) {
    7.96 -       svm_globals[oldcore].ASIDpool.asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
    7.97 +       per_cpu(asid_pool,oldcore).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
    7.98 +           ASID_RETIRED;
    7.99      }
   7.100 -    spin_unlock(&svm_globals[oldcore].ASIDpool.asid_lock);
   7.101 -    spin_lock(&svm_globals[newcore].ASIDpool.asid_lock);
   7.102 +    spin_unlock(&per_cpu(asid_pool,oldcore).asid_lock);
   7.103 +    spin_lock(&per_cpu(asid_pool,newcore).asid_lock);
   7.104      if( asidpool_fetch_next( vmcb, newcore ) < 0 ) {
   7.105          if (svm_dbg_on)
   7.106              printk( "SVM: tlb(%ld)\n", cnt++ );
   7.107          /* FLUSH the TLB and all retired slots are made available */ 
   7.108          vmcb->tlb_control = 1;
   7.109          for( i = 1; i < ASID_MAX; i++ ) {
   7.110 -            if( svm_globals[newcore].ASIDpool.asid[i] == ASID_RETIRED ) {
   7.111 -                svm_globals[newcore].ASIDpool.asid[i] = ASID_AVAILABLE;
   7.112 +            if( per_cpu(asid_pool,newcore).asid[i] == ASID_RETIRED ) {
   7.113 +                per_cpu(asid_pool,newcore).asid[i] = ASID_AVAILABLE;
   7.114              }
   7.115          }
   7.116          /* Get the First slot available */ 
   7.117          res = asidpool_fetch_next( vmcb, newcore ) > 0;
   7.118      }
   7.119 -    spin_unlock(&svm_globals[newcore].ASIDpool.asid_lock);
   7.120 +    spin_unlock(&per_cpu(asid_pool,newcore).asid_lock);
   7.121      return res;
   7.122  }
   7.123  
   7.124  void asidpool_retire( struct vmcb_struct *vmcb, int core )
   7.125  {
   7.126 -   spin_lock(&svm_globals[core].ASIDpool.asid_lock);
   7.127 +   spin_lock(&per_cpu(asid_pool,core).asid_lock);
   7.128     if( vmcb->guest_asid ) {
   7.129 -       svm_globals[core].ASIDpool.asid[ vmcb->guest_asid & (ASID_MAX-1) ] = ASID_RETIRED;
   7.130 +       per_cpu(asid_pool,core).asid[vmcb->guest_asid & (ASID_MAX-1)] = 
   7.131 +           ASID_RETIRED;
   7.132     }
   7.133 -   spin_unlock(&svm_globals[core].ASIDpool.asid_lock);
   7.134 +   spin_unlock(&per_cpu(asid_pool,core).asid_lock);
   7.135  }
   7.136  
   7.137 -static inline void svm_inject_exception(struct vcpu *v, int trap, int ev, int error_code)
   7.138 +static inline void svm_inject_exception(struct vcpu *v, int trap, 
   7.139 +                                        int ev, int error_code)
   7.140  {
   7.141      eventinj_t event;
   7.142      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.143 @@ -178,7 +200,7 @@ static inline void svm_inject_exception(
   7.144      vmcb->eventinj = event;
   7.145  }
   7.146  
   7.147 -void stop_svm(void)
   7.148 +static void stop_svm(void)
   7.149  {
   7.150      u32 eax, edx;    
   7.151      int cpu = smp_processor_id();
   7.152 @@ -189,22 +211,18 @@ void stop_svm(void)
   7.153      wrmsr(MSR_EFER, eax, edx);
   7.154   
   7.155      /* release the HSA */
   7.156 -    free_host_save_area( svm_globals[cpu].hsa );
   7.157 -    free_host_save_area( svm_globals[cpu].scratch_hsa );
   7.158 -    svm_globals[cpu].hsa    = NULL;
   7.159 -    svm_globals[cpu].hsa_pa = 0;
   7.160 -    svm_globals[cpu].scratch_hsa    = NULL;
   7.161 -    svm_globals[cpu].scratch_hsa_pa = 0;
   7.162 +    free_host_save_area(hsa[cpu]);
   7.163 +    hsa[cpu] = NULL;
   7.164      wrmsr(MSR_K8_VM_HSAVE_PA, 0, 0 );
   7.165  
   7.166 +    /* free up the root vmcb */
   7.167 +    free_vmcb(root_vmcb[cpu]);
   7.168 +    root_vmcb[cpu] = NULL;
   7.169 +    root_vmcb_pa[cpu] = 0;
   7.170 +
   7.171      printk("AMD SVM Extension is disabled.\n");
   7.172  }
   7.173  
   7.174 -int svm_initialize_guest_resources(struct vcpu *v)
   7.175 -{
   7.176 -    svm_final_setup_guest(v);
   7.177 -    return 1;
   7.178 -}
   7.179  
   7.180  static void svm_store_cpu_guest_regs(
   7.181      struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
   7.182 @@ -233,12 +251,16 @@ static void svm_store_cpu_guest_regs(
   7.183      }
   7.184  }
   7.185  
   7.186 -static void svm_load_cpu_guest_regs(
   7.187 -    struct vcpu *v, struct cpu_user_regs *regs)
   7.188 +static int svm_paging_enabled(struct vcpu *v)
   7.189  {
   7.190 -    svm_load_cpu_user_regs(v, regs);
   7.191 +    unsigned long cr0;
   7.192 +
   7.193 +    cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
   7.194 +
   7.195 +    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
   7.196  }
   7.197  
   7.198 +
   7.199  #define IS_CANO_ADDRESS(add) 1
   7.200  
   7.201  static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
   7.202 @@ -281,7 +303,6 @@ static inline int long_mode_do_msr_read(
   7.203      case MSR_SYSCALL_MASK:
   7.204           msr_content = vmcb->sfmask;
   7.205           break;
   7.206 -
   7.207      default:
   7.208          return 0;
   7.209      }
   7.210 @@ -296,7 +317,7 @@ static inline int long_mode_do_msr_read(
   7.211  
   7.212  static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
   7.213  {
   7.214 -    u64 msr_content = regs->eax | ((u64)regs->edx << 32); 
   7.215 +    u64 msr_content = regs->eax | ((u64)regs->edx << 32);
   7.216      struct vcpu *vc = current;
   7.217      struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
   7.218  
   7.219 @@ -318,7 +339,7 @@ static inline int long_mode_do_msr_write
   7.220  
   7.221          /* LME: 0 -> 1 */
   7.222          if ( msr_content & EFER_LME &&
   7.223 -             !test_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state) )
   7.224 +             !test_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state))
   7.225          {
   7.226              if ( svm_paging_enabled(vc) ||
   7.227                   !test_bit(SVM_CPU_STATE_PAE_ENABLED,
   7.228 @@ -385,7 +406,7 @@ static inline int long_mode_do_msr_write
   7.229      return 1;
   7.230  }
   7.231  
   7.232 -int svm_realmode(struct vcpu *v)
   7.233 +static int svm_realmode(struct vcpu *v)
   7.234  {
   7.235      unsigned long cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
   7.236      unsigned long eflags = v->arch.hvm_svm.vmcb->rflags;
   7.237 @@ -393,7 +414,7 @@ int svm_realmode(struct vcpu *v)
   7.238      return (eflags & X86_EFLAGS_VM) || !(cr0 & X86_CR0_PE);
   7.239  }
   7.240  
   7.241 -int svm_instruction_length(struct vcpu *v)
   7.242 +static int svm_instruction_length(struct vcpu *v)
   7.243  {
   7.244      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.245      unsigned long cr0 = vmcb->cr0, eflags = vmcb->rflags, mode;
   7.246 @@ -405,7 +426,7 @@ int svm_instruction_length(struct vcpu *
   7.247      return svm_instrlen(guest_cpu_user_regs(), mode);
   7.248  }
   7.249  
   7.250 -unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
   7.251 +static unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
   7.252  {
   7.253      switch ( num )
   7.254      {
   7.255 @@ -422,9 +443,34 @@ unsigned long svm_get_ctrl_reg(struct vc
   7.256  }
   7.257  
   7.258  
   7.259 +/* Make sure that xen intercepts any FP accesses from current */
   7.260 +static void svm_stts(struct vcpu *v) 
   7.261 +{
   7.262 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.263 +
   7.264 +    /*
   7.265 +     * If the guest does not have TS enabled then we must cause and handle an 
   7.266 +     * exception on first use of the FPU. If the guest *does* have TS enabled 
   7.267 +     * then this is not necessary: no FPU activity can occur until the guest 
   7.268 +     * clears CR0.TS, and we will initialise the FPU when that happens.
   7.269 +     */
   7.270 +    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
   7.271 +    {
   7.272 +        v->arch.hvm_svm.vmcb->exception_intercepts |= EXCEPTION_BITMAP_NM;
   7.273 +        vmcb->cr0 |= X86_CR0_TS;
   7.274 +    }
   7.275 +}
   7.276 +
   7.277 +
   7.278 +static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
   7.279 +{
   7.280 +    v->arch.hvm_svm.vmcb->tsc_offset = offset;
   7.281 +}
   7.282 +
   7.283 +
   7.284  /* SVM-specific intitialization code for VCPU application processors */
   7.285 -void svm_init_ap_context(struct vcpu_guest_context *ctxt, 
   7.286 -        int vcpuid, int trampoline_vector)
   7.287 +static void svm_init_ap_context(struct vcpu_guest_context *ctxt, 
   7.288 +                                int vcpuid, int trampoline_vector)
   7.289  {
   7.290      int i;
   7.291      struct vcpu *v, *bsp = current;
   7.292 @@ -453,7 +499,7 @@ void svm_init_ap_context(struct vcpu_gue
   7.293       * the code. We will execute this code in real mode. 
   7.294       */
   7.295      ctxt->user_regs.eip = 0x0;
   7.296 -    ctxt->user_regs.cs = (trampoline_vector << 8); 
   7.297 +    ctxt->user_regs.cs = (trampoline_vector << 8);
   7.298      ctxt->flags = VGCF_HVM_GUEST;
   7.299  }
   7.300  
   7.301 @@ -479,60 +525,8 @@ static void svm_init_hypercall_page(stru
   7.302      *(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
   7.303  }
   7.304  
   7.305 -int start_svm(void)
   7.306 -{
   7.307 -    u32 eax, ecx, edx;
   7.308 -    u32 phys_hsa_lo, phys_hsa_hi;   
   7.309 -    u64 phys_hsa;
   7.310 -    int cpu = smp_processor_id();
   7.311 - 
   7.312 -   /* Xen does not fill x86_capability words except 0. */
   7.313 -    ecx = cpuid_ecx(0x80000001);
   7.314 -    boot_cpu_data.x86_capability[5] = ecx;
   7.315 -    
   7.316 -    if (!(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)))
   7.317 -        return 0;
   7.318 -    svm_globals[cpu].hsa = alloc_host_save_area();
   7.319 -    if (! svm_globals[cpu].hsa)
   7.320 -        return 0;
   7.321 -    
   7.322 -    rdmsr(MSR_EFER, eax, edx);
   7.323 -    eax |= EFER_SVME;
   7.324 -    wrmsr(MSR_EFER, eax, edx);
   7.325 -    asidpool_init( cpu );    
   7.326 -    printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
   7.327 -
   7.328 -    /* Initialize the HSA for this core */
   7.329 -    phys_hsa = (u64) virt_to_maddr( svm_globals[cpu].hsa ); 
   7.330 -    phys_hsa_lo = (u32) phys_hsa;
   7.331 -    phys_hsa_hi = (u32) (phys_hsa >> 32);    
   7.332 -    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
   7.333 -    svm_globals[cpu].hsa_pa = phys_hsa;
   7.334 -  
   7.335 -    svm_globals[cpu].scratch_hsa    = alloc_host_save_area();
   7.336 -    svm_globals[cpu].scratch_hsa_pa = (u64)virt_to_maddr( svm_globals[cpu].scratch_hsa );
   7.337 -
   7.338 -    /* Setup HVM interfaces */
   7.339 -    hvm_funcs.disable = stop_svm;
   7.340 -
   7.341 -    hvm_funcs.initialize_guest_resources = svm_initialize_guest_resources;
   7.342 -    hvm_funcs.relinquish_guest_resources = svm_relinquish_guest_resources;
   7.343 -
   7.344 -    hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
   7.345 -    hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
   7.346 -
   7.347 -    hvm_funcs.realmode = svm_realmode;
   7.348 -    hvm_funcs.paging_enabled = svm_paging_enabled;
   7.349 -    hvm_funcs.instruction_length = svm_instruction_length;
   7.350 -    hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
   7.351 -    hvm_funcs.init_ap_context = svm_init_ap_context;
   7.352 -
   7.353 -    hvm_funcs.init_hypercall_page = svm_init_hypercall_page;
   7.354 -
   7.355 -    hvm_enabled = 1;    
   7.356 -
   7.357 -    return 1;
   7.358 -}
   7.359 +
   7.360 +
   7.361  
   7.362  int svm_dbg_on = 0;
   7.363  
   7.364 @@ -596,7 +590,7 @@ static inline int svm_do_debugout(unsign
   7.365      return 1;
   7.366  }
   7.367  
   7.368 -void save_svm_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *ctxt)
   7.369 +static void save_svm_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *ctxt)
   7.370  {
   7.371      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.372  
   7.373 @@ -615,7 +609,7 @@ void save_svm_cpu_user_regs(struct vcpu 
   7.374      ctxt->ds = vmcb->ds.sel;
   7.375  }
   7.376  
   7.377 -void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
   7.378 +static void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
   7.379  {
   7.380      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.381  
   7.382 @@ -629,7 +623,7 @@ void svm_store_cpu_user_regs(struct cpu_
   7.383  }
   7.384  
   7.385  /* XXX Use svm_load_cpu_guest_regs instead */
   7.386 -void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
   7.387 +static void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
   7.388  { 
   7.389      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.390      u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts;
   7.391 @@ -647,38 +641,14 @@ void svm_load_cpu_user_regs(struct vcpu 
   7.392          *intercepts &= ~EXCEPTION_BITMAP_DB;
   7.393  }
   7.394  
   7.395 -int svm_paging_enabled(struct vcpu *v)
   7.396 -{
   7.397 -    unsigned long cr0;
   7.398 -
   7.399 -    cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
   7.400 -
   7.401 -    return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
   7.402 -}
   7.403 -
   7.404 -
   7.405 -/* Make sure that xen intercepts any FP accesses from current */
   7.406 -void svm_stts(struct vcpu *v) 
   7.407 +static void svm_load_cpu_guest_regs(
   7.408 +    struct vcpu *v, struct cpu_user_regs *regs)
   7.409  {
   7.410 -    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   7.411 -
   7.412 -    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
   7.413 -    if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
   7.414 -        return;
   7.415 -
   7.416 -    /*
   7.417 -     * If the guest does not have TS enabled then we must cause and handle an 
   7.418 -     * exception on first use of the FPU. If the guest *does* have TS enabled 
   7.419 -     * then this is not necessary: no FPU activity can occur until the guest 
   7.420 -     * clears CR0.TS, and we will initialise the FPU when that happens.
   7.421 -     */
   7.422 -    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
   7.423 -    {
   7.424 -        v->arch.hvm_svm.vmcb->exception_intercepts |= EXCEPTION_BITMAP_NM;
   7.425 -        vmcb->cr0 |= X86_CR0_TS;
   7.426 -    }
   7.427 +    svm_load_cpu_user_regs(v, regs);
   7.428  }
   7.429  
   7.430 +
   7.431 +
   7.432  static void arch_svm_do_launch(struct vcpu *v) 
   7.433  {
   7.434      cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;
   7.435 @@ -708,9 +678,9 @@ static void arch_svm_do_launch(struct vc
   7.436      {
   7.437      	u16	cs_sel = regs->cs;
   7.438      	/*
   7.439 -	 * This is the launch of an AP; set state so that we begin executing
   7.440 +         * This is the launch of an AP; set state so that we begin executing
   7.441      	 * the trampoline code in real-mode.
   7.442 -	 */
   7.443 +         */
   7.444      	svm_do_vmmcall_reset_to_realmode(v, regs); 	
   7.445      	/* Adjust the state to execute the trampoline code.*/
   7.446      	v->arch.hvm_svm.vmcb->rip = 0;
   7.447 @@ -731,6 +701,7 @@ static void svm_freeze_time(struct vcpu 
   7.448      }
   7.449  }
   7.450  
   7.451 +
   7.452  static void svm_ctxt_switch_from(struct vcpu *v)
   7.453  {
   7.454      svm_freeze_time(v);
   7.455 @@ -738,7 +709,7 @@ static void svm_ctxt_switch_from(struct 
   7.456  
   7.457  static void svm_ctxt_switch_to(struct vcpu *v)
   7.458  {
   7.459 -#if __x86_64__
   7.460 +#ifdef  __x86_64__
   7.461      /* 
   7.462       * This is required, because VMRUN does consistency check
   7.463       * and some of the DOM0 selectors are pointing to 
   7.464 @@ -751,7 +722,8 @@ static void svm_ctxt_switch_to(struct vc
   7.465  #endif
   7.466  }
   7.467  
   7.468 -void svm_final_setup_guest(struct vcpu *v)
   7.469 +
   7.470 +static void svm_final_setup_guest(struct vcpu *v)
   7.471  {
   7.472      struct domain *d = v->domain;
   7.473      struct vcpu *vc;
   7.474 @@ -778,15 +750,82 @@ void svm_final_setup_guest(struct vcpu *
   7.475       * Put the domain in shadow mode even though we're going to be using
   7.476       * the shared 1:1 page table initially. It shouldn't hurt 
   7.477       */
   7.478 -    shadow_mode_enable(d,
   7.479 -                       SHM_enable|SHM_refcounts|
   7.480 +    shadow_mode_enable(d, SHM_enable|SHM_refcounts|
   7.481                         SHM_translate|SHM_external|SHM_wr_pt_pte);
   7.482  }
   7.483  
   7.484  
   7.485 +static int svm_initialize_guest_resources(struct vcpu *v)
   7.486 +{
   7.487 +    svm_final_setup_guest(v);
   7.488 +    return 1;
   7.489 +}
   7.490 +
   7.491 +
   7.492 +int start_svm(void)
   7.493 +{
   7.494 +    u32 eax, ecx, edx;
   7.495 +    u32 phys_hsa_lo, phys_hsa_hi;   
   7.496 +    u64 phys_hsa;
   7.497 +    int cpu = smp_processor_id();
   7.498 + 
   7.499 +   /* Xen does not fill x86_capability words except 0. */
   7.500 +    ecx = cpuid_ecx(0x80000001);
   7.501 +    boot_cpu_data.x86_capability[5] = ecx;
   7.502 +    
   7.503 +    if (!(test_bit(X86_FEATURE_SVME, &boot_cpu_data.x86_capability)))
   7.504 +        return 0;
   7.505 +    
   7.506 +    if (!(hsa[cpu] = alloc_host_save_area()))
   7.507 +        return 0;
   7.508 +    
   7.509 +    rdmsr(MSR_EFER, eax, edx);
   7.510 +    eax |= EFER_SVME;
   7.511 +    wrmsr(MSR_EFER, eax, edx);
   7.512 +    asidpool_init( cpu );    
   7.513 +    printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
   7.514 +
   7.515 +    /* Initialize the HSA for this core */
   7.516 +    phys_hsa = (u64) virt_to_maddr(hsa[cpu]);
   7.517 +    phys_hsa_lo = (u32) phys_hsa;
   7.518 +    phys_hsa_hi = (u32) (phys_hsa >> 32);    
   7.519 +    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
   7.520 +  
   7.521 +    if (!(root_vmcb[cpu] = alloc_vmcb())) 
   7.522 +        return 0;
   7.523 +    root_vmcb_pa[cpu] = virt_to_maddr(root_vmcb[cpu]);
   7.524 +
   7.525 +    if (cpu == 0)
   7.526 +        setup_vmcb_dump();
   7.527 +
   7.528 +    /* Setup HVM interfaces */
   7.529 +    hvm_funcs.disable = stop_svm;
   7.530 +
   7.531 +    hvm_funcs.initialize_guest_resources = svm_initialize_guest_resources;
   7.532 +    hvm_funcs.relinquish_guest_resources = svm_relinquish_guest_resources;
   7.533 +
   7.534 +    hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
   7.535 +    hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
   7.536 +
   7.537 +    hvm_funcs.realmode = svm_realmode;
   7.538 +    hvm_funcs.paging_enabled = svm_paging_enabled;
   7.539 +    hvm_funcs.instruction_length = svm_instruction_length;
   7.540 +    hvm_funcs.get_guest_ctrl_reg = svm_get_ctrl_reg;
   7.541 +
   7.542 +    hvm_funcs.stts = svm_stts;
   7.543 +    hvm_funcs.set_tsc_offset = svm_set_tsc_offset;
   7.544 +
   7.545 +    hvm_funcs.init_ap_context = svm_init_ap_context;
   7.546 +    hvm_funcs.init_hypercall_page = svm_init_hypercall_page;
   7.547 +
   7.548 +    hvm_enabled = 1;
   7.549 +
   7.550 +    return 1;
   7.551 +}
   7.552 +
   7.553 +
   7.554  static void svm_relinquish_guest_resources(struct domain *d)
   7.555  {
   7.556 -    extern void destroy_vmcb(struct arch_svm_struct *); /* XXX */
   7.557      struct vcpu *v;
   7.558  
   7.559      for_each_vcpu ( d, v )
   7.560 @@ -817,11 +856,25 @@ static void svm_relinquish_guest_resourc
   7.561  }
   7.562  
   7.563  
   7.564 +static void svm_migrate_timers(struct vcpu *v)
   7.565 +{
   7.566 +    struct periodic_time *pt = 
   7.567 +        &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
   7.568 +
   7.569 +    if ( pt->enabled ) {
   7.570 +        migrate_timer( &pt->timer, v->processor );
   7.571 +        migrate_timer( &v->arch.hvm_svm.hlt_timer, v->processor );
   7.572 +    }
   7.573 +    if ( hvm_apic_support(v->domain) && VLAPIC( v ))
   7.574 +        migrate_timer( &(VLAPIC(v)->vlapic_timer ), v->processor );
   7.575 +}
   7.576 +
   7.577 +
   7.578  void arch_svm_do_resume(struct vcpu *v) 
   7.579  {
   7.580      /* pinning VCPU to a different core? */
   7.581      if ( v->arch.hvm_svm.launch_core == smp_processor_id()) {
   7.582 -        svm_do_resume( v );
   7.583 +        hvm_do_resume( v );
   7.584          reset_stack_and_jump( svm_asm_do_resume );
   7.585      }
   7.586      else {
   7.587 @@ -830,24 +883,12 @@ void arch_svm_do_resume(struct vcpu *v)
   7.588                  v->arch.hvm_svm.launch_core, smp_processor_id() );
   7.589          v->arch.hvm_svm.launch_core = smp_processor_id();
   7.590          svm_migrate_timers( v );
   7.591 -        svm_do_resume( v );
   7.592 +        hvm_do_resume( v );
   7.593          reset_stack_and_jump( svm_asm_do_resume );
   7.594      }
   7.595  }
   7.596  
   7.597  
   7.598 -void svm_migrate_timers(struct vcpu *v)
   7.599 -{
   7.600 -    struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
   7.601 -
   7.602 -    if ( pt->enabled ) {
   7.603 -        migrate_timer( &pt->timer, v->processor );
   7.604 -        migrate_timer( &v->arch.hvm_svm.hlt_timer, v->processor );
   7.605 -    }
   7.606 -    if ( hvm_apic_support(v->domain) && VLAPIC( v ))
   7.607 -        migrate_timer( &(VLAPIC(v)->vlapic_timer ), v->processor );
   7.608 -}
   7.609 -
   7.610  
   7.611  static int svm_do_page_fault(unsigned long va, struct cpu_user_regs *regs) 
   7.612  {
   7.613 @@ -888,7 +929,7 @@ static int svm_do_page_fault(unsigned lo
   7.614              inst_len = svm_instruction_length(v);
   7.615              if (inst_len == -1)
   7.616              {
   7.617 -                printf("%s: INST_LEN - Unable to decode properly.\n", __func__);
   7.618 +                printf("%s: INST_LEN - Unable to decode properly\n", __func__);
   7.619                  domain_crash_synchronous();
   7.620              }
   7.621  
   7.622 @@ -1137,7 +1178,7 @@ static inline unsigned long *get_reg_p(u
   7.623      case SVM_REG_ESP:
   7.624          reg_p = (unsigned long *)&vmcb->rsp;
   7.625          break;
   7.626 -#if __x86_64__
   7.627 +#ifdef __x86_64__
   7.628      case SVM_REG_R8:
   7.629          reg_p = (unsigned long *)&regs->r8;
   7.630          break;
   7.631 @@ -1195,7 +1236,7 @@ static void svm_dr_access (struct vcpu *
   7.632      unsigned long *reg_p = 0;
   7.633      unsigned int gpreg = 0;
   7.634      unsigned long eip;
   7.635 -    int inst_len; 
   7.636 +    int inst_len;
   7.637      int index;
   7.638      struct vmcb_struct *vmcb;
   7.639      u8 buffer[MAX_INST_LEN];
   7.640 @@ -1264,7 +1305,7 @@ static void svm_get_prefix_info(
   7.641          case 0xf2: /* REPNZ */
   7.642          case 0xf0: /* LOCK */
   7.643          case 0x66: /* data32 */
   7.644 -#if __x86_64__
   7.645 +#ifdef __x86_64__
   7.646              /* REX prefixes */
   7.647          case 0x40:
   7.648          case 0x41:
   7.649 @@ -1330,7 +1371,7 @@ static inline int svm_get_io_address(
   7.650  
   7.651      info.bytes = vmcb->exitinfo1;
   7.652  
   7.653 -    /* If we're in long mode, we shouldn't check the segment presence and limit */
   7.654 +    /* If we're in long mode, we shouldn't check the segment presence & limit */
   7.655      long_mode = vmcb->cs.attributes.fields.l && vmcb->efer & EFER_LMA;
   7.656  
   7.657      /* d field of cs.attributes is 1 for 32-bit, 0 for 16 or 64 bit. 
   7.658 @@ -1832,7 +1873,8 @@ static int mov_to_cr(int gpreg, int cr, 
   7.659                   * arch->shadow_table should hold the next CR3 for shadow
   7.660                   */
   7.661  
   7.662 -                HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
   7.663 +                HVM_DBG_LOG(DBG_LEVEL_VMMU, 
   7.664 +                            "Update CR3 value = %lx, mfn = %lx",
   7.665                              v->arch.hvm_svm.cpu_cr3, mfn);
   7.666  #endif
   7.667              }
   7.668 @@ -1847,7 +1889,7 @@ static int mov_to_cr(int gpreg, int cr, 
   7.669                       * it must enable PG after that, and it is a 32-bit PAE
   7.670                       * guest */
   7.671  
   7.672 -                    if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L3) )
   7.673 +                    if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L3))
   7.674                      {
   7.675                          printk("Unsupported guest paging levels\n");
   7.676                          domain_crash_synchronous();
   7.677 @@ -1855,8 +1897,7 @@ static int mov_to_cr(int gpreg, int cr, 
   7.678                  }
   7.679                  else
   7.680                  {
   7.681 -                    if ( !shadow_set_guest_paging_levels(v->domain,
   7.682 -                                                            PAGING_L4) )
   7.683 +                    if ( !shadow_set_guest_paging_levels(v->domain, PAGING_L4))
   7.684                      {
   7.685                          printk("Unsupported guest paging levels\n");
   7.686                          domain_crash_synchronous();
   7.687 @@ -1920,9 +1961,9 @@ static int svm_cr_access(struct vcpu *v,
   7.688      ASSERT(vmcb);
   7.689  
   7.690      inst_copy_from_guest(buffer, svm_rip2pointer(vmcb), sizeof(buffer));
   7.691 -    /* get index to first actual instruction byte - as we will need to know where the 
   7.692 -     * prefix lives later on
   7.693 -     */
   7.694 +
   7.695 +    /* get index to first actual instruction byte - as we will need to know 
   7.696 +       where the prefix lives later on */
   7.697      index = skip_prefix_bytes(buffer, sizeof(buffer));
   7.698      
   7.699      if (type == TYPE_MOV_TO_CR) 
   7.700 @@ -2071,7 +2112,7 @@ static inline void svm_do_msr_access(
   7.701          switch (regs->ecx)
   7.702          {
   7.703          case MSR_IA32_TIME_STAMP_COUNTER:
   7.704 -            svm_set_guest_time(v, msr_content);
   7.705 +            hvm_set_guest_time(v, msr_content);
   7.706              break;
   7.707          case MSR_IA32_SYSENTER_CS:
   7.708              vmcb->sysenter_cs = msr_content;
   7.709 @@ -2116,7 +2157,7 @@ static inline void svm_vmexit_do_hlt(str
   7.710  
   7.711      /* check for interrupt not handled or new interrupt */
   7.712      if ( vmcb->vintr.fields.irq || cpu_has_pending_irq(v) )
   7.713 -       return; 
   7.714 +       return;
   7.715  
   7.716      if ( !v->vcpu_id )
   7.717          next_pit = get_scheduled(v, pt->irq, pt);
   7.718 @@ -2138,8 +2179,8 @@ static void svm_vmexit_do_invd(struct vm
   7.719       * have cache-snooping that solves it anyways. -- Mats P. 
   7.720       */
   7.721  
   7.722 -    /* Tell the user that we did this - just in case someone runs some really weird 
   7.723 -     * operating system and wants to know why it's not working as it should...
   7.724 +    /* Tell the user that we did this - just in case someone runs some really 
   7.725 +     * weird operating system and wants to know why it's not working...
   7.726       */
   7.727      printk("INVD instruction intercepted - ignored\n");
   7.728      
   7.729 @@ -2198,7 +2239,8 @@ void svm_handle_invlpg(const short invlp
   7.730       */
   7.731      if (inst_copy_from_guest(opcode, svm_rip2pointer(vmcb), length) < length)
   7.732      {
   7.733 -        printk("svm_handle_invlpg (): Error reading memory %d bytes\n", length);
   7.734 +        printk("svm_handle_invlpg (): Error reading memory %d bytes\n", 
   7.735 +               length);
   7.736         __hvm_bug(regs);
   7.737      }
   7.738  
   7.739 @@ -2463,7 +2505,7 @@ void svm_dump_host_regs(const char *from
   7.740  
   7.741      __asm__ __volatile__ ("\tmov %%cr0,%0\n"
   7.742                            "\tmov %%cr3,%1\n"
   7.743 -                          : "=r" (cr0), "=r"(cr3)); 
   7.744 +                          : "=r" (cr0), "=r"(cr3));
   7.745      printf("%s: pt = %lx, cr3 = %lx, cr0 = %lx\n", __func__, pt, cr3, cr0);
   7.746  }
   7.747  
   7.748 @@ -2626,18 +2668,22 @@ void walk_shadow_and_guest_pt(unsigned l
   7.749  
   7.750      spte = l1e_empty();
   7.751  
   7.752 -    /* This is actually overkill - we only need to make sure the hl2 is in-sync. */
   7.753 +    /* This is actually overkill - we only need to ensure the hl2 is in-sync.*/
   7.754      shadow_sync_va(v, gva);
   7.755  
   7.756      gpte.l1 = 0;
   7.757 -    __copy_from_user(&gpte, &linear_pg_table[ l1_linear_offset(gva) ], sizeof(gpte) );
   7.758 +    __copy_from_user(&gpte, &linear_pg_table[ l1_linear_offset(gva) ],
   7.759 +                     sizeof(gpte) );
   7.760      printk( "G-PTE = %x, flags=%x\n", gpte.l1, l1e_get_flags(gpte) );
   7.761 -    __copy_from_user( &spte, &phys_to_machine_mapping[ l1e_get_pfn( gpte ) ], 
   7.762 +    __copy_from_user( &spte, &phys_to_machine_mapping[ l1e_get_pfn( gpte ) ],
   7.763                        sizeof(spte) );
   7.764      printk( "S-PTE = %x, flags=%x\n", spte.l1, l1e_get_flags(spte));
   7.765  }
   7.766  #endif /* SVM_WALK_GUEST_PAGES */
   7.767  
   7.768 +
   7.769 +
   7.770 +
   7.771  asmlinkage void svm_vmexit_handler(struct cpu_user_regs regs)
   7.772  {
   7.773      unsigned int exit_reason;
   7.774 @@ -2654,6 +2700,13 @@ asmlinkage void svm_vmexit_handler(struc
   7.775  
   7.776      vmcb->tlb_control = 1;
   7.777  
   7.778 +
   7.779 +    if (exit_reason == VMEXIT_INVALID)
   7.780 +    {
   7.781 +        svm_dump_vmcb(__func__, vmcb);
   7.782 +        domain_crash_synchronous();
   7.783 +    }
   7.784 +
   7.785  #ifdef SVM_EXTRA_DEBUG
   7.786  {
   7.787  #if defined(__i386__)
   7.788 @@ -2666,8 +2719,8 @@ asmlinkage void svm_vmexit_handler(struc
   7.789      {
   7.790          if (svm_paging_enabled(v) && !mmio_space(gva_to_gpa(vmcb->exitinfo2)))
   7.791          {
   7.792 -            printk("I%08ld,ExC=%s(%d),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx, gpa=%llx\n", 
   7.793 -                    intercepts_counter,
   7.794 +            printk("I%08ld,ExC=%s(%d),IP=%x:%llx,I1=%llx,I2=%llx,INT=%llx, "
   7.795 +                   "gpa=%llx\n", intercepts_counter,
   7.796                      exit_reasons[exit_reason], exit_reason, regs.cs,
   7.797  		    (unsigned long long) regs.rip,
   7.798  		    (unsigned long long) vmcb->exitinfo1,
   7.799 @@ -2750,13 +2803,6 @@ asmlinkage void svm_vmexit_handler(struc
   7.800  }
   7.801  #endif /* SVM_EXTRA_DEBUG */
   7.802  
   7.803 -    if (exit_reason == -1)
   7.804 -    {
   7.805 -        svm_dump_vmcb(__func__, vmcb);
   7.806 -        printk("%s: exit_reason == -1 - Did someone clobber the VMCB\n", 
   7.807 -                __func__);
   7.808 -        domain_crash_synchronous();
   7.809 -    }
   7.810  
   7.811      perfc_incra(svmexits, exit_reason);
   7.812      eip = vmcb->rip;
   7.813 @@ -3011,7 +3057,7 @@ asmlinkage void svm_vmexit_handler(struc
   7.814  #ifdef SVM_EXTRA_DEBUG
   7.815      if (do_debug) 
   7.816      {
   7.817 -        printk("%s: Done switch on vmexit_code\n", __func__); 
   7.818 +        printk("%s: Done switch on vmexit_code\n", __func__);
   7.819          svm_dump_regs(__func__, &regs);
   7.820      }
   7.821  
   7.822 @@ -3058,9 +3104,6 @@ asmlinkage void svm_asid(void)
   7.823          v->arch.hvm_svm.asid_core = v->arch.hvm_svm.launch_core;
   7.824          clear_bit( ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags );
   7.825      }
   7.826 -
   7.827 -    /* make sure the HSA is set for the current core */
   7.828 -    set_hsa_to_guest( &v->arch.hvm_svm );
   7.829  }
   7.830  
   7.831  /*
     8.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Tue Aug 15 17:03:06 2006 +0100
     8.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Tue Aug 15 18:20:03 2006 +0100
     8.3 @@ -35,72 +35,61 @@
     8.4  #include <xen/event.h>
     8.5  #include <xen/kernel.h>
     8.6  #include <xen/domain_page.h>
     8.7 +#include <xen/keyhandler.h>
     8.8  
     8.9 -extern struct svm_percore_globals svm_globals[];
    8.10  extern int svm_dbg_on;
    8.11  extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
    8.12                                    int oldcore, int newcore);
    8.13 -extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
    8.14 -
    8.15 -#define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
    8.16  
    8.17  #define GUEST_SEGMENT_LIMIT 0xffffffff
    8.18  
    8.19  #define IOPM_SIZE   (12 * 1024)
    8.20  #define MSRPM_SIZE  (8  * 1024)
    8.21  
    8.22 +/* VMCBs and HSAs are architecturally defined to be a 4K page each */
    8.23 +#define VMCB_ORDER 0 
    8.24 +#define HSA_ORDER  0 
    8.25 +
    8.26 +
    8.27  struct vmcb_struct *alloc_vmcb(void) 
    8.28  {
    8.29 -    struct vmcb_struct *vmcb = NULL;
    8.30 -    unsigned int order;
    8.31 -    order = get_order_from_bytes(sizeof(struct vmcb_struct)); 
    8.32 -    ASSERT(order >= 0);
    8.33 -    vmcb = alloc_xenheap_pages(order);
    8.34 -    ASSERT(vmcb);
    8.35 +    struct vmcb_struct *vmcb = alloc_xenheap_pages(VMCB_ORDER);
    8.36  
    8.37 -    if (vmcb)
    8.38 -        memset(vmcb, 0, sizeof(struct vmcb_struct));
    8.39 +    if (!vmcb) {
    8.40 +        printk("Warning: failed to allocate vmcb.\n");
    8.41 +        return NULL;
    8.42 +    }
    8.43  
    8.44 +    memset(vmcb, 0, (PAGE_SIZE << VMCB_ORDER));
    8.45      return vmcb;
    8.46  }
    8.47  
    8.48  
    8.49  void free_vmcb(struct vmcb_struct *vmcb)
    8.50  {
    8.51 -    unsigned int order;
    8.52 -
    8.53 -    order = get_order_from_bytes(sizeof(struct vmcb_struct));
    8.54      ASSERT(vmcb);
    8.55 -
    8.56 -    if (vmcb)
    8.57 -        free_xenheap_pages(vmcb, order);
    8.58 +    free_xenheap_pages(vmcb, VMCB_ORDER);
    8.59  }
    8.60  
    8.61  
    8.62  struct host_save_area *alloc_host_save_area(void)
    8.63  {
    8.64 -    unsigned int order = 0;
    8.65 -    struct host_save_area *hsa = NULL;
    8.66 +    struct host_save_area *hsa = alloc_xenheap_pages(HSA_ORDER);
    8.67  
    8.68 -    hsa = alloc_xenheap_pages(order);
    8.69 -    ASSERT(hsa);
    8.70 +    if (!hsa) {
    8.71 +        printk("Warning: failed to allocate vmcb.\n");
    8.72 +        return NULL;
    8.73 +    }
    8.74  
    8.75 -    if (hsa)
    8.76 -        memset(hsa, 0, PAGE_SIZE);
    8.77 -
    8.78 +    memset(hsa, 0, (PAGE_SIZE << HSA_ORDER));
    8.79      return hsa;
    8.80  }
    8.81  
    8.82  
    8.83  void free_host_save_area(struct host_save_area *hsa)
    8.84  {
    8.85 -    unsigned int order;
    8.86 -
    8.87 -    order = get_order_from_bytes(PAGE_SIZE);
    8.88      ASSERT(hsa);
    8.89 -
    8.90 -    if (hsa)
    8.91 -        free_xenheap_pages(hsa, order);
    8.92 +    free_xenheap_pages(hsa, HSA_ORDER);
    8.93  }
    8.94  
    8.95  
    8.96 @@ -187,7 +176,7 @@ static int construct_init_vmcb_guest(str
    8.97      vmcb->cs.sel = regs->cs;
    8.98      vmcb->es.sel = regs->es;
    8.99      vmcb->ss.sel = regs->ss;
   8.100 -    vmcb->ds.sel = regs->ds; 
   8.101 +    vmcb->ds.sel = regs->ds;
   8.102      vmcb->fs.sel = regs->fs;
   8.103      vmcb->gs.sel = regs->gs;
   8.104  
   8.105 @@ -221,7 +210,7 @@ static int construct_init_vmcb_guest(str
   8.106      attrib.fields.g = 1; /* 4K pages in limit */
   8.107  
   8.108      /* Data selectors */
   8.109 -    vmcb->es.attributes = attrib; 
   8.110 +    vmcb->es.attributes = attrib;
   8.111      vmcb->ss.attributes = attrib;
   8.112      vmcb->ds.attributes = attrib;
   8.113      vmcb->fs.attributes = attrib;
   8.114 @@ -257,7 +246,7 @@ static int construct_init_vmcb_guest(str
   8.115  
   8.116      /* CR3 is set in svm_final_setup_guest */
   8.117  
   8.118 -    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) :); 
   8.119 +    __asm__ __volatile__ ("mov %%cr4,%0" : "=r" (crn) :);
   8.120      crn &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
   8.121      arch_svm->cpu_shadow_cr4 = crn;
   8.122      vmcb->cr4 = crn | SVM_CR4_HOST_MASK;
   8.123 @@ -306,7 +295,8 @@ void destroy_vmcb(struct arch_svm_struct
   8.124   * construct the vmcb.
   8.125   */
   8.126  
   8.127 -int construct_vmcb(struct arch_svm_struct *arch_svm, struct cpu_user_regs *regs)
   8.128 +int construct_vmcb(struct arch_svm_struct *arch_svm, 
   8.129 +                   struct cpu_user_regs *regs)
   8.130  {
   8.131      int error;
   8.132      long rc=0;
   8.133 @@ -320,7 +310,9 @@ int construct_vmcb(struct arch_svm_struc
   8.134      }
   8.135  
   8.136      /* update the HSA for the current Core */
   8.137 +#if 0
   8.138      set_hsa_to_guest( arch_svm );
   8.139 +#endif
   8.140      arch_svm->vmcb_pa  = (u64) virt_to_maddr(arch_svm->vmcb);
   8.141  
   8.142      if ((error = construct_vmcb_controls(arch_svm))) 
   8.143 @@ -359,7 +351,7 @@ void svm_do_launch(struct vcpu *v)
   8.144      ASSERT(vmcb);
   8.145  
   8.146      /* Update CR3, GDT, LDT, TR */
   8.147 -    svm_stts(v);
   8.148 +    hvm_stts(v);
   8.149  
   8.150      /* current core is the one we intend to perform the VMRUN on */
   8.151      v->arch.hvm_svm.launch_core = v->arch.hvm_svm.asid_core = core;
   8.152 @@ -393,10 +385,8 @@ void svm_do_launch(struct vcpu *v)
   8.153          printk("%s: phys_table   = %lx\n", __func__, pt);
   8.154      }
   8.155  
   8.156 -    if ( svm_paging_enabled(v) )
   8.157 -        vmcb->cr3 = pagetable_get_paddr(v->arch.guest_table);
   8.158 -    else
   8.159 -        vmcb->cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
   8.160 +    /* At launch we always use the phys_table */
   8.161 +    vmcb->cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
   8.162  
   8.163      if (svm_dbg_on) 
   8.164      {
   8.165 @@ -410,7 +400,7 @@ void svm_do_launch(struct vcpu *v)
   8.166  
   8.167      v->arch.hvm_svm.saved_irq_vector = -1;
   8.168  
   8.169 -    svm_set_guest_time(v, 0);
   8.170 +    hvm_set_guest_time(v, 0);
   8.171  	
   8.172      if (svm_dbg_on)
   8.173          svm_dump_vmcb(__func__, vmcb);
   8.174 @@ -419,61 +409,12 @@ void svm_do_launch(struct vcpu *v)
   8.175  }
   8.176  
   8.177  
   8.178 -void set_hsa_to_guest( struct arch_svm_struct *arch_svm ) 
   8.179 -{
   8.180 -  arch_svm->host_save_pa = svm_globals[ smp_processor_id() ].scratch_hsa_pa;
   8.181 -}
   8.182  
   8.183 -/* 
   8.184 - * Resume the guest.
   8.185 - */
   8.186 -/* XXX svm_do_resume and vmx_do_resume are remarkably similar; could
   8.187 -   they be unified? */
   8.188 -void svm_do_resume(struct vcpu *v) 
   8.189 -{
   8.190 -    struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
   8.191 -    ioreq_t *p;
   8.192 -
   8.193 -    svm_stts(v);
   8.194 -
   8.195 -    /* pick up the elapsed PIT ticks and re-enable pit_timer */
   8.196 -    if ( pt->enabled && pt->first_injected ) {
   8.197 -        if ( v->arch.hvm_vcpu.guest_time ) {
   8.198 -            svm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
   8.199 -            v->arch.hvm_vcpu.guest_time = 0;
   8.200 -        }
   8.201 -        pickup_deactive_ticks(pt);
   8.202 -    }
   8.203 -
   8.204 -    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
   8.205 -    wait_on_xen_event_channel(v->arch.hvm.xen_port,
   8.206 -                              p->state != STATE_IOREQ_READY &&
   8.207 -                              p->state != STATE_IOREQ_INPROCESS);
   8.208 -    if ( p->state == STATE_IORESP_READY )
   8.209 -        hvm_io_assist(v);
   8.210 -    if ( p->state != STATE_INVALID ) {
   8.211 -        printf("Weird HVM iorequest state %d.\n", p->state);
   8.212 -        domain_crash(v->domain);
   8.213 -    }
   8.214 -}
   8.215 -
   8.216 -void svm_launch_fail(unsigned long eflags)
   8.217 -{
   8.218 -    BUG();
   8.219 -}
   8.220 -
   8.221 -
   8.222 -void svm_resume_fail(unsigned long eflags)
   8.223 -{
   8.224 -    BUG();
   8.225 -}
   8.226 -
   8.227 -
   8.228 -void svm_dump_sel(char *name, segment_selector_t *s)
   8.229 +static void svm_dump_sel(char *name, segment_selector_t *s)
   8.230  {
   8.231      printf("%s: sel=0x%04x, attr=0x%04x, limit=0x%08x, base=0x%016llx\n", 
   8.232             name, s->sel, s->attributes.bytes, s->limit,
   8.233 -	   (unsigned long long)s->base);
   8.234 +           (unsigned long long)s->base);
   8.235  }
   8.236  
   8.237  
   8.238 @@ -483,9 +424,10 @@ void svm_dump_vmcb(const char *from, str
   8.239      printf("Size of VMCB = %d, address = %p\n", 
   8.240              (int) sizeof(struct vmcb_struct), vmcb);
   8.241  
   8.242 -    printf("cr_intercepts = 0x%08x dr_intercepts = 0x%08x exception_intercepts "
   8.243 -            "= 0x%08x\n", vmcb->cr_intercepts, vmcb->dr_intercepts, 
   8.244 -            vmcb->exception_intercepts);
   8.245 +    printf("cr_intercepts = 0x%08x dr_intercepts = 0x%08x "
   8.246 +           "exception_intercepts = 0x%08x\n", 
   8.247 +           vmcb->cr_intercepts, vmcb->dr_intercepts, 
   8.248 +           vmcb->exception_intercepts);
   8.249      printf("general1_intercepts = 0x%08x general2_intercepts = 0x%08x\n", 
   8.250             vmcb->general1_intercepts, vmcb->general2_intercepts);
   8.251      printf("iopm_base_pa = %016llx msrpm_base_pa = 0x%016llx tsc_offset = "
   8.252 @@ -519,7 +461,8 @@ void svm_dump_vmcb(const char *from, str
   8.253      printf("DR6 = 0x%016llx, DR7 = 0x%016llx\n", 
   8.254             (unsigned long long) vmcb->dr6, (unsigned long long) vmcb->dr7);
   8.255      printf("CSTAR = 0x%016llx SFMask = 0x%016llx\n",
   8.256 -           (unsigned long long) vmcb->cstar, (unsigned long long) vmcb->sfmask);
   8.257 +           (unsigned long long) vmcb->cstar, 
   8.258 +           (unsigned long long) vmcb->sfmask);
   8.259      printf("KernGSBase = 0x%016llx PAT = 0x%016llx \n", 
   8.260             (unsigned long long) vmcb->kerngsbase,
   8.261  	   (unsigned long long) vmcb->g_pat);
   8.262 @@ -537,6 +480,38 @@ void svm_dump_vmcb(const char *from, str
   8.263      svm_dump_sel("TR", &vmcb->tr);
   8.264  }
   8.265  
   8.266 +static void vmcb_dump(unsigned char ch)
   8.267 +{
   8.268 +    struct domain *d;
   8.269 +    struct vcpu *v;
   8.270 +    
   8.271 +    printk("*********** VMCB Areas **************\n");
   8.272 +    for_each_domain(d) {
   8.273 +        printk("\n>>> Domain %d <<<\n", d->domain_id);
   8.274 +        for_each_vcpu(d, v) {
   8.275 +
   8.276 +            /* 
   8.277 +             * Presumably, if a domain is not an HVM guest,
   8.278 +             * the very first CPU will not pass this test
   8.279 +             */
   8.280 +            if (!hvm_guest(v)) {
   8.281 +                printk("\t\tNot HVM guest\n");
   8.282 +                break;
   8.283 +            }
   8.284 +            printk("\tVCPU %d\n", v->vcpu_id);
   8.285 +
   8.286 +            svm_dump_vmcb("key_handler", v->arch.hvm_svm.vmcb);
   8.287 +        }
   8.288 +    }
   8.289 +
   8.290 +    printk("**************************************\n");
   8.291 +}
   8.292 +
   8.293 +void setup_vmcb_dump(void)
   8.294 +{
   8.295 +    register_keyhandler('v', vmcb_dump, "dump AMD-V VMCBs");
   8.296 +}
   8.297 +
   8.298  /*
   8.299   * Local variables:
   8.300   * mode: C
     9.1 --- a/xen/arch/x86/hvm/svm/x86_32/exits.S	Tue Aug 15 17:03:06 2006 +0100
     9.2 +++ b/xen/arch/x86/hvm/svm/x86_32/exits.S	Tue Aug 15 18:20:03 2006 +0100
     9.3 @@ -95,7 +95,8 @@ ENTRY(svm_asm_do_launch)
     9.4          movl VCPU_svm_vmcb(%ebx), %ecx
     9.5          movl 24(%esp), %eax
     9.6          movl %eax, VMCB_rax(%ecx)
     9.7 -        movl VCPU_svm_hsa_pa(%ebx), %eax
     9.8 +	movl VCPU_processor(%ebx), %eax
     9.9 +	movl root_vmcb_pa(,%eax,8), %eax
    9.10          VMSAVE
    9.11  
    9.12          movl VCPU_svm_vmcb_pa(%ebx), %eax
    9.13 @@ -119,7 +120,8 @@ ENTRY(svm_asm_do_launch)
    9.14  
    9.15          GET_CURRENT(%eax)
    9.16  
    9.17 -        movl VCPU_svm_hsa_pa(%eax), %eax
    9.18 +	movl VCPU_processor(%eax), %eax
    9.19 +	movl root_vmcb_pa(,%eax,8), %eax
    9.20          VMLOAD
    9.21  
    9.22          HVM_SAVE_ALL_NOSEGREGS
    9.23 @@ -133,7 +135,7 @@ ENTRY(svm_asm_do_resume)
    9.24  svm_test_all_events:
    9.25          GET_CURRENT(%ebx)
    9.26          pushl %ebx
    9.27 -        call svm_do_resume
    9.28 +        call hvm_do_resume
    9.29          addl $4, %esp
    9.30  /*test_all_events:*/
    9.31          xorl %ecx,%ecx
    10.1 --- a/xen/arch/x86/hvm/svm/x86_64/exits.S	Tue Aug 15 17:03:06 2006 +0100
    10.2 +++ b/xen/arch/x86/hvm/svm/x86_64/exits.S	Tue Aug 15 18:20:03 2006 +0100
    10.3 @@ -105,7 +105,10 @@ ENTRY(svm_asm_do_launch)
    10.4          movq VCPU_svm_vmcb(%rbx), %rcx
    10.5          movq UREGS_rax(%rsp), %rax
    10.6          movq %rax, VMCB_rax(%rcx)
    10.7 -        movq VCPU_svm_hsa_pa(%rbx), %rax
    10.8 +	leaq root_vmcb_pa(%rip), %rax
    10.9 +	movl VCPU_processor(%rbx), %ecx
   10.10 +	shll $3, %ecx
   10.11 +	addq %rcx, %rax
   10.12          VMSAVE
   10.13  
   10.14          movq VCPU_svm_vmcb_pa(%rbx), %rax
   10.15 @@ -133,13 +136,15 @@ ENTRY(svm_asm_do_launch)
   10.16          VMLOAD
   10.17          VMRUN
   10.18          VMSAVE
   10.19 -        /* rax is the only register we're allowed to touch here... */
   10.20 +        HVM_SAVE_ALL_NOSEGREGS
   10.21  
   10.22 -        GET_CURRENT(%rax)
   10.23 -        movq VCPU_svm_hsa_pa(%rax), %rax
   10.24 +        GET_CURRENT(%rbx)
   10.25 +	movl VCPU_processor(%rbx), %ecx
   10.26 +	leaq root_vmcb_pa(%rip), %rax
   10.27 +	shll $3, %ecx
   10.28 +	addq %rcx, %rax
   10.29          VMLOAD
   10.30  
   10.31 -        HVM_SAVE_ALL_NOSEGREGS
   10.32          STGI
   10.33          call svm_vmexit_handler
   10.34          jmp  svm_asm_do_resume
   10.35 @@ -148,7 +153,7 @@ ENTRY(svm_asm_do_resume)
   10.36  svm_test_all_events:
   10.37  	GET_CURRENT(%rbx)
   10.38          movq %rbx, %rdi
   10.39 -        call svm_do_resume
   10.40 +        call hvm_do_resume
   10.41  /*test_all_events:*/
   10.42          cli                             # tests must not race interrupts
   10.43  /*test_softirqs:*/
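In both exits.S files the old per-vcpu VCPU_svm_hsa_pa load is replaced by an index into the new root_vmcb_pa[] table (declared as u64 root_vmcb_pa[NR_CPUS] in svm.h below), using the vcpu's processor number scaled by 8 because each entry is a u64. As a rough C-level model of that per-physical-CPU lookup — hypothetical names and plain user-space C, not Xen code:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4

    /* One host save area / root VMCB physical address per physical CPU,
     * standing in for the real root_vmcb_pa[NR_CPUS] table. */
    static uint64_t root_pa[NR_CPUS] = { 0x100000, 0x200000, 0x300000, 0x400000 };

    struct vcpu { unsigned int processor; };   /* only the field used here */

    /* The assembly's "table base + processor * 8" arithmetic is ordinary
     * array indexing at the C level. */
    static uint64_t host_area_pa(const struct vcpu *v)
    {
        return root_pa[v->processor];
    }

    int main(void)
    {
        struct vcpu v = { .processor = 2 };
        printf("per-CPU save area at 0x%llx\n",
               (unsigned long long)host_area_pa(&v));
        return 0;
    }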
    11.1 --- a/xen/arch/x86/hvm/vlapic.c	Tue Aug 15 17:03:06 2006 +0100
    11.2 +++ b/xen/arch/x86/hvm/vlapic.c	Tue Aug 15 18:20:03 2006 +0100
    11.3 @@ -493,7 +493,7 @@ static void vlapic_read_aligned(struct v
    11.4  
    11.5      case APIC_ESR:
    11.6          vlapic->err_write_count = 0;
    11.7 -        *result = vlapic_get_reg(vlapic, offset); 
    11.8 +        *result = vlapic_get_reg(vlapic, offset);
    11.9          break;
   11.10  
   11.11      default:
    12.1 --- a/xen/arch/x86/hvm/vmx/io.c	Tue Aug 15 17:03:06 2006 +0100
    12.2 +++ b/xen/arch/x86/hvm/vmx/io.c	Tue Aug 15 18:20:03 2006 +0100
    12.3 @@ -38,57 +38,6 @@
    12.4  #include <asm/hvm/vlapic.h>
    12.5  #include <public/hvm/ioreq.h>
    12.6  
    12.7 -#define BSP_CPU(v)    (!(v->vcpu_id))
    12.8 -
    12.9 -static inline 
   12.10 -void __set_tsc_offset(u64  offset)
   12.11 -{
   12.12 -    __vmwrite(TSC_OFFSET, offset);
   12.13 -#if defined (__i386__)
   12.14 -    __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
   12.15 -#endif
   12.16 -}
   12.17 -
   12.18 -void set_guest_time(struct vcpu *v, u64 gtime)
   12.19 -{
   12.20 -    u64    host_tsc;
   12.21 -   
   12.22 -    rdtscll(host_tsc);
   12.23 -    
   12.24 -    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
   12.25 -    __set_tsc_offset(v->arch.hvm_vcpu.cache_tsc_offset);
   12.26 -}
   12.27 -
   12.28 -static inline void
   12.29 -interrupt_post_injection(struct vcpu * v, int vector, int type)
   12.30 -{
   12.31 -    struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
   12.32 -
   12.33 -    if ( is_pit_irq(v, vector, type) ) {
   12.34 -        if ( !pt->first_injected ) {
   12.35 -            pt->pending_intr_nr = 0;
   12.36 -            pt->last_plt_gtime = hvm_get_guest_time(v);
   12.37 -            pt->scheduled = NOW() + pt->period;
   12.38 -            set_timer(&pt->timer, pt->scheduled);
   12.39 -            pt->first_injected = 1;
   12.40 -        } else {
   12.41 -            pt->pending_intr_nr--;
   12.42 -            pt->last_plt_gtime += pt->period_cycles;
   12.43 -            set_guest_time(v, pt->last_plt_gtime);
   12.44 -            pit_time_fired(v, pt->priv);
   12.45 -        }
   12.46 -    }
   12.47 -
   12.48 -    switch(type)
   12.49 -    {
   12.50 -    case APIC_DM_EXTINT:
   12.51 -        break;
   12.52 -
   12.53 -    default:
   12.54 -        vlapic_post_injection(v, vector, type);
   12.55 -        break;
   12.56 -    }
   12.57 -}
   12.58  
   12.59  static inline void
   12.60  enable_irq_window(struct vcpu *v)
   12.61 @@ -194,7 +143,8 @@ asmlinkage void vmx_intr_assist(void)
   12.62  
   12.63      if (likely(!has_ext_irq)) return;
   12.64  
   12.65 -    if (unlikely(is_interruptibility_state())) {    /* pre-cleared for emulated instruction */
   12.66 +    if (unlikely(is_interruptibility_state())) {    
   12.67 +        /* pre-cleared for emulated instruction */
   12.68          enable_irq_window(v);
   12.69          HVM_DBG_LOG(DBG_LEVEL_1, "interruptibility");
   12.70          return;
   12.71 @@ -206,7 +156,7 @@ asmlinkage void vmx_intr_assist(void)
   12.72          return;
   12.73      }
   12.74  
   12.75 -    highest_vector = cpu_get_interrupt(v, &intr_type); 
   12.76 +    highest_vector = cpu_get_interrupt(v, &intr_type);
   12.77      switch (intr_type) {
   12.78      case APIC_DM_EXTINT:
   12.79      case APIC_DM_FIXED:
   12.80 @@ -224,39 +174,11 @@ asmlinkage void vmx_intr_assist(void)
   12.81          BUG();
   12.82          break;
   12.83      }
   12.84 -
   12.85 -    interrupt_post_injection(v, highest_vector, intr_type);
   12.86 +    
   12.87 +    hvm_interrupt_post(v, highest_vector, intr_type);
   12.88      return;
   12.89  }
   12.90  
   12.91 -void vmx_do_resume(struct vcpu *v)
   12.92 -{
   12.93 -    ioreq_t *p;
   12.94 -    struct periodic_time *pt = &v->domain->arch.hvm_domain.pl_time.periodic_tm;
   12.95 -
   12.96 -    vmx_stts();
   12.97 -
   12.98 -    /* pick up the elapsed PIT ticks and re-enable pit_timer */
   12.99 -    if ( pt->enabled && pt->first_injected ) {
  12.100 -        if ( v->arch.hvm_vcpu.guest_time ) {
  12.101 -            set_guest_time(v, v->arch.hvm_vcpu.guest_time);
  12.102 -            v->arch.hvm_vcpu.guest_time = 0;
  12.103 -        }
  12.104 -        pickup_deactive_ticks(pt);
  12.105 -    }
  12.106 -
  12.107 -    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
  12.108 -    wait_on_xen_event_channel(v->arch.hvm.xen_port,
  12.109 -                              p->state != STATE_IOREQ_READY &&
  12.110 -                              p->state != STATE_IOREQ_INPROCESS);
  12.111 -    if ( p->state == STATE_IORESP_READY )
  12.112 -        hvm_io_assist(v);
  12.113 -    if ( p->state != STATE_INVALID ) {
  12.114 -        printf("Weird HVM iorequest state %d.\n", p->state);
  12.115 -        domain_crash(v->domain);
  12.116 -    }
  12.117 -}
  12.118 -
  12.119  /*
  12.120   * Local variables:
  12.121   * mode: C
    13.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Tue Aug 15 17:03:06 2006 +0100
    13.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Tue Aug 15 18:20:03 2006 +0100
    13.3 @@ -261,7 +261,7 @@ static void vmx_do_launch(struct vcpu *v
    13.4  
    13.5      error |= __vmwrite(CR4_READ_SHADOW, cr4);
    13.6  
    13.7 -    vmx_stts();
    13.8 +    hvm_stts(v);
    13.9  
   13.10      if(hvm_apic_support(v->domain))
   13.11          vlapic_init(v);
   13.12 @@ -282,7 +282,7 @@ static void vmx_do_launch(struct vcpu *v
   13.13      v->arch.schedule_tail = arch_vmx_do_resume;
   13.14  
   13.15      /* init guest tsc to start from 0 */
   13.16 -    set_guest_time(v, 0);
   13.17 +    hvm_set_guest_time(v, 0);
   13.18  }
   13.19  
   13.20  /*
   13.21 @@ -539,7 +539,7 @@ void arch_vmx_do_resume(struct vcpu *v)
   13.22          vmx_set_host_env(v);
   13.23      }
   13.24  
   13.25 -    vmx_do_resume(v);
   13.26 +    hvm_do_resume(v);
   13.27      reset_stack_and_jump(vmx_asm_do_vmentry);
   13.28  }
   13.29  
   13.30 @@ -642,13 +642,11 @@ static void vmcs_dump(unsigned char ch)
   13.31      printk("**************************************\n");
   13.32  }
   13.33  
   13.34 -static int __init setup_vmcs_dump(void)
   13.35 +void setup_vmcs_dump(void)
   13.36  {
   13.37      register_keyhandler('v', vmcs_dump, "dump Intel's VMCS");
   13.38 -    return 0;
   13.39  }
   13.40  
   13.41 -__initcall(setup_vmcs_dump);
   13.42  
   13.43  /*
   13.44   * Local variables:
    14.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Aug 15 17:03:06 2006 +0100
    14.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Aug 15 18:20:03 2006 +0100
    14.3 @@ -628,6 +628,45 @@ static unsigned long vmx_get_ctrl_reg(st
    14.4      return 0;                   /* dummy */
    14.5  }
    14.6  
    14.7 +
    14.8 +
    14.9 +/* Make sure that xen intercepts any FP accesses from current */
   14.10 +static void vmx_stts(struct vcpu *v)
   14.11 +{
   14.12 +    unsigned long cr0;
   14.13 +
   14.14 +    /* VMX depends on operating on the current vcpu */
   14.15 +    ASSERT(v == current);
   14.16 +
   14.17 +    /*
   14.18 +     * If the guest does not have TS enabled then we must cause and handle an
   14.19 +     * exception on first use of the FPU. If the guest *does* have TS enabled
   14.20 +     * then this is not necessary: no FPU activity can occur until the guest
   14.21 +     * clears CR0.TS, and we will initialise the FPU when that happens.
   14.22 +     */
   14.23 +    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
   14.24 +    if ( !(cr0 & X86_CR0_TS) )
   14.25 +    {
   14.26 +        __vmread_vcpu(v, GUEST_CR0, &cr0);
   14.27 +        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
   14.28 +        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
   14.29 +    }
   14.30 +}
   14.31 +
   14.32 +
   14.33 +static void vmx_set_tsc_offset(struct vcpu *v, u64 offset)
   14.34 +{
   14.35 +    /* VMX depends on operating on the current vcpu */
   14.36 +    ASSERT(v == current);
   14.37 +
   14.38 +    __vmwrite(TSC_OFFSET, offset);
   14.39 +#if defined (__i386__)
   14.40 +    __vmwrite(TSC_OFFSET_HIGH, offset >> 32);
   14.41 +#endif
   14.42 +}
   14.43 +
   14.44 +
   14.45 +
   14.46  /* SMP VMX guest support */
   14.47  static void vmx_init_ap_context(struct vcpu_guest_context *ctxt,
   14.48                           int vcpuid, int trampoline_vector)
   14.49 @@ -717,6 +756,9 @@ static void vmx_setup_hvm_funcs(void)
   14.50      hvm_funcs.instruction_length = vmx_instruction_length;
   14.51      hvm_funcs.get_guest_ctrl_reg = vmx_get_ctrl_reg;
   14.52  
   14.53 +    hvm_funcs.stts = vmx_stts;
   14.54 +    hvm_funcs.set_tsc_offset = vmx_set_tsc_offset;
   14.55 +
   14.56      hvm_funcs.init_ap_context = vmx_init_ap_context;
   14.57  
   14.58      hvm_funcs.init_hypercall_page = vmx_init_hypercall_page;
   14.59 @@ -768,6 +810,8 @@ int start_vmx(void)
   14.60      set_in_cr4(X86_CR4_VMXE);
   14.61  
   14.62      vmx_init_vmcs_config();
   14.63 +    
   14.64 +    setup_vmcs_dump();
   14.65  
   14.66      if ( (vmcs = vmx_alloc_host_vmcs()) == NULL )
   14.67      {
   14.68 @@ -916,7 +960,7 @@ static void vmx_vmexit_do_cpuid(struct c
   14.69          if ( input == CPUID_LEAF_0x1 )
   14.70          {
   14.71              /* mask off reserved bits */
   14.72 -            ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED; 
   14.73 +            ecx &= ~VMX_VCPU_CPUID_L1_ECX_RESERVED;
   14.74  
   14.75              if ( !hvm_apic_support(v->domain) ||
   14.76                   !vlapic_global_enabled((VLAPIC(v))) )
   14.77 @@ -930,7 +974,7 @@ static void vmx_vmexit_do_cpuid(struct c
   14.78  #if CONFIG_PAGING_LEVELS < 3
   14.79              edx &= ~(bitmaskof(X86_FEATURE_PAE)  |
   14.80                       bitmaskof(X86_FEATURE_PSE)  |
   14.81 -                     bitmaskof(X86_FEATURE_PSE36)); 
   14.82 +                     bitmaskof(X86_FEATURE_PSE36));
   14.83  #else
   14.84              if ( v->domain->arch.ops->guest_paging_levels == PAGING_L2 )
   14.85              {
   14.86 @@ -1044,6 +1088,7 @@ static void vmx_vmexit_do_invlpg(unsigne
   14.87      shadow_invlpg(v, va);
   14.88  }
   14.89  
   14.90 +
   14.91  static int check_for_null_selector(unsigned long eip)
   14.92  {
   14.93      unsigned char inst[MAX_INST_LEN];
   14.94 @@ -1977,7 +2022,7 @@ static inline void vmx_do_msr_write(stru
   14.95  
   14.96      switch (regs->ecx) {
   14.97      case MSR_IA32_TIME_STAMP_COUNTER:
   14.98 -        set_guest_time(v, msr_content);
   14.99 +        hvm_set_guest_time(v, msr_content);
  14.100          break;
  14.101      case MSR_IA32_SYSENTER_CS:
  14.102          __vmwrite(GUEST_SYSENTER_CS, msr_content);
    15.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Tue Aug 15 17:03:06 2006 +0100
    15.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Tue Aug 15 18:20:03 2006 +0100
    15.3 @@ -95,7 +95,7 @@ vmx_process_softirqs:
    15.4  ENTRY(vmx_asm_do_vmentry)
    15.5          GET_CURRENT(%ebx)
    15.6          pushl %ebx
    15.7 -        call vmx_do_resume
    15.8 +        call hvm_do_resume
    15.9          addl $4, %esp
   15.10          cli                             # tests must not race interrupts
   15.11  
    16.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Tue Aug 15 17:03:06 2006 +0100
    16.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Tue Aug 15 18:20:03 2006 +0100
    16.3 @@ -106,7 +106,7 @@ vmx_process_softirqs:
    16.4  ENTRY(vmx_asm_do_vmentry)
    16.5          GET_CURRENT(%rbx)
    16.6          movq %rbx, %rdi
    16.7 -        call vmx_do_resume
    16.8 +        call hvm_do_resume
    16.9          cli                             # tests must not race interrupts
   16.10  
   16.11          movl  VCPU_processor(%rbx),%eax
    17.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Tue Aug 15 17:03:06 2006 +0100
    17.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Tue Aug 15 18:20:03 2006 +0100
    17.3 @@ -81,7 +81,6 @@ void __dummy__(void)
    17.4      BLANK();
    17.5  
    17.6      OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    17.7 -    OFFSET(VCPU_svm_hsa_pa,  struct vcpu, arch.hvm_svm.host_save_pa);
    17.8      OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    17.9      OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
   17.10      BLANK();
    18.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Tue Aug 15 17:03:06 2006 +0100
    18.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Tue Aug 15 18:20:03 2006 +0100
    18.3 @@ -75,7 +75,6 @@ void __dummy__(void)
    18.4      BLANK();
    18.5  
    18.6      OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    18.7 -    OFFSET(VCPU_svm_hsa_pa,  struct vcpu, arch.hvm_svm.host_save_pa);
    18.8      OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
    18.9      OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
   18.10      BLANK();
    19.1 --- a/xen/include/asm-x86/hvm/hvm.h	Tue Aug 15 17:03:06 2006 +0100
    19.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Tue Aug 15 18:20:03 2006 +0100
    19.3 @@ -59,6 +59,14 @@ struct hvm_function_table {
    19.4      int (*instruction_length)(struct vcpu *v);
    19.5      unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
    19.6  
    19.7 +    /*
    19.8 +     * Update specifics of the guest state:
    19.9 +     * 1) TS bit in guest cr0 
   19.10 +     * 2) TSC offset in guest
   19.11 +     */
   19.12 +    void (*stts)(struct vcpu *v); 
   19.13 +    void (*set_tsc_offset)(struct vcpu *v, u64 offset); 
   19.14 +
   19.15      void (*init_ap_context)(struct vcpu_guest_context *ctxt,
   19.16                              int vcpuid, int trampoline_vector);
   19.17  
   19.18 @@ -142,6 +150,10 @@ hvm_get_guest_ctrl_reg(struct vcpu *v, u
   19.19      return 0;                   /* force to fail */
   19.20  }
   19.21  
   19.22 +extern void hvm_stts(struct vcpu *v); 
   19.23 +extern void hvm_set_guest_time(struct vcpu *v, u64 gtime); 
   19.24 +extern void hvm_do_resume(struct vcpu *v); 
   19.25 +
   19.26  static inline void
   19.27  hvm_init_ap_context(struct vcpu_guest_context *ctxt,
   19.28                      int vcpuid, int trampoline_vector)
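The two new hvm_function_table members declared here are filled in per vendor at start-up (the hvm_funcs.stts / hvm_funcs.set_tsc_offset assignments in vmx.c above; SVM presumably does the same in its own setup path), so vendor-neutral HVM code only ever goes through the table. A minimal, self-contained sketch of that function-table dispatch pattern, with hypothetical names rather than the real Xen types:

    #include <stdint.h>
    #include <stdio.h>

    struct guest;                                  /* stand-in for struct vcpu */

    /* Cut-down analogue of struct hvm_function_table. */
    struct vendor_ops {
        void (*stts)(struct guest *g);
        void (*set_tsc_offset)(struct guest *g, uint64_t offset);
    };

    static struct vendor_ops ops;                  /* analogue of hvm_funcs */

    /* Vendor-specific implementations, installed once at boot. */
    static void demo_stts(struct guest *g)
    {
        (void)g;
        puts("arrange to trap the guest's first FPU use");
    }

    static void demo_set_tsc_offset(struct guest *g, uint64_t offset)
    {
        (void)g;
        printf("program hardware TSC offset = %llu\n",
               (unsigned long long)offset);
    }

    int main(void)
    {
        /* analogue of a vendor setup routine filling in the table */
        ops.stts = demo_stts;
        ops.set_tsc_offset = demo_set_tsc_offset;

        /* common code never names the vendor functions directly */
        ops.stts(NULL);
        ops.set_tsc_offset(NULL, 1234);
        return 0;
    }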
    20.1 --- a/xen/include/asm-x86/hvm/io.h	Tue Aug 15 17:03:06 2006 +0100
    20.2 +++ b/xen/include/asm-x86/hvm/io.h	Tue Aug 15 18:20:03 2006 +0100
    20.3 @@ -150,6 +150,7 @@ static inline int irq_masked(unsigned lo
    20.4  #endif
    20.5  
    20.6  extern void handle_mmio(unsigned long, unsigned long);
    20.7 +extern void hvm_interrupt_post(struct vcpu *v, int vector, int type);
    20.8  extern void hvm_io_assist(struct vcpu *v);
    20.9  extern void pic_irq_request(void *data, int level);
   20.10  extern void hvm_pic_assist(struct vcpu *v);
    21.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Tue Aug 15 17:03:06 2006 +0100
    21.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Tue Aug 15 18:20:03 2006 +0100
    21.3 @@ -28,54 +28,12 @@
    21.4  #include <asm/hvm/svm/vmcb.h>
    21.5  #include <asm/i387.h>
    21.6  
    21.7 -extern void asidpool_retire( struct vmcb_struct *vmcb, int core );
    21.8 -
    21.9 -extern void svm_asm_vmexit_handler(struct cpu_user_regs);
   21.10 -extern void svm_setup_function_table(struct vcpu *v);
   21.11 -
   21.12 -extern int vmcb_size;
   21.13 -extern unsigned int cpu_rev;
   21.14 -
   21.15 -extern void svm_stop(void);
   21.16 -extern void svm_save_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs);
   21.17 -extern void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs);
   21.18 -extern void svm_vmread(struct vcpu *v, int index, unsigned long *value);
   21.19 -extern void svm_vmwrite(struct vcpu *v, int index, unsigned long value);
   21.20 -extern void svm_final_setup_guest(struct vcpu *v); 
   21.21 -extern int svm_paging_enabled(struct vcpu *v); 
   21.22 +extern void asidpool_retire(struct vmcb_struct *vmcb, int core);
   21.23  extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
   21.24 -extern void svm_stts(struct vcpu *v); 
   21.25  extern void svm_do_launch(struct vcpu *v);
   21.26 -extern void svm_do_resume(struct vcpu *v);
   21.27 -extern void svm_set_guest_time(struct vcpu *v, u64 gtime);
   21.28  extern void arch_svm_do_resume(struct vcpu *v);
   21.29 -extern int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa);
   21.30 -/* For debugging. Remove when no longer needed. */
   21.31 -extern void svm_dump_host_regs(const char *from);
   21.32  
   21.33 -extern void svm_migrate_timers(struct vcpu *v);
   21.34 -
   21.35 -/* ASID API */
   21.36 -enum {
   21.37 -    ASID_AVAILABLE = 0,
   21.38 -    ASID_INUSE,
   21.39 -    ASID_RETIRED
   21.40 -};
   21.41 -#define   INITIAL_ASID      0
   21.42 -#define   ASID_MAX          64
   21.43 - 
   21.44 -struct asid_pool {
   21.45 -    spinlock_t asid_lock;
   21.46 -    u32 asid[ASID_MAX];
   21.47 -};
   21.48 -
   21.49 -struct svm_percore_globals {
   21.50 -  void *hsa;
   21.51 -  u64  hsa_pa;
   21.52 -  void *scratch_hsa;
   21.53 -  u64  scratch_hsa_pa;
   21.54 -  struct asid_pool ASIDpool;
   21.55 -};
   21.56 +extern u64 root_vmcb_pa[NR_CPUS];
   21.57  
   21.58  #define SVM_REG_EAX (0) 
   21.59  #define SVM_REG_ECX (1) 
    22.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Tue Aug 15 17:03:06 2006 +0100
    22.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Tue Aug 15 18:20:03 2006 +0100
    22.3 @@ -434,8 +434,6 @@ struct vmcb_struct {
    22.4  
    22.5  struct arch_svm_struct {
    22.6      struct vmcb_struct	*vmcb;
    22.7 -    void		        *host_save_area;
    22.8 -    u64                 host_save_pa;
    22.9      u64                 vmcb_pa;
   22.10      u32                 *iopm;
   22.11      u32                 *msrpm;
   22.12 @@ -453,12 +451,15 @@ struct arch_svm_struct {
   22.13      struct timer        hlt_timer;  /* hlt ins emulation wakeup timer */
   22.14  };
   22.15  
   22.16 -struct vmcb_struct *alloc_vmcb(void);
   22.17 -struct host_save_area *alloc_host_save_area(void);
   22.18 -void free_vmcb(struct vmcb_struct *vmcb);
   22.19 -void free_host_save_area(struct host_save_area *hsa);
   22.20 -void dump_vmcb(void);
   22.21 -int  construct_vmcb(struct arch_svm_struct *, struct cpu_user_regs *); 
   22.22 +extern struct vmcb_struct *alloc_vmcb(void);
   22.23 +extern struct host_save_area *alloc_host_save_area(void);
   22.24 +extern void free_vmcb(struct vmcb_struct *vmcb);
   22.25 +extern void free_host_save_area(struct host_save_area *hsa);
   22.26 +
   22.27 +extern int  construct_vmcb(struct arch_svm_struct *, struct cpu_user_regs *);
   22.28 +extern void destroy_vmcb(struct arch_svm_struct *);
   22.29 +
   22.30 +extern void setup_vmcb_dump(void);
   22.31  
   22.32  #define VMCB_USE_HOST_ENV       1
   22.33  #define VMCB_USE_SEPARATE_ENV   0
    23.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Tue Aug 15 17:03:06 2006 +0100
    23.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Tue Aug 15 18:20:03 2006 +0100
    23.3 @@ -27,6 +27,7 @@
    23.4  extern int start_vmx(void);
    23.5  extern void vmcs_dump_vcpu(void);
    23.6  extern void vmx_init_vmcs_config(void);
    23.7 +extern void setup_vmcs_dump(void); 
    23.8  
    23.9  enum {
   23.10      VMX_CPU_STATE_PAE_ENABLED=0,
    24.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Aug 15 17:03:06 2006 +0100
    24.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Tue Aug 15 18:20:03 2006 +0100
    24.3 @@ -395,31 +395,6 @@ static inline int __vmxon (u64 addr)
    24.4      return rc;
    24.5  }
    24.6  
    24.7 -/* Make sure that xen intercepts any FP accesses from current */
    24.8 -static inline void vmx_stts(void)
    24.9 -{
   24.10 -    unsigned long cr0;
   24.11 -    struct vcpu *v = current;
   24.12 -
   24.13 -    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
   24.14 -    if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
   24.15 -        return;
   24.16 -
   24.17 -    /*
   24.18 -     * If the guest does not have TS enabled then we must cause and handle an
   24.19 -     * exception on first use of the FPU. If the guest *does* have TS enabled
   24.20 -     * then this is not necessary: no FPU activity can occur until the guest
   24.21 -     * clears CR0.TS, and we will initialise the FPU when that happens.
   24.22 -     */
   24.23 -    __vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
   24.24 -    if ( !(cr0 & X86_CR0_TS) )
   24.25 -    {
   24.26 -        __vmread_vcpu(v, GUEST_CR0, &cr0);
   24.27 -        __vmwrite(GUEST_CR0, cr0 | X86_CR0_TS);
   24.28 -        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
   24.29 -    }
   24.30 -}
   24.31 -
   24.32  /* Works only for vcpu == current */
   24.33  static inline int vmx_paging_enabled(struct vcpu *v)
   24.34  {
    25.1 --- a/xen/include/asm-x86/processor.h	Tue Aug 15 17:03:06 2006 +0100
    25.2 +++ b/xen/include/asm-x86/processor.h	Tue Aug 15 18:20:03 2006 +0100
    25.3 @@ -277,27 +277,43 @@ static always_inline unsigned int cpuid_
    25.4  }
    25.5  
    25.6  
    25.7 -#define read_cr0() ({ \
    25.8 -	unsigned long __dummy; \
    25.9 -	__asm__( \
   25.10 -		"mov %%cr0,%0\n\t" \
   25.11 -		:"=r" (__dummy)); \
   25.12 -	__dummy; \
   25.13 -})
   25.14  
   25.15 -#define write_cr0(x) \
   25.16 -	__asm__("mov %0,%%cr0": :"r" ((unsigned long)x));
   25.17 +static inline unsigned long read_cr0(void)
   25.18 +{
   25.19 +	unsigned long __cr0;
   25.20 +	__asm__("mov %%cr0,%0\n\t" :"=r" (__cr0)); 
   25.21 +    return __cr0; 
   25.22 +} 
   25.23 +
   25.24 +static inline void write_cr0(unsigned long val)
   25.25 +{
   25.26 +	__asm__("mov %0,%%cr0": :"r" ((unsigned long)val)); 
   25.27 +}
   25.28  
   25.29 -#define read_cr4() ({ \
   25.30 -	unsigned long __dummy; \
   25.31 -	__asm__( \
   25.32 -		"mov %%cr4,%0\n\t" \
   25.33 -		:"=r" (__dummy)); \
   25.34 -	__dummy; \
   25.35 -})
   25.36 +static inline unsigned long read_cr4(void)
   25.37 +{
   25.38 +	unsigned long __cr4;
   25.39 +	__asm__("mov %%cr4,%0\n\t" :"=r" (__cr4)); 
   25.40 +    return __cr4; 
   25.41 +} 
   25.42 +    
   25.43 +static inline void write_cr4(unsigned long val)
   25.44 +{
   25.45 +	__asm__("mov %0,%%cr4": :"r" ((unsigned long)val)); 
   25.46 +}
   25.47  
   25.48 -#define write_cr4(x) \
   25.49 -	__asm__("mov %0,%%cr4": :"r" ((unsigned long)x));
   25.50 +
   25.51 +/* Clear and set 'TS' bit respectively */
   25.52 +static inline void clts(void) 
   25.53 +{
   25.54 +    __asm__ __volatile__ ("clts");
   25.55 +}
   25.56 +
   25.57 +static inline void stts(void) 
   25.58 +{
   25.59 +    write_cr0(X86_CR0_TS|read_cr0());
   25.60 +}
   25.61 +
   25.62  
   25.63  /*
   25.64   * Save the cr4 feature set we're using (ie
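Besides gaining argument type checking, the inline read_cr0/write_cr0/read_cr4/write_cr4 replacements drop the trailing semicolon the old write macros carried in their expansion; that stray semicolon is the classic way such macros break an if/else call site. A small stand-alone illustration of the hazard, using a made-up register rather than the Xen macros:

    #include <stdio.h>

    static unsigned long reg;

    /* Old-macro style: note the ';' baked into the expansion, mirroring the
     * removed "#define write_cr0(x) __asm__(...);" form. */
    #define write_reg_macro(x) (reg = (unsigned long)(x));

    /* New style: type-checked and safe wherever a statement fits. */
    static inline void write_reg(unsigned long x) { reg = x; }

    int main(void)
    {
        int cond = 1;

    #if 0  /* does not compile: the macro's extra ';' orphans the 'else' */
        if (cond)
            write_reg_macro(1);
        else
            write_reg_macro(2);
    #endif

        if (cond)
            write_reg(1);
        else
            write_reg(2);

        printf("reg = %lu\n", reg);
        return 0;
    }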
    26.1 --- a/xen/include/asm-x86/system.h	Tue Aug 15 17:03:06 2006 +0100
    26.2 +++ b/xen/include/asm-x86/system.h	Tue Aug 15 18:20:03 2006 +0100
    26.3 @@ -11,10 +11,6 @@
    26.4      __sel;                                                              \
    26.5  })
    26.6  
    26.7 -/* Clear and set 'TS' bit respectively */
    26.8 -#define clts() __asm__ __volatile__ ("clts")
    26.9 -#define stts() write_cr0(X86_CR0_TS|read_cr0())
   26.10 -
   26.11  #define wbinvd() \
   26.12  	__asm__ __volatile__ ("wbinvd": : :"memory");
   26.13