ia64/xen-unstable

changeset 15090:384a29655270

svm: Avoid VMSAVE/VMLOAD/VMSAVE/VMLOAD sequence on every vmexit/vmentry.

Instead do this only on context switches. In cases where we need
access to state that is only saved to the VMCB on VMSAVE, we track
whether the state is in sync via a per-vcpu flag and VMSAVE on demand.
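
In outline the scheme looks like the simplified C sketch below. This is an
illustration only: the struct, the vmsave()/vmload() wrappers and the cut-down
context-switch helpers are invented for brevity (assuming a 64-bit build), and
the debug-register and NMI/IST handling of the real code in
xen/arch/x86/hvm/svm/svm.c further down is omitted.

    #include <stdint.h>

    struct svm_vcpu {
        uint64_t vmcb_pa;       /* physical address of the guest VMCB */
        int      vmcb_in_sync;  /* guest extra state saved since last VMRUN? */
    };

    static inline void vmsave(uint64_t vmcb_pa)
    {
        asm volatile ( ".byte 0x0f,0x01,0xdb" /* vmsave */
                       : : "a" (vmcb_pa) : "memory" );
    }

    static inline void vmload(uint64_t vmcb_pa)
    {
        asm volatile ( ".byte 0x0f,0x01,0xda" /* vmload */
                       : : "a" (vmcb_pa) : "memory" );
    }

    /* Called whenever we need state (FS/GS/TR/LDTR bases, SYSCALL MSRs, ...)
     * that only reaches the VMCB via VMSAVE. */
    static void svm_sync_vmcb(struct svm_vcpu *v)
    {
        if ( v->vmcb_in_sync )
            return;
        v->vmcb_in_sync = 1;
        vmsave(v->vmcb_pa);     /* guest extra state -> guest VMCB */
    }

    /* The VMSAVE/VMLOAD pairs move to the context-switch paths, off the
     * per-vmexit hot path.  root_vmcb_pa is the per-CPU host-state VMCB. */
    static void ctxt_switch_from(struct svm_vcpu *v, uint64_t root_vmcb_pa)
    {
        svm_sync_vmcb(v);        /* make sure guest state is in its VMCB */
        vmload(root_vmcb_pa);    /* reload host extra state */
    }

    static void ctxt_switch_to(struct svm_vcpu *v, uint64_t root_vmcb_pa)
    {
        vmsave(root_vmcb_pa);    /* stash host extra state */
        vmload(v->vmcb_pa);      /* install guest extra state */
    }

The #VMEXIT assembly stubs merely clear the flag (movb $0,
VCPU_svm_vmcb_in_sync(%rbx) in the patch below), so the next call to
svm_sync_vmcb() performs a fresh VMSAVE.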

The context switch code can be further improved:
1. No need to VMLOAD host state if we are switching to another SVM VCPU.
2. No need to VMSAVE host state at all (except once at start of day)
because the registers that are saved do not change (or at least, none
of the ones that matter change).

The performance improvement is about 650 cycles for a null
hypercall. This reduces the total null-hypercall time for a non-debug
build of Xen down to around 3300 cycles on my AMD X2 system.

Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Sat May 12 19:04:35 2007 +0100 (2007-05-12)
parents 05c128b0188a
children 65ce4866d20b
files xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/x86_32/exits.S xen/arch/x86/hvm/svm/x86_64/exits.S xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/asm-offsets.c xen/include/asm-x86/hvm/svm/svm.h xen/include/asm-x86/hvm/svm/vmcb.h
     1.1 --- a/xen/arch/x86/hvm/svm/svm.c	Sat May 12 16:24:50 2007 +0100
     1.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Sat May 12 19:04:35 2007 +0100
     1.3 @@ -65,9 +65,6 @@ static void *hsa[NR_CPUS] __read_mostly;
     1.4  /* vmcb used for extended host state */
     1.5  static void *root_vmcb[NR_CPUS] __read_mostly;
     1.6  
     1.7 -/* physical address of above for host VMSAVE/VMLOAD */
     1.8 -u64 root_vmcb_pa[NR_CPUS] __read_mostly;
     1.9 -
    1.10  /* hardware assisted paging bits */
    1.11  extern int opt_hap_enabled;
    1.12  
    1.13 @@ -551,14 +548,12 @@ int svm_load_vmcb_ctxt(struct vcpu *v, s
    1.14      return 0;
    1.15  }
    1.16  
    1.17 -
    1.18  static inline void svm_restore_dr(struct vcpu *v)
    1.19  {
    1.20      if ( unlikely(v->arch.guest_context.debugreg[7] & 0xFF) )
    1.21          __restore_debug_registers(v);
    1.22  }
    1.23  
    1.24 -
    1.25  static int svm_realmode(struct vcpu *v)
    1.26  {
    1.27      unsigned long cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
    1.28 @@ -586,12 +581,12 @@ static int svm_guest_x86_mode(struct vcp
    1.29      return (vmcb->cs.attr.fields.db ? 4 : 2);
    1.30  }
    1.31  
    1.32 -void svm_update_host_cr3(struct vcpu *v)
    1.33 +static void svm_update_host_cr3(struct vcpu *v)
    1.34  {
    1.35      /* SVM doesn't have a HOST_CR3 equivalent to update. */
    1.36  }
    1.37  
    1.38 -void svm_update_guest_cr3(struct vcpu *v)
    1.39 +static void svm_update_guest_cr3(struct vcpu *v)
    1.40  {
    1.41      v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
    1.42  }
    1.43 @@ -603,7 +598,7 @@ static void svm_update_vtpr(struct vcpu 
    1.44      vmcb->vintr.fields.tpr = value & 0x0f;
    1.45  }
    1.46  
    1.47 -unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
    1.48 +static unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
    1.49  {
    1.50      switch ( num )
    1.51      {
    1.52 @@ -621,6 +616,20 @@ unsigned long svm_get_ctrl_reg(struct vc
    1.53      return 0;                   /* dummy */
    1.54  }
    1.55  
    1.56 +static void svm_sync_vmcb(struct vcpu *v)
    1.57 +{
    1.58 +    struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
    1.59 +
    1.60 +    if ( arch_svm->vmcb_in_sync )
    1.61 +        return;
    1.62 +
    1.63 +    arch_svm->vmcb_in_sync = 1;
    1.64 +
    1.65 +    asm volatile (
    1.66 +        ".byte 0x0f,0x01,0xdb" /* vmsave */
    1.67 +        : : "a" (__pa(arch_svm->vmcb)) );
    1.68 +}
    1.69 +
    1.70  static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
    1.71  {
    1.72      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.73 @@ -634,13 +643,13 @@ static unsigned long svm_get_segment_bas
    1.74      case x86_seg_cs: return long_mode ? 0 : vmcb->cs.base;
    1.75      case x86_seg_ds: return long_mode ? 0 : vmcb->ds.base;
    1.76      case x86_seg_es: return long_mode ? 0 : vmcb->es.base;
    1.77 -    case x86_seg_fs: return vmcb->fs.base;
    1.78 -    case x86_seg_gs: return vmcb->gs.base;
    1.79 +    case x86_seg_fs: svm_sync_vmcb(v); return vmcb->fs.base;
    1.80 +    case x86_seg_gs: svm_sync_vmcb(v); return vmcb->gs.base;
    1.81      case x86_seg_ss: return long_mode ? 0 : vmcb->ss.base;
    1.82 -    case x86_seg_tr: return vmcb->tr.base;
    1.83 +    case x86_seg_tr: svm_sync_vmcb(v); return vmcb->tr.base;
    1.84      case x86_seg_gdtr: return vmcb->gdtr.base;
    1.85      case x86_seg_idtr: return vmcb->idtr.base;
    1.86 -    case x86_seg_ldtr: return vmcb->ldtr.base;
    1.87 +    case x86_seg_ldtr: svm_sync_vmcb(v); return vmcb->ldtr.base;
    1.88      }
    1.89      BUG();
    1.90      return 0;
    1.91 @@ -652,16 +661,40 @@ static void svm_get_segment_register(str
    1.92      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    1.93      switch ( seg )
    1.94      {
    1.95 -    case x86_seg_cs:   memcpy(reg, &vmcb->cs,   sizeof(*reg)); break;
    1.96 -    case x86_seg_ds:   memcpy(reg, &vmcb->ds,   sizeof(*reg)); break;
    1.97 -    case x86_seg_es:   memcpy(reg, &vmcb->es,   sizeof(*reg)); break;
    1.98 -    case x86_seg_fs:   memcpy(reg, &vmcb->fs,   sizeof(*reg)); break;
    1.99 -    case x86_seg_gs:   memcpy(reg, &vmcb->gs,   sizeof(*reg)); break;
   1.100 -    case x86_seg_ss:   memcpy(reg, &vmcb->ss,   sizeof(*reg)); break;
   1.101 -    case x86_seg_tr:   memcpy(reg, &vmcb->tr,   sizeof(*reg)); break;
   1.102 -    case x86_seg_gdtr: memcpy(reg, &vmcb->gdtr, sizeof(*reg)); break;
   1.103 -    case x86_seg_idtr: memcpy(reg, &vmcb->idtr, sizeof(*reg)); break;
   1.104 -    case x86_seg_ldtr: memcpy(reg, &vmcb->ldtr, sizeof(*reg)); break;
   1.105 +    case x86_seg_cs:
   1.106 +        memcpy(reg, &vmcb->cs, sizeof(*reg));
   1.107 +        break;
   1.108 +    case x86_seg_ds:
   1.109 +        memcpy(reg, &vmcb->ds, sizeof(*reg));
   1.110 +        break;
   1.111 +    case x86_seg_es:
   1.112 +        memcpy(reg, &vmcb->es, sizeof(*reg));
   1.113 +        break;
   1.114 +    case x86_seg_fs:
   1.115 +        svm_sync_vmcb(v);
   1.116 +        memcpy(reg, &vmcb->fs, sizeof(*reg));
   1.117 +        break;
   1.118 +    case x86_seg_gs:
   1.119 +        svm_sync_vmcb(v);
   1.120 +        memcpy(reg, &vmcb->gs, sizeof(*reg));
   1.121 +        break;
   1.122 +    case x86_seg_ss:
   1.123 +        memcpy(reg, &vmcb->ss, sizeof(*reg));
   1.124 +        break;
   1.125 +    case x86_seg_tr:
   1.126 +        svm_sync_vmcb(v);
   1.127 +        memcpy(reg, &vmcb->tr, sizeof(*reg));
   1.128 +        break;
   1.129 +    case x86_seg_gdtr:
   1.130 +        memcpy(reg, &vmcb->gdtr, sizeof(*reg));
   1.131 +        break;
   1.132 +    case x86_seg_idtr:
   1.133 +        memcpy(reg, &vmcb->idtr, sizeof(*reg));
   1.134 +        break;
   1.135 +    case x86_seg_ldtr:
   1.136 +        svm_sync_vmcb(v);
   1.137 +        memcpy(reg, &vmcb->ldtr, sizeof(*reg));
   1.138 +        break;
   1.139      default: BUG();
   1.140      }
   1.141  }
   1.142 @@ -761,11 +794,26 @@ static void svm_load_cpu_guest_regs(stru
   1.143  
   1.144  static void svm_ctxt_switch_from(struct vcpu *v)
   1.145  {
   1.146 +    int cpu = smp_processor_id();
   1.147 +
   1.148      svm_save_dr(v);
   1.149 +
   1.150 +    svm_sync_vmcb(v);
   1.151 +
   1.152 +    asm volatile (
   1.153 +        ".byte 0x0f,0x01,0xda" /* vmload */
   1.154 +        : : "a" (__pa(root_vmcb[cpu])) );
   1.155 +
   1.156 +#ifdef __x86_64__
   1.157 +    /* Resume use of IST2 for NMIs now that the host TR is reinstated. */
   1.158 +    idt_tables[cpu][TRAP_nmi].a |= 2UL << 32;
   1.159 +#endif
   1.160  }
   1.161  
   1.162  static void svm_ctxt_switch_to(struct vcpu *v)
   1.163  {
   1.164 +    int cpu = smp_processor_id();
   1.165 +
   1.166  #ifdef  __x86_64__
   1.167      /* 
   1.168       * This is required, because VMRUN does consistency check
   1.169 @@ -776,8 +824,22 @@ static void svm_ctxt_switch_to(struct vc
   1.170      set_segment_register(ds, 0);
   1.171      set_segment_register(es, 0);
   1.172      set_segment_register(ss, 0);
   1.173 +
   1.174 +    /*
   1.175 +     * Cannot use IST2 for NMIs while we are running with the guest TR. But
   1.176 +     * this doesn't matter: the IST is only needed to handle SYSCALL/SYSRET.
   1.177 +     */
   1.178 +    idt_tables[cpu][TRAP_nmi].a &= ~(2UL << 32);
   1.179  #endif
   1.180 +
   1.181      svm_restore_dr(v);
   1.182 +
   1.183 +    asm volatile (
   1.184 +        ".byte 0x0f,0x01,0xdb" /* vmsave */
   1.185 +        : : "a" (__pa(root_vmcb[cpu])) );
   1.186 +    asm volatile (
   1.187 +        ".byte 0x0f,0x01,0xda" /* vmload */
   1.188 +        : : "a" (__pa(v->arch.hvm_svm.vmcb)) );
   1.189  }
   1.190  
   1.191  static void svm_do_resume(struct vcpu *v) 
   1.192 @@ -925,8 +987,6 @@ int start_svm(void)
   1.193      phys_hsa_hi = (u32) (phys_hsa >> 32);    
   1.194      wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
   1.195  
   1.196 -    root_vmcb_pa[cpu] = virt_to_maddr(root_vmcb[cpu]);
   1.197 -  
   1.198      if ( cpu != 0 )
   1.199          return 1;
   1.200  
   1.201 @@ -1196,9 +1256,11 @@ static void svm_get_prefix_info(struct v
   1.202              *seg = &vmcb->es;
   1.203              continue;
   1.204          case 0x64: /* FS */
   1.205 +            svm_sync_vmcb(v);
   1.206              *seg = &vmcb->fs;
   1.207              continue;
   1.208          case 0x65: /* GS */
   1.209 +            svm_sync_vmcb(v);
   1.210              *seg = &vmcb->gs;
   1.211              continue;
   1.212          case 0x3e: /* DS */
     2.1 --- a/xen/arch/x86/hvm/svm/x86_32/exits.S	Sat May 12 16:24:50 2007 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/x86_32/exits.S	Sat May 12 19:04:35 2007 +0100
     2.3 @@ -45,8 +45,6 @@
     2.4          pushl %ebx;
     2.5  
     2.6  #define VMRUN  .byte 0x0F,0x01,0xD8
     2.7 -#define VMLOAD .byte 0x0F,0x01,0xDA
     2.8 -#define VMSAVE .byte 0x0F,0x01,0xDB
     2.9  #define STGI   .byte 0x0F,0x01,0xDC
    2.10  #define CLGI   .byte 0x0F,0x01,0xDD
    2.11  
    2.12 @@ -66,9 +64,6 @@ ENTRY(svm_asm_do_resume)
    2.13          movl VCPU_svm_vmcb(%ebx),%ecx
    2.14          movl UREGS_eax(%esp),%eax
    2.15          movl %eax,VMCB_rax(%ecx)
    2.16 -        movl VCPU_processor(%ebx),%eax
    2.17 -        movl root_vmcb_pa(,%eax,8),%eax
    2.18 -        VMSAVE
    2.19  
    2.20          movl VCPU_svm_vmcb_pa(%ebx),%eax
    2.21          popl %ebx
    2.22 @@ -78,19 +73,16 @@ ENTRY(svm_asm_do_resume)
    2.23          popl %edi
    2.24          popl %ebp
    2.25          addl $(NR_SKIPPED_REGS*4),%esp
    2.26 -        VMLOAD
    2.27 +
    2.28          VMRUN
    2.29 -        VMSAVE
    2.30  
    2.31          HVM_SAVE_ALL_NOSEGREGS
    2.32  
    2.33          GET_CURRENT(%ebx)
    2.34 +        movb $0,VCPU_svm_vmcb_in_sync(%ebx)
    2.35          movl VCPU_svm_vmcb(%ebx),%ecx
    2.36          movl VMCB_rax(%ecx),%eax
    2.37          movl %eax,UREGS_eax(%esp)
    2.38 -        movl VCPU_processor(%ebx),%eax
    2.39 -        movl root_vmcb_pa(,%eax,8),%eax
    2.40 -        VMLOAD
    2.41  
    2.42          STGI
    2.43  .globl svm_stgi_label;
     3.1 --- a/xen/arch/x86/hvm/svm/x86_64/exits.S	Sat May 12 16:24:50 2007 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/x86_64/exits.S	Sat May 12 19:04:35 2007 +0100
     3.3 @@ -54,8 +54,6 @@
     3.4          pushq %r15;
     3.5  
     3.6  #define VMRUN  .byte 0x0F,0x01,0xD8
     3.7 -#define VMLOAD .byte 0x0F,0x01,0xDA
     3.8 -#define VMSAVE .byte 0x0F,0x01,0xDB
     3.9  #define STGI   .byte 0x0F,0x01,0xDC
    3.10  #define CLGI   .byte 0x0F,0x01,0xDD
    3.11  
    3.12 @@ -76,10 +74,6 @@ ENTRY(svm_asm_do_resume)
    3.13          movq VCPU_svm_vmcb(%rbx),%rcx
    3.14          movq UREGS_rax(%rsp),%rax
    3.15          movq %rax,VMCB_rax(%rcx)
    3.16 -        leaq root_vmcb_pa(%rip),%rax
    3.17 -        movl VCPU_processor(%rbx),%ecx
    3.18 -        movq (%rax,%rcx,8),%rax
    3.19 -        VMSAVE
    3.20  
    3.21          movq VCPU_svm_vmcb_pa(%rbx),%rax
    3.22          popq %r15
    3.23 @@ -99,20 +93,15 @@ ENTRY(svm_asm_do_resume)
    3.24          popq %rdi
    3.25          addq $(NR_SKIPPED_REGS*8),%rsp
    3.26  
    3.27 -        VMLOAD
    3.28          VMRUN
    3.29 -        VMSAVE
    3.30  
    3.31          HVM_SAVE_ALL_NOSEGREGS
    3.32  
    3.33          GET_CURRENT(%rbx)
    3.34 +        movb $0,VCPU_svm_vmcb_in_sync(%rbx)
    3.35          movq VCPU_svm_vmcb(%rbx),%rcx
    3.36          movq VMCB_rax(%rcx),%rax
    3.37          movq %rax,UREGS_rax(%rsp)
    3.38 -        leaq root_vmcb_pa(%rip),%rax
    3.39 -        movl VCPU_processor(%rbx),%ecx
    3.40 -        movq (%rax,%rcx,8),%rax
    3.41 -        VMLOAD
    3.42  
    3.43          STGI
    3.44  .globl svm_stgi_label;
     4.1 --- a/xen/arch/x86/smpboot.c	Sat May 12 16:24:50 2007 +0100
     4.2 +++ b/xen/arch/x86/smpboot.c	Sat May 12 19:04:35 2007 +0100
     4.3 @@ -460,7 +460,6 @@ set_cpu_sibling_map(int cpu)
     4.4  	}
     4.5  }
     4.6  
     4.7 -#ifdef CONFIG_X86_32
     4.8  static void construct_percpu_idt(unsigned int cpu)
     4.9  {
    4.10  	unsigned char idt_load[10];
    4.11 @@ -472,7 +471,6 @@ static void construct_percpu_idt(unsigne
    4.12  	*(unsigned long  *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
    4.13  	__asm__ __volatile__ ( "lidt %0" : "=m" (idt_load) );
    4.14  }
    4.15 -#endif
    4.16  
    4.17  /*
    4.18   * Activate a secondary processor.
    4.19 @@ -500,13 +498,11 @@ void __devinit start_secondary(void *unu
    4.20  	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
    4.21  		rep_nop();
    4.22  
    4.23 -#ifdef CONFIG_X86_32
    4.24  	/*
    4.25  	 * At this point, boot CPU has fully initialised the IDT. It is
    4.26  	 * now safe to make ourselves a private copy.
    4.27  	 */
    4.28  	construct_percpu_idt(cpu);
    4.29 -#endif
    4.30  
    4.31  	setup_secondary_APIC_clock();
    4.32  	enable_APIC_timer();
     5.1 --- a/xen/arch/x86/traps.c	Sat May 12 16:24:50 2007 +0100
     5.2 +++ b/xen/arch/x86/traps.c	Sat May 12 19:04:35 2007 +0100
     5.3 @@ -75,9 +75,12 @@ char opt_nmi[10] = "fatal";
     5.4  #endif
     5.5  string_param("nmi", opt_nmi);
     5.6  
     5.7 -/* Master table, used by all CPUs on x86/64, and by CPU0 on x86/32.*/
     5.8 +/* Master table, used by CPU0. */
     5.9  idt_entry_t idt_table[IDT_ENTRIES];
    5.10  
    5.11 +/* Pointer to the IDT of every CPU. */
    5.12 +idt_entry_t *idt_tables[NR_CPUS] __read_mostly;
    5.13 +
    5.14  #define DECLARE_TRAP_HANDLER(_name)                     \
    5.15  asmlinkage void _name(void);                            \
    5.16  asmlinkage int do_ ## _name(struct cpu_user_regs *regs)
    5.17 @@ -2025,13 +2028,11 @@ asmlinkage int do_spurious_interrupt_bug
    5.18  
    5.19  void set_intr_gate(unsigned int n, void *addr)
    5.20  {
    5.21 -#ifdef __i386__
    5.22      int i;
    5.23      /* Keep secondary tables in sync with IRQ updates. */
    5.24      for ( i = 1; i < NR_CPUS; i++ )
    5.25          if ( idt_tables[i] != NULL )
    5.26              _set_gate(&idt_tables[i][n], 14, 0, addr);
    5.27 -#endif
    5.28      _set_gate(&idt_table[n], 14, 0, addr);
    5.29  }
    5.30  
    5.31 @@ -2094,6 +2095,9 @@ void __init trap_init(void)
    5.32      set_intr_gate(TRAP_machine_check,&machine_check);
    5.33      set_intr_gate(TRAP_simd_error,&simd_coprocessor_error);
    5.34  
    5.35 +    /* CPU0 uses the master IDT. */
    5.36 +    idt_tables[0] = idt_table;
    5.37 +
    5.38      percpu_traps_init();
    5.39  
    5.40      cpu_init();
     6.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Sat May 12 16:24:50 2007 +0100
     6.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Sat May 12 19:04:35 2007 +0100
     6.3 @@ -81,7 +81,7 @@ void __dummy__(void)
     6.4  
     6.5      OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
     6.6      OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
     6.7 -    OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
     6.8 +    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
     6.9      BLANK();
    6.10  
    6.11      OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    6.12 @@ -89,7 +89,6 @@ void __dummy__(void)
    6.13      BLANK();
    6.14  
    6.15      OFFSET(VMCB_rax, struct vmcb_struct, rax);
    6.16 -    OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
    6.17      BLANK();
    6.18  
    6.19      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
     7.1 --- a/xen/arch/x86/x86_32/traps.c	Sat May 12 16:24:50 2007 +0100
     7.2 +++ b/xen/arch/x86/x86_32/traps.c	Sat May 12 19:04:35 2007 +0100
     7.3 @@ -18,9 +18,6 @@
     7.4  
     7.5  #include <public/callback.h>
     7.6  
     7.7 -/* All CPUs have their own IDT to allow int80 direct trap. */
     7.8 -idt_entry_t *idt_tables[NR_CPUS] __read_mostly;
     7.9 -
    7.10  static void print_xen_info(void)
    7.11  {
    7.12      char taint_str[TAINT_STRING_MAX_LEN];
    7.13 @@ -252,9 +249,6 @@ void __init percpu_traps_init(void)
    7.14      if ( smp_processor_id() != 0 )
    7.15          return;
    7.16  
    7.17 -    /* CPU0 uses the master IDT. */
    7.18 -    idt_tables[0] = idt_table;
    7.19 -
    7.20      /* The hypercall entry vector is only accessible from ring 1. */
    7.21      _set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall);
    7.22  
     8.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Sat May 12 16:24:50 2007 +0100
     8.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Sat May 12 19:04:35 2007 +0100
     8.3 @@ -84,7 +84,7 @@ void __dummy__(void)
     8.4  
     8.5      OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
     8.6      OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
     8.7 -    OFFSET(VCPU_svm_vmexit_tsc, struct vcpu, arch.hvm_svm.vmexit_tsc);
     8.8 +    OFFSET(VCPU_svm_vmcb_in_sync, struct vcpu, arch.hvm_svm.vmcb_in_sync);
     8.9      BLANK();
    8.10  
    8.11      OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    8.12 @@ -95,7 +95,6 @@ void __dummy__(void)
    8.13      BLANK();
    8.14  
    8.15      OFFSET(VMCB_rax, struct vmcb_struct, rax);
    8.16 -    OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
    8.17      BLANK();
    8.18  
    8.19      OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
     9.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Sat May 12 16:24:50 2007 +0100
     9.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Sat May 12 19:04:35 2007 +0100
     9.3 @@ -30,8 +30,6 @@
     9.4  
     9.5  extern void svm_dump_vmcb(const char *from, struct vmcb_struct *vmcb);
     9.6  
     9.7 -extern u64 root_vmcb_pa[NR_CPUS];
     9.8 -
     9.9  static inline int svm_long_mode_enabled(struct vcpu *v)
    9.10  {
    9.11      u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
    10.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Sat May 12 16:24:50 2007 +0100
    10.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Sat May 12 19:04:35 2007 +0100
    10.3 @@ -444,11 +444,9 @@ struct vmcb_struct {
    10.4  struct arch_svm_struct {
    10.5      struct vmcb_struct *vmcb;
    10.6      u64                 vmcb_pa;
    10.7 -    u32                 *msrpm;
    10.8 -    u64                 vmexit_tsc; /* tsc read at #VMEXIT. for TSC_OFFSET */
    10.9 +    u32                *msrpm;
   10.10      int                 launch_core;
   10.11 -    
   10.12 -    unsigned long       flags;            /* VMCB flags */
   10.13 +    bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
   10.14      unsigned long       cpu_shadow_cr0;   /* Guest value for CR0 */
   10.15      unsigned long       cpu_shadow_cr4;   /* Guest value for CR4 */
   10.16      unsigned long       cpu_shadow_efer;  /* Guest value for EFER */