ia64/xen-unstable

changeset 8771:707cb68a391f

Remove the CONFIG_VMX and CONFIG_SVM compile options. VMX and SVM
support will always be compiled into Xen.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Mon Feb 06 23:33:58 2006 +0100 (2006-02-06)
parents f030f4b565a5
children 55268b90a519
files xen/arch/x86/cpu/amd.c xen/arch/x86/cpu/intel.c xen/arch/x86/hvm/svm/emulate.c xen/arch/x86/hvm/svm/instrlen.c xen/arch/x86/hvm/svm/intr.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/svm/vmcb.c xen/arch/x86/hvm/svm/x86_32/exits.S xen/arch/x86/hvm/svm/x86_64/exits.S xen/arch/x86/hvm/vmx/io.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/hvm/vmx/x86_32/exits.S xen/arch/x86/hvm/vmx/x86_64/exits.S xen/arch/x86/x86_32/asm-offsets.c xen/arch/x86/x86_64/asm-offsets.c xen/include/asm-x86/config.h xen/include/asm-x86/hvm/svm/emulate.h xen/include/asm-x86/hvm/svm/intr.h xen/include/asm-x86/hvm/svm/svm.h xen/include/asm-x86/hvm/svm/vmcb.h xen/include/asm-x86/hvm/svm/vmmcall.h xen/include/asm-x86/hvm/vcpu.h xen/include/asm-x86/hvm/vmx/cpu.h xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/hvm/vmx/vmx.h
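
The pattern in the diff below is uniform: every #ifdef CONFIG_VMX / #ifdef CONFIG_SVM guard is dropped, so the vendor CPU init paths (init_amd/init_intel) now call start_svm()/start_vmx() unconditionally, and presumably the decision of whether the hardware feature exists moves to a runtime check inside those routines. The following is a minimal standalone C sketch of that pattern, not the actual Xen code; the cpu_has_svm_assumed() probe and the *_sketch function names are illustrative assumptions.

/*
 * Sketch only: shows the "always compile, decide at run time" shape this
 * changeset adopts.  Real Xen uses its own CPU feature plumbing.
 */
#include <stdio.h>

static int cpu_has_svm_assumed(void)
{
    /* Placeholder for a runtime capability probe (e.g. a CPUID leaf). */
    return 0;
}

static int start_svm_sketch(void)
{
    if (!cpu_has_svm_assumed())
        return 0;               /* feature absent: quietly do nothing */
    /* ... enable and initialise SVM here ... */
    return 1;
}

static void init_amd_sketch(void)
{
    /* No #ifdef CONFIG_SVM guard: the call is unconditional now. */
    start_svm_sketch();
}

int main(void)
{
    init_amd_sketch();
    printf("vendor init ran; SVM setup was a runtime decision\n");
    return 0;
}

The same shape applies on the Intel side with start_vmx(), and the header/asm-offsets hunks simply stop conditionalising declarations and offsets that are now always present.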
line diff
     1.1 --- a/xen/arch/x86/cpu/amd.c	Mon Feb 06 23:25:31 2006 +0100
     1.2 +++ b/xen/arch/x86/cpu/amd.c	Mon Feb 06 23:33:58 2006 +0100
     1.3 @@ -248,9 +248,7 @@ static void __init init_amd(struct cpuin
     1.4  	}
     1.5  #endif
     1.6  
     1.7 -#ifdef CONFIG_SVM
     1.8  	start_svm();
     1.9 -#endif
    1.10  }
    1.11  
    1.12  static unsigned int amd_size_cache(struct cpuinfo_x86 * c, unsigned int size)
     2.1 --- a/xen/arch/x86/cpu/intel.c	Mon Feb 06 23:25:31 2006 +0100
     2.2 +++ b/xen/arch/x86/cpu/intel.c	Mon Feb 06 23:33:58 2006 +0100
     2.3 @@ -187,9 +187,7 @@ static void __init init_intel(struct cpu
     2.4  	if (c->x86 == 6) 
     2.5  		set_bit(X86_FEATURE_P3, c->x86_capability);
     2.6  
     2.7 -#ifdef CONFIG_VMX
     2.8  	start_vmx();
     2.9 -#endif
    2.10  }
    2.11  
    2.12  
     3.1 --- a/xen/arch/x86/hvm/svm/emulate.c	Mon Feb 06 23:25:31 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/svm/emulate.c	Mon Feb 06 23:33:58 2006 +0100
     3.3 @@ -27,8 +27,6 @@
     3.4  #include <asm/hvm/svm/vmcb.h>
     3.5  #include <asm/hvm/svm/emulate.h>
     3.6  
     3.7 -#ifdef CONFIG_SVM
     3.8 -
     3.9  extern int inst_copy_from_guest(unsigned char *buf, unsigned long guest_eip,
    3.10          int inst_len);
    3.11  
    3.12 @@ -504,7 +502,6 @@ unsigned int __get_instruction_length_fr
    3.13              "eip = %lx\n",  __func__, (unsigned long)vmcb->rip);
    3.14      return 0;
    3.15  }
    3.16 -#endif /* CONFIG_SVM */
    3.17  
    3.18  /*
    3.19   * Local variables:
     4.1 --- a/xen/arch/x86/hvm/svm/instrlen.c	Mon Feb 06 23:25:31 2006 +0100
     4.2 +++ b/xen/arch/x86/hvm/svm/instrlen.c	Mon Feb 06 23:33:58 2006 +0100
     4.3 @@ -23,7 +23,6 @@
     4.4  #define DPRINTF DPRINTK
     4.5  #include <asm-x86/x86_emulate.h>
     4.6  
     4.7 -#ifdef CONFIG_SVM
     4.8  /*
     4.9   * Opcode effective-address decode tables.
    4.10   * Note that we only emulate instructions that have at least one memory
    4.11 @@ -432,4 +431,3 @@ cannot_emulate:
    4.12      svm_dump_inst(_regs.eip);
    4.13      return (unsigned long)-1;
    4.14  }
    4.15 -#endif /* CONFIG_SVM */
     5.1 --- a/xen/arch/x86/hvm/svm/intr.c	Mon Feb 06 23:25:31 2006 +0100
     5.2 +++ b/xen/arch/x86/hvm/svm/intr.c	Mon Feb 06 23:33:58 2006 +0100
     5.3 @@ -38,8 +38,6 @@
     5.4  #include <public/hvm/ioreq.h>
     5.5  #include <xen/domain_page.h>
     5.6  
     5.7 -#ifdef CONFIG_SVM
     5.8 -
     5.9  /*
    5.10   * Most of this code is copied from vmx_io.c and modified 
    5.11   * to be suitable for SVM.
    5.12 @@ -206,8 +204,6 @@ asmlinkage void svm_intr_assist(void)
    5.13      }
    5.14  }
    5.15  
    5.16 -#endif /* CONFIG_SVM */
    5.17 -
    5.18  /*
    5.19   * Local variables:
    5.20   * mode: C
     6.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Feb 06 23:25:31 2006 +0100
     6.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Feb 06 23:33:58 2006 +0100
     6.3 @@ -50,8 +50,6 @@
     6.4  #include <public/sched.h>
     6.5  #include <public/hvm/ioreq.h>
     6.6  
     6.7 -#ifdef CONFIG_SVM
     6.8 -
     6.9  #define SVM_EXTRA_DEBUG
    6.10  
    6.11  #ifdef TRACE_BUFFER
    6.12 @@ -2698,7 +2696,6 @@ asmlinkage void svm_asid(void)
    6.13      }
    6.14      clear_bit(ARCH_SVM_VMCB_ASSIGN_ASID, &v->arch.hvm_svm.flags);
    6.15  }
    6.16 -#endif /* CONFIG_SVM */
    6.17  
    6.18  /*
    6.19   * Local variables:
     7.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Mon Feb 06 23:25:31 2006 +0100
     7.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Mon Feb 06 23:33:58 2006 +0100
     7.3 @@ -36,8 +36,6 @@
     7.4  #include <xen/kernel.h>
     7.5  #include <xen/domain_page.h>
     7.6  
     7.7 -#ifdef CONFIG_SVM
     7.8 -
     7.9  extern int svm_dbg_on;
    7.10  extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
    7.11                                    int oldcore, int newcore);
    7.12 @@ -589,7 +587,6 @@ void svm_dump_vmcb(const char *from, str
    7.13      svm_dump_sel("IDTR", &vmcb->idtr);
    7.14      svm_dump_sel("TR", &vmcb->tr);
    7.15  }
    7.16 -#endif /* CONFIG_SVM */
    7.17  
    7.18  /*
    7.19   * Local variables:
     8.1 --- a/xen/arch/x86/hvm/svm/x86_32/exits.S	Mon Feb 06 23:25:31 2006 +0100
     8.2 +++ b/xen/arch/x86/hvm/svm/x86_32/exits.S	Mon Feb 06 23:33:58 2006 +0100
     8.3 @@ -80,8 +80,6 @@
     8.4          popl %eax;  \
     8.5          addl $(NR_SKIPPED_REGS*4), %esp
     8.6  
     8.7 -#ifdef CONFIG_SVM
     8.8 -
     8.9          ALIGN
    8.10  
    8.11  #define VMRUN  .byte 0x0F,0x01,0xD8
    8.12 @@ -216,5 +214,3 @@ svm_process_softirqs:
    8.13          sti       
    8.14          call do_softirq
    8.15          jmp  svm_test_all_events
    8.16 -#endif /* CONFIG_SVM */
    8.17 -
     9.1 --- a/xen/arch/x86/hvm/svm/x86_64/exits.S	Mon Feb 06 23:25:31 2006 +0100
     9.2 +++ b/xen/arch/x86/hvm/svm/x86_64/exits.S	Mon Feb 06 23:33:58 2006 +0100
     9.3 @@ -92,7 +92,6 @@
     9.4          popq %rdi; \
     9.5          addq $(NR_SKIPPED_REGS*8), %rsp; \
     9.6  
     9.7 -#ifdef CONFIG_SVM
     9.8  #define VMRUN  .byte 0x0F,0x01,0xD8
     9.9  #define VMLOAD .byte 0x0F,0x01,0xDA
    9.10  #define VMSAVE .byte 0x0F,0x01,0xDB
    9.11 @@ -175,5 +174,3 @@ svm_process_softirqs:
    9.12          sti
    9.13          call do_softirq
    9.14          jmp  svm_test_all_events
    9.15 -#endif /* CONFIG_SVM */
    9.16 -
    10.1 --- a/xen/arch/x86/hvm/vmx/io.c	Mon Feb 06 23:25:31 2006 +0100
    10.2 +++ b/xen/arch/x86/hvm/vmx/io.c	Mon Feb 06 23:33:58 2006 +0100
    10.3 @@ -38,8 +38,6 @@
    10.4  #include <asm/hvm/vlapic.h>
    10.5  #include <public/hvm/ioreq.h>
    10.6  
    10.7 -#ifdef CONFIG_VMX
    10.8 -
    10.9  #define BSP_CPU(v)    (!(v->vcpu_id))
   10.10  
   10.11  void vmx_set_tsc_shift(struct vcpu *v, struct hvm_virpit *vpit)
   10.12 @@ -194,7 +192,6 @@ void vmx_do_resume(struct vcpu *v)
   10.13      /* We can't resume the guest if we're waiting on I/O */
   10.14      ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
   10.15  }
   10.16 -#endif /* CONFIG_VMX */
   10.17  
   10.18  /*
   10.19   * Local variables:
    11.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Feb 06 23:25:31 2006 +0100
    11.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Mon Feb 06 23:33:58 2006 +0100
    11.3 @@ -39,8 +39,6 @@
    11.4  #include <asm/shadow_64.h>
    11.5  #endif
    11.6  
    11.7 -#ifdef CONFIG_VMX
    11.8 -
    11.9  int vmcs_size;
   11.10  
   11.11  struct vmcs_struct *alloc_vmcs(void)
   11.12 @@ -539,7 +537,6 @@ void arch_vmx_do_launch(struct vcpu *v)
   11.13      vmx_do_launch(v);
   11.14      reset_stack_and_jump(vmx_asm_do_launch);
   11.15  }
   11.16 -#endif /* CONFIG_VMX */
   11.17  
   11.18  /*
   11.19   * Local variables:
    12.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Feb 06 23:25:31 2006 +0100
    12.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Feb 06 23:33:58 2006 +0100
    12.3 @@ -48,9 +48,6 @@
    12.4  #include <asm/hvm/vpic.h>
    12.5  #include <asm/hvm/vlapic.h>
    12.6  
    12.7 -
    12.8 -#ifdef CONFIG_VMX
    12.9 -
   12.10  static unsigned long trace_values[NR_CPUS][4];
   12.11  #define TRACE_VMEXIT(index,value) trace_values[smp_processor_id()][index]=value
   12.12  
   12.13 @@ -2001,7 +1998,6 @@ asmlinkage void vmx_trace_vmexit (void)
   12.14      TRACE_3D(TRC_VMEXIT,0,0,0);
   12.15      return;
   12.16  }
   12.17 -#endif /* CONFIG_VMX */
   12.18  
   12.19  /*
   12.20   * Local variables:
    13.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Mon Feb 06 23:25:31 2006 +0100
    13.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Mon Feb 06 23:33:58 2006 +0100
    13.3 @@ -81,8 +81,6 @@
    13.4  
    13.5          ALIGN
    13.6  
    13.7 -#ifdef CONFIG_VMX
    13.8 -
    13.9  ENTRY(vmx_asm_vmexit_handler)
   13.10          /* selectors are restored/saved by VMX */
   13.11          HVM_SAVE_ALL_NOSEGREGS
   13.12 @@ -148,5 +146,3 @@ ENTRY(vmx_asm_do_resume)
   13.13  
   13.14  ENTRY(vmx_asm_do_relaunch)
   13.15      vmx_asm_common 1 1
   13.16 -
   13.17 -#endif /* CONFIG_VMX */
    14.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Mon Feb 06 23:25:31 2006 +0100
    14.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Mon Feb 06 23:33:58 2006 +0100
    14.3 @@ -91,7 +91,6 @@
    14.4          popq %rdi; \
    14.5          addq $(NR_SKIPPED_REGS*8), %rsp; \
    14.6  
    14.7 -#ifdef CONFIG_VMX
    14.8  ENTRY(vmx_asm_vmexit_handler)
    14.9          /* selectors are restored/saved by VMX */
   14.10          HVM_SAVE_ALL_NOSEGREGS
   14.11 @@ -155,6 +154,3 @@ ENTRY(vmx_asm_do_resume)
   14.12  
   14.13  ENTRY(vmx_asm_do_relaunch)
   14.14        vmx_asm_common 1 1
   14.15 -
   14.16 -#endif /* CONFIG_VMX */
   14.17 -
    15.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Mon Feb 06 23:25:31 2006 +0100
    15.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Mon Feb 06 23:33:58 2006 +0100
    15.3 @@ -72,7 +72,6 @@ void __dummy__(void)
    15.4      DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
    15.5      BLANK();
    15.6  
    15.7 -#ifdef CONFIG_SVM
    15.8      OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    15.9      OFFSET(VCPU_svm_hsa_pa,  struct vcpu, arch.hvm_svm.host_save_pa);
   15.10      OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
   15.11 @@ -82,7 +81,6 @@ void __dummy__(void)
   15.12      OFFSET(VMCB_rax, struct vmcb_struct, rax);
   15.13      OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
   15.14      BLANK();
   15.15 -#endif
   15.16  
   15.17      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
   15.18      OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
    16.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Mon Feb 06 23:25:31 2006 +0100
    16.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Mon Feb 06 23:33:58 2006 +0100
    16.3 @@ -72,7 +72,6 @@ void __dummy__(void)
    16.4      DEFINE(_VCPUF_nmi_masked, _VCPUF_nmi_masked);
    16.5      BLANK();
    16.6  
    16.7 -#ifdef CONFIG_SVM
    16.8      OFFSET(VCPU_svm_vmcb_pa, struct vcpu, arch.hvm_svm.vmcb_pa);
    16.9      OFFSET(VCPU_svm_hsa_pa,  struct vcpu, arch.hvm_svm.host_save_pa);
   16.10      OFFSET(VCPU_svm_vmcb, struct vcpu, arch.hvm_svm.vmcb);
   16.11 @@ -82,7 +81,6 @@ void __dummy__(void)
   16.12      OFFSET(VMCB_rax, struct vmcb_struct, rax);
   16.13      OFFSET(VMCB_tsc_offset, struct vmcb_struct, tsc_offset);
   16.14      BLANK();
   16.15 -#endif
   16.16  
   16.17      OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
   16.18      OFFSET(VCPUINFO_upcall_mask, vcpu_info_t, evtchn_upcall_mask);
    17.1 --- a/xen/include/asm-x86/config.h	Mon Feb 06 23:25:31 2006 +0100
    17.2 +++ b/xen/include/asm-x86/config.h	Mon Feb 06 23:33:58 2006 +0100
    17.3 @@ -18,8 +18,6 @@
    17.4  #define CONFIG_X86 1
    17.5  #define CONFIG_X86_HT 1
    17.6  #define CONFIG_SHADOW 1
    17.7 -#define CONFIG_VMX 1
    17.8 -#define CONFIG_SVM 1
    17.9  #define CONFIG_SMP 1
   17.10  #define CONFIG_X86_LOCAL_APIC 1
   17.11  #define CONFIG_X86_GOOD_APIC 1
    18.1 --- a/xen/include/asm-x86/hvm/svm/emulate.h	Mon Feb 06 23:25:31 2006 +0100
    18.2 +++ b/xen/include/asm-x86/hvm/svm/emulate.h	Mon Feb 06 23:33:58 2006 +0100
    18.3 @@ -21,8 +21,6 @@
    18.4  #ifndef __ASM_X86_HVM_SVM_EMULATE_H__
    18.5  #define __ASM_X86_HVM_SVM_EMULATE_H__
    18.6  
    18.7 -#ifdef CONFIG_SVM
    18.8 -
    18.9  typedef enum OPERATING_MODE_ {
   18.10      INVALID_OPERATING_MODE = -1,
   18.11      LEGACY_MODE,
   18.12 @@ -146,8 +144,6 @@ static void inline __update_guest_eip(st
   18.13      vmcb->rip += inst_len;
   18.14  }
   18.15  
   18.16 -#endif /* CONFIG_SVM */
   18.17 -
   18.18  #endif /* __ASM_X86_HVM_SVM_EMULATE_H__ */
   18.19  
   18.20  /*
    19.1 --- a/xen/include/asm-x86/hvm/svm/intr.h	Mon Feb 06 23:25:31 2006 +0100
    19.2 +++ b/xen/include/asm-x86/hvm/svm/intr.h	Mon Feb 06 23:33:58 2006 +0100
    19.3 @@ -21,14 +21,10 @@
    19.4  #ifndef __ASM_X86_HVM_SVM_INTR_H__
    19.5  #define __ASM_X86_HVM_SVM_INTR_H__
    19.6  
    19.7 -#ifdef CONFIG_SVM
    19.8 -
    19.9  extern void svm_set_tsc_shift(struct vcpu *v, struct hvm_virpit *vpit);
   19.10  extern void svm_intr_assist(void);
   19.11  extern void svm_intr_assist_update(struct vcpu *v, int highest_vector);
   19.12  extern void svm_intr_assist_test_valid(struct vcpu *v, 
   19.13          unsigned long *intr_result);
   19.14  
   19.15 -#endif /* CONFIG_SVM */
   19.16 -
   19.17  #endif /* __ASM_X86_HVM_SVM_INTR_H__ */
    20.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Mon Feb 06 23:25:31 2006 +0100
    20.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Mon Feb 06 23:33:58 2006 +0100
    20.3 @@ -28,7 +28,6 @@
    20.4  #include <asm/hvm/svm/vmcb.h>
    20.5  #include <asm/i387.h>
    20.6  
    20.7 -#ifdef CONFIG_SVM
    20.8  extern void asidpool_retire( struct vmcb_struct *vmcb, int core );
    20.9  
   20.10  extern void svm_asm_vmexit_handler(struct cpu_user_regs);
   20.11 @@ -86,6 +85,4 @@ struct asid_pool {
   20.12  #define SVM_REG_R14 (14)
   20.13  #define SVM_REG_R15 (15)
   20.14  
   20.15 -#endif /* CONFIG_SVM */
   20.16 -
   20.17  #endif /* __ASM_X86_HVM_SVM_H__ */
    21.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Mon Feb 06 23:25:31 2006 +0100
    21.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Mon Feb 06 23:33:58 2006 +0100
    21.3 @@ -23,8 +23,6 @@
    21.4  #include <asm/config.h>
    21.5  #include <asm/hvm/hvm.h>
    21.6  
    21.7 -#ifdef CONFIG_SVM
    21.8 -
    21.9  extern int start_svm(void);
   21.10  
   21.11  /* general 1 intercepts */
   21.12 @@ -488,8 +486,6 @@ enum {
   21.13  #define VMCB_EFLAGS_RESERVED_0          0xffc08028 /* bitmap for 0 */
   21.14  #define VMCB_EFLAGS_RESERVED_1          0x00000002 /* bitmap for 1 */
   21.15  
   21.16 -#endif /* CONFIG_SVM */
   21.17 -
   21.18  #endif /* ASM_X86_HVM_SVM_VMCS_H__ */
   21.19  
   21.20  /*
    22.1 --- a/xen/include/asm-x86/hvm/svm/vmmcall.h	Mon Feb 06 23:25:31 2006 +0100
    22.2 +++ b/xen/include/asm-x86/hvm/svm/vmmcall.h	Mon Feb 06 23:33:58 2006 +0100
    22.3 @@ -22,8 +22,6 @@
    22.4  #ifndef __ASM_X86_HVM_SVM_VMMCALL_H__
    22.5  #define __ASM_X86_HVM_SVM_VMMCALL_H__
    22.6  
    22.7 -#ifdef CONFIG_SVM
    22.8 -
    22.9  /* VMMCALL command fields */
   22.10  #define VMMCALL_CODE_CPL_MASK     0xC0000000
   22.11  #define VMMCALL_CODE_MBZ_MASK     0x3FFF0000
   22.12 @@ -43,6 +41,4 @@ static inline int get_vmmcall_cpl(int cm
   22.13      return (cmd & VMMCALL_CODE_CPL_MASK) >> 30;
   22.14  }
   22.15  
   22.16 -#endif /* CONFIG_SVM */
   22.17 -
   22.18  #endif /* __ASM_X86_HVM_SVM_VMMCALL_H__ */
    23.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Mon Feb 06 23:25:31 2006 +0100
    23.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Mon Feb 06 23:33:58 2006 +0100
    23.3 @@ -22,13 +22,8 @@
    23.4  
    23.5  #include <asm/hvm/io.h>
    23.6  #include <asm/hvm/vlapic.h>
    23.7 -
    23.8 -#ifdef CONFIG_VMX
    23.9  #include <asm/hvm/vmx/vmcs.h>
   23.10 -#endif
   23.11 -#ifdef CONFIG_SVM
   23.12  #include <asm/hvm/svm/vmcb.h>
   23.13 -#endif
   23.14  
   23.15  struct hvm_vcpu {
   23.16      unsigned long       ioflags;
   23.17 @@ -36,12 +31,8 @@ struct hvm_vcpu {
   23.18      struct vlapic       *vlapic;
   23.19  
   23.20      union {
   23.21 -#ifdef CONFIG_VMX
   23.22          struct arch_vmx_struct vmx;
   23.23 -#endif
   23.24 -#ifdef CONFIG_SVM
   23.25          struct arch_svm_struct svm;
   23.26 -#endif
   23.27      } u;
   23.28  };
   23.29  
    24.1 --- a/xen/include/asm-x86/hvm/vmx/cpu.h	Mon Feb 06 23:25:31 2006 +0100
    24.2 +++ b/xen/include/asm-x86/hvm/vmx/cpu.h	Mon Feb 06 23:33:58 2006 +0100
    24.3 @@ -19,8 +19,6 @@
    24.4  #ifndef __ASM_X86_HVM_VMX_CPU_H__
    24.5  #define __ASM_X86_HVM_VMX_CPU_H__
    24.6  
    24.7 -#ifdef CONFIG_VMX
    24.8 -
    24.9  /*
   24.10   * Virtual CPU
   24.11   */
   24.12 @@ -34,6 +32,4 @@ struct arch_state_struct {
   24.13  #define VMX_MF_32       1
   24.14  #define VMX_MF_64       2
   24.15  
   24.16 -#endif /* CONFIG_VMX */
   24.17 -
   24.18  #endif /* __ASM_X86_HVM_VMX_CPU_H__ */
    25.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Mon Feb 06 23:25:31 2006 +0100
    25.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Mon Feb 06 23:33:58 2006 +0100
    25.3 @@ -24,8 +24,6 @@
    25.4  #include <asm/hvm/vmx/cpu.h>
    25.5  #include <public/hvm/vmx_assist.h>
    25.6  
    25.7 -#ifdef CONFIG_VMX
    25.8 -
    25.9  extern int start_vmx(void);
   25.10  extern void stop_vmx(void);
   25.11  
   25.12 @@ -248,8 +246,6 @@ enum vmcs_field {
   25.13      HOST_RIP                        = 0x00006c16,
   25.14  };
   25.15  
   25.16 -#endif /* CONFIG_VMX */
   25.17 -
   25.18  #endif /* ASM_X86_HVM_VMX_VMCS_H__ */
   25.19  
   25.20  /*
    26.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Feb 06 23:25:31 2006 +0100
    26.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Mon Feb 06 23:33:58 2006 +0100
    26.3 @@ -26,8 +26,6 @@
    26.4  #include <asm/hvm/vmx/vmcs.h>
    26.5  #include <asm/i387.h>
    26.6  
    26.7 -#ifdef CONFIG_VMX
    26.8 -
    26.9  extern void vmx_asm_vmexit_handler(struct cpu_user_regs);
   26.10  extern void vmx_asm_do_resume(void);
   26.11  extern void vmx_asm_do_launch(void);
   26.12 @@ -458,6 +456,4 @@ static inline int vmx_reflect_exception(
   26.13      return 0;
   26.14  }
   26.15  
   26.16 -#endif /* CONFIG_VMX */
   26.17 -
   26.18  #endif /* __ASM_X86_HVM_VMX_VMX_H__ */