ia64/xen-unstable

changeset 10648:82f481bda1c7

[HVM][VMX] Move vmcs and I/O bitmap allocation into
vmx_initialize_guest_resources().

Signed-off-by: Xin B Li <xin.b.li@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Wed Jul 05 11:21:19 2006 +0100 (2006-07-05)
parents 4d2354be4aa6
children 8e1ae72e905e
files xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/hvm/vmx/vmcs.h
line diff
     1.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Jul 05 10:32:33 2006 +0100
     1.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Jul 05 11:21:19 2006 +0100
     1.3 @@ -41,34 +41,52 @@
     1.4  #include <asm/shadow_64.h>
     1.5  #endif
     1.6  
     1.7 -int vmcs_size;
     1.8 +static int vmcs_size;
     1.9 +static int vmcs_order;
    1.10 +static u32 vmcs_revision_id;
    1.11  
    1.12 -struct vmcs_struct *vmx_alloc_vmcs(void)
    1.13 +void vmx_init_vmcs_config(void)
    1.14 +{
    1.15 +    u32 vmx_msr_low, vmx_msr_high;
    1.16 +
    1.17 +    if ( vmcs_size )
    1.18 +        return;
    1.19 +
    1.20 +    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
    1.21 +
    1.22 +    vmcs_revision_id = vmx_msr_low;
    1.23 +
    1.24 +    vmcs_size  = vmx_msr_high & 0x1fff;
    1.25 +    vmcs_order = get_order_from_bytes(vmcs_size);
    1.26 +}
    1.27 +
    1.28 +static struct vmcs_struct *vmx_alloc_vmcs(void)
    1.29  {
    1.30      struct vmcs_struct *vmcs;
    1.31 -    u32 vmx_msr_low, vmx_msr_high;
    1.32  
    1.33 -    rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
    1.34 -    vmcs_size = vmx_msr_high & 0x1fff;
    1.35 -    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size));
    1.36 -    memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
    1.37 +    if ( (vmcs = alloc_xenheap_pages(vmcs_order)) == NULL )
    1.38 +    {
    1.39 +        DPRINTK("Failed to allocate VMCS.\n");
    1.40 +        return NULL;
    1.41 +    }
    1.42  
    1.43 -    vmcs->vmcs_revision_id = vmx_msr_low;
    1.44 +    memset(vmcs, 0, vmcs_size); /* don't remove this */
    1.45 +    vmcs->vmcs_revision_id = vmcs_revision_id;
    1.46 +
    1.47      return vmcs;
    1.48  }
    1.49  
    1.50 -static void free_vmcs(struct vmcs_struct *vmcs)
    1.51 +static void vmx_free_vmcs(struct vmcs_struct *vmcs)
    1.52  {
    1.53 -    int order;
    1.54 -
    1.55 -    order = get_order_from_bytes(vmcs_size);
    1.56 -    free_xenheap_pages(vmcs, order);
    1.57 +    free_xenheap_pages(vmcs, vmcs_order);
    1.58  }
    1.59  
    1.60  static void __vmx_clear_vmcs(void *info)
    1.61  {
    1.62      struct vcpu *v = info;
    1.63 +
    1.64      __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    1.65 +
    1.66      v->arch.hvm_vmx.active_cpu = -1;
    1.67      v->arch.hvm_vmx.launched   = 0;
    1.68  }
    1.69 @@ -128,11 +146,19 @@ void vmx_vmcs_exit(struct vcpu *v)
    1.70      vcpu_unpause(v);
    1.71  }
    1.72  
    1.73 +struct vmcs_struct *vmx_alloc_host_vmcs(void)
    1.74 +{
    1.75 +    return vmx_alloc_vmcs();
    1.76 +}
    1.77 +
    1.78 +void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
    1.79 +{
    1.80 +    vmx_free_vmcs(vmcs);
    1.81 +}
    1.82 +
    1.83  static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
    1.84  {
    1.85      int error = 0;
    1.86 -    void *io_bitmap_a;
    1.87 -    void *io_bitmap_b;
    1.88  
    1.89      error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
    1.90                         MONITOR_PIN_BASED_EXEC_CONTROLS);
    1.91 @@ -141,19 +167,8 @@ static inline int construct_vmcs_control
    1.92  
    1.93      error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
    1.94  
    1.95 -    /* need to use 0x1000 instead of PAGE_SIZE */
    1.96 -    io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
    1.97 -    io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
    1.98 -    memset(io_bitmap_a, 0xff, 0x1000);
    1.99 -    /* don't bother debug port access */
   1.100 -    clear_bit(PC_DEBUG_PORT, io_bitmap_a);
   1.101 -    memset(io_bitmap_b, 0xff, 0x1000);
   1.102 -
   1.103 -    error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_maddr(io_bitmap_a));
   1.104 -    error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_maddr(io_bitmap_b));
   1.105 -
   1.106 -    arch_vmx->io_bitmap_a = io_bitmap_a;
   1.107 -    arch_vmx->io_bitmap_b = io_bitmap_b;
   1.108 +    error |= __vmwrite(IO_BITMAP_A, (u64)virt_to_maddr(arch_vmx->io_bitmap_a));
   1.109 +    error |= __vmwrite(IO_BITMAP_B, (u64)virt_to_maddr(arch_vmx->io_bitmap_b));
   1.110  
   1.111      return error;
   1.112  }
   1.113 @@ -429,67 +444,52 @@ static inline int construct_vmcs_host(vo
   1.114  }
   1.115  
   1.116  /*
   1.117 - * Need to extend to support full virtualization.
   1.118 + * the working VMCS pointer has been set properly
   1.119 + * just before entering this function.
   1.120   */
   1.121  static int construct_vmcs(struct vcpu *v,
   1.122                            cpu_user_regs_t *regs)
   1.123  {
   1.124      struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
   1.125      int error;
   1.126 -    long rc;
   1.127 -
   1.128 -    memset(arch_vmx, 0, sizeof(struct arch_vmx_struct));
   1.129 -
   1.130 -    spin_lock_init(&arch_vmx->vmcs_lock);
   1.131  
   1.132 -    /*
   1.133 -     * Create a new VMCS
   1.134 -     */
   1.135 -    if (!(arch_vmx->vmcs = vmx_alloc_vmcs())) {
   1.136 -        printk("Failed to create a new VMCS\n");
   1.137 -        return -ENOMEM;
   1.138 -    }
   1.139 -
   1.140 -    __vmx_clear_vmcs(v);
   1.141 -    vmx_load_vmcs(v);
   1.142 -
   1.143 -    if ((error = construct_vmcs_controls(arch_vmx))) {
   1.144 -        printk("construct_vmcs: construct_vmcs_controls failed\n");
   1.145 -        rc = -EINVAL;
   1.146 -        goto err_out;
   1.147 +    if ( (error = construct_vmcs_controls(arch_vmx)) ) {
   1.148 +        printk("construct_vmcs: construct_vmcs_controls failed.\n");
   1.149 +        return error;
   1.150      }
   1.151  
   1.152      /* host selectors */
   1.153 -    if ((error = construct_vmcs_host())) {
   1.154 -        printk("construct_vmcs: construct_vmcs_host failed\n");
   1.155 -        rc = -EINVAL;
   1.156 -        goto err_out;
   1.157 +    if ( (error = construct_vmcs_host()) ) {
   1.158 +        printk("construct_vmcs: construct_vmcs_host failed.\n");
   1.159 +        return error;
   1.160      }
   1.161  
   1.162      /* guest selectors */
   1.163 -    if ((error = construct_init_vmcs_guest(regs))) {
   1.164 -        printk("construct_vmcs: construct_vmcs_guest failed\n");
   1.165 -        rc = -EINVAL;
   1.166 -        goto err_out;
   1.167 +    if ( (error = construct_init_vmcs_guest(regs)) ) {
   1.168 +        printk("construct_vmcs: construct_vmcs_guest failed.\n");
   1.169 +        return error;
   1.170      }
   1.171  
   1.172 -    if ((error |= __vmwrite(EXCEPTION_BITMAP,
   1.173 -                            MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
   1.174 -        printk("construct_vmcs: setting Exception bitmap failed\n");
   1.175 -        rc = -EINVAL;
   1.176 -        goto err_out;
   1.177 +    if ( (error = __vmwrite(EXCEPTION_BITMAP,
   1.178 +                            MONITOR_DEFAULT_EXCEPTION_BITMAP)) ) {
   1.179 +        printk("construct_vmcs: setting exception bitmap failed.\n");
   1.180 +        return error;
   1.181      }
   1.182  
   1.183 -    if (regs->eflags & EF_TF)
   1.184 -        __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
   1.185 +    if ( regs->eflags & EF_TF )
   1.186 +        error = __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
   1.187      else
   1.188 -        __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
   1.189 +        error = __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
   1.190  
   1.191 -    return 0;
   1.192 +    return error;
   1.193 +}
   1.194  
   1.195 -err_out:
   1.196 -    vmx_destroy_vmcs(v);
   1.197 -    return rc;
   1.198 +int vmx_create_vmcs(struct vcpu *v)
   1.199 +{
   1.200 +    if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
   1.201 +        return -ENOMEM;
   1.202 +    __vmx_clear_vmcs(v);
   1.203 +    return 0;
   1.204  }
   1.205  
   1.206  void vmx_destroy_vmcs(struct vcpu *v)
   1.207 @@ -501,14 +501,14 @@ void vmx_destroy_vmcs(struct vcpu *v)
   1.208  
   1.209      vmx_clear_vmcs(v);
   1.210  
   1.211 -    free_vmcs(arch_vmx->vmcs);
   1.212 -    arch_vmx->vmcs = NULL;
   1.213 +    free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER);
   1.214 +    free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER);
   1.215  
   1.216 -    free_xenheap_pages(arch_vmx->io_bitmap_a, get_order_from_bytes(0x1000));
   1.217      arch_vmx->io_bitmap_a = NULL;
   1.218 +    arch_vmx->io_bitmap_b = NULL;
   1.219  
   1.220 -    free_xenheap_pages(arch_vmx->io_bitmap_b, get_order_from_bytes(0x1000));
   1.221 -    arch_vmx->io_bitmap_b = NULL;
   1.222 +    vmx_free_vmcs(arch_vmx->vmcs);
   1.223 +    arch_vmx->vmcs = NULL;
   1.224  }
   1.225  
   1.226  void vm_launch_fail(unsigned long eflags)
   1.227 @@ -547,19 +547,20 @@ void arch_vmx_do_resume(struct vcpu *v)
   1.228  
   1.229  void arch_vmx_do_launch(struct vcpu *v)
   1.230  {
   1.231 -    int error;
   1.232      cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;
   1.233  
   1.234 -    error = construct_vmcs(v, regs);
   1.235 -    if ( error < 0 )
   1.236 +    vmx_load_vmcs(v);
   1.237 +
   1.238 +    if ( construct_vmcs(v, regs) < 0 )
   1.239      {
   1.240 -        if (v->vcpu_id == 0) {
   1.241 -            printk("Failed to construct a new VMCS for BSP.\n");
   1.242 +        if ( v->vcpu_id == 0 ) {
   1.243 +            printk("Failed to construct VMCS for BSP.\n");
   1.244          } else {
   1.245 -            printk("Failed to construct a new VMCS for AP %d\n", v->vcpu_id);
   1.246 +            printk("Failed to construct VMCS for AP %d.\n", v->vcpu_id);
   1.247          }
   1.248          domain_crash_synchronous();
   1.249      }
   1.250 +
   1.251      vmx_do_launch(v);
   1.252      reset_stack_and_jump(vmx_asm_do_vmentry);
   1.253  }
     2.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Wed Jul 05 10:32:33 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Wed Jul 05 11:21:19 2006 +0100
     2.3 @@ -54,34 +54,73 @@ static unsigned long trace_values[NR_CPU
     2.4  static void vmx_ctxt_switch_from(struct vcpu *v);
     2.5  static void vmx_ctxt_switch_to(struct vcpu *v);
     2.6  
     2.7 -void vmx_final_setup_guest(struct vcpu *v)
     2.8 +static int vmx_initialize_guest_resources(struct vcpu *v)
     2.9  {
    2.10 +    struct domain *d = v->domain;
    2.11 +    struct vcpu *vc;
    2.12 +    void *io_bitmap_a, *io_bitmap_b;
    2.13 +    int rc;
    2.14 +
    2.15      v->arch.schedule_tail    = arch_vmx_do_launch;
    2.16      v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
    2.17      v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
    2.18  
    2.19 -    if ( v->vcpu_id == 0 )
    2.20 -    {
    2.21 -        struct domain *d = v->domain;
    2.22 -        struct vcpu *vc;
    2.23 -
    2.24 -        /* Initialize monitor page table */
    2.25 -        for_each_vcpu(d, vc)
    2.26 -            vc->arch.monitor_table = pagetable_null();
    2.27 +    if ( v->vcpu_id != 0 )
    2.28 +        return 1;
    2.29  
    2.30 -        /*
    2.31 -         * Required to do this once per domain
    2.32 -         * XXX todo: add a seperate function to do these.
    2.33 -         */
    2.34 -        memset(&d->shared_info->evtchn_mask[0], 0xff,
    2.35 -               sizeof(d->shared_info->evtchn_mask));
    2.36 +    for_each_vcpu ( d, vc )
    2.37 +    {
    2.38 +        /* Initialize monitor page table */
    2.39 +        vc->arch.monitor_table = pagetable_null();
    2.40  
    2.41 -        /* Put the domain in shadow mode even though we're going to be using
    2.42 -         * the shared 1:1 page table initially. It shouldn't hurt */
    2.43 -        shadow_mode_enable(d,
    2.44 -                           SHM_enable|SHM_refcounts|
    2.45 -                           SHM_translate|SHM_external|SHM_wr_pt_pte);
    2.46 +        memset(&vc->arch.hvm_vmx, 0, sizeof(struct arch_vmx_struct));
    2.47 +
    2.48 +        if ( (rc = vmx_create_vmcs(vc)) != 0 )
    2.49 +        {
    2.50 +            DPRINTK("Failed to create VMCS for vcpu %d: err=%d.\n",
    2.51 +                    vc->vcpu_id, rc);
    2.52 +            return 0;
    2.53 +        }
    2.54 +
    2.55 +        spin_lock_init(&vc->arch.hvm_vmx.vmcs_lock);
    2.56 +
    2.57 +        if ( (io_bitmap_a = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
    2.58 +        {
     2.59 +            DPRINTK("Failed to allocate io bitmap a for vcpu %d.\n",
    2.60 +                    vc->vcpu_id);
    2.61 +            return 0;
    2.62 +        }
    2.63 +
    2.64 +        if ( (io_bitmap_b = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
    2.65 +        {
    2.66 +            DPRINTK("Failed to allocate io bitmap b for vcpu %d.\n",
    2.67 +                    vc->vcpu_id);
    2.68 +            return 0;
    2.69 +        }
    2.70 +
    2.71 +        memset(io_bitmap_a, 0xff, 0x1000);
    2.72 +        memset(io_bitmap_b, 0xff, 0x1000);
    2.73 +
     2.74 +        /* don't bother with debug port access */
    2.75 +        clear_bit(PC_DEBUG_PORT, io_bitmap_a);
    2.76 +
    2.77 +        vc->arch.hvm_vmx.io_bitmap_a = io_bitmap_a;
    2.78 +        vc->arch.hvm_vmx.io_bitmap_b = io_bitmap_b;
    2.79      }
    2.80 +
    2.81 +    /*
     2.82 +     * Required to do this once per domain XXX todo: add a separate function 
    2.83 +     * to do these.
    2.84 +     */
    2.85 +    memset(&d->shared_info->evtchn_mask[0], 0xff,
    2.86 +           sizeof(d->shared_info->evtchn_mask));
    2.87 +
    2.88 +    /* Put the domain in shadow mode even though we're going to be using
    2.89 +     * the shared 1:1 page table initially. It shouldn't hurt */
    2.90 +    shadow_mode_enable(
    2.91 +        d, SHM_enable|SHM_refcounts|SHM_translate|SHM_external|SHM_wr_pt_pte);
    2.92 +
    2.93 +    return 1;
    2.94  }
    2.95  
    2.96  static void vmx_relinquish_guest_resources(struct domain *d)
    2.97 @@ -90,9 +129,9 @@ static void vmx_relinquish_guest_resourc
    2.98  
    2.99      for_each_vcpu ( d, v )
   2.100      {
   2.101 +        vmx_destroy_vmcs(v);
   2.102          if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
   2.103              continue;
   2.104 -        vmx_destroy_vmcs(v);
   2.105          free_monitor_pagetable(v);
   2.106          kill_timer(&v->arch.hvm_vmx.hlt_timer);
   2.107          if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
   2.108 @@ -444,12 +483,6 @@ void stop_vmx(void)
   2.109          __vmxoff();
   2.110  }
   2.111  
   2.112 -int vmx_initialize_guest_resources(struct vcpu *v)
   2.113 -{
   2.114 -    vmx_final_setup_guest(v);
   2.115 -    return 1;
   2.116 -}
   2.117 -
   2.118  void vmx_migrate_timers(struct vcpu *v)
   2.119  {
   2.120      struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
   2.121 @@ -638,58 +671,61 @@ static int check_vmx_controls(u32 ctrls,
   2.122  
   2.123  int start_vmx(void)
   2.124  {
   2.125 +    u32 eax, edx;
   2.126      struct vmcs_struct *vmcs;
   2.127 -    u32 ecx;
   2.128 -    u32 eax, edx;
   2.129 -    u64 phys_vmcs;      /* debugging */
   2.130  
   2.131      /*
   2.132       * Xen does not fill x86_capability words except 0.
   2.133       */
   2.134 -    ecx = cpuid_ecx(1);
   2.135 -    boot_cpu_data.x86_capability[4] = ecx;
   2.136 +    boot_cpu_data.x86_capability[4] = cpuid_ecx(1);
   2.137  
   2.138      if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
   2.139          return 0;
   2.140  
   2.141      rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
   2.142  
   2.143 -    if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) {
   2.144 -        if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) {
   2.145 +    if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
   2.146 +    {
   2.147 +        if ( (eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0 )
   2.148 +        {
   2.149              printk("VMX disabled by Feature Control MSR.\n");
   2.150              return 0;
   2.151          }
   2.152      }
   2.153 -    else {
   2.154 +    else
   2.155 +    {
   2.156          wrmsr(IA32_FEATURE_CONTROL_MSR,
   2.157                IA32_FEATURE_CONTROL_MSR_LOCK |
   2.158                IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
   2.159      }
   2.160  
   2.161 -    if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
   2.162 -                            MSR_IA32_VMX_PINBASED_CTLS_MSR))
   2.163 +    if ( !check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
   2.164 +                             MSR_IA32_VMX_PINBASED_CTLS_MSR) )
   2.165          return 0;
   2.166 -    if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
   2.167 -                            MSR_IA32_VMX_PROCBASED_CTLS_MSR))
   2.168 +    if ( !check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
   2.169 +                             MSR_IA32_VMX_PROCBASED_CTLS_MSR) )
   2.170          return 0;
   2.171 -    if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
   2.172 -                            MSR_IA32_VMX_EXIT_CTLS_MSR))
   2.173 +    if ( !check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
   2.174 +                             MSR_IA32_VMX_EXIT_CTLS_MSR) )
   2.175          return 0;
   2.176 -    if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
   2.177 -                            MSR_IA32_VMX_ENTRY_CTLS_MSR))
   2.178 +    if ( !check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
   2.179 +                             MSR_IA32_VMX_ENTRY_CTLS_MSR) )
   2.180          return 0;
   2.181  
   2.182 -    set_in_cr4(X86_CR4_VMXE);   /* Enable VMXE */
   2.183 +    set_in_cr4(X86_CR4_VMXE);
   2.184  
   2.185 -    if (!(vmcs = vmx_alloc_vmcs())) {
   2.186 -        printk("Failed to allocate VMCS\n");
   2.187 +    vmx_init_vmcs_config();
   2.188 +
   2.189 +    if ( (vmcs = vmx_alloc_host_vmcs()) == NULL )
   2.190 +    {
   2.191 +        printk("Failed to allocate host VMCS\n");
   2.192          return 0;
   2.193      }
   2.194  
   2.195 -    phys_vmcs = (u64) virt_to_maddr(vmcs);
   2.196 -
   2.197 -    if (__vmxon(phys_vmcs)) {
   2.198 +    if ( __vmxon(virt_to_maddr(vmcs)) )
   2.199 +    {
   2.200          printk("VMXON failed\n");
   2.201 +        vmx_free_host_vmcs(vmcs);
   2.202          return 0;
   2.203      }
   2.204  
     3.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jul 05 10:32:33 2006 +0100
     3.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Wed Jul 05 11:21:19 2006 +0100
     3.3 @@ -27,9 +27,7 @@
     3.4  extern int start_vmx(void);
     3.5  extern void stop_vmx(void);
     3.6  extern void vmcs_dump_vcpu(void);
     3.7 -void vmx_final_setup_guest(struct vcpu *v);
     3.8 -
     3.9 -void vmx_enter_scheduler(void);
    3.10 +extern void vmx_init_vmcs_config(void);
    3.11  
    3.12  enum {
    3.13      VMX_CPU_STATE_PAE_ENABLED=0,
    3.14 @@ -46,8 +44,6 @@ struct vmcs_struct {
    3.15      unsigned char data [0]; /* vmcs size is read from MSR */
    3.16  };
    3.17  
    3.18 -extern int vmcs_size;
    3.19 -
    3.20  enum {
    3.21      VMX_INDEX_MSR_LSTAR = 0,
    3.22      VMX_INDEX_MSR_STAR,
    3.23 @@ -64,6 +60,10 @@ struct vmx_msr_state {
    3.24      unsigned long shadow_gs;
    3.25  };
    3.26  
    3.27 +/* io bitmap is 4KBytes in size */
    3.28 +#define IO_BITMAP_SIZE      0x1000
    3.29 +#define IO_BITMAP_ORDER     (get_order_from_bytes(IO_BITMAP_SIZE))
    3.30 +
    3.31  struct arch_vmx_struct {
    3.32      /* Virtual address of VMCS. */
    3.33      struct vmcs_struct  *vmcs;
    3.34 @@ -101,7 +101,10 @@ struct arch_vmx_struct {
    3.35  
    3.36  void vmx_do_resume(struct vcpu *);
    3.37  
    3.38 -struct vmcs_struct *vmx_alloc_vmcs(void);
    3.39 +struct vmcs_struct *vmx_alloc_host_vmcs(void);
    3.40 +void vmx_free_host_vmcs(struct vmcs_struct *vmcs);
    3.41 +
    3.42 +int vmx_create_vmcs(struct vcpu *v);
    3.43  void vmx_destroy_vmcs(struct vcpu *v);
    3.44  void vmx_vmcs_enter(struct vcpu *v);
    3.45  void vmx_vmcs_exit(struct vcpu *v);