changeset 15589:e704430b5b32

x86: Various cleanups around CR4 handling, cpu_possible_map, and VMX initialisation.

Rename the HVM suspend_cpu/resume_cpu hooks to cpu_down/cpu_up and allow
cpu_up to fail; fold the VMX feature-control MSR check, VMCS configuration,
host-VMCS allocation and VMXON into vmx_cpu_up(), and make start_vmx() safe
to call on every CPU; manage CR4 through mmu_cr4_features and write_cr4(),
dropping the now-unused clear_in_cr4(); build play_dead() unconditionally;
stop defaulting cpu_possible_map to CPU_MASK_ALL under CONFIG_HOTPLUG_CPU;
and install interrupt stubs only for vectors at or above 0x20.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Thu Jul 12 11:49:02 2007 +0100 (2007-07-12)
parents bd2f9628114e
children b27add01a929
files xen/arch/x86/acpi/power.c xen/arch/x86/cpu/common.c xen/arch/x86/crash.c xen/arch/x86/domain.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/i8259.c xen/arch/x86/machine_kexec.c xen/arch/x86/mm.c xen/arch/x86/shutdown.c xen/arch/x86/smp.c xen/arch/x86/smpboot.c xen/include/asm-x86/hvm/hvm.h xen/include/asm-x86/hvm/support.h xen/include/asm-x86/hvm/vmx/vmcs.h xen/include/asm-x86/page.h xen/include/asm-x86/processor.h xen/include/xen/cpumask.h
--- a/xen/arch/x86/acpi/power.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/acpi/power.c	Thu Jul 12 11:49:02 2007 +0100
@@ -118,7 +118,7 @@ int enter_state(u32 state)
     
     freeze_domains();
 
-    hvm_suspend_cpu();
+    hvm_cpu_down();
 
     pmprintk(XENLOG_INFO, "PM: Preparing system for %s sleep\n",
         acpi_states[state]);
@@ -152,7 +152,8 @@ int enter_state(u32 state)
  Done:
     local_irq_restore(flags);
 
-    hvm_resume_cpu();
+    if ( !hvm_cpu_up() )
+        BUG();
 
     thaw_domains();
     spin_unlock(&pm_lock);
--- a/xen/arch/x86/cpu/common.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/cpu/common.c	Thu Jul 12 11:49:02 2007 +0100
@@ -557,9 +557,6 @@ void __devinit cpu_init(void)
 	}
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 
-	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
-		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-
 	*(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
 	*(unsigned long  *)(&gdt_load[2]) = GDT_VIRT_START(current);
 	__asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
--- a/xen/arch/x86/crash.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/crash.c	Thu Jul 12 11:49:02 2007 +0100
@@ -43,7 +43,7 @@ static int crash_nmi_callback(struct cpu
     kexec_crash_save_cpu();
     disable_local_APIC();
     atomic_dec(&waiting_for_crash_ipi);
-    hvm_disable();
+    hvm_cpu_down();
 
     for ( ; ; )
         __asm__ __volatile__ ( "hlt" );
@@ -99,7 +99,7 @@ void machine_crash_shutdown(void)
 
     disable_IO_APIC();
 
-    hvm_disable();
+    hvm_cpu_down();
 
     info = kexec_crash_save_info();
     info->dom0_pfn_to_mfn_frame_list_list =
--- a/xen/arch/x86/domain.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/domain.c	Thu Jul 12 11:49:02 2007 +0100
@@ -43,6 +43,7 @@
 #include <asm/hvm/hvm.h>
 #include <asm/hvm/support.h>
 #include <asm/msr.h>
+#include <asm/nmi.h>
 #ifdef CONFIG_COMPAT
 #include <compat/vcpu.h>
 #endif
@@ -76,10 +77,7 @@ static void default_idle(void)
         local_irq_enable();
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-#include <asm/nmi.h>
-/* We don't actually take CPU down, just spin without interrupts. */
-static inline void play_dead(void)
+static void play_dead(void)
 {
     __cpu_disable();
     /* This must be done before dead CPU ack */
@@ -94,12 +92,6 @@ static inline void play_dead(void)
     for ( ; ; )
         halt();
 }
-#else
-static inline void play_dead(void)
-{
-    BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void idle_loop(void)
 {
--- a/xen/arch/x86/hvm/hvm.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Thu Jul 12 11:49:02 2007 +0100
@@ -76,11 +76,6 @@ void hvm_enable(struct hvm_function_tabl
     hvm_enabled = 1;
 }
 
-void hvm_disable(void)
-{
-    hvm_suspend_cpu();
-}
-
 void hvm_stts(struct vcpu *v)
 {
     /* FPU state already dirty? Then no need to setup_fpu() lazily. */
--- a/xen/arch/x86/hvm/svm/svm.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Thu Jul 12 11:49:02 2007 +0100
@@ -94,7 +94,7 @@ static void svm_inject_exception(struct 
     vmcb->eventinj = event;
 }
 
-static void svm_suspend_cpu(void)
+static void svm_cpu_down(void)
 {
     write_efer(read_efer() & ~EFER_SVME);
 }
@@ -973,7 +973,7 @@ static int svm_event_injection_faulted(s
 
 static struct hvm_function_table svm_function_table = {
     .name                 = "SVM",
-    .suspend_cpu          = svm_suspend_cpu,
+    .cpu_down             = svm_cpu_down,
     .domain_initialise    = svm_domain_initialise,
     .domain_destroy       = svm_domain_destroy,
     .vcpu_initialise      = svm_vcpu_initialise,
--- a/xen/arch/x86/hvm/vmx/vmcs.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmcs.c	Thu Jul 12 11:49:02 2007 +0100
@@ -66,7 +66,7 @@ static u32 adjust_vmx_controls(u32 ctl_m
     return ctl;
 }
 
-void vmx_init_vmcs_config(void)
+static void vmx_init_vmcs_config(void)
 {
     u32 vmx_msr_low, vmx_msr_high, min, opt;
     u32 _vmx_pin_based_exec_control;
@@ -130,8 +130,9 @@ void vmx_init_vmcs_config(void)
 
     rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
 
-    if ( smp_processor_id() == 0 )
+    if ( !vmx_pin_based_exec_control )
     {
+        /* First time through. */
         vmcs_revision_id = vmx_msr_low;
         vmx_pin_based_exec_control = _vmx_pin_based_exec_control;
         vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
@@ -142,6 +143,7 @@ void vmx_init_vmcs_config(void)
     }
     else
     {
+        /* Globals are already initialised: re-check them. */
        BUG_ON(vmcs_revision_id != vmx_msr_low);
        BUG_ON(vmx_pin_based_exec_control != _vmx_pin_based_exec_control);
        BUG_ON(vmx_cpu_based_exec_control != _vmx_cpu_based_exec_control);
@@ -189,7 +191,7 @@ static void __vmx_clear_vmcs(void *info)
     struct vcpu *v = info;
     struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
 
-    /* Otherwise we can nest (vmx_suspend_cpu() vs. vmx_clear_vmcs()). */
+    /* Otherwise we can nest (vmx_cpu_down() vs. vmx_clear_vmcs()). */
     ASSERT(!local_irq_is_enabled());
 
     if ( arch_vmx->active_cpu == smp_processor_id() )
@@ -234,7 +236,54 @@ static void vmx_load_vmcs(struct vcpu *v
     local_irq_restore(flags);
 }
 
-void vmx_suspend_cpu(void)
+int vmx_cpu_up(void)
+{
+    u32 eax, edx;
+    int cpu = smp_processor_id();
+
+    BUG_ON(!(read_cr4() & X86_CR4_VMXE));
+
+    rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
+
+    if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
+    {
+        if ( !(eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) )
+        {
+            printk("CPU%d: VMX disabled\n", cpu);
+            return 0;
+        }
+    }
+    else
+    {
+        wrmsr(IA32_FEATURE_CONTROL_MSR,
+              IA32_FEATURE_CONTROL_MSR_LOCK |
+              IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
+    }
+
+    vmx_init_vmcs_config();
+
+    INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
+
+    if ( this_cpu(host_vmcs) == NULL )
+    {
+        this_cpu(host_vmcs) = vmx_alloc_vmcs();
+        if ( this_cpu(host_vmcs) == NULL )
+        {
+            printk("CPU%d: Could not allocate host VMCS\n", cpu);
+            return 0;
+        }
+    }
+
+    if ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
+    {
+        printk("CPU%d: VMXON failed\n", cpu);
+        return 0;
+    }
+
+    return 1;
+}
+
+void vmx_cpu_down(void)
 {
     struct list_head *active_vmcs_list = &this_cpu(active_vmcs_list);
     unsigned long flags;
@@ -245,25 +294,12 @@ void vmx_suspend_cpu(void)
         __vmx_clear_vmcs(list_entry(active_vmcs_list->next,
                                     struct vcpu, arch.hvm_vmx.active_list));
 
-    if ( read_cr4() & X86_CR4_VMXE )
-    {
-        __vmxoff();
-        clear_in_cr4(X86_CR4_VMXE);
-    }
+    BUG_ON(!(read_cr4() & X86_CR4_VMXE));
+    __vmxoff();
 
     local_irq_restore(flags);
 }
 
-void vmx_resume_cpu(void)
-{
-    if ( !read_cr4() & X86_CR4_VMXE )
-    {
-        set_in_cr4(X86_CR4_VMXE);
-        if ( __vmxon(virt_to_maddr(this_cpu(host_vmcs))) )
-            BUG();
-    }
-}
-
 void vmx_vmcs_enter(struct vcpu *v)
 {
     /*
@@ -294,21 +330,6 @@ void vmx_vmcs_exit(struct vcpu *v)
     vcpu_unpause(v);
 }
 
-struct vmcs_struct *vmx_alloc_host_vmcs(void)
-{
-    ASSERT(this_cpu(host_vmcs) == NULL);
-    this_cpu(host_vmcs) = vmx_alloc_vmcs();
-    INIT_LIST_HEAD(&this_cpu(active_vmcs_list));
-    return this_cpu(host_vmcs);
-}
-
-void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
-{
-    ASSERT(vmcs == this_cpu(host_vmcs));
-    vmx_free_vmcs(vmcs);
-    this_cpu(host_vmcs) = NULL;
-}
-
 struct xgt_desc {
     unsigned short size;
     unsigned long address __attribute__((packed));
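
The vmx_init_vmcs_config() change above swaps the smp_processor_id() == 0 test for a "first caller initialises, later callers verify" idiom, so whichever CPU arrives first records the VMX capabilities and every later CPU is checked against them; it relies on a valid pin-based control word never being zero (the required external-interrupt and NMI exit bits guarantee that). A minimal standalone sketch of the pattern, with hypothetical names:

    static u32 agreed_ctls;                    /* zero until the first CPU probes */

    static void record_or_verify(u32 probed)
    {
        if ( !agreed_ctls )
            agreed_ctls = probed;              /* first time through: record */
        else
            BUG_ON(agreed_ctls != probed);     /* later CPUs must match */
    }
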
--- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Jul 12 11:49:02 2007 +0100
@@ -1262,67 +1262,45 @@ static struct hvm_function_table vmx_fun
     .init_ap_context      = vmx_init_ap_context,
     .init_hypercall_page  = vmx_init_hypercall_page,
     .event_injection_faulted = vmx_event_injection_faulted,
-    .suspend_cpu          = vmx_suspend_cpu,
-    .resume_cpu           = vmx_resume_cpu,
+    .cpu_up               = vmx_cpu_up,
+    .cpu_down             = vmx_cpu_down,
 };
 
-int start_vmx(void)
+void start_vmx(void)
 {
-    u32 eax, edx;
-    struct vmcs_struct *vmcs;
-
-    /*
-     * Xen does not fill x86_capability words except 0.
-     */
+    static int bootstrapped;
+
+    if ( bootstrapped )
+    {
+        if ( hvm_enabled && !vmx_cpu_up() )
+        {
+            printk("VMX: FATAL: failed to initialise CPU%d!\n",
+                   smp_processor_id());
+            BUG();
+        }
+        return;
+    }
+
+    bootstrapped = 1;
+
+    /* Xen does not fill x86_capability words except 0. */
     boot_cpu_data.x86_capability[4] = cpuid_ecx(1);
 
     if ( !test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability) )
-        return 0;
-
-    rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
-
-    if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
-    {
-        if ( (eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0 )
-        {
-            printk("VMX disabled by Feature Control MSR.\n");
-            return 0;
-        }
-    }
-    else
-    {
-        wrmsr(IA32_FEATURE_CONTROL_MSR,
-              IA32_FEATURE_CONTROL_MSR_LOCK |
-              IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
-    }
+        return;
 
     set_in_cr4(X86_CR4_VMXE);
 
-    vmx_init_vmcs_config();
-
-    if ( smp_processor_id() == 0 )
-        setup_vmcs_dump();
-
-    if ( (vmcs = vmx_alloc_host_vmcs()) == NULL )
+    if ( !vmx_cpu_up() )
     {
-        clear_in_cr4(X86_CR4_VMXE);
-        printk("Failed to allocate host VMCS\n");
-        return 0;
+        printk("VMX: failed to initialise.\n");
+        return;
     }
 
-    if ( __vmxon(virt_to_maddr(vmcs)) )
-    {
-        clear_in_cr4(X86_CR4_VMXE);
-        printk("VMXON failed\n");
-        vmx_free_host_vmcs(vmcs);
-        return 0;
-    }
+    setup_vmcs_dump();
 
     vmx_save_host_msrs();
 
-    if ( smp_processor_id() != 0 )
-        return 1;
-
     hvm_enable(&vmx_function_table);
 
     if ( cpu_has_vmx_msr_bitmap )
@@ -1339,8 +1317,6 @@ int start_vmx(void)
         disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP);
         disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP);
     }
-
-    return 1;
 }
 
 /*
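
start_vmx() now runs on every CPU rather than only the boot processor: the first caller performs the one-time setup (CPUID probe, CR4.VMXE, setup_vmcs_dump(), hvm_enable()) and later callers only bring their own CPU up, crashing hard if a secondary CPU fails once HVM is already enabled. The once-only bootstrap idiom, sketched standalone with hypothetical helpers:

    static int bootstrapped;          /* zero until the first caller runs */

    void cpu_feature_init(void)       /* hypothetical: invoked on every CPU */
    {
        if ( !bootstrapped )
        {
            bootstrapped = 1;
            one_time_setup();         /* hypothetical global initialisation */
        }
        per_cpu_setup();              /* hypothetical per-CPU initialisation */
    }
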
--- a/xen/arch/x86/i8259.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/i8259.c	Thu Jul 12 11:49:02 2007 +0100
@@ -397,7 +397,8 @@ void __init init_IRQ(void)
         irq_desc[i].depth   = 1;
         spin_lock_init(&irq_desc[i].lock);
         cpus_setall(irq_desc[i].affinity);
-        set_intr_gate(i, interrupt[i]);
+        if ( i >= 0x20 )
+            set_intr_gate(i, interrupt[i]);
     }
 
     for ( i = 0; i < 16; i++ )
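
The new guard in init_IRQ() leaves vectors 0x00-0x1f alone: those are the x86 processor exception vectors, whose gates are installed by the trap-handling code, so only external-interrupt vectors get the common interrupt[] stubs. Sketched with an assumed symbolic name for the boundary:

    #define FIRST_EXTERNAL_VECTOR 0x20       /* assumed name; 0x00-0x1f are CPU exceptions */

    if ( i >= FIRST_EXTERNAL_VECTOR )
        set_intr_gate(i, interrupt[i]);      /* external IRQ stubs only */
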
--- a/xen/arch/x86/machine_kexec.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/machine_kexec.c	Thu Jul 12 11:49:02 2007 +0100
@@ -82,10 +82,8 @@ static void __machine_reboot_kexec(void 
 
     smp_send_stop();
 
-#ifdef CONFIG_X86_IO_APIC
     disable_IO_APIC();
-#endif
-    hvm_disable();
+    hvm_cpu_down();
 
     machine_kexec(image);
 }
--- a/xen/arch/x86/mm.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/mm.c	Thu Jul 12 11:49:02 2007 +0100
@@ -3642,8 +3642,6 @@ static void __memguard_change_range(void
     unsigned long flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
 
     /* Ensure we are dealing with a page-aligned whole number of pages. */
-    ASSERT((_p&PAGE_MASK) != 0);
-    ASSERT((_l&PAGE_MASK) != 0);
     ASSERT((_p&~PAGE_MASK) == 0);
     ASSERT((_l&~PAGE_MASK) == 0);
 
--- a/xen/arch/x86/shutdown.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/shutdown.c	Thu Jul 12 11:49:02 2007 +0100
@@ -222,7 +222,7 @@ void machine_restart(char *cmd)
      */
     smp_send_stop();
     disable_IO_APIC();
-    hvm_disable();
+    hvm_cpu_down();
 
     /* Rebooting needs to touch the page at absolute address 0. */
     *((unsigned short *)__va(0x472)) = reboot_mode;
--- a/xen/arch/x86/smp.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/smp.c	Thu Jul 12 11:49:02 2007 +0100
@@ -310,7 +310,7 @@ static void stop_this_cpu (void *dummy)
 
     local_irq_disable();
     disable_local_APIC();
-    hvm_disable();
+    hvm_cpu_down();
 
     for ( ; ; )
         __asm__ __volatile__ ( "hlt" );
--- a/xen/arch/x86/smpboot.c	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/arch/x86/smpboot.c	Thu Jul 12 11:49:02 2007 +0100
@@ -87,11 +87,7 @@ EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
 EXPORT_SYMBOL(cpu_callout_map);
-#ifdef CONFIG_HOTPLUG_CPU
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-#else
 cpumask_t cpu_possible_map;
-#endif
 EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
--- a/xen/include/asm-x86/hvm/hvm.h	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/hvm/hvm.h	Thu Jul 12 11:49:02 2007 +0100
@@ -156,8 +156,8 @@ struct hvm_function_table {
 
     int  (*event_injection_faulted)(struct vcpu *v);
 
-    void (*suspend_cpu)(void);
-    void (*resume_cpu)(void);
+    int  (*cpu_up)(void);
+    void (*cpu_down)(void);
 };
 
 extern struct hvm_function_table hvm_funcs;
@@ -314,16 +314,17 @@ static inline int hvm_event_injection_fa
 /* These exceptions must always be intercepted. */
 #define HVM_TRAP_MASK (1U << TRAP_machine_check)
 
-static inline void hvm_suspend_cpu(void)
+static inline int hvm_cpu_up(void)
 {
-    if ( hvm_funcs.suspend_cpu )
-        hvm_funcs.suspend_cpu();
+    if ( hvm_funcs.cpu_up )
+        return hvm_funcs.cpu_up();
+    return 1;
 }
 
-static inline void hvm_resume_cpu(void)
+static inline void hvm_cpu_down(void)
 {
-    if ( hvm_funcs.resume_cpu )
-        hvm_funcs.resume_cpu();
+    if ( hvm_funcs.cpu_down )
+        hvm_funcs.cpu_down();
 }
 
 #endif /* __ASM_X86_HVM_HVM_H__ */
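
The replacement hooks have an asymmetric contract: cpu_up may fail and returns a success flag (defaulting to success when no hook is registered), while cpu_down is void and must always succeed. A sketch of the intended caller pattern, mirroring the enter_state() change above:

    hvm_cpu_down();               /* teardown cannot fail */
    /* ... CPU sleeps, then resumes ... */
    if ( !hvm_cpu_up() )          /* bring-up can fail ... */
        BUG();                    /* ... but this caller cannot tolerate it */
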
--- a/xen/include/asm-x86/hvm/support.h	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/hvm/support.h	Thu Jul 12 11:49:02 2007 +0100
@@ -217,7 +217,6 @@ int hvm_load(struct domain *d, hvm_domai
 extern char hvm_io_bitmap[];
 
 void hvm_enable(struct hvm_function_table *);
-void hvm_disable(void);
 
 int hvm_copy_to_guest_phys(paddr_t paddr, void *buf, int size);
 int hvm_copy_from_guest_phys(void *buf, paddr_t paddr, int size);
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Jul 12 11:49:02 2007 +0100
@@ -24,12 +24,11 @@
 #include <asm/hvm/vmx/cpu.h>
 #include <public/hvm/vmx_assist.h>
 
-extern int start_vmx(void);
+extern void start_vmx(void);
 extern void vmcs_dump_vcpu(void);
-extern void vmx_init_vmcs_config(void);
 extern void setup_vmcs_dump(void);
-extern void vmx_suspend_cpu(void);
-extern void vmx_resume_cpu(void);
+extern int  vmx_cpu_up(void);
+extern void vmx_cpu_down(void);
 
 struct vmcs_struct {
     u32 vmcs_revision_id;
@@ -89,9 +88,6 @@ struct arch_vmx_struct {
     unsigned char        pm_irqbase[2];
 };
 
-struct vmcs_struct *vmx_alloc_host_vmcs(void);
-void vmx_free_host_vmcs(struct vmcs_struct *vmcs);
-
 int vmx_create_vmcs(struct vcpu *v);
 void vmx_destroy_vmcs(struct vcpu *v);
 void vmx_vmcs_enter(struct vcpu *v);
--- a/xen/include/asm-x86/page.h	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/page.h	Thu Jul 12 11:49:02 2007 +0100
@@ -294,19 +294,8 @@ void paging_init(void);
 void setup_idle_pagetable(void);
 #endif /* !defined(__ASSEMBLY__) */
 
-#define __pge_off()                                                     \
-    do {                                                                \
-        __asm__ __volatile__(                                           \
-            "mov %0, %%cr4;  # turn off PGE     "                       \
-            : : "r" (mmu_cr4_features & ~X86_CR4_PGE) );                \
-        } while ( 0 )
-
-#define __pge_on()                                                      \
-    do {                                                                \
-        __asm__ __volatile__(                                           \
-            "mov %0, %%cr4;  # turn off PGE     "                       \
-            : : "r" (mmu_cr4_features) );                               \
-    } while ( 0 )
+#define __pge_off() write_cr4(mmu_cr4_features & ~X86_CR4_PGE)
+#define __pge_on()  write_cr4(mmu_cr4_features)
 
 #define _PAGE_PRESENT  0x001U
 #define _PAGE_RW       0x002U
--- a/xen/include/asm-x86/processor.h	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/asm-x86/processor.h	Thu Jul 12 11:49:02 2007 +0100
@@ -331,24 +331,8 @@ extern unsigned long mmu_cr4_features;
 
 static always_inline void set_in_cr4 (unsigned long mask)
 {
-    unsigned long dummy;
     mmu_cr4_features |= mask;
-    __asm__ __volatile__ (
-        "mov %%cr4,%0\n\t"
-        "or %1,%0\n\t"
-        "mov %0,%%cr4\n"
-        : "=&r" (dummy) : "irg" (mask) );
-}
-
-static always_inline void clear_in_cr4 (unsigned long mask)
-{
-    unsigned long dummy;
-    mmu_cr4_features &= ~mask;
-    __asm__ __volatile__ (
-        "mov %%cr4,%0\n\t"
-        "and %1,%0\n\t"
-        "mov %0,%%cr4\n"
-        : "=&r" (dummy) : "irg" (~mask) );
+    write_cr4(mmu_cr4_features);
 }
 
 /*
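
With this change mmu_cr4_features is treated as the master copy of CR4: set_in_cr4() just ORs the bits in and pushes the whole word to hardware, the __pge_off()/__pge_on() macros in page.h do the same for the PGE toggle, and clear_in_cr4() disappears along with its last callers (cpu_init() and the VMX teardown paths). A minimal sketch of the read_cr4()/write_cr4() accessors this relies on, assuming the usual mov-to/from-CR4 definitions found elsewhere in these headers:

    static inline unsigned long read_cr4(void)
    {
        unsigned long cr4;
        __asm__ __volatile__ ( "mov %%cr4,%0" : "=r" (cr4) );
        return cr4;
    }

    static inline void write_cr4(unsigned long val)
    {
        __asm__ __volatile__ ( "mov %0,%%cr4" : : "r" (val) );
    }
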
--- a/xen/include/xen/cpumask.h	Thu Jul 12 10:06:44 2007 +0100
+++ b/xen/include/xen/cpumask.h	Thu Jul 12 11:49:02 2007 +0100
@@ -305,7 +305,7 @@ static inline int __cpulist_scnprintf(ch
  * bitmap of size NR_CPUS.
  *
  *  #ifdef CONFIG_HOTPLUG_CPU
- *     cpu_possible_map - all NR_CPUS bits set
+ *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
  *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
  *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
  *  #else