ia64/xen-unstable

changeset 16267:4034317507de

x86: allow pv guests to disable TSC for applications

Linux, under CONFIG_SECCOMP, has been capable of hiding the TSC from
processes for quite a while. This patch makes that actually work for
pv kernels by allowing them to control CR4.TSD (and, since it is
trivial to support at the same time, CR4.DE).
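
For illustration, a hedged user-space sketch of what the feature enables,
assuming a kernel of this era where SECCOMP_MODE_STRICT sets TIF_NOTSC and
hence CR4.TSD (PR_SET_SECCOMP is prctl 22; on_segv is a hypothetical name):
once the TSC is hidden, RDTSC at CPL3 raises #GP, which the process sees as
SIGSEGV.

    #include <signal.h>
    #include <stdint.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    #ifndef PR_SET_SECCOMP
    #define PR_SET_SECCOMP 22        /* assumption: matches linux/prctl.h */
    #endif

    static void on_segv(int sig)
    {
        /* write() and _exit() remain permitted under strict seccomp. */
        static const char msg[] = "rdtsc trapped: TSC hidden\n";
        (void)sig;
        write(1, msg, sizeof(msg) - 1);
        _exit(0);
    }

    int main(void)
    {
        uint32_t lo, hi;

        signal(SIGSEGV, on_segv);
        prctl(PR_SET_SECCOMP, 1);    /* SECCOMP_MODE_STRICT */
        asm volatile ( "rdtsc" : "=a" (lo), "=d" (hi) ); /* #GP if CR4.TSD set */
        _exit(1);                    /* reached only if the TSC was not hidden */
    }

Before this changeset, a pv kernel could set TIF_NOTSC but its CR4.TSD write
had no effect, so the RDTSC above would still have succeeded.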

Applies cleanly only on top of the previously submitted debug register
handling patch.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

Also clean up CR4 and EFER handling, and hack-n-slash header file
inclusion madness to get the tree building again.

Signed-off-by: Keir Fraser <keir@xensource.com>
author Keir Fraser <keir@xensource.com>
date Mon Oct 29 16:49:02 2007 +0000 (2007-10-29)
parents ba8c2bbaad79
children 09d8b6eb3131
files xen/arch/x86/acpi/boot.c xen/arch/x86/acpi/power.c xen/arch/x86/domain.c xen/arch/x86/flushtlb.c xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/setup.c xen/arch/x86/smp.c xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/include/asm-x86/amd-iommu.h xen/include/asm-x86/apic.h xen/include/asm-x86/domain.h xen/include/asm-x86/hvm/io.h xen/include/asm-x86/hvm/irq.h xen/include/asm-x86/io_apic.h xen/include/asm-x86/iommu.h xen/include/asm-x86/msr.h xen/include/asm-x86/page.h xen/include/asm-x86/processor.h xen/include/asm-x86/smp.h xen/include/asm-x86/x86_32/elf.h xen/include/asm-x86/x86_64/elf.h
line diff
     1.1 --- a/xen/arch/x86/acpi/boot.c	Mon Oct 29 15:05:27 2007 +0000
     1.2 +++ b/xen/arch/x86/acpi/boot.c	Mon Oct 29 16:49:02 2007 +0000
     1.3 @@ -36,6 +36,7 @@
     1.4  #include <asm/apic.h>
     1.5  #include <asm/io.h>
     1.6  #include <asm/mpspec.h>
     1.7 +#include <asm/processor.h>
     1.8  #include <mach_apic.h>
     1.9  #include <mach_mpparse.h>
    1.10  
     2.1 --- a/xen/arch/x86/acpi/power.c	Mon Oct 29 15:05:27 2007 +0000
     2.2 +++ b/xen/arch/x86/acpi/power.c	Mon Oct 29 16:49:02 2007 +0000
     2.3 @@ -155,6 +155,10 @@ static int enter_state(u32 state)
     2.4  
     2.5      pmprintk(XENLOG_DEBUG, "Back to C.");
     2.6  
     2.7 +    /* Restore CR4 and EFER from cached values. */
     2.8 +    write_cr4(read_cr4());
     2.9 +    write_efer(read_efer());
    2.10 +
    2.11      device_power_up();
    2.12  
    2.13      pmprintk(XENLOG_INFO, "Finishing wakeup from ACPI S%d state.", state);
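
The write_cr4(read_cr4()) and write_efer(read_efer()) pairs above look like
no-ops, but with this changeset both read functions return per-CPU cached
values while the write functions push the cache back to the hardware, so the
pair reloads the real CR4 and EFER (reset across S3 sleep) from the caches.
A minimal user-space sketch of the pattern, with hw_cr4 as a stand-in for
the physical register (both names are illustrative only):

    #include <assert.h>

    static unsigned long hw_cr4;        /* stand-in for the physical register */
    static unsigned long cached_cr4;    /* per-CPU shadow in the real patch */

    static unsigned long read_cr4(void)    { return cached_cr4; } /* no hw access */
    static void write_cr4(unsigned long v) { cached_cr4 = v; hw_cr4 = v; }

    int main(void)
    {
        write_cr4(0x6f0);       /* boot path seeds cache and hardware together */
        hw_cr4 = 0;             /* S3 resume: hardware state lost, cache intact */
        write_cr4(read_cr4());  /* the "no-op" above: restore hw from the cache */
        assert(hw_cr4 == 0x6f0);
        return 0;
    }

The same invariant is why setup.c and smpboot.c below seed this_cpu(cr4) and
this_cpu(efer) early on each CPU, and why msr.h's read_efer() can drop its
lazy-initialisation check.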
     3.1 --- a/xen/arch/x86/domain.c	Mon Oct 29 15:05:27 2007 +0000
     3.2 +++ b/xen/arch/x86/domain.c	Mon Oct 29 16:49:02 2007 +0000
     3.3 @@ -50,7 +50,8 @@
     3.4  #endif
     3.5  
     3.6  DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
     3.7 -DEFINE_PER_CPU(__u64, efer);
     3.8 +DEFINE_PER_CPU(u64, efer);
     3.9 +DEFINE_PER_CPU(unsigned long, cr4);
    3.10  
    3.11  static void unmap_vcpu_info(struct vcpu *v);
    3.12  
    3.13 @@ -413,6 +414,8 @@ int vcpu_initialise(struct vcpu *v)
    3.14              v->arch.schedule_tail = continue_idle_domain;
    3.15              v->arch.cr3           = __pa(idle_pg_table);
    3.16          }
    3.17 +
    3.18 +        v->arch.guest_context.ctrlreg[4] = mmu_cr4_features;
    3.19      }
    3.20  
    3.21      v->arch.perdomain_ptes =
    3.22 @@ -568,13 +571,28 @@ void arch_domain_destroy(struct domain *
    3.23      free_xenheap_page(d->shared_info);
    3.24  }
    3.25  
    3.26 +unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4)
    3.27 +{
    3.28 +    unsigned long hv_cr4 = read_cr4(), hv_cr4_mask = ~X86_CR4_TSD;
    3.29 +    if ( cpu_has_de )
    3.30 +        hv_cr4_mask &= ~X86_CR4_DE;
    3.31 +
    3.32 +    if ( (guest_cr4 & hv_cr4_mask) !=
    3.33 +         (hv_cr4 & hv_cr4_mask & ~(X86_CR4_PGE|X86_CR4_PSE)) )
    3.34 +        gdprintk(XENLOG_WARNING,
    3.35 +                 "Attempt to change CR4 flags %08lx -> %08lx\n",
    3.36 +                 hv_cr4 & ~(X86_CR4_PGE|X86_CR4_PSE), guest_cr4);
    3.37 +
    3.38 +    return  (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
    3.39 +}
    3.40 +
    3.41  /* This is called by arch_final_setup_guest and do_boot_vcpu */
    3.42  int arch_set_info_guest(
    3.43      struct vcpu *v, vcpu_guest_context_u c)
    3.44  {
    3.45      struct domain *d = v->domain;
    3.46      unsigned long cr3_pfn = INVALID_MFN;
    3.47 -    unsigned long flags;
    3.48 +    unsigned long flags, cr4;
    3.49      int i, rc = 0, compat;
    3.50  
    3.51      /* The context is a compat-mode one if the target domain is compat-mode;
    3.52 @@ -665,6 +683,10 @@ int arch_set_info_guest(
    3.53      /* Ensure real hardware interrupts are enabled. */
    3.54      v->arch.guest_context.user_regs.eflags |= EF_IE;
    3.55  
    3.56 +    cr4 = v->arch.guest_context.ctrlreg[4];
    3.57 +    v->arch.guest_context.ctrlreg[4] =
    3.58 +        (cr4 == 0) ? mmu_cr4_features : pv_guest_cr4_fixup(cr4);
    3.59 +
    3.60      if ( v->is_initialised )
    3.61          goto out;
    3.62  
    3.63 @@ -1194,6 +1216,9 @@ static void paravirt_ctxt_switch_to(stru
    3.64  {
    3.65      set_int80_direct_trap(v);
    3.66      switch_kernel_stack(v);
    3.67 +
    3.68 +    if ( unlikely(read_cr4() != v->arch.guest_context.ctrlreg[4]) )
    3.69 +        write_cr4(v->arch.guest_context.ctrlreg[4]);
    3.70  }
    3.71  
    3.72  #define loaddebug(_v,_reg) \
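
pv_guest_cr4_fixup() above lets the guest own only the TSD bit (plus DE when
the CPU supports it) and forces every other bit to the hypervisor's value,
warning when the guest asked for more. A standalone sketch of the masking
rule, assuming a DE-capable CPU (fixup and the example bit values are
illustrative):

    #include <assert.h>

    #define X86_CR4_TSD 0x0004
    #define X86_CR4_DE  0x0008
    #define X86_CR4_PGE 0x0080

    /* Same rule as pv_guest_cr4_fixup(): guest controls only TSD and DE. */
    static unsigned long fixup(unsigned long hv_cr4, unsigned long guest_cr4)
    {
        unsigned long hv_cr4_mask = ~(unsigned long)(X86_CR4_TSD | X86_CR4_DE);
        return (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
    }

    int main(void)
    {
        unsigned long hv = 0x06f0;     /* example hypervisor CR4, PGE set */
        /* Guest sets TSD and tries to clear PGE: TSD sticks, PGE does not. */
        unsigned long out =
            fixup(hv, (hv | X86_CR4_TSD) & ~(unsigned long)X86_CR4_PGE);
        assert(out == (hv | X86_CR4_TSD));
        return 0;
    }

paravirt_ctxt_switch_to() then loads the per-vcpu CR4 image on every context
switch, so the TSD setting follows the vcpu rather than the physical CPU.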
     4.1 --- a/xen/arch/x86/flushtlb.c	Mon Oct 29 15:05:27 2007 +0000
     4.2 +++ b/xen/arch/x86/flushtlb.c	Mon Oct 29 16:49:02 2007 +0000
     4.3 @@ -83,9 +83,12 @@ void write_cr3(unsigned long cr3)
     4.4      hvm_flush_guest_tlbs();
     4.5  
     4.6  #ifdef USER_MAPPINGS_ARE_GLOBAL
     4.7 -    __pge_off();
     4.8 -    asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
     4.9 -    __pge_on();
    4.10 +    {
    4.11 +        unsigned long cr4 = read_cr4();
    4.12 +        write_cr4(cr4 & ~X86_CR4_PGE);
    4.13 +        asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
    4.14 +        write_cr4(cr4);
    4.15 +    }
    4.16  #else
    4.17      asm volatile ( "mov %0, %%cr3" : : "r" (cr3) : "memory" );
    4.18  #endif
    4.19 @@ -124,8 +127,7 @@ void flush_area_local(const void *va, un
    4.20              hvm_flush_guest_tlbs();
    4.21  
    4.22  #ifndef USER_MAPPINGS_ARE_GLOBAL
    4.23 -            if ( !(flags & FLUSH_TLB_GLOBAL) ||
    4.24 -                 !(mmu_cr4_features & X86_CR4_PGE) )
    4.25 +            if ( !(flags & FLUSH_TLB_GLOBAL) || !(read_cr4() & X86_CR4_PGE) )
    4.26              {
    4.27                  asm volatile ( "mov %0, %%cr3"
    4.28                                 : : "r" (read_cr3()) : "memory" );
    4.29 @@ -133,9 +135,10 @@ void flush_area_local(const void *va, un
    4.30              else
    4.31  #endif
    4.32              {
    4.33 -                __pge_off();
    4.34 +                unsigned long cr4 = read_cr4();
    4.35 +                write_cr4(cr4 & ~X86_CR4_PGE);
    4.36                  barrier();
    4.37 -                __pge_on();
    4.38 +                write_cr4(cr4);
    4.39              }
    4.40  
    4.41              post_flush(t);
     5.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c	Mon Oct 29 15:05:27 2007 +0000
     5.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-detect.c	Mon Oct 29 16:49:02 2007 +0000
     5.3 @@ -18,6 +18,8 @@
     5.4   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
     5.5   */
     5.6  
     5.7 +#include <xen/config.h>
     5.8 +#include <xen/errno.h>
     5.9  #include <asm/iommu.h>
    5.10  #include <asm/amd-iommu.h>
    5.11  #include <asm/hvm/svm/amd-iommu-proto.h>
     6.1 --- a/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c	Mon Oct 29 15:05:27 2007 +0000
     6.2 +++ b/xen/arch/x86/hvm/svm/amd_iommu/amd-iommu-init.c	Mon Oct 29 16:49:02 2007 +0000
     6.3 @@ -18,6 +18,8 @@
     6.4   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
     6.5   */
     6.6  
     6.7 +#include <xen/config.h>
     6.8 +#include <xen/errno.h>
     6.9  #include <asm/amd-iommu.h>
    6.10  #include <asm/hvm/svm/amd-iommu-proto.h>
    6.11  #include <asm-x86/fixmap.h>
     7.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Oct 29 15:05:27 2007 +0000
     7.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Oct 29 16:49:02 2007 +0000
     7.3 @@ -441,7 +441,7 @@ static enum hvm_intblk svm_interrupt_blo
     7.4      ASSERT((intack.source == hvm_intsrc_pic) ||
     7.5             (intack.source == hvm_intsrc_lapic));
     7.6  
     7.7 -    if ( irq_masked(guest_cpu_user_regs()->eflags) )
     7.8 +    if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
     7.9          return hvm_intblk_rflags_ie;
    7.10  
    7.11      if ( (intack.source == hvm_intsrc_lapic) &&
     8.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Oct 29 15:05:27 2007 +0000
     8.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Mon Oct 29 16:49:02 2007 +0000
     8.3 @@ -498,7 +498,7 @@ static int construct_vmcs(struct vcpu *v
     8.4  
     8.5      /* Host control registers. */
     8.6      __vmwrite(HOST_CR0, read_cr0() | X86_CR0_TS);
     8.7 -    __vmwrite(HOST_CR4, read_cr4());
     8.8 +    __vmwrite(HOST_CR4, mmu_cr4_features);
     8.9  
    8.10      /* Host CS:RIP. */
    8.11      __vmwrite(HOST_CS_SELECTOR, __HYPERVISOR_CS);
     9.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Oct 29 15:05:27 2007 +0000
     9.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Oct 29 16:49:02 2007 +0000
     9.3 @@ -727,6 +727,10 @@ static void vmx_ctxt_switch_from(struct 
     9.4  
     9.5  static void vmx_ctxt_switch_to(struct vcpu *v)
     9.6  {
     9.7 +    /* HOST_CR4 in VMCS is always mmu_cr4_features. Sync CR4 now. */
     9.8 +    if ( unlikely(read_cr4() != mmu_cr4_features) )
     9.9 +        write_cr4(mmu_cr4_features);
    9.10 +
    9.11      vmx_restore_guest_msrs(v);
    9.12      vmx_restore_dr(v);
    9.13  }
    9.14 @@ -990,7 +994,7 @@ static enum hvm_intblk vmx_interrupt_blo
    9.15      ASSERT((intack.source == hvm_intsrc_pic) ||
    9.16             (intack.source == hvm_intsrc_lapic));
    9.17  
    9.18 -    if ( irq_masked(guest_cpu_user_regs()->eflags) )
    9.19 +    if ( !(guest_cpu_user_regs()->eflags & X86_EFLAGS_IF) )
    9.20          return hvm_intblk_rflags_ie;
    9.21  
    9.22      if ( intack.source == hvm_intsrc_lapic )
    10.1 --- a/xen/arch/x86/setup.c	Mon Oct 29 15:05:27 2007 +0000
    10.2 +++ b/xen/arch/x86/setup.c	Mon Oct 29 16:49:02 2007 +0000
    10.3 @@ -415,6 +415,8 @@ void __init __start_xen(unsigned long mb
    10.4      set_current((struct vcpu *)0xfffff000); /* debug sanity */
    10.5      idle_vcpu[0] = current;
    10.6      set_processor_id(0); /* needed early, for smp_processor_id() */
    10.7 +    rdmsrl(MSR_EFER, this_cpu(efer));
    10.8 +    asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );
    10.9  
   10.10      smp_prepare_boot_cpu();
   10.11  
    11.1 --- a/xen/arch/x86/smp.c	Mon Oct 29 15:05:27 2007 +0000
    11.2 +++ b/xen/arch/x86/smp.c	Mon Oct 29 16:49:02 2007 +0000
    11.3 @@ -86,6 +86,12 @@ static inline void check_IPI_mask(cpumas
    11.4      ASSERT(!cpus_empty(cpumask));
    11.5  }
    11.6  
    11.7 +void apic_wait_icr_idle(void)
    11.8 +{
    11.9 +	while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY )
   11.10 +		cpu_relax();
   11.11 +}
   11.12 +
   11.13  void send_IPI_mask_flat(cpumask_t cpumask, int vector)
   11.14  {
   11.15      unsigned long mask = cpus_addr(cpumask)[0];
    12.1 --- a/xen/arch/x86/smpboot.c	Mon Oct 29 15:05:27 2007 +0000
    12.2 +++ b/xen/arch/x86/smpboot.c	Mon Oct 29 16:49:02 2007 +0000
    12.3 @@ -495,6 +495,8 @@ void __devinit start_secondary(void *unu
    12.4  	set_processor_id(cpu);
    12.5  	set_current(idle_vcpu[cpu]);
    12.6  	this_cpu(curr_vcpu) = idle_vcpu[cpu];
    12.7 +	rdmsrl(MSR_EFER, this_cpu(efer));
    12.8 +	asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );
    12.9  
   12.10  	percpu_traps_init();
   12.11  
    13.1 --- a/xen/arch/x86/traps.c	Mon Oct 29 15:05:27 2007 +0000
    13.2 +++ b/xen/arch/x86/traps.c	Mon Oct 29 16:49:02 2007 +0000
    13.3 @@ -1794,10 +1794,8 @@ static int emulate_privileged_op(struct 
    13.4              break;
    13.5  
    13.6          case 4: /* Write CR4 */
    13.7 -            if ( *reg != (read_cr4() & ~(X86_CR4_PGE|X86_CR4_PSE)) )
    13.8 -                gdprintk(XENLOG_WARNING,
    13.9 -                         "Attempt to change CR4 flags %08lx -> %08lx\n",
   13.10 -                         read_cr4() & ~(X86_CR4_PGE|X86_CR4_PSE), *reg);
   13.11 +            v->arch.guest_context.ctrlreg[4] = pv_guest_cr4_fixup(*reg);
   13.12 +            write_cr4(v->arch.guest_context.ctrlreg[4]);
   13.13              break;
   13.14  
   13.15          default:
   13.16 @@ -1868,6 +1866,10 @@ static int emulate_privileged_op(struct 
   13.17          }
   13.18          break;
   13.19  
   13.20 +    case 0x31: /* RDTSC */
   13.21 +        rdtsc(regs->eax, regs->edx);
   13.22 +        break;
   13.23 +
   13.24      case 0x32: /* RDMSR */
   13.25          switch ( regs->ecx )
   13.26          {
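
The new 0x31 case above is what keeps guest kernels working: with CR4.TSD
set, RDTSC faults even for the guest kernel (which runs above CPL0), Xen's
privileged-op emulator lands here, and rdtsc(regs->eax, regs->edx) executes
the instruction on the guest's behalf. Roughly, and purely as a user-space
illustration (emulated_rdtsc is a hypothetical name), the macro hands back
the two halves RDTSC defines:

    #include <stdint.h>
    #include <stdio.h>

    /* Approximation of Xen's rdtsc(lo, hi): run RDTSC and return the low
     * half in eax and the high half in edx, as the faulting guest expects. */
    static void emulated_rdtsc(uint32_t *eax, uint32_t *edx)
    {
        asm volatile ( "rdtsc" : "=a" (*eax), "=d" (*edx) );
    }

    int main(void)
    {
        uint32_t lo, hi;
        emulated_rdtsc(&lo, &hi);
        printf("tsc = %#llx\n", ((unsigned long long)hi << 32) | lo);
        return 0;
    }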
    14.1 --- a/xen/include/asm-x86/amd-iommu.h	Mon Oct 29 15:05:27 2007 +0000
    14.2 +++ b/xen/include/asm-x86/amd-iommu.h	Mon Oct 29 16:49:02 2007 +0000
    14.3 @@ -22,8 +22,8 @@
    14.4  
    14.5  #include <xen/init.h>
    14.6  #include <xen/types.h>
    14.7 +#include <xen/list.h>
    14.8  #include <xen/spinlock.h>
    14.9 -#include <xen/mm.h>
   14.10  #include <asm/hvm/svm/amd-iommu-defs.h>
   14.11  
   14.12  #define iommu_found()           (!list_empty(&amd_iommu_head))
    15.1 --- a/xen/include/asm-x86/apic.h	Mon Oct 29 15:05:27 2007 +0000
    15.2 +++ b/xen/include/asm-x86/apic.h	Mon Oct 29 16:49:02 2007 +0000
    15.3 @@ -2,9 +2,7 @@
    15.4  #define __ASM_APIC_H
    15.5  
    15.6  #include <xen/config.h>
    15.7 -#include <asm/fixmap.h>
    15.8  #include <asm/apicdef.h>
    15.9 -#include <asm/processor.h>
   15.10  #include <asm/system.h>
   15.11  
   15.12  #define Dprintk(x...)
   15.13 @@ -51,11 +49,7 @@ static __inline u32 apic_read(unsigned l
   15.14  	return *((volatile u32 *)(APIC_BASE+reg));
   15.15  }
   15.16  
   15.17 -static __inline__ void apic_wait_icr_idle(void)
   15.18 -{
   15.19 -	while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY )
   15.20 -		cpu_relax();
   15.21 -}
   15.22 +void apic_wait_icr_idle(void);
   15.23  
   15.24  int get_physical_broadcast(void);
   15.25  
    16.1 --- a/xen/include/asm-x86/domain.h	Mon Oct 29 15:05:27 2007 +0000
    16.2 +++ b/xen/include/asm-x86/domain.h	Mon Oct 29 16:49:02 2007 +0000
    16.3 @@ -350,6 +350,8 @@ struct arch_vcpu
    16.4  /* Continue the current hypercall via func(data) on specified cpu. */
    16.5  int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data);
    16.6  
    16.7 +unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4);
    16.8 +
    16.9  #endif /* __ASM_DOMAIN_H__ */
   16.10  
   16.11  /*
    17.1 --- a/xen/include/asm-x86/hvm/io.h	Mon Oct 29 15:05:27 2007 +0000
    17.2 +++ b/xen/include/asm-x86/hvm/io.h	Mon Oct 29 16:49:02 2007 +0000
    17.3 @@ -149,13 +149,6 @@ static inline int register_buffered_io_h
    17.4      return register_io_handler(d, addr, size, action, HVM_BUFFERED_IO);
    17.5  }
    17.6  
    17.7 -#if defined(__i386__) || defined(__x86_64__)
    17.8 -static inline int irq_masked(unsigned long eflags)
    17.9 -{
   17.10 -    return ((eflags & X86_EFLAGS_IF) == 0);
   17.11 -}
   17.12 -#endif
   17.13 -
   17.14  extern void send_pio_req(unsigned long port, unsigned long count, int size,
   17.15                           paddr_t value, int dir, int df, int value_is_ptr);
   17.16  void send_timeoffset_req(unsigned long timeoff);
    18.1 --- a/xen/include/asm-x86/hvm/irq.h	Mon Oct 29 15:05:27 2007 +0000
    18.2 +++ b/xen/include/asm-x86/hvm/irq.h	Mon Oct 29 16:49:02 2007 +0000
    18.3 @@ -24,6 +24,7 @@
    18.4  
    18.5  #include <xen/types.h>
    18.6  #include <xen/spinlock.h>
    18.7 +#include <asm/irq.h>
    18.8  #include <asm/hvm/hvm.h>
    18.9  #include <asm/hvm/vpic.h>
   18.10  #include <asm/hvm/vioapic.h>
    19.1 --- a/xen/include/asm-x86/io_apic.h	Mon Oct 29 15:05:27 2007 +0000
    19.2 +++ b/xen/include/asm-x86/io_apic.h	Mon Oct 29 16:49:02 2007 +0000
    19.3 @@ -2,9 +2,10 @@
    19.4  #define __ASM_IO_APIC_H
    19.5  
    19.6  #include <xen/config.h>
    19.7 -#include <asm/fixmap.h>
    19.8  #include <asm/types.h>
    19.9  #include <asm/mpspec.h>
   19.10 +#include <asm/apicdef.h>
   19.11 +#include <asm/fixmap.h>
   19.12  
   19.13  /*
   19.14   * Intel IO-APIC support for SMP and UP systems.
    20.1 --- a/xen/include/asm-x86/iommu.h	Mon Oct 29 15:05:27 2007 +0000
    20.2 +++ b/xen/include/asm-x86/iommu.h	Mon Oct 29 16:49:02 2007 +0000
    20.3 @@ -21,11 +21,8 @@
    20.4  #define _IOMMU_H_
    20.5  
    20.6  #include <xen/init.h>
    20.7 -#include <xen/bitmap.h>
    20.8 -#include <xen/irq.h>
    20.9 +#include <xen/list.h>
   20.10  #include <xen/spinlock.h>
   20.11 -#include <xen/mm.h>
   20.12 -#include <xen/xmalloc.h>
   20.13  #include <asm/hvm/vmx/intel-iommu.h>
   20.14  #include <public/hvm/ioreq.h>
   20.15  #include <public/domctl.h>
    21.1 --- a/xen/include/asm-x86/msr.h	Mon Oct 29 15:05:27 2007 +0000
    21.2 +++ b/xen/include/asm-x86/msr.h	Mon Oct 29 16:49:02 2007 +0000
    21.3 @@ -90,16 +90,14 @@ static inline void wrmsrl(unsigned int m
    21.4  			  : "c" (counter))
    21.5  
    21.6  
    21.7 -DECLARE_PER_CPU(__u64, efer);
    21.8 +DECLARE_PER_CPU(u64, efer);
    21.9  
   21.10 -static inline __u64 read_efer(void)
   21.11 +static inline u64 read_efer(void)
   21.12  {
   21.13 -    if (!this_cpu(efer))
   21.14 -        rdmsrl(MSR_EFER, this_cpu(efer));
   21.15      return this_cpu(efer);
   21.16  }
   21.17  
   21.18 -static inline void write_efer(__u64 val)
   21.19 +static inline void write_efer(u64 val)
   21.20  {
   21.21      this_cpu(efer) = val;
   21.22      wrmsrl(MSR_EFER, val);
    22.1 --- a/xen/include/asm-x86/page.h	Mon Oct 29 15:05:27 2007 +0000
    22.2 +++ b/xen/include/asm-x86/page.h	Mon Oct 29 16:49:02 2007 +0000
    22.3 @@ -294,9 +294,6 @@ void paging_init(void);
    22.4  void setup_idle_pagetable(void);
    22.5  #endif /* !defined(__ASSEMBLY__) */
    22.6  
    22.7 -#define __pge_off() write_cr4(mmu_cr4_features & ~X86_CR4_PGE)
    22.8 -#define __pge_on()  write_cr4(mmu_cr4_features)
    22.9 -
   22.10  #define _PAGE_PRESENT  0x001U
   22.11  #define _PAGE_RW       0x002U
   22.12  #define _PAGE_USER     0x004U
    23.1 --- a/xen/include/asm-x86/processor.h	Mon Oct 29 15:05:27 2007 +0000
    23.2 +++ b/xen/include/asm-x86/processor.h	Mon Oct 29 16:49:02 2007 +0000
    23.3 @@ -8,6 +8,8 @@
    23.4  #include <xen/config.h>
    23.5  #include <xen/cache.h>
    23.6  #include <xen/types.h>
    23.7 +#include <xen/smp.h>
    23.8 +#include <xen/percpu.h>
    23.9  #include <public/xen.h>
   23.10  #include <asm/types.h>
   23.11  #include <asm/cpufeature.h>
   23.12 @@ -298,16 +300,17 @@ static inline unsigned long read_cr2(voi
   23.13      return cr2;
   23.14  }
   23.15  
   23.16 +DECLARE_PER_CPU(unsigned long, cr4);
   23.17 +
   23.18  static inline unsigned long read_cr4(void)
   23.19  {
   23.20 -    unsigned long cr4;
   23.21 -    asm volatile ( "mov %%cr4,%0\n\t" : "=r" (cr4) );
   23.22 -    return cr4;
   23.23 -} 
   23.24 -    
   23.25 +    return this_cpu(cr4);
   23.26 +}
   23.27 +
   23.28  static inline void write_cr4(unsigned long val)
   23.29  {
   23.30 -    asm volatile ( "mov %0,%%cr4" : : "r" ((unsigned long)val) );
   23.31 +    this_cpu(cr4) = val;
   23.32 +    asm volatile ( "mov %0,%%cr4" : : "r" (val) );
   23.33  }
   23.34  
   23.35  /* Clear and set 'TS' bit respectively */
   23.36 @@ -332,13 +335,13 @@ extern unsigned long mmu_cr4_features;
   23.37  static always_inline void set_in_cr4 (unsigned long mask)
   23.38  {
   23.39      mmu_cr4_features |= mask;
   23.40 -    write_cr4(mmu_cr4_features);
   23.41 +    write_cr4(read_cr4() | mask);
   23.42  }
   23.43  
   23.44  static always_inline void clear_in_cr4 (unsigned long mask)
   23.45  {
   23.46 -	mmu_cr4_features &= ~mask;
   23.47 -	write_cr4(mmu_cr4_features);
   23.48 +    mmu_cr4_features &= ~mask;
   23.49 +    write_cr4(read_cr4() & ~mask);
   23.50  }
   23.51  
   23.52  /*
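
set_in_cr4()/clear_in_cr4() above now adjust the live (cached) CR4 relative
to its current value instead of stomping it with mmu_cr4_features; otherwise
a guest-owned bit such as TSD would be silently dropped whenever the
hypervisor toggled an unrelated feature. A hypothetical before/after
comparison (bit values illustrative):

    #include <assert.h>

    #define X86_CR4_TSD        0x0004
    #define X86_CR4_OSXMMEXCPT 0x0400

    int main(void)
    {
        unsigned long mmu_cr4_features = 0x02f0;            /* template: no TSD */
        unsigned long cr4 = mmu_cr4_features | X86_CR4_TSD; /* guest enabled TSD */

        /* Old set_in_cr4(): write_cr4(mmu_cr4_features) -- TSD is lost. */
        unsigned long old_way = mmu_cr4_features | X86_CR4_OSXMMEXCPT;
        /* New set_in_cr4(): write_cr4(read_cr4() | mask) -- TSD survives. */
        unsigned long new_way = cr4 | X86_CR4_OSXMMEXCPT;

        assert(!(old_way & X86_CR4_TSD));
        assert(new_way & X86_CR4_TSD);
        return 0;
    }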
    24.1 --- a/xen/include/asm-x86/smp.h	Mon Oct 29 15:05:27 2007 +0000
    24.2 +++ b/xen/include/asm-x86/smp.h	Mon Oct 29 16:49:02 2007 +0000
    24.3 @@ -13,7 +13,6 @@
    24.4  
    24.5  #ifdef CONFIG_X86_LOCAL_APIC
    24.6  #ifndef __ASSEMBLY__
    24.7 -#include <asm/fixmap.h>
    24.8  #include <asm/bitops.h>
    24.9  #include <asm/mpspec.h>
   24.10  #ifdef CONFIG_X86_IO_APIC
    25.1 --- a/xen/include/asm-x86/x86_32/elf.h	Mon Oct 29 15:05:27 2007 +0000
    25.2 +++ b/xen/include/asm-x86/x86_32/elf.h	Mon Oct 29 16:49:02 2007 +0000
    25.3 @@ -1,8 +1,6 @@
    25.4  #ifndef __X86_32_ELF_H__
    25.5  #define __X86_32_ELF_H__
    25.6  
    25.7 -#include <asm/processor.h>
    25.8 -
    25.9  typedef struct {
   25.10      unsigned long ebx;
   25.11      unsigned long ecx;
   25.12 @@ -40,7 +38,7 @@ static inline void elf_core_save_regs(EL
   25.13      asm volatile("movw %%fs, %%ax;" :"=a"(core_regs->fs));
   25.14      asm volatile("movw %%gs, %%ax;" :"=a"(core_regs->gs));
   25.15      /* orig_eax not filled in for now */
   25.16 -    core_regs->eip = (unsigned long)current_text_addr();
   25.17 +    core_regs->eip = (unsigned long)elf_core_save_regs;
   25.18      asm volatile("movw %%cs, %%ax;" :"=a"(core_regs->cs));
   25.19      asm volatile("pushfl; popl %0" :"=m"(core_regs->eflags));
   25.20      asm volatile("movl %%esp,%0" : "=m"(core_regs->esp));
    26.1 --- a/xen/include/asm-x86/x86_64/elf.h	Mon Oct 29 15:05:27 2007 +0000
    26.2 +++ b/xen/include/asm-x86/x86_64/elf.h	Mon Oct 29 16:49:02 2007 +0000
    26.3 @@ -1,8 +1,6 @@
    26.4  #ifndef __X86_64_ELF_H__
    26.5  #define __X86_64_ELF_H__
    26.6  
    26.7 -#include <asm/processor.h>
    26.8 -
    26.9  typedef struct {
   26.10      unsigned long r15;
   26.11      unsigned long r14;
   26.12 @@ -54,7 +52,7 @@ static inline void elf_core_save_regs(EL
   26.13      asm volatile("movq %%rsi,%0" : "=m"(core_regs->rsi));
   26.14      asm volatile("movq %%rdi,%0" : "=m"(core_regs->rdi));
   26.15      /* orig_rax not filled in for now */
   26.16 -    core_regs->rip = (unsigned long)current_text_addr();
   26.17 +    core_regs->rip = (unsigned long)elf_core_save_regs;
   26.18      asm volatile("movl %%cs, %%eax;" :"=a"(core_regs->cs));
   26.19      asm volatile("pushfq; popq %0" :"=m"(core_regs->eflags));
   26.20      asm volatile("movq %%rsp,%0" : "=m"(core_regs->rsp));