ia64/xen-unstable

changeset 1480:fc5d9f57f546

bitkeeper revision 1.968 (40cf1596Prlo7Ak2J5KWtvKaolWxOg)

More x86_64 stuff.
author kaf24@scramble.cl.cam.ac.uk
date Tue Jun 15 15:28:22 2004 +0000 (2004-06-15)
parents 268d475892cc
children 44512070eb7b
files .rootkeys xen/arch/x86/i387.c xen/arch/x86/setup.c xen/arch/x86/smpboot.c xen/arch/x86/traps.c xen/common/dom0_ops.c xen/common/memory.c xen/include/asm-x86/config.h xen/include/asm-x86/current.h xen/include/asm-x86/pda.h xen/include/asm-x86/processor.h xen/include/asm-x86/ptrace.h xen/include/asm-x86/system.h xen/include/asm-x86/uaccess.h xen/include/asm-x86/x86_32/current.h xen/include/asm-x86/x86_32/ptrace.h xen/include/asm-x86/x86_32/uaccess.h xen/include/asm-x86/x86_64/processor.h xen/include/asm-x86/x86_64/uaccess.h xen/include/xen/mm.h
line diff
     1.1 --- a/.rootkeys	Tue Jun 15 12:29:06 2004 +0000
     1.2 +++ b/.rootkeys	Tue Jun 15 15:28:22 2004 +0000
     1.3 @@ -406,7 +406,7 @@ 3ddb79c3rM-Ote0Xn6Ytg8Y6YqAG-A xen/inclu
     1.4  3ddb79c3KhTI0F_Iw_hRL9QEyOVK-g xen/include/asm-x86/cache.h
     1.5  404f1b920OQVnrbnXnySS-WxrH9Wzw xen/include/asm-x86/config.h
     1.6  3ddb79c2LLt11EQHjrd6sB7FUqvFfA xen/include/asm-x86/cpufeature.h
     1.7 -3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen/include/asm-x86/current.h
     1.8 +40cf1596ajIU1KJfF22XD-tSLfH6XA xen/include/asm-x86/current.h
     1.9  3ddb79c2jFkPAZTDmU35L6IUssYMgQ xen/include/asm-x86/debugreg.h
    1.10  3ddb79c3r9-31dIsewPV3P3i8HALsQ xen/include/asm-x86/delay.h
    1.11  3ddb79c34BFiXjBJ_cCKB0aCsV1IDw xen/include/asm-x86/desc.h
    1.12 @@ -429,7 +429,7 @@ 3ddb79c3ysKUbxZuwKBRK3WXU2TlEg xen/inclu
    1.13  404f1bb41Yl-5ZjIWnG66HDCj6OIWA xen/include/asm-x86/pda.h
    1.14  4022a73diKn2Ax4-R4gzk59lm1YdDg xen/include/asm-x86/pdb.h
    1.15  3ddb79c2QF5-pZGzuX4QukPCDAl59A xen/include/asm-x86/processor.h
    1.16 -3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/ptrace.h
    1.17 +40cf1596bim9F9DNdV75klgRSZ6Y2A xen/include/asm-x86/ptrace.h
    1.18  3ddb79c2plf7ciNgoNjU-RsbUzawsw xen/include/asm-x86/rwlock.h
    1.19  3ddb79c3Hgbb2g8CyWLMCK-6_ZVQSQ xen/include/asm-x86/smp.h
    1.20  3ddb79c3jn8ALV_S9W5aeTYUQRKBpg xen/include/asm-x86/smpboot.h
    1.21 @@ -440,13 +440,15 @@ 3ddb79c3ezddh34MdelJpa5tNR00Dw xen/inclu
    1.22  3e397e66xPNc8eaSqC9pPbyAtRGzHA xen/include/asm-x86/time.h
    1.23  3e450943TfE-iovQIY_tMO_VdGsPhA xen/include/asm-x86/timex.h
    1.24  3ddb79c4HugMq7IYGxcQKFBpKwKhzA xen/include/asm-x86/types.h
    1.25 -3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/uaccess.h
    1.26 +40cf1596saFaHD5DC5zvrSn7CDCWGQ xen/include/asm-x86/uaccess.h
    1.27  3ddb79c3uPGcP_l_2xyGgBSWd5aC-Q xen/include/asm-x86/unaligned.h
    1.28 +3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen/include/asm-x86/x86_32/current.h
    1.29 +3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/x86_32/ptrace.h
    1.30 +3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/x86_32/uaccess.h
    1.31  404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86/x86_64/current.h
    1.32  404f1b9fl6AQ_a-T1TDK3fuwTPXmHw xen/include/asm-x86/x86_64/desc.h
    1.33  404f1badfXZJZ2sU8sh9PS2EZvd19Q xen/include/asm-x86/x86_64/ldt.h
    1.34  404f1bb1LSCqrMDSfRAti5NdMQPJBQ xen/include/asm-x86/x86_64/page.h
    1.35 -404f1bb756fZfxk5HDx7J7BW3R-1jQ xen/include/asm-x86/x86_64/processor.h
    1.36  404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/ptrace.h
    1.37  404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
    1.38  400304fcmRQmDdFYEzDh0wcBba9alg xen/include/hypervisor-ifs/COPYING
     2.1 --- a/xen/arch/x86/i387.c	Tue Jun 15 12:29:06 2004 +0000
     2.2 +++ b/xen/arch/x86/i387.c	Tue Jun 15 15:28:22 2004 +0000
     2.3 @@ -24,10 +24,10 @@ static inline void __save_init_fpu( stru
     2.4  {
     2.5      if ( cpu_has_fxsr ) {
     2.6          asm volatile( "fxsave %0 ; fnclex"
     2.7 -                      : "=m" (tsk->thread.i387.fxsave) );
     2.8 +                      : "=m" (tsk->thread.i387) );
     2.9      } else {
    2.10          asm volatile( "fnsave %0 ; fwait"
    2.11 -                      : "=m" (tsk->thread.i387.fsave) );
    2.12 +                      : "=m" (tsk->thread.i387) );
    2.13      }
    2.14      clear_bit(PF_USEDFPU, &tsk->flags);
    2.15  }
    2.16 @@ -48,9 +48,9 @@ void restore_fpu( struct task_struct *ts
    2.17  {
    2.18      if ( cpu_has_fxsr ) {
    2.19          asm volatile( "fxrstor %0"
    2.20 -                      : : "m" (tsk->thread.i387.fxsave) );
    2.21 +                      : : "m" (tsk->thread.i387) );
    2.22      } else {
    2.23          asm volatile( "frstor %0"
    2.24 -                      : : "m" (tsk->thread.i387.fsave) );
    2.25 +                      : : "m" (tsk->thread.i387) );
    2.26      }
    2.27  }
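
The i387.c change above stops selecting the fsave/fxsave union members because this
changeset collapses the old i387_union into one opaque save area (see the processor.h
hunk below). A minimal sketch of the resulting arrangement; save_fpu_state and the
has_fxsr flag are illustrative names, not part of the changeset:

    /* Unified FPU save area, as introduced in processor.h below: one
     * 16-byte-aligned buffer big enough for FXSAVE (512 bytes), shared
     * by the FSAVE and FXSAVE code paths. */
    struct i387_state {
        u8 state[512];
    } __attribute__ ((aligned (16)));

    static inline void save_fpu_state(struct i387_state *fpu, int has_fxsr)
    {
        if ( has_fxsr )
            asm volatile ( "fxsave %0 ; fnclex" : "=m" (*fpu) );
        else
            asm volatile ( "fnsave %0 ; fwait" : "=m" (*fpu) );
    }
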
     3.1 --- a/xen/arch/x86/setup.c	Tue Jun 15 12:29:06 2004 +0000
     3.2 +++ b/xen/arch/x86/setup.c	Tue Jun 15 15:28:22 2004 +0000
     3.3 @@ -18,7 +18,7 @@
     3.4  #include <asm/pdb.h>
     3.5  
     3.6  char ignore_irq13;		/* set if exception 16 works */
     3.7 -struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
     3.8 +struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
     3.9  
    3.10  /* Lots of nice things, since we only target PPro+. */
    3.11  unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE;
     4.1 --- a/xen/arch/x86/smpboot.c	Tue Jun 15 12:29:06 2004 +0000
     4.2 +++ b/xen/arch/x86/smpboot.c	Tue Jun 15 15:28:22 2004 +0000
     4.3 @@ -52,9 +52,6 @@
     4.4  
     4.5  #ifdef CONFIG_SMP
     4.6  
     4.7 -/* Set if we find a B stepping CPU			*/
     4.8 -static int smp_b_stepping;
     4.9 -
    4.10  /* Setup configured maximum number of CPUs to activate */
    4.11  static int max_cpus = -1;
    4.12  
    4.13 @@ -113,25 +110,8 @@ void __init smp_alloc_memory(void)
    4.14  
    4.15  void __init smp_store_cpu_info(int id)
    4.16  {
    4.17 -    struct cpuinfo_x86 *c = cpu_data + id;
    4.18 -
    4.19 -    *c = boot_cpu_data;
    4.20 -    c->pte_quick = 0;
    4.21 -    c->pmd_quick = 0;
    4.22 -    c->pgd_quick = 0;
    4.23 -    c->pgtable_cache_sz = 0;
    4.24 -    identify_cpu(c);
    4.25 -    /*
    4.26 -     * Mask B, Pentium, but not Pentium MMX
    4.27 -     */
    4.28 -    if (c->x86_vendor == X86_VENDOR_INTEL &&
    4.29 -        c->x86 == 5 &&
    4.30 -        c->x86_mask >= 1 && c->x86_mask <= 4 &&
    4.31 -        c->x86_model <= 3)
    4.32 -        /*
    4.33 -         * Remember we have B step Pentia with bugs
    4.34 -         */
    4.35 -        smp_b_stepping = 1;
    4.36 +    cpu_data[id] = boot_cpu_data;
    4.37 +    identify_cpu(&cpu_data[id]);
    4.38  }
    4.39  
    4.40  /*
    4.41 @@ -926,9 +906,6 @@ void __init smp_boot_cpus(void)
    4.42      }
    4.43      smp_num_cpus = cpucount + 1;
    4.44  
    4.45 -    if (smp_b_stepping)
    4.46 -        printk("WARNING: SMP operation may"
    4.47 -               " be unreliable with B stepping processors.\n");
    4.48      Dprintk("Boot done.\n");
    4.49  
    4.50      /*
     5.1 --- a/xen/arch/x86/traps.c	Tue Jun 15 12:29:06 2004 +0000
     5.2 +++ b/xen/arch/x86/traps.c	Tue Jun 15 15:28:22 2004 +0000
     5.3 @@ -288,12 +288,12 @@ asmlinkage void do_double_fault(void)
     5.4  
     5.5      /* Find information saved during fault and dump it to the console. */
     5.6      tss = &init_tss[cpu];
     5.7 -    printk("CPU:    %d\nEIP:    %04x:[<%08lx>]      \nEFLAGS: %08lx\n",
     5.8 +    printk("CPU:    %d\nEIP:    %04x:[<%08x>]      \nEFLAGS: %08x\n",
     5.9             cpu, tss->cs, tss->eip, tss->eflags);
    5.10 -    printk("CR3:    %08lx\n", tss->__cr3);
    5.11 -    printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
    5.12 +    printk("CR3:    %08x\n", tss->__cr3);
    5.13 +    printk("eax: %08x   ebx: %08x   ecx: %08x   edx: %08x\n",
    5.14             tss->eax, tss->ebx, tss->ecx, tss->edx);
    5.15 -    printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
    5.16 +    printk("esi: %08x   edi: %08x   ebp: %08x   esp: %08x\n",
    5.17             tss->esi, tss->edi, tss->ebp, tss->esp);
    5.18      printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
    5.19             tss->ds, tss->es, tss->fs, tss->gs, tss->ss);
     6.1 --- a/xen/common/dom0_ops.c	Tue Jun 15 12:29:06 2004 +0000
     6.2 +++ b/xen/common/dom0_ops.c	Tue Jun 15 15:28:22 2004 +0000
     6.3 @@ -21,11 +21,9 @@
     6.4  #include <xen/shadow.h>
     6.5  #include <hypervisor-ifs/sched_ctl.h>
     6.6  
     6.7 -
     6.8  #define TRC_DOM0OP_ENTER_BASE  0x00020000
     6.9  #define TRC_DOM0OP_LEAVE_BASE  0x00030000
    6.10  
    6.11 -
    6.12  extern unsigned int alloc_new_dom_mem(struct task_struct *, unsigned int);
    6.13  
    6.14  static int msr_cpu_mask;
    6.15 @@ -45,7 +43,6 @@ static void read_msr_for(void *unused)
    6.16          rdmsr(msr_addr, msr_lo, msr_hi);
    6.17  }
    6.18  
    6.19 -    
    6.20  long do_dom0_op(dom0_op_t *u_dom0_op)
    6.21  {
    6.22      long ret = 0;
     7.1 --- a/xen/common/memory.c	Tue Jun 15 12:29:06 2004 +0000
     7.2 +++ b/xen/common/memory.c	Tue Jun 15 15:28:22 2004 +0000
     7.3 @@ -214,8 +214,8 @@ void __init init_frametable(unsigned lon
     7.4         This costs 4MB -- may want to fix some day */
     7.5  
     7.6      /* Pin the ownership of the MP table so that DOM0 can map it later. */
     7.7 -    for ( mfn = virt_to_phys((void *)RDWR_MPT_VIRT_START)>>PAGE_SHIFT;
     7.8 -          mfn < virt_to_phys((void *)RDWR_MPT_VIRT_END)>>PAGE_SHIFT;
     7.9 +    for ( mfn = virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
    7.10 +          mfn < virt_to_phys(&machine_to_phys_mapping[1024*1024])>>PAGE_SHIFT;
    7.11            mfn++ )
    7.12      {
    7.13          frame_table[mfn].count_and_flags = 1 | PGC_allocated;
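
The memory.c hunk pins the machine-to-physical (M2P) table by deriving the frame range
from the machine_to_phys_mapping array itself instead of the RDWR_MPT_VIRT_* constants.
A sketch of the arithmetic, assuming 4KB pages and 4-byte M2P entries on x86_32 (per
the "costs 4MB" comment above):

    unsigned long first, last;
    first = virt_to_phys(&machine_to_phys_mapping[0]) >> PAGE_SHIFT;
    last  = virt_to_phys(&machine_to_phys_mapping[1024*1024]) >> PAGE_SHIFT;
    /* last - first == (1024*1024 entries * 4 bytes) / 4096 == 1024 frames (4MB). */
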
     8.1 --- a/xen/include/asm-x86/config.h	Tue Jun 15 12:29:06 2004 +0000
     8.2 +++ b/xen/include/asm-x86/config.h	Tue Jun 15 15:28:22 2004 +0000
     8.3 @@ -95,7 +95,7 @@ extern void __out_of_line_bug(int line) 
     8.4  #if defined(__x86_64__)
     8.5  
     8.6  #define PML4_ENTRY_BITS  39
     8.7 -#define PML4_ENTRY_BYTES (1<<PML4_ENTRY_BITS)
     8.8 +#define PML4_ENTRY_BYTES (1UL<<PML4_ENTRY_BITS)
     8.9  
    8.10  /*
    8.11   * Memory layout:
    8.12 @@ -192,10 +192,6 @@ extern void __out_of_line_bug(int line) 
    8.13  /* Next 4MB of virtual address space used for per-domain mappings (eg. GDT). */
    8.14  #define PERDOMAIN_VIRT_START  (SH_LINEAR_PT_VIRT_END)
    8.15  #define PERDOMAIN_VIRT_END    (PERDOMAIN_VIRT_START + (4*1024*1024))
    8.16 -#define GDT_VIRT_START        (PERDOMAIN_VIRT_START)
    8.17 -#define GDT_VIRT_END          (GDT_VIRT_START + (64*1024))
    8.18 -#define LDT_VIRT_START        (GDT_VIRT_END)
    8.19 -#define LDT_VIRT_END          (LDT_VIRT_START + (64*1024))
    8.20  /* Penultimate 4MB of virtual address space used for domain page mappings. */
    8.21  #define MAPCACHE_VIRT_START   (PERDOMAIN_VIRT_END)
    8.22  #define MAPCACHE_VIRT_END     (MAPCACHE_VIRT_START + (4*1024*1024))
    8.23 @@ -214,4 +210,9 @@ extern void __out_of_line_bug(int line) 
    8.24  
    8.25  #endif /* __i386__ */
    8.26  
    8.27 +#define GDT_VIRT_START        (PERDOMAIN_VIRT_START)
    8.28 +#define GDT_VIRT_END          (GDT_VIRT_START + (64*1024))
    8.29 +#define LDT_VIRT_START        (GDT_VIRT_END)
    8.30 +#define LDT_VIRT_END          (LDT_VIRT_START + (64*1024))
    8.31 +
    8.32  #endif /* __XEN_I386_CONFIG_H__ */
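
Two details in the config.h hunk are worth calling out. The GDT_VIRT_*/LDT_VIRT_* ranges
move below the architecture-specific blocks so x86_32 and x86_64 share one definition.
And the 1UL in PML4_ENTRY_BYTES matters: a plain 1<<39 shifts past the width of int,
which is undefined behaviour in C, while the unsigned long literal gives the intended
512GB stride. A small self-contained illustration (64-bit build assumed):

    #include <stdio.h>

    #define PML4_ENTRY_BITS  39
    #define PML4_ENTRY_BYTES (1UL<<PML4_ENTRY_BITS)

    int main(void)
    {
        /* Prints 8000000000, i.e. 512GB of virtual space per PML4 entry. */
        printf("%lx\n", PML4_ENTRY_BYTES);
        return 0;
    }
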
     9.1 --- a/xen/include/asm-x86/current.h	Tue Jun 15 12:29:06 2004 +0000
     9.2 +++ b/xen/include/asm-x86/current.h	Tue Jun 15 15:28:22 2004 +0000
     9.3 @@ -1,52 +1,6 @@
     9.4 -#ifndef _X86_CURRENT_H
     9.5 -#define _X86_CURRENT_H
     9.6 -
     9.7 -struct task_struct;
     9.8 -
     9.9 -#define STACK_RESERVED \
    9.10 -    (sizeof(execution_context_t) + sizeof(struct task_struct *))
    9.11 -
    9.12 -static inline struct task_struct * get_current(void)
    9.13 -{
    9.14 -    struct task_struct *current;
    9.15 -    __asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0" 
    9.16 -              : "=r" (current) : "0" (STACK_SIZE-4) );
    9.17 -    return current;
    9.18 -}
    9.19 - 
    9.20 -#define current get_current()
    9.21 -
    9.22 -static inline void set_current(struct task_struct *p)
    9.23 -{
    9.24 -    __asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)" 
    9.25 -              : : "r" (STACK_SIZE-4), "r" (p) );    
    9.26 -}
    9.27  
    9.28 -static inline execution_context_t *get_execution_context(void)
    9.29 -{
    9.30 -    execution_context_t *execution_context;
    9.31 -    __asm__ ( "andl %%esp,%0; addl %2,%0"
    9.32 -              : "=r" (execution_context) 
    9.33 -              : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
    9.34 -    return execution_context;
    9.35 -}
    9.36 -
    9.37 -static inline unsigned long get_stack_top(void)
    9.38 -{
    9.39 -    unsigned long p;
    9.40 -    __asm__ ( "orl %%esp,%0; andl $~3,%0" 
    9.41 -              : "=r" (p) : "0" (STACK_SIZE-4) );
    9.42 -    return p;
    9.43 -}
    9.44 -
    9.45 -#define schedule_tail(_p)                                         \
    9.46 -    __asm__ __volatile__ (                                        \
    9.47 -        "andl %%esp,%0; addl %2,%0; movl %0,%%esp; jmp *%1"       \
    9.48 -        : : "r" (~(STACK_SIZE-1)),                                \
    9.49 -            "r" (unlikely(is_idle_task((_p))) ?                   \
    9.50 -                                continue_cpu_idle_loop :          \
    9.51 -                                continue_nonidle_task),           \
    9.52 -            "i" (STACK_SIZE-STACK_RESERVED) )
    9.53 -
    9.54 -
    9.55 -#endif /* _X86_CURRENT_H */
    9.56 +#ifdef __x86_64__
    9.57 +#include <asm/x86_64/current.h>
    9.58 +#else
    9.59 +#include <asm/x86_32/current.h>
    9.60 +#endif
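
asm-x86/current.h is now just an architecture switch; its old body moves verbatim to
x86_32/current.h (section 15 below). The trick that body relies on: set_current() stores
the task_struct pointer in the last word of the STACK_SIZE-aligned per-CPU stack, so
get_current() can recover it from any stack address. A C-level sketch of what the
or/and asm computes, assuming STACK_SIZE is a power of two (the real code stays in asm):

    static inline struct task_struct *get_current_sketch(void)
    {
        unsigned long sp;
        asm ( "movl %%esp,%0" : "=r" (sp) );
        /* Round up to the stack's last 4-byte slot: set all the low
         * bits below STACK_SIZE, then clear the bottom two. */
        return *(struct task_struct **)((sp | (STACK_SIZE-4)) & ~3UL);
    }
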
    10.1 --- a/xen/include/asm-x86/pda.h	Tue Jun 15 12:29:06 2004 +0000
    10.2 +++ b/xen/include/asm-x86/pda.h	Tue Jun 15 15:28:22 2004 +0000
    10.3 @@ -12,11 +12,6 @@ struct x8664_pda {
    10.4  	struct task_struct *pcurrent;	/* Current process */
    10.5          int irqcount;		    /* Irq nesting counter. Starts with -1 */  	
    10.6  	int cpunumber;		    /* Logical CPU number */
    10.7 -	/* XXX: could be a single list */
    10.8 -	unsigned long *pgd_quick;
    10.9 -	unsigned long *pmd_quick;
   10.10 -	unsigned long *pte_quick;
   10.11 -	unsigned long pgtable_cache_sz;
   10.12  	char *irqstackptr;	/* top of irqstack */
   10.13  	unsigned long volatile *level4_pgt; 
   10.14  } ____cacheline_aligned;
    11.1 --- a/xen/include/asm-x86/processor.h	Tue Jun 15 12:29:06 2004 +0000
    11.2 +++ b/xen/include/asm-x86/processor.h	Tue Jun 15 15:28:22 2004 +0000
    11.3 @@ -1,11 +1,11 @@
    11.4  /*
    11.5 - * include/asm-i386/processor.h
    11.6 + * include/asm-x86/processor.h
    11.7   *
    11.8   * Copyright (C) 1994 Linus Torvalds
    11.9   */
   11.10  
   11.11 -#ifndef __ASM_I386_PROCESSOR_H
   11.12 -#define __ASM_I386_PROCESSOR_H
   11.13 +#ifndef __ASM_X86_PROCESSOR_H
   11.14 +#define __ASM_X86_PROCESSOR_H
   11.15  
   11.16  #include <asm/page.h>
   11.17  #include <asm/types.h>
   11.18 @@ -23,8 +23,12 @@ struct task_struct;
   11.19   * Default implementation of macro that returns current
   11.20   * instruction pointer ("program counter").
   11.21   */
   11.22 +#ifdef __x86_64__
   11.23 +#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
   11.24 +#else
   11.25  #define current_text_addr() \
   11.26    ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
   11.27 +#endif
   11.28  
   11.29  /*
   11.30   *  CPU type and hardware bug flags. Kept separately for each CPU.
   11.31 @@ -37,24 +41,12 @@ struct cpuinfo_x86 {
   11.32      __u8    x86_vendor;     /* CPU vendor */
   11.33      __u8    x86_model;
   11.34      __u8    x86_mask;
   11.35 -    char    wp_works_ok;    /* It doesn't on 386's */
   11.36 -    char    hlt_works_ok;   /* Problems on some 486Dx4's and old 386's */
   11.37 -    char    hard_math;
   11.38 -    char    rfu;
   11.39      int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
   11.40      __u32   x86_capability[NCAPINTS];
   11.41      char    x86_vendor_id[16];
   11.42 -    char    x86_model_id[64];
   11.43 -    int     x86_cache_size;  /* in KB - valid for CPUS which support this
   11.44 -                                call  */
   11.45 -    int     fdiv_bug;
   11.46 -    int     f00f_bug;
   11.47 -    int     coma_bug;
   11.48 -    unsigned long loops_per_jiffy;
   11.49 -    unsigned long *pgd_quick;
   11.50 -    unsigned long *pmd_quick;
   11.51 -    unsigned long *pte_quick;
   11.52 -    unsigned long pgtable_cache_sz;
    11.53 +    int     x86_cache_size;  /* in KB - for CPUs that support this call  */
   11.54 +    int	    x86_clflush_size;
   11.55 +    int	    x86_tlbsize;     /* number of 4K pages in DTLB/ITLB combined */
   11.56  } __attribute__((__aligned__(SMP_CACHE_BYTES)));
   11.57  
   11.58  #define X86_VENDOR_INTEL 0
   11.59 @@ -184,15 +176,15 @@ static inline unsigned int cpuid_edx(uns
   11.60  #define X86_CR0_PG              0x80000000 /* Paging                   (RW) */
   11.61  
   11.62  #define read_cr0() ({ \
   11.63 -	unsigned int __dummy; \
   11.64 +	unsigned long __dummy; \
   11.65  	__asm__( \
   11.66 -		"movl %%cr0,%0\n\t" \
   11.67 +		"mov"__OS" %%cr0,%0\n\t" \
   11.68  		:"=r" (__dummy)); \
   11.69  	__dummy; \
   11.70  })
   11.71  
   11.72  #define write_cr0(x) \
   11.73 -	__asm__("movl %0,%%cr0": :"r" (x));
   11.74 +	__asm__("mov"__OS" %0,%%cr0": :"r" (x));
   11.75  
   11.76  
   11.77  /*
   11.78 @@ -221,9 +213,9 @@ extern unsigned long mmu_cr4_features;
   11.79  static inline void set_in_cr4 (unsigned long mask)
   11.80  {
   11.81      mmu_cr4_features |= mask;
   11.82 -    __asm__("movl %%cr4,%%eax\n\t"
   11.83 -            "orl %0,%%eax\n\t"
   11.84 -            "movl %%eax,%%cr4\n"
   11.85 +    __asm__("mov"__OS" %%cr4,%%"__OP"ax\n\t"
   11.86 +            "or"__OS" %0,%%"__OP"ax\n\t"
   11.87 +            "mov"__OS" %%"__OP"ax,%%cr4\n"
   11.88              : : "irg" (mask)
   11.89              :"ax");
   11.90  }
   11.91 @@ -231,62 +223,13 @@ static inline void set_in_cr4 (unsigned 
   11.92  static inline void clear_in_cr4 (unsigned long mask)
   11.93  {
   11.94      mmu_cr4_features &= ~mask;
   11.95 -    __asm__("movl %%cr4,%%eax\n\t"
   11.96 -            "andl %0,%%eax\n\t"
   11.97 -            "movl %%eax,%%cr4\n"
   11.98 +    __asm__("mov"__OS" %%cr4,%%"__OP"ax\n\t"
   11.99 +            "and"__OS" %0,%%"__OP"ax\n\t"
  11.100 +            "movl"__OS" %%"__OP"ax,%%cr4\n"
  11.101              : : "irg" (~mask)
  11.102              :"ax");
  11.103  }
  11.104  
  11.105 -
  11.106 -
  11.107 -/*
  11.108 - *      Cyrix CPU configuration register indexes
  11.109 - */
  11.110 -#define CX86_CCR0 0xc0
  11.111 -#define CX86_CCR1 0xc1
  11.112 -#define CX86_CCR2 0xc2
  11.113 -#define CX86_CCR3 0xc3
  11.114 -#define CX86_CCR4 0xe8
  11.115 -#define CX86_CCR5 0xe9
  11.116 -#define CX86_CCR6 0xea
  11.117 -#define CX86_CCR7 0xeb
  11.118 -#define CX86_DIR0 0xfe
  11.119 -#define CX86_DIR1 0xff
  11.120 -#define CX86_ARR_BASE 0xc4
  11.121 -#define CX86_RCR_BASE 0xdc
  11.122 -
  11.123 -/*
  11.124 - *      Cyrix CPU indexed register access macros
  11.125 - */
  11.126 -
  11.127 -#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
  11.128 -
  11.129 -#define setCx86(reg, data) do { \
  11.130 -	outb((reg), 0x22); \
  11.131 -	outb((data), 0x23); \
  11.132 -} while (0)
  11.133 -
  11.134 -#define EISA_bus (0)
  11.135 -#define MCA_bus  (0)
  11.136 -
  11.137 -/* from system description table in BIOS.  Mostly for MCA use, but
  11.138 -others may find it useful. */
  11.139 -extern unsigned int machine_id;
  11.140 -extern unsigned int machine_submodel_id;
  11.141 -extern unsigned int BIOS_revision;
  11.142 -extern unsigned int mca_pentium_flag;
  11.143 -
  11.144 -/*
  11.145 - * User space process size: 3GB (default).
  11.146 - */
  11.147 -#define TASK_SIZE	(PAGE_OFFSET)
  11.148 -
  11.149 -/* This decides where the kernel will search for a free chunk of vm
  11.150 - * space during mmap's.
  11.151 - */
  11.152 -#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)
  11.153 -
  11.154  /*
  11.155   * Size of io_bitmap in longwords:
  11.156   * For Xen we support the full 8kbyte IO bitmap but use the io_bitmap_sel field
  11.157 @@ -298,105 +241,75 @@ extern unsigned int mca_pentium_flag;
  11.158  #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
  11.159  #define INVALID_IO_BITMAP_OFFSET 0x8000
  11.160  
  11.161 -struct i387_fsave_struct {
  11.162 -    long	cwd;
  11.163 -    long	swd;
  11.164 -    long	twd;
  11.165 -    long	fip;
  11.166 -    long	fcs;
  11.167 -    long	foo;
  11.168 -    long	fos;
  11.169 -    long	st_space[20];	/* 8*10 bytes for each FP-reg = 80 bytes */
  11.170 -    long	status;		/* software status information */
  11.171 -};
  11.172 -
  11.173 -struct i387_fxsave_struct {
  11.174 -    unsigned short	cwd;
  11.175 -    unsigned short	swd;
  11.176 -    unsigned short	twd;
  11.177 -    unsigned short	fop;
  11.178 -    long	fip;
  11.179 -    long	fcs;
  11.180 -    long	foo;
  11.181 -    long	fos;
  11.182 -    long	mxcsr;
  11.183 -    long	reserved;
  11.184 -    long	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
  11.185 -    long	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
  11.186 -    long	padding[56];
  11.187 +struct i387_state {
  11.188 +    u8 state[512]; /* big enough for FXSAVE */
  11.189  } __attribute__ ((aligned (16)));
  11.190  
  11.191 -struct i387_soft_struct {
  11.192 -    long    cwd;
  11.193 -    long    swd;
  11.194 -    long    twd;
  11.195 -    long    fip;
  11.196 -    long    fcs;
  11.197 -    long    foo;
  11.198 -    long    fos;
  11.199 -    long    st_space[20];   /* 8*10 bytes for each FP-reg = 80 bytes */
  11.200 -    unsigned char   ftop, changed, lookahead, no_update, rm, alimit;
  11.201 -    struct info     *info;
  11.202 -    unsigned long   entry_eip;
  11.203 -};
  11.204 -
  11.205 -union i387_union {
  11.206 -    struct i387_fsave_struct	fsave;
  11.207 -    struct i387_fxsave_struct	fxsave;
  11.208 -    struct i387_soft_struct soft;
  11.209 -};
  11.210 -
  11.211  typedef struct {
  11.212      unsigned long seg;
  11.213  } mm_segment_t;
  11.214  
  11.215  struct tss_struct {
  11.216      unsigned short	back_link,__blh;
  11.217 -    unsigned long	esp0;
  11.218 -    unsigned short	ss0,__ss0h;
  11.219 -    unsigned long	esp1;
  11.220 -    unsigned short	ss1,__ss1h;
  11.221 -    unsigned long	esp2;
  11.222 -    unsigned short	ss2,__ss2h;
  11.223 -    unsigned long	__cr3;
  11.224 -    unsigned long	eip;
  11.225 -    unsigned long	eflags;
  11.226 -    unsigned long	eax,ecx,edx,ebx;
  11.227 -    unsigned long	esp;
  11.228 -    unsigned long	ebp;
  11.229 -    unsigned long	esi;
  11.230 -    unsigned long	edi;
  11.231 -    unsigned short	es, __esh;
  11.232 -    unsigned short	cs, __csh;
  11.233 -    unsigned short	ss, __ssh;
  11.234 -    unsigned short	ds, __dsh;
  11.235 -    unsigned short	fs, __fsh;
  11.236 -    unsigned short	gs, __gsh;
  11.237 -    unsigned short	ldt, __ldth;
  11.238 -    unsigned short	trace, bitmap;
  11.239 -    unsigned long	io_bitmap[IO_BITMAP_SIZE+1];
  11.240 -    /*
  11.241 -     * pads the TSS to be cacheline-aligned (total size is 0x2080)
  11.242 -     */
  11.243 -    unsigned long __cacheline_filler[5];
  11.244 +#ifdef __x86_64__
  11.245 +    u64 rsp0;
  11.246 +    u64 rsp1;
  11.247 +    u64 rsp2;
  11.248 +    u64 reserved1;
  11.249 +    u64 ist[7];
  11.250 +    u64 reserved2;
  11.251 +    u16 reserved3;
  11.252 +#else
  11.253 +    u32 esp0;
  11.254 +    u16 ss0,__ss0h;
  11.255 +    u32 esp1;
  11.256 +    u16 ss1,__ss1h;
  11.257 +    u32 esp2;
  11.258 +    u16 ss2,__ss2h;
  11.259 +    u32 __cr3;
  11.260 +    u32 eip;
  11.261 +    u32 eflags;
  11.262 +    u32 eax,ecx,edx,ebx;
  11.263 +    u32 esp;
  11.264 +    u32 ebp;
  11.265 +    u32 esi;
  11.266 +    u32 edi;
  11.267 +    u16 es, __esh;
  11.268 +    u16 cs, __csh;
  11.269 +    u16 ss, __ssh;
  11.270 +    u16 ds, __dsh;
  11.271 +    u16 fs, __fsh;
  11.272 +    u16 gs, __gsh;
  11.273 +    u16 ldt, __ldth;
  11.274 +    u16 trace;
  11.275 +#endif
  11.276 +    u16 bitmap;
  11.277 +    u32 io_bitmap[IO_BITMAP_SIZE+1];
  11.278 +    /* Pads the TSS to be cacheline-aligned (total size is 0x2080). */
  11.279 +    u32 __cacheline_filler[5];
  11.280  };
  11.281  
  11.282  struct thread_struct {
  11.283 -    unsigned long guestos_sp, guestos_ss;
  11.284 +    unsigned long      guestos_sp;
  11.285 +    unsigned long      guestos_ss;
  11.286  /* Hardware debugging registers */
  11.287 -    unsigned long	debugreg[8];  /* %%db0-7 debug registers */
  11.288 +    unsigned long      debugreg[8];  /* %%db0-7 debug registers */
  11.289  /* floating point info */
  11.290 -    union i387_union	i387;
  11.291 +    struct i387_state  i387;
  11.292  /* Trap info. */
  11.293 -    int                 fast_trap_idx;
  11.294 -    struct desc_struct  fast_trap_desc;
  11.295 -    trap_info_t         traps[256];
  11.296 +#ifdef __i386__
  11.297 +    int                fast_trap_idx;
  11.298 +    struct desc_struct fast_trap_desc;
  11.299 +#endif
  11.300 +    trap_info_t        traps[256];
  11.301  };
  11.302  
  11.303  #define IDT_ENTRIES 256
  11.304  extern struct desc_struct idt_table[];
  11.305  extern struct desc_struct *idt_tables[];
  11.306  
  11.307 +#if defined(__i386__)
  11.308 +
  11.309  #define SET_DEFAULT_FAST_TRAP(_p) \
  11.310      (_p)->fast_trap_idx = 0x20;   \
  11.311      (_p)->fast_trap_desc.a = 0;   \
  11.312 @@ -443,6 +356,19 @@ long set_fast_trap(struct task_struct *p
  11.313  	{ [0 ... IO_BITMAP_SIZE] = ~0UL }, /* ioperm */         \
  11.314  }
  11.315  
  11.316 +#elif defined(__x86_64__)
  11.317 +
  11.318 +#define INIT_THREAD { 0 }
  11.319 +
  11.320 +#define INIT_TSS {                                              \
  11.321 +	0,0,                                                    \
  11.322 +	0,0,0,0,{0},0,0,                                        \
  11.323 +	0, INVALID_IO_BITMAP_OFFSET,                            \
  11.324 +	{ [0 ... IO_BITMAP_SIZE] = ~0UL }                       \
  11.325 +}
  11.326 +
  11.327 +#endif /* __x86_64__ */
  11.328 +
  11.329  struct mm_struct {
  11.330      /*
  11.331       * Every domain has a L1 pagetable of its own. Per-domain mappings
  11.332 @@ -454,7 +380,7 @@ struct mm_struct {
  11.333      /* shadow mode status and controls */
  11.334      unsigned int shadow_mode;  /* flags to control shadow table operation */
  11.335      pagetable_t  shadow_table;
  11.336 -    spinlock_t shadow_lock;
  11.337 +    spinlock_t   shadow_lock;
  11.338      unsigned int shadow_max_page_count; // currently unused
  11.339  
  11.340      /* shadow hashtable */
  11.341 @@ -472,11 +398,10 @@ struct mm_struct {
  11.342      unsigned int shadow_fault_count;     
  11.343      unsigned int shadow_dirty_count;     
  11.344  
  11.345 -
  11.346      /* Current LDT details. */
  11.347      unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
  11.348      /* Next entry is passed to LGDT on domain switch. */
  11.349 -    char gdt[6];
  11.350 +    char gdt[10]; /* NB. 10 bytes needed for x86_64. Use 6 bytes for x86_32. */
  11.351  };
  11.352  
  11.353  static inline void write_ptbase(struct mm_struct *mm)
  11.354 @@ -488,7 +413,7 @@ static inline void write_ptbase(struct m
  11.355      else
  11.356          pa = pagetable_val(mm->pagetable);
  11.357  
  11.358 -    __asm__ __volatile__ ( "movl %0, %%cr3" : : "r" (pa) : "memory" );
  11.359 +    __asm__ __volatile__ ( "mov"__OS" %0, %%cr3" : : "r" (pa) : "memory" );
  11.360  }
  11.361  
  11.362  #define IDLE0_MM                                                    \
  11.363 @@ -499,9 +424,9 @@ static inline void write_ptbase(struct m
  11.364  
  11.365  /* Convenient accessor for mm.gdt. */
  11.366  #define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (_e))
  11.367 -#define SET_GDT_ADDRESS(_p, _a) ((*(u32 *)((_p)->mm.gdt + 2)) = (_a))
  11.368 +#define SET_GDT_ADDRESS(_p, _a) ((*(unsigned long *)((_p)->mm.gdt + 2)) = (_a))
  11.369  #define GET_GDT_ENTRIES(_p)     ((*(u16 *)((_p)->mm.gdt + 0)))
  11.370 -#define GET_GDT_ADDRESS(_p)     ((*(u32 *)((_p)->mm.gdt + 2)))
  11.371 +#define GET_GDT_ADDRESS(_p)     ((*(unsigned long *)((_p)->mm.gdt + 2)))
  11.372  
  11.373  long set_gdt(struct task_struct *p, 
  11.374               unsigned long *frames, 
  11.375 @@ -560,4 +485,4 @@ extern inline void prefetchw(const void 
  11.376  
  11.377  #endif
  11.378  
  11.379 -#endif /* __ASM_I386_PROCESSOR_H */
  11.380 +#endif /* __ASM_X86_PROCESSOR_H */
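
Several processor.h hunks rewrite control-register asm with "mov"__OS" and %%"__OP"ax
so one source line assembles for either word size. Those macros are defined elsewhere
in the tree; the values below are an assumption consistent with how they are used here:

    /* Assumed definitions (not part of this changeset): an operation
     * suffix and an operand prefix selected per architecture. */
    #ifdef __x86_64__
    #define __OS "q"        /* "mov"__OS  -> movq */
    #define __OP "r"        /* %%"__OP"ax -> %rax */
    #else
    #define __OS "l"        /* -> movl */
    #define __OP "e"        /* -> %eax */
    #endif

So on x86_64 the new read_cr0() body expands to asm ( "movq %%cr0,%0" : "=r" (__dummy) ),
and on x86_32 to the old movl form. The same word-size split explains the gdt[10] change:
the LGDT pseudo-descriptor is 2 bytes of limit plus a base that is 4 bytes on x86_32 but
8 bytes on x86_64.
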
    12.1 --- a/xen/include/asm-x86/ptrace.h	Tue Jun 15 12:29:06 2004 +0000
    12.2 +++ b/xen/include/asm-x86/ptrace.h	Tue Jun 15 15:28:22 2004 +0000
    12.3 @@ -1,51 +1,6 @@
    12.4 -#ifndef _I386_PTRACE_H
    12.5 -#define _I386_PTRACE_H
    12.6 -
    12.7 -struct pt_regs {
    12.8 -	long ebx;
    12.9 -	long ecx;
   12.10 -	long edx;
   12.11 -	long esi;
   12.12 -	long edi;
   12.13 -	long ebp;
   12.14 -	long eax;
   12.15 -	int  xds;
   12.16 -	int  xes;
   12.17 -	int  xfs;
   12.18 -	int  xgs;
   12.19 -	long orig_eax;
   12.20 -	long eip;
   12.21 -	int  xcs;
   12.22 -	long eflags;
   12.23 -	long esp;
   12.24 -	int  xss;
   12.25 -};
   12.26  
   12.27 -enum EFLAGS {
   12.28 -        EF_CF   = 0x00000001,
   12.29 -        EF_PF   = 0x00000004,
   12.30 -        EF_AF   = 0x00000010,
   12.31 -        EF_ZF   = 0x00000040,
   12.32 -        EF_SF   = 0x00000080,
   12.33 -        EF_TF   = 0x00000100,
   12.34 -        EF_IE   = 0x00000200,
   12.35 -        EF_DF   = 0x00000400,
   12.36 -        EF_OF   = 0x00000800,
   12.37 -        EF_IOPL = 0x00003000,
   12.38 -        EF_IOPL_RING0 = 0x00000000,
   12.39 -        EF_IOPL_RING1 = 0x00001000,
   12.40 -        EF_IOPL_RING2 = 0x00002000,
   12.41 -        EF_NT   = 0x00004000,   /* nested task */
   12.42 -        EF_RF   = 0x00010000,   /* resume */
   12.43 -        EF_VM   = 0x00020000,   /* virtual mode */
   12.44 -        EF_AC   = 0x00040000,   /* alignment */
   12.45 -        EF_VIF  = 0x00080000,   /* virtual interrupt */
   12.46 -        EF_VIP  = 0x00100000,   /* virtual interrupt pending */
   12.47 -        EF_ID   = 0x00200000,   /* id */
   12.48 -};
   12.49 -
   12.50 -#ifdef __KERNEL__
   12.51 -#define user_mode(regs) ((3 & (regs)->xcs))
   12.52 +#ifdef __x86_64__
   12.53 +#include <asm/x86_64/ptrace.h>
   12.54 +#else
   12.55 +#include <asm/x86_32/ptrace.h>
   12.56  #endif
   12.57 -
   12.58 -#endif
    13.1 --- a/xen/include/asm-x86/system.h	Tue Jun 15 12:29:06 2004 +0000
    13.2 +++ b/xen/include/asm-x86/system.h	Tue Jun 15 15:28:22 2004 +0000
    13.3 @@ -168,13 +168,21 @@ static inline unsigned long __cmpxchg(vo
    13.4   * Some non-Intel clones support out-of-order store. wmb() ceases to be a
    13.5   * nop for these.
    13.6   */
    13.7 - 
    13.8 +#if defined(__i386__)
    13.9  #define mb() 	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
   13.10 -#define rmb()	mb()
   13.11 -
   13.12 +#define rmb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
   13.13  #ifdef CONFIG_X86_OOSTORE
   13.14  #define wmb() 	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
   13.15 -#else
   13.16 +#endif
   13.17 +#elif defined(__x86_64__)
   13.18 +#define mb()    __asm__ __volatile__ ("mfence":::"memory")
   13.19 +#define rmb()   __asm__ __volatile__ ("lfence":::"memory")
   13.20 +#ifdef CONFIG_X86_OOSTORE
   13.21 +#define wmb()   __asm__ __volatile__ ("sfence":::"memory")
   13.22 +#endif
   13.23 +#endif
   13.24 +
   13.25 +#ifndef CONFIG_X86_OOSTORE
   13.26  #define wmb()	__asm__ __volatile__ ("": : :"memory")
   13.27  #endif
   13.28  
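
With this hunk x86_64 gets real fence instructions (mfence/lfence/sfence) while x86_32
keeps the locked-add idiom, and wmb() falls back to a plain compiler barrier whenever
CONFIG_X86_OOSTORE is unset, since stores are not reordered on ordinary x86. A
hypothetical producer/consumer pair showing how the macros are meant to be paired
(payload and ready are illustrative, not from this changeset):

    static int payload;
    static volatile int ready;

    void producer(void)
    {
        payload = 42;
        wmb();      /* order the payload store before the flag store */
        ready = 1;
    }

    int consumer(void)
    {
        while ( !ready )
            ;
        rmb();      /* order the flag load before the payload load */
        return payload;
    }
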
    14.1 --- a/xen/include/asm-x86/uaccess.h	Tue Jun 15 12:29:06 2004 +0000
    14.2 +++ b/xen/include/asm-x86/uaccess.h	Tue Jun 15 15:28:22 2004 +0000
    14.3 @@ -1,600 +1,6 @@
    14.4 -#ifndef __i386_UACCESS_H
    14.5 -#define __i386_UACCESS_H
    14.6 -
    14.7 -/*
    14.8 - * User space memory access functions
    14.9 - */
   14.10 -#include <xen/config.h>
   14.11 -#include <xen/errno.h>
   14.12 -#include <xen/sched.h>
   14.13 -#include <xen/prefetch.h>
   14.14 -#include <asm/page.h>
   14.15 -
   14.16 -#define VERIFY_READ 0
   14.17 -#define VERIFY_WRITE 1
   14.18 -
   14.19 -/*
   14.20 - * The fs value determines whether argument validity checking should be
   14.21 - * performed or not.  If get_fs() == USER_DS, checking is performed, with
   14.22 - * get_fs() == KERNEL_DS, checking is bypassed.
   14.23 - *
   14.24 - * For historical reasons, these macros are grossly misnamed.
   14.25 - */
   14.26 -
   14.27 -#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
   14.28 -
   14.29 -
   14.30 -#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
   14.31 -#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
   14.32 -
   14.33 -#define get_ds()	(KERNEL_DS)
   14.34 -#define get_fs()	(current->addr_limit)
   14.35 -#define set_fs(x)	(current->addr_limit = (x))
   14.36 -
   14.37 -#define segment_eq(a,b)	((a).seg == (b).seg)
   14.38 -
   14.39 -extern int __verify_write(const void *, unsigned long);
   14.40 -
   14.41 -#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))
   14.42 -
   14.43 -/*
   14.44 - * Uhhuh, this needs 33-bit arithmetic. We have a carry..
   14.45 - */
   14.46 -#define __range_ok(addr,size) ({ \
   14.47 -	unsigned long flag,sum; \
   14.48 -	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
   14.49 -		:"=&r" (flag), "=r" (sum) \
   14.50 -		:"1" (addr),"g" ((int)(size)),"g" (current->addr_limit.seg)); \
   14.51 -	flag; })
   14.52 -
   14.53 -#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
   14.54 -
   14.55 -static inline int verify_area(int type, const void * addr, unsigned long size)
   14.56 -{
   14.57 -	return access_ok(type,addr,size) ? 0 : -EFAULT;
   14.58 -}
   14.59 -
   14.60 -
   14.61 -/*
   14.62 - * The exception table consists of pairs of addresses: the first is the
   14.63 - * address of an instruction that is allowed to fault, and the second is
   14.64 - * the address at which the program should continue.  No registers are
   14.65 - * modified, so it is entirely up to the continuation code to figure out
   14.66 - * what to do.
   14.67 - *
   14.68 - * All the routines below use bits of fixup code that are out of line
   14.69 - * with the main instruction path.  This means when everything is well,
   14.70 - * we don't even have to jump over them.  Further, they do not intrude
   14.71 - * on our cache or tlb entries.
   14.72 - */
   14.73 -
   14.74 -struct exception_table_entry
   14.75 -{
   14.76 -	unsigned long insn, fixup;
   14.77 -};
   14.78 -
   14.79 -/* Returns 0 if exception not found and fixup otherwise.  */
   14.80 -extern unsigned long search_exception_table(unsigned long);
   14.81 -
   14.82 -
   14.83 -/*
   14.84 - * These are the main single-value transfer routines.  They automatically
   14.85 - * use the right size if we just have the right pointer type.
   14.86 - *
   14.87 - * This gets kind of ugly. We want to return _two_ values in "get_user()"
   14.88 - * and yet we don't want to do any pointers, because that is too much
   14.89 - * of a performance impact. Thus we have a few rather ugly macros here,
   14.90 - * and hide all the uglyness from the user.
   14.91 - *
   14.92 - * The "__xxx" versions of the user access functions are versions that
   14.93 - * do not verify the address space, that must have been done previously
   14.94 - * with a separate "access_ok()" call (this is used when we do multiple
   14.95 - * accesses to the same area of user memory).
   14.96 - */
   14.97 -
   14.98 -extern void __get_user_1(void);
   14.99 -extern void __get_user_2(void);
  14.100 -extern void __get_user_4(void);
  14.101 -
  14.102 -#define __get_user_x(size,ret,x,ptr) \
  14.103 -	__asm__ __volatile__("call __get_user_" #size \
  14.104 -		:"=a" (ret),"=d" (x) \
  14.105 -		:"0" (ptr))
  14.106 -
  14.107 -/* Careful: we have to cast the result to the type of the pointer for sign reasons */
  14.108 -#define get_user(x,ptr)							\
  14.109 -({	int __ret_gu=1,__val_gu;						\
  14.110 -	switch(sizeof (*(ptr))) {					\
  14.111 -	case 1: __ret_gu=copy_from_user(&__val_gu,ptr,1); break;			\
  14.112 -	case 2: __ret_gu=copy_from_user(&__val_gu,ptr,2); break;                 \
  14.113 -	case 4: __ret_gu=copy_from_user(&__val_gu,ptr,4); break;                 \
  14.114 -	default: __ret_gu=copy_from_user(&__val_gu,ptr,8); break;                 \
  14.115 -	/*case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;*/		\
  14.116 -	/*case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;*/		\
  14.117 -	/*case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;*/		\
  14.118 -	/*default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;*/		\
  14.119 -	}								\
  14.120 -	(x) = (__typeof__(*(ptr)))__val_gu;				\
  14.121 -	__ret_gu;							\
  14.122 -})
  14.123 -
  14.124 -extern void __put_user_1(void);
  14.125 -extern void __put_user_2(void);
  14.126 -extern void __put_user_4(void);
  14.127 -extern void __put_user_8(void);
  14.128 -
  14.129 -extern void __put_user_bad(void);
  14.130 -
  14.131 -#define put_user(x,ptr)							\
  14.132 -  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
  14.133 -
  14.134 -#define __get_user(x,ptr) \
  14.135 -  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
  14.136 -#define __put_user(x,ptr) \
  14.137 -  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
  14.138 -
  14.139 -#define __put_user_nocheck(x,ptr,size)			\
  14.140 -({							\
  14.141 -	long __pu_err;					\
  14.142 -	__put_user_size((x),(ptr),(size),__pu_err);	\
  14.143 -	__pu_err;					\
  14.144 -})
  14.145 -
  14.146 -
  14.147 -#define __put_user_check(x,ptr,size)			\
  14.148 -({							\
  14.149 -	long __pu_err = -EFAULT;					\
  14.150 -	__typeof__(*(ptr)) *__pu_addr = (ptr);		\
  14.151 -	if (access_ok(VERIFY_WRITE,__pu_addr,size))	\
  14.152 -		__put_user_size((x),__pu_addr,(size),__pu_err);	\
  14.153 -	__pu_err;					\
  14.154 -})							
  14.155 -
  14.156 -#define __put_user_u64(x, addr, err)				\
  14.157 -	__asm__ __volatile__(					\
  14.158 -		"1:	movl %%eax,0(%2)\n"			\
  14.159 -		"2:	movl %%edx,4(%2)\n"			\
  14.160 -		"3:\n"						\
  14.161 -		".section .fixup,\"ax\"\n"			\
  14.162 -		"4:	movl %3,%0\n"				\
  14.163 -		"	jmp 3b\n"				\
  14.164 -		".previous\n"					\
  14.165 -		".section __ex_table,\"a\"\n"			\
  14.166 -		"	.align 4\n"				\
  14.167 -		"	.long 1b,4b\n"				\
  14.168 -		"	.long 2b,4b\n"				\
  14.169 -		".previous"					\
  14.170 -		: "=r"(err)					\
  14.171 -		: "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
  14.172 -
  14.173 -#define __put_user_size(x,ptr,size,retval)				\
  14.174 -do {									\
  14.175 -	retval = 0;							\
  14.176 -	switch (size) {							\
  14.177 -	  case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break;	\
  14.178 -	  case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break;	\
  14.179 -	  case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break;	\
  14.180 -	  case 8: __put_user_u64(x,ptr,retval); break;			\
  14.181 -	  default: __put_user_bad();					\
  14.182 -	}								\
  14.183 -} while (0)
  14.184 -
  14.185 -struct __large_struct { unsigned long buf[100]; };
  14.186 -#define __m(x) (*(struct __large_struct *)(x))
  14.187 -
  14.188 -/*
  14.189 - * Tell gcc we read from memory instead of writing: this is because
  14.190 - * we do not write to any memory gcc knows about, so there are no
  14.191 - * aliasing issues.
  14.192 - */
  14.193 -#define __put_user_asm(x, addr, err, itype, rtype, ltype)	\
  14.194 -	__asm__ __volatile__(					\
  14.195 -		"1:	mov"itype" %"rtype"1,%2\n"		\
  14.196 -		"2:\n"						\
  14.197 -		".section .fixup,\"ax\"\n"			\
  14.198 -		"3:	movl %3,%0\n"				\
  14.199 -		"	jmp 2b\n"				\
  14.200 -		".previous\n"					\
  14.201 -		".section __ex_table,\"a\"\n"			\
  14.202 -		"	.align 4\n"				\
  14.203 -		"	.long 1b,3b\n"				\
  14.204 -		".previous"					\
  14.205 -		: "=r"(err)					\
  14.206 -		: ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
  14.207 -
  14.208 -
  14.209 -#define __get_user_nocheck(x,ptr,size)				\
  14.210 -({								\
  14.211 -	long __gu_err, __gu_val;				\
  14.212 -	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
  14.213 -	(x) = (__typeof__(*(ptr)))__gu_val;			\
  14.214 -	__gu_err;						\
  14.215 -})
  14.216 -
  14.217 -extern long __get_user_bad(void);
  14.218 -
  14.219 -#define __get_user_size(x,ptr,size,retval)				\
  14.220 -do {									\
  14.221 -	retval = 0;							\
  14.222 -	switch (size) {							\
  14.223 -	  case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break;	\
  14.224 -	  case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break;	\
  14.225 -	  case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break;	\
  14.226 -	  default: (x) = __get_user_bad();				\
  14.227 -	}								\
  14.228 -} while (0)
  14.229 -
  14.230 -#define __get_user_asm(x, addr, err, itype, rtype, ltype)	\
  14.231 -	__asm__ __volatile__(					\
  14.232 -		"1:	mov"itype" %2,%"rtype"1\n"		\
  14.233 -		"2:\n"						\
  14.234 -		".section .fixup,\"ax\"\n"			\
  14.235 -		"3:	movl %3,%0\n"				\
  14.236 -		"	xor"itype" %"rtype"1,%"rtype"1\n"	\
  14.237 -		"	jmp 2b\n"				\
  14.238 -		".previous\n"					\
  14.239 -		".section __ex_table,\"a\"\n"			\
  14.240 -		"	.align 4\n"				\
  14.241 -		"	.long 1b,3b\n"				\
  14.242 -		".previous"					\
  14.243 -		: "=r"(err), ltype (x)				\
  14.244 -		: "m"(__m(addr)), "i"(-EFAULT), "0"(err))
  14.245 -
  14.246 -
  14.247 -/*
  14.248 - * Copy To/From Userspace
  14.249 - */
  14.250 -
  14.251 -/* Generic arbitrary sized copy.  */
  14.252 -#define __copy_user(to,from,size)					\
  14.253 -do {									\
  14.254 -	int __d0, __d1;							\
  14.255 -	__asm__ __volatile__(						\
  14.256 -		"0:	rep; movsl\n"					\
  14.257 -		"	movl %3,%0\n"					\
  14.258 -		"1:	rep; movsb\n"					\
  14.259 -		"2:\n"							\
  14.260 -		".section .fixup,\"ax\"\n"				\
  14.261 -		"3:	lea 0(%3,%0,4),%0\n"				\
  14.262 -		"	jmp 2b\n"					\
  14.263 -		".previous\n"						\
  14.264 -		".section __ex_table,\"a\"\n"				\
  14.265 -		"	.align 4\n"					\
  14.266 -		"	.long 0b,3b\n"					\
  14.267 -		"	.long 1b,2b\n"					\
  14.268 -		".previous"						\
  14.269 -		: "=&c"(size), "=&D" (__d0), "=&S" (__d1)		\
  14.270 -		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from)	\
  14.271 -		: "memory");						\
  14.272 -} while (0)
  14.273 -
  14.274 -#define __copy_user_zeroing(to,from,size)				\
  14.275 -do {									\
  14.276 -	int __d0, __d1;							\
  14.277 -	__asm__ __volatile__(						\
  14.278 -		"0:	rep; movsl\n"					\
  14.279 -		"	movl %3,%0\n"					\
  14.280 -		"1:	rep; movsb\n"					\
  14.281 -		"2:\n"							\
  14.282 -		".section .fixup,\"ax\"\n"				\
  14.283 -		"3:	lea 0(%3,%0,4),%0\n"				\
  14.284 -		"4:	pushl %0\n"					\
  14.285 -		"	pushl %%eax\n"					\
  14.286 -		"	xorl %%eax,%%eax\n"				\
  14.287 -		"	rep; stosb\n"					\
  14.288 -		"	popl %%eax\n"					\
  14.289 -		"	popl %0\n"					\
  14.290 -		"	jmp 2b\n"					\
  14.291 -		".previous\n"						\
  14.292 -		".section __ex_table,\"a\"\n"				\
  14.293 -		"	.align 4\n"					\
  14.294 -		"	.long 0b,3b\n"					\
  14.295 -		"	.long 1b,4b\n"					\
  14.296 -		".previous"						\
  14.297 -		: "=&c"(size), "=&D" (__d0), "=&S" (__d1)		\
  14.298 -		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from)	\
  14.299 -		: "memory");						\
  14.300 -} while (0)
  14.301  
  14.302 -/* We let the __ versions of copy_from/to_user inline, because they're often
  14.303 - * used in fast paths and have only a small space overhead.
  14.304 - */
  14.305 -static inline unsigned long
  14.306 -__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
  14.307 -{
  14.308 -	__copy_user_zeroing(to,from,n);
  14.309 -	return n;
  14.310 -}
  14.311 -
  14.312 -static inline unsigned long
  14.313 -__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
  14.314 -{
  14.315 -	__copy_user(to,from,n);
  14.316 -	return n;
  14.317 -}
  14.318 -
  14.319 -
  14.320 -/* Optimize just a little bit when we know the size of the move. */
  14.321 -#define __constant_copy_user(to, from, size)			\
  14.322 -do {								\
  14.323 -	int __d0, __d1;						\
  14.324 -	switch (size & 3) {					\
  14.325 -	default:						\
  14.326 -		__asm__ __volatile__(				\
  14.327 -			"0:	rep; movsl\n"			\
  14.328 -			"1:\n"					\
  14.329 -			".section .fixup,\"ax\"\n"		\
  14.330 -			"2:	shl $2,%0\n"			\
  14.331 -			"	jmp 1b\n"			\
  14.332 -			".previous\n"				\
  14.333 -			".section __ex_table,\"a\"\n"		\
  14.334 -			"	.align 4\n"			\
  14.335 -			"	.long 0b,2b\n"			\
  14.336 -			".previous"				\
  14.337 -			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  14.338 -			: "1"(from), "2"(to), "0"(size/4)	\
  14.339 -			: "memory");				\
  14.340 -		break;						\
  14.341 -	case 1:							\
  14.342 -		__asm__ __volatile__(				\
  14.343 -			"0:	rep; movsl\n"			\
  14.344 -			"1:	movsb\n"			\
  14.345 -			"2:\n"					\
  14.346 -			".section .fixup,\"ax\"\n"		\
  14.347 -			"3:	shl $2,%0\n"			\
  14.348 -			"4:	incl %0\n"			\
  14.349 -			"	jmp 2b\n"			\
  14.350 -			".previous\n"				\
  14.351 -			".section __ex_table,\"a\"\n"		\
  14.352 -			"	.align 4\n"			\
  14.353 -			"	.long 0b,3b\n"			\
  14.354 -			"	.long 1b,4b\n"			\
  14.355 -			".previous"				\
  14.356 -			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  14.357 -			: "1"(from), "2"(to), "0"(size/4)	\
  14.358 -			: "memory");				\
  14.359 -		break;						\
  14.360 -	case 2:							\
  14.361 -		__asm__ __volatile__(				\
  14.362 -			"0:	rep; movsl\n"			\
  14.363 -			"1:	movsw\n"			\
  14.364 -			"2:\n"					\
  14.365 -			".section .fixup,\"ax\"\n"		\
  14.366 -			"3:	shl $2,%0\n"			\
  14.367 -			"4:	addl $2,%0\n"			\
  14.368 -			"	jmp 2b\n"			\
  14.369 -			".previous\n"				\
  14.370 -			".section __ex_table,\"a\"\n"		\
  14.371 -			"	.align 4\n"			\
  14.372 -			"	.long 0b,3b\n"			\
  14.373 -			"	.long 1b,4b\n"			\
  14.374 -			".previous"				\
  14.375 -			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  14.376 -			: "1"(from), "2"(to), "0"(size/4)	\
  14.377 -			: "memory");				\
  14.378 -		break;						\
  14.379 -	case 3:							\
  14.380 -		__asm__ __volatile__(				\
  14.381 -			"0:	rep; movsl\n"			\
  14.382 -			"1:	movsw\n"			\
  14.383 -			"2:	movsb\n"			\
  14.384 -			"3:\n"					\
  14.385 -			".section .fixup,\"ax\"\n"		\
  14.386 -			"4:	shl $2,%0\n"			\
  14.387 -			"5:	addl $2,%0\n"			\
  14.388 -			"6:	incl %0\n"			\
  14.389 -			"	jmp 3b\n"			\
  14.390 -			".previous\n"				\
  14.391 -			".section __ex_table,\"a\"\n"		\
  14.392 -			"	.align 4\n"			\
  14.393 -			"	.long 0b,4b\n"			\
  14.394 -			"	.long 1b,5b\n"			\
  14.395 -			"	.long 2b,6b\n"			\
  14.396 -			".previous"				\
  14.397 -			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  14.398 -			: "1"(from), "2"(to), "0"(size/4)	\
  14.399 -			: "memory");				\
  14.400 -		break;						\
  14.401 -	}							\
  14.402 -} while (0)
  14.403 -
  14.404 -/* Optimize just a little bit when we know the size of the move. */
  14.405 -#define __constant_copy_user_zeroing(to, from, size)		\
  14.406 -do {								\
  14.407 -	int __d0, __d1;						\
  14.408 -	switch (size & 3) {					\
  14.409 -	default:						\
  14.410 -		__asm__ __volatile__(				\
  14.411 -			"0:	rep; movsl\n"			\
  14.412 -			"1:\n"					\
  14.413 -			".section .fixup,\"ax\"\n"		\
  14.414 -			"2:	pushl %0\n"			\
  14.415 -			"	pushl %%eax\n"			\
  14.416 -			"	xorl %%eax,%%eax\n"		\
  14.417 -			"	rep; stosl\n"			\
  14.418 -			"	popl %%eax\n"			\
  14.419 -			"	popl %0\n"			\
  14.420 -			"	shl $2,%0\n"			\
  14.421 -			"	jmp 1b\n"			\
  14.422 -			".previous\n"				\
  14.423 -			".section __ex_table,\"a\"\n"		\
  14.424 -			"	.align 4\n"			\
  14.425 -			"	.long 0b,2b\n"			\
  14.426 -			".previous"				\
  14.427 -			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  14.428 -			: "1"(from), "2"(to), "0"(size/4)	\
  14.429 -			: "memory");				\
  14.430 -		break;						\
  14.431 -	case 1:							\
  14.432 -		__asm__ __volatile__(				\
  14.433 -			"0:	rep; movsl\n"			\
  14.434 -			"1:	movsb\n"			\
  14.435 -			"2:\n"					\
  14.436 -			".section .fixup,\"ax\"\n"		\
  14.437 -			"3:	pushl %0\n"			\
  14.438 -			"	pushl %%eax\n"			\
  14.439 -			"	xorl %%eax,%%eax\n"		\
  14.440 -			"	rep; stosl\n"			\
  14.441 -			"	stosb\n"			\
  14.442 -			"	popl %%eax\n"			\
  14.443 -			"	popl %0\n"			\
  14.444 -			"	shl $2,%0\n"			\
  14.445 -			"	incl %0\n"			\
  14.446 -			"	jmp 2b\n"			\
  14.447 -			"4:	pushl %%eax\n"			\
  14.448 -			"	xorl %%eax,%%eax\n"		\
  14.449 -			"	stosb\n"			\
  14.450 -			"	popl %%eax\n"			\
  14.451 -			"	incl %0\n"			\
  14.452 -			"	jmp 2b\n"			\
  14.453 -			".previous\n"				\
  14.454 -			".section __ex_table,\"a\"\n"		\
  14.455 -			"	.align 4\n"			\
  14.456 -			"	.long 0b,3b\n"			\
  14.457 -			"	.long 1b,4b\n"			\
  14.458 -			".previous"				\
  14.459 -			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  14.460 -			: "1"(from), "2"(to), "0"(size/4)	\
  14.461 -			: "memory");				\
  14.462 -		break;						\
  14.463 -	case 2:							\
  14.464 -		__asm__ __volatile__(				\
  14.465 -			"0:	rep; movsl\n"			\
  14.466 -			"1:	movsw\n"			\
  14.467 -			"2:\n"					\
  14.468 -			".section .fixup,\"ax\"\n"		\
  14.469 -			"3:	pushl %0\n"			\
  14.470 -			"	pushl %%eax\n"			\
  14.471 -			"	xorl %%eax,%%eax\n"		\
  14.472 -			"	rep; stosl\n"			\
  14.473 -			"	stosw\n"			\
  14.474 -			"	popl %%eax\n"			\
  14.475 -			"	popl %0\n"			\
  14.476 -			"	shl $2,%0\n"			\
  14.477 -			"	addl $2,%0\n"			\
  14.478 -			"	jmp 2b\n"			\
  14.479 -			"4:	pushl %%eax\n"			\
  14.480 -			"	xorl %%eax,%%eax\n"		\
  14.481 -			"	stosw\n"			\
  14.482 -			"	popl %%eax\n"			\
  14.483 -			"	addl $2,%0\n"			\
  14.484 -			"	jmp 2b\n"			\
  14.485 -			".previous\n"				\
  14.486 -			".section __ex_table,\"a\"\n"		\
  14.487 -			"	.align 4\n"			\
  14.488 -			"	.long 0b,3b\n"			\
  14.489 -			"	.long 1b,4b\n"			\
  14.490 -			".previous"				\
  14.491 -			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  14.492 -			: "1"(from), "2"(to), "0"(size/4)	\
  14.493 -			: "memory");				\
  14.494 -		break;						\
  14.495 -	case 3:							\
  14.496 -		__asm__ __volatile__(				\
  14.497 -			"0:	rep; movsl\n"			\
  14.498 -			"1:	movsw\n"			\
  14.499 -			"2:	movsb\n"			\
  14.500 -			"3:\n"					\
  14.501 -			".section .fixup,\"ax\"\n"		\
  14.502 -			"4:	pushl %0\n"			\
  14.503 -			"	pushl %%eax\n"			\
  14.504 -			"	xorl %%eax,%%eax\n"		\
  14.505 -			"	rep; stosl\n"			\
  14.506 -			"	stosw\n"			\
  14.507 -			"	stosb\n"			\
  14.508 -			"	popl %%eax\n"			\
  14.509 -			"	popl %0\n"			\
  14.510 -			"	shl $2,%0\n"			\
  14.511 -			"	addl $3,%0\n"			\
  14.512 -			"	jmp 2b\n"			\
  14.513 -			"5:	pushl %%eax\n"			\
  14.514 -			"	xorl %%eax,%%eax\n"		\
  14.515 -			"	stosw\n"			\
  14.516 -			"	stosb\n"			\
  14.517 -			"	popl %%eax\n"			\
  14.518 -			"	addl $3,%0\n"			\
  14.519 -			"	jmp 2b\n"			\
  14.520 -			"6:	pushl %%eax\n"			\
  14.521 -			"	xorl %%eax,%%eax\n"		\
  14.522 -			"	stosb\n"			\
  14.523 -			"	popl %%eax\n"			\
  14.524 -			"	incl %0\n"			\
  14.525 -			"	jmp 3b\n"			\
  14.526 -			".previous\n"				\
  14.527 -			".section __ex_table,\"a\"\n"		\
  14.528 -			"	.align 4\n"			\
  14.529 -			"	.long 0b,4b\n"			\
  14.530 -			"	.long 1b,5b\n"			\
  14.531 -			"	.long 2b,6b\n"			\
  14.532 -			".previous"				\
  14.533 -			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  14.534 -			: "1"(from), "2"(to), "0"(size/4)	\
  14.535 -			: "memory");				\
  14.536 -		break;						\
  14.537 -	}							\
  14.538 -} while (0)
  14.539 -
  14.540 -unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
  14.541 -unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
  14.542 -
  14.543 -static inline unsigned long
  14.544 -__constant_copy_to_user(void *to, const void *from, unsigned long n)
  14.545 -{
  14.546 -	prefetch(from);
  14.547 -	if (access_ok(VERIFY_WRITE, to, n))
  14.548 -		__constant_copy_user(to,from,n);
  14.549 -	return n;
  14.550 -}
  14.551 -
  14.552 -static inline unsigned long
  14.553 -__constant_copy_from_user(void *to, const void *from, unsigned long n)
  14.554 -{
  14.555 -	if (access_ok(VERIFY_READ, from, n))
  14.556 -		__constant_copy_user_zeroing(to,from,n);
  14.557 -	else
  14.558 -		memset(to, 0, n);
  14.559 -	return n;
  14.560 -}
  14.561 -
  14.562 -static inline unsigned long
  14.563 -__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
  14.564 -{
  14.565 -	__constant_copy_user(to,from,n);
  14.566 -	return n;
  14.567 -}
  14.568 -
  14.569 -static inline unsigned long
  14.570 -__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
  14.571 -{
  14.572 -	__constant_copy_user_zeroing(to,from,n);
  14.573 -	return n;
  14.574 -}
  14.575 -
  14.576 -#define copy_to_user(to,from,n)				\
  14.577 -	(__builtin_constant_p(n) ?			\
  14.578 -	 __constant_copy_to_user((to),(from),(n)) :	\
  14.579 -	 __generic_copy_to_user((to),(from),(n)))
  14.580 -
  14.581 -#define copy_from_user(to,from,n)			\
  14.582 -	(__builtin_constant_p(n) ?			\
  14.583 -	 __constant_copy_from_user((to),(from),(n)) :	\
  14.584 -	 __generic_copy_from_user((to),(from),(n)))
  14.585 -
  14.586 -#define __copy_to_user(to,from,n)			\
  14.587 -	(__builtin_constant_p(n) ?			\
  14.588 -	 __constant_copy_to_user_nocheck((to),(from),(n)) :	\
  14.589 -	 __generic_copy_to_user_nocheck((to),(from),(n)))
  14.590 -
  14.591 -#define __copy_from_user(to,from,n)			\
  14.592 -	(__builtin_constant_p(n) ?			\
  14.593 -	 __constant_copy_from_user_nocheck((to),(from),(n)) :	\
  14.594 -	 __generic_copy_from_user_nocheck((to),(from),(n)))
  14.595 -
  14.596 -long strncpy_from_user(char *dst, const char *src, long count);
  14.597 -long __strncpy_from_user(char *dst, const char *src, long count);
  14.598 -#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
  14.599 -long strnlen_user(const char *str, long n);
  14.600 -unsigned long clear_user(void *mem, unsigned long len);
  14.601 -unsigned long __clear_user(void *mem, unsigned long len);
  14.602 -
  14.603 -#endif /* __i386_UACCESS_H */
  14.604 +#ifdef __x86_64__
  14.605 +#include <asm/x86_64/uaccess.h>
  14.606 +#else
  14.607 +#include <asm/x86_32/uaccess.h>
  14.608 +#endif
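
Like current.h and ptrace.h, uaccess.h becomes a pure architecture switch, with the
x86_32 body moved wholesale to x86_32/uaccess.h (section 17 below). Everything in that
body hangs off the exception-table mechanism it declares: each instruction that may
fault on a guest pointer is paired, via the ".section __ex_table" fragments, with an
out-of-line fixup address. A sketch of how a page-fault handler consumes the table;
fixup_exception is an illustrative name, while search_exception_table is the routine
the header actually declares:

    struct exception_table_entry {
        unsigned long insn, fixup;
    };

    /* Returns 0 if no entry matches, else the fixup address. */
    extern unsigned long search_exception_table(unsigned long);

    static int fixup_exception(struct pt_regs *regs)
    {
        unsigned long fixup = search_exception_table(regs->eip);
        if ( fixup == 0 )
            return 0;       /* genuine fault: no fixup registered */
        regs->eip = fixup;  /* resume at the out-of-line fixup stub */
        return 1;
    }
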
    15.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.2 +++ b/xen/include/asm-x86/x86_32/current.h	Tue Jun 15 15:28:22 2004 +0000
    15.3 @@ -0,0 +1,52 @@
    15.4 +#ifndef _X86_CURRENT_H
    15.5 +#define _X86_CURRENT_H
    15.6 +
    15.7 +struct task_struct;
    15.8 +
    15.9 +#define STACK_RESERVED \
   15.10 +    (sizeof(execution_context_t) + sizeof(struct task_struct *))
   15.11 +
   15.12 +static inline struct task_struct * get_current(void)
   15.13 +{
   15.14 +    struct task_struct *current;
   15.15 +    __asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0" 
   15.16 +              : "=r" (current) : "0" (STACK_SIZE-4) );
   15.17 +    return current;
   15.18 +}
   15.19 + 
   15.20 +#define current get_current()
   15.21 +
   15.22 +static inline void set_current(struct task_struct *p)
   15.23 +{
   15.24 +    __asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)" 
   15.25 +              : : "r" (STACK_SIZE-4), "r" (p) );    
   15.26 +}
   15.27 +
   15.28 +static inline execution_context_t *get_execution_context(void)
   15.29 +{
   15.30 +    execution_context_t *execution_context;
   15.31 +    __asm__ ( "andl %%esp,%0; addl %2,%0"
   15.32 +              : "=r" (execution_context) 
   15.33 +              : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
   15.34 +    return execution_context;
   15.35 +}
   15.36 +
   15.37 +static inline unsigned long get_stack_top(void)
   15.38 +{
   15.39 +    unsigned long p;
   15.40 +    __asm__ ( "orl %%esp,%0; andl $~3,%0" 
   15.41 +              : "=r" (p) : "0" (STACK_SIZE-4) );
   15.42 +    return p;
   15.43 +}
   15.44 +
   15.45 +#define schedule_tail(_p)                                         \
   15.46 +    __asm__ __volatile__ (                                        \
   15.47 +        "andl %%esp,%0; addl %2,%0; movl %0,%%esp; jmp *%1"       \
   15.48 +        : : "r" (~(STACK_SIZE-1)),                                \
   15.49 +            "r" (unlikely(is_idle_task((_p))) ?                   \
   15.50 +                                continue_cpu_idle_loop :          \
   15.51 +                                continue_nonidle_task),           \
   15.52 +            "i" (STACK_SIZE-STACK_RESERVED) )
   15.53 +
   15.54 +
   15.55 +#endif /* _X86_CURRENT_H */
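
For readers unfamiliar with the trick above: the hypervisor stack is STACK_SIZE-aligned, and its topmost word holds the struct task_struct pointer stored by set_current(); the orl/andl arithmetic simply recovers that slot from %esp. A minimal C sketch of the same computation (hypothetical helper; assumes STACK_SIZE is a power of two such as 8192):

    struct task_struct;
    #define STACK_SIZE 8192   /* assumed power of two for this sketch */

    static inline struct task_struct *sketch_get_current(unsigned long esp)
    {
        /* OR-ing esp with (STACK_SIZE-4) rounds up to the last 4-byte
         * slot of the current stack; AND ~3 keeps it word-aligned.
         * That slot is where set_current() stored the task pointer. */
        unsigned long slot = (esp | (STACK_SIZE - 4)) & ~3UL;
        return *(struct task_struct **)slot;
    }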
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/xen/include/asm-x86/x86_32/ptrace.h	Tue Jun 15 15:28:22 2004 +0000
    16.3 @@ -0,0 +1,51 @@
    16.4 +#ifndef _I386_PTRACE_H
    16.5 +#define _I386_PTRACE_H
    16.6 +
    16.7 +struct pt_regs {
    16.8 +	long ebx;
    16.9 +	long ecx;
   16.10 +	long edx;
   16.11 +	long esi;
   16.12 +	long edi;
   16.13 +	long ebp;
   16.14 +	long eax;
   16.15 +	int  xds;
   16.16 +	int  xes;
   16.17 +	int  xfs;
   16.18 +	int  xgs;
   16.19 +	long orig_eax;
   16.20 +	long eip;
   16.21 +	int  xcs;
   16.22 +	long eflags;
   16.23 +	long esp;
   16.24 +	int  xss;
   16.25 +};
   16.26 +
   16.27 +enum EFLAGS {
   16.28 +        EF_CF   = 0x00000001,
   16.29 +        EF_PF   = 0x00000004,
   16.30 +        EF_AF   = 0x00000010,
   16.31 +        EF_ZF   = 0x00000040,
   16.32 +        EF_SF   = 0x00000080,
   16.33 +        EF_TF   = 0x00000100,
   16.34 +        EF_IE   = 0x00000200,
   16.35 +        EF_DF   = 0x00000400,
   16.36 +        EF_OF   = 0x00000800,
   16.37 +        EF_IOPL = 0x00003000,
   16.38 +        EF_IOPL_RING0 = 0x00000000,
   16.39 +        EF_IOPL_RING1 = 0x00001000,
   16.40 +        EF_IOPL_RING2 = 0x00002000,
   16.41 +        EF_NT   = 0x00004000,   /* nested task */
   16.42 +        EF_RF   = 0x00010000,   /* resume */
   16.43 +        EF_VM   = 0x00020000,   /* virtual mode */
   16.44 +        EF_AC   = 0x00040000,   /* alignment */
   16.45 +        EF_VIF  = 0x00080000,   /* virtual interrupt */
   16.46 +        EF_VIP  = 0x00100000,   /* virtual interrupt pending */
   16.47 +        EF_ID   = 0x00200000,   /* id */
   16.48 +};
   16.49 +
   16.50 +#ifdef __KERNEL__
   16.51 +#define user_mode(regs) ((3 & (regs)->xcs))
   16.52 +#endif
   16.53 +
   16.54 +#endif
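
user_mode() above simply tests the RPL bits of the saved %cs selector. A hypothetical handler fragment showing the intended use:

    /* Hypothetical fragment: distinguish ring-0 faults from guest faults. */
    static void sketch_dispatch_fault(struct pt_regs *regs)
    {
        if (user_mode(regs)) {
            /* saved xcs has RPL != 0: fault from a less-privileged ring */
        } else {
            /* fault in ring 0: try the exception fixup table instead */
        }
    }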
    17.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.2 +++ b/xen/include/asm-x86/x86_32/uaccess.h	Tue Jun 15 15:28:22 2004 +0000
    17.3 @@ -0,0 +1,600 @@
    17.4 +#ifndef __i386_UACCESS_H
    17.5 +#define __i386_UACCESS_H
    17.6 +
    17.7 +/*
    17.8 + * User space memory access functions
    17.9 + */
   17.10 +#include <xen/config.h>
   17.11 +#include <xen/errno.h>
   17.12 +#include <xen/sched.h>
   17.13 +#include <xen/prefetch.h>
   17.14 +#include <asm/page.h>
   17.15 +
   17.16 +#define VERIFY_READ 0
   17.17 +#define VERIFY_WRITE 1
   17.18 +
   17.19 +/*
   17.20 + * The fs value determines whether argument validity checking should be
    17.21 + * performed or not.  If get_fs() == USER_DS, checking is performed; with
   17.22 + * get_fs() == KERNEL_DS, checking is bypassed.
   17.23 + *
   17.24 + * For historical reasons, these macros are grossly misnamed.
   17.25 + */
   17.26 +
   17.27 +#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
   17.28 +
   17.29 +
   17.30 +#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
   17.31 +#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
   17.32 +
   17.33 +#define get_ds()	(KERNEL_DS)
   17.34 +#define get_fs()	(current->addr_limit)
   17.35 +#define set_fs(x)	(current->addr_limit = (x))
   17.36 +
   17.37 +#define segment_eq(a,b)	((a).seg == (b).seg)
   17.38 +
   17.39 +extern int __verify_write(const void *, unsigned long);
   17.40 +
   17.41 +#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))
   17.42 +
   17.43 +/*
   17.44 + * Uhhuh, this needs 33-bit arithmetic. We have a carry..
   17.45 + */
   17.46 +#define __range_ok(addr,size) ({ \
   17.47 +	unsigned long flag,sum; \
   17.48 +	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
   17.49 +		:"=&r" (flag), "=r" (sum) \
   17.50 +		:"1" (addr),"g" ((int)(size)),"g" (current->addr_limit.seg)); \
   17.51 +	flag; })
   17.52 +
   17.53 +#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
   17.54 +
   17.55 +static inline int verify_area(int type, const void * addr, unsigned long size)
   17.56 +{
   17.57 +	return access_ok(type,addr,size) ? 0 : -EFAULT;
   17.58 +}
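
The asm in __range_ok() exists because addr + size can wrap at 32 bits; the sbbl chain folds the carry into the comparison, giving an effectively 33-bit addr + size <= addr_limit test. A plain-C equivalent under that reading (hypothetical helper, using 64-bit arithmetic in place of the carry flag):

    /* Hypothetical plain-C rendering of __range_ok(): 0 means the range
     * [addr, addr+size) lies entirely below the segment limit. */
    static inline int sketch_range_ok(unsigned long addr, unsigned long size,
                                      unsigned long limit)
    {
        unsigned long long sum = (unsigned long long)addr + size;
        return (sum <= limit) ? 0 : 1;
    }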
   17.59 +
   17.60 +
   17.61 +/*
   17.62 + * The exception table consists of pairs of addresses: the first is the
   17.63 + * address of an instruction that is allowed to fault, and the second is
   17.64 + * the address at which the program should continue.  No registers are
   17.65 + * modified, so it is entirely up to the continuation code to figure out
   17.66 + * what to do.
   17.67 + *
   17.68 + * All the routines below use bits of fixup code that are out of line
   17.69 + * with the main instruction path.  This means when everything is well,
   17.70 + * we don't even have to jump over them.  Further, they do not intrude
   17.71 + * on our cache or tlb entries.
   17.72 + */
   17.73 +
   17.74 +struct exception_table_entry
   17.75 +{
   17.76 +	unsigned long insn, fixup;
   17.77 +};
   17.78 +
   17.79 +/* Returns 0 if exception not found and fixup otherwise.  */
   17.80 +extern unsigned long search_exception_table(unsigned long);
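
A sketch of how the fault path is expected to consume this table (hypothetical wrapper; the real lookup is driven from the page-fault handler):

    /* Hypothetical use of search_exception_table() on a faulting EIP. */
    static unsigned long sketch_fixup_eip(unsigned long eip)
    {
        unsigned long fixup = search_exception_table(eip);
        /* Non-zero: resume at the out-of-line recovery code in .fixup.
         * Zero: the faulting instruction was not whitelisted. */
        return fixup;
    }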
   17.81 +
   17.82 +
   17.83 +/*
   17.84 + * These are the main single-value transfer routines.  They automatically
   17.85 + * use the right size if we just have the right pointer type.
   17.86 + *
   17.87 + * This gets kind of ugly. We want to return _two_ values in "get_user()"
   17.88 + * and yet we don't want to do any pointers, because that is too much
   17.89 + * of a performance impact. Thus we have a few rather ugly macros here,
   17.90 + * and hide all the uglyness from the user.
   17.91 + *
   17.92 + * The "__xxx" versions of the user access functions are versions that
   17.93 + * do not verify the address space, that must have been done previously
   17.94 + * with a separate "access_ok()" call (this is used when we do multiple
   17.95 + * accesses to the same area of user memory).
   17.96 + */
   17.97 +
   17.98 +extern void __get_user_1(void);
   17.99 +extern void __get_user_2(void);
  17.100 +extern void __get_user_4(void);
  17.101 +
  17.102 +#define __get_user_x(size,ret,x,ptr) \
  17.103 +	__asm__ __volatile__("call __get_user_" #size \
  17.104 +		:"=a" (ret),"=d" (x) \
  17.105 +		:"0" (ptr))
  17.106 +
  17.107 +/* Careful: we have to cast the result to the type of the pointer for sign reasons */
  17.108 +#define get_user(x,ptr)							\
   17.109 +({	int __ret_gu=1; long long __val_gu=0;				\
  17.110 +	switch(sizeof (*(ptr))) {					\
  17.111 +	case 1: __ret_gu=copy_from_user(&__val_gu,ptr,1); break;			\
  17.112 +	case 2: __ret_gu=copy_from_user(&__val_gu,ptr,2); break;                 \
  17.113 +	case 4: __ret_gu=copy_from_user(&__val_gu,ptr,4); break;                 \
  17.114 +	default: __ret_gu=copy_from_user(&__val_gu,ptr,8); break;                 \
  17.115 +	/*case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;*/		\
  17.116 +	/*case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;*/		\
  17.117 +	/*case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;*/		\
  17.118 +	/*default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;*/		\
  17.119 +	}								\
  17.120 +	(x) = (__typeof__(*(ptr)))__val_gu;				\
  17.121 +	__ret_gu;							\
  17.122 +})
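
Typical use of the pair (hypothetical caller): get_user() yields non-zero on a faulting access and writes the fetched value through its first argument; put_user(), defined just below, is the checked-store counterpart.

    /* Hypothetical fetch-modify-writeback of a guest-supplied word. */
    static int sketch_bump_user_word(unsigned long *uptr)
    {
        unsigned long v;
        if (get_user(v, uptr))
            return -EFAULT;        /* read faulted */
        return put_user(v + 1, uptr) ? -EFAULT : 0;
    }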
  17.123 +
  17.124 +extern void __put_user_1(void);
  17.125 +extern void __put_user_2(void);
  17.126 +extern void __put_user_4(void);
  17.127 +extern void __put_user_8(void);
  17.128 +
  17.129 +extern void __put_user_bad(void);
  17.130 +
  17.131 +#define put_user(x,ptr)							\
  17.132 +  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
  17.133 +
  17.134 +#define __get_user(x,ptr) \
  17.135 +  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
  17.136 +#define __put_user(x,ptr) \
  17.137 +  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
  17.138 +
  17.139 +#define __put_user_nocheck(x,ptr,size)			\
  17.140 +({							\
  17.141 +	long __pu_err;					\
  17.142 +	__put_user_size((x),(ptr),(size),__pu_err);	\
  17.143 +	__pu_err;					\
  17.144 +})
  17.145 +
  17.146 +
  17.147 +#define __put_user_check(x,ptr,size)			\
  17.148 +({							\
  17.149 +	long __pu_err = -EFAULT;					\
  17.150 +	__typeof__(*(ptr)) *__pu_addr = (ptr);		\
  17.151 +	if (access_ok(VERIFY_WRITE,__pu_addr,size))	\
  17.152 +		__put_user_size((x),__pu_addr,(size),__pu_err);	\
  17.153 +	__pu_err;					\
  17.154 +})							
  17.155 +
  17.156 +#define __put_user_u64(x, addr, err)				\
  17.157 +	__asm__ __volatile__(					\
  17.158 +		"1:	movl %%eax,0(%2)\n"			\
  17.159 +		"2:	movl %%edx,4(%2)\n"			\
  17.160 +		"3:\n"						\
  17.161 +		".section .fixup,\"ax\"\n"			\
  17.162 +		"4:	movl %3,%0\n"				\
  17.163 +		"	jmp 3b\n"				\
  17.164 +		".previous\n"					\
  17.165 +		".section __ex_table,\"a\"\n"			\
  17.166 +		"	.align 4\n"				\
  17.167 +		"	.long 1b,4b\n"				\
  17.168 +		"	.long 2b,4b\n"				\
  17.169 +		".previous"					\
  17.170 +		: "=r"(err)					\
  17.171 +		: "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))
  17.172 +
  17.173 +#define __put_user_size(x,ptr,size,retval)				\
  17.174 +do {									\
  17.175 +	retval = 0;							\
  17.176 +	switch (size) {							\
  17.177 +	  case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break;	\
  17.178 +	  case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break;	\
  17.179 +	  case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break;	\
  17.180 +	  case 8: __put_user_u64(x,ptr,retval); break;			\
  17.181 +	  default: __put_user_bad();					\
  17.182 +	}								\
  17.183 +} while (0)
  17.184 +
  17.185 +struct __large_struct { unsigned long buf[100]; };
  17.186 +#define __m(x) (*(struct __large_struct *)(x))
  17.187 +
  17.188 +/*
  17.189 + * Tell gcc we read from memory instead of writing: this is because
  17.190 + * we do not write to any memory gcc knows about, so there are no
  17.191 + * aliasing issues.
  17.192 + */
  17.193 +#define __put_user_asm(x, addr, err, itype, rtype, ltype)	\
  17.194 +	__asm__ __volatile__(					\
  17.195 +		"1:	mov"itype" %"rtype"1,%2\n"		\
  17.196 +		"2:\n"						\
  17.197 +		".section .fixup,\"ax\"\n"			\
  17.198 +		"3:	movl %3,%0\n"				\
  17.199 +		"	jmp 2b\n"				\
  17.200 +		".previous\n"					\
  17.201 +		".section __ex_table,\"a\"\n"			\
  17.202 +		"	.align 4\n"				\
  17.203 +		"	.long 1b,3b\n"				\
  17.204 +		".previous"					\
  17.205 +		: "=r"(err)					\
  17.206 +		: ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))
  17.207 +
  17.208 +
  17.209 +#define __get_user_nocheck(x,ptr,size)				\
  17.210 +({								\
  17.211 +	long __gu_err, __gu_val;				\
  17.212 +	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
  17.213 +	(x) = (__typeof__(*(ptr)))__gu_val;			\
  17.214 +	__gu_err;						\
  17.215 +})
  17.216 +
  17.217 +extern long __get_user_bad(void);
  17.218 +
  17.219 +#define __get_user_size(x,ptr,size,retval)				\
  17.220 +do {									\
  17.221 +	retval = 0;							\
  17.222 +	switch (size) {							\
  17.223 +	  case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break;	\
  17.224 +	  case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break;	\
  17.225 +	  case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break;	\
  17.226 +	  default: (x) = __get_user_bad();				\
  17.227 +	}								\
  17.228 +} while (0)
  17.229 +
  17.230 +#define __get_user_asm(x, addr, err, itype, rtype, ltype)	\
  17.231 +	__asm__ __volatile__(					\
  17.232 +		"1:	mov"itype" %2,%"rtype"1\n"		\
  17.233 +		"2:\n"						\
  17.234 +		".section .fixup,\"ax\"\n"			\
  17.235 +		"3:	movl %3,%0\n"				\
  17.236 +		"	xor"itype" %"rtype"1,%"rtype"1\n"	\
  17.237 +		"	jmp 2b\n"				\
  17.238 +		".previous\n"					\
  17.239 +		".section __ex_table,\"a\"\n"			\
  17.240 +		"	.align 4\n"				\
  17.241 +		"	.long 1b,3b\n"				\
  17.242 +		".previous"					\
  17.243 +		: "=r"(err), ltype (x)				\
  17.244 +		: "m"(__m(addr)), "i"(-EFAULT), "0"(err))
  17.245 +
  17.246 +
  17.247 +/*
  17.248 + * Copy To/From Userspace
  17.249 + */
  17.250 +
  17.251 +/* Generic arbitrary sized copy.  */
  17.252 +#define __copy_user(to,from,size)					\
  17.253 +do {									\
  17.254 +	int __d0, __d1;							\
  17.255 +	__asm__ __volatile__(						\
  17.256 +		"0:	rep; movsl\n"					\
  17.257 +		"	movl %3,%0\n"					\
  17.258 +		"1:	rep; movsb\n"					\
  17.259 +		"2:\n"							\
  17.260 +		".section .fixup,\"ax\"\n"				\
  17.261 +		"3:	lea 0(%3,%0,4),%0\n"				\
  17.262 +		"	jmp 2b\n"					\
  17.263 +		".previous\n"						\
  17.264 +		".section __ex_table,\"a\"\n"				\
  17.265 +		"	.align 4\n"					\
  17.266 +		"	.long 0b,3b\n"					\
  17.267 +		"	.long 1b,2b\n"					\
  17.268 +		".previous"						\
  17.269 +		: "=&c"(size), "=&D" (__d0), "=&S" (__d1)		\
  17.270 +		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from)	\
  17.271 +		: "memory");						\
  17.272 +} while (0)
  17.273 +
  17.274 +#define __copy_user_zeroing(to,from,size)				\
  17.275 +do {									\
  17.276 +	int __d0, __d1;							\
  17.277 +	__asm__ __volatile__(						\
  17.278 +		"0:	rep; movsl\n"					\
  17.279 +		"	movl %3,%0\n"					\
  17.280 +		"1:	rep; movsb\n"					\
  17.281 +		"2:\n"							\
  17.282 +		".section .fixup,\"ax\"\n"				\
  17.283 +		"3:	lea 0(%3,%0,4),%0\n"				\
  17.284 +		"4:	pushl %0\n"					\
  17.285 +		"	pushl %%eax\n"					\
  17.286 +		"	xorl %%eax,%%eax\n"				\
  17.287 +		"	rep; stosb\n"					\
  17.288 +		"	popl %%eax\n"					\
  17.289 +		"	popl %0\n"					\
  17.290 +		"	jmp 2b\n"					\
  17.291 +		".previous\n"						\
  17.292 +		".section __ex_table,\"a\"\n"				\
  17.293 +		"	.align 4\n"					\
  17.294 +		"	.long 0b,3b\n"					\
  17.295 +		"	.long 1b,4b\n"					\
  17.296 +		".previous"						\
  17.297 +		: "=&c"(size), "=&D" (__d0), "=&S" (__d1)		\
  17.298 +		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from)	\
  17.299 +		: "memory");						\
  17.300 +} while (0)
  17.301 +
   17.302 +/* We inline the __ versions of copy_from/to_user because they're often
  17.303 + * used in fast paths and have only a small space overhead.
  17.304 + */
  17.305 +static inline unsigned long
  17.306 +__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
  17.307 +{
  17.308 +	__copy_user_zeroing(to,from,n);
  17.309 +	return n;
  17.310 +}
  17.311 +
  17.312 +static inline unsigned long
  17.313 +__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
  17.314 +{
  17.315 +	__copy_user(to,from,n);
  17.316 +	return n;
  17.317 +}
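
The _zeroing variants give copy_from_user() (defined further down) its security-relevant contract: on a fault, the uncopied tail of the kernel destination is filled with zeroes instead of being left with stale data. Caller-visible behaviour, as a hypothetical sketch:

    /* Hypothetical read of a guest buffer relying on the zero-fill. */
    static int sketch_read_guest(void *dst, const void *guest_src,
                                 unsigned long n)
    {
        unsigned long left = copy_from_user(dst, guest_src, n);
        /* 'left' bytes at the end of dst were zeroed, not copied. */
        return left ? -EFAULT : 0;
    }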
  17.318 +
  17.319 +
  17.320 +/* Optimize just a little bit when we know the size of the move. */
  17.321 +#define __constant_copy_user(to, from, size)			\
  17.322 +do {								\
  17.323 +	int __d0, __d1;						\
  17.324 +	switch (size & 3) {					\
  17.325 +	default:						\
  17.326 +		__asm__ __volatile__(				\
  17.327 +			"0:	rep; movsl\n"			\
  17.328 +			"1:\n"					\
  17.329 +			".section .fixup,\"ax\"\n"		\
  17.330 +			"2:	shl $2,%0\n"			\
  17.331 +			"	jmp 1b\n"			\
  17.332 +			".previous\n"				\
  17.333 +			".section __ex_table,\"a\"\n"		\
  17.334 +			"	.align 4\n"			\
  17.335 +			"	.long 0b,2b\n"			\
  17.336 +			".previous"				\
  17.337 +			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  17.338 +			: "1"(from), "2"(to), "0"(size/4)	\
  17.339 +			: "memory");				\
  17.340 +		break;						\
  17.341 +	case 1:							\
  17.342 +		__asm__ __volatile__(				\
  17.343 +			"0:	rep; movsl\n"			\
  17.344 +			"1:	movsb\n"			\
  17.345 +			"2:\n"					\
  17.346 +			".section .fixup,\"ax\"\n"		\
  17.347 +			"3:	shl $2,%0\n"			\
  17.348 +			"4:	incl %0\n"			\
  17.349 +			"	jmp 2b\n"			\
  17.350 +			".previous\n"				\
  17.351 +			".section __ex_table,\"a\"\n"		\
  17.352 +			"	.align 4\n"			\
  17.353 +			"	.long 0b,3b\n"			\
  17.354 +			"	.long 1b,4b\n"			\
  17.355 +			".previous"				\
  17.356 +			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  17.357 +			: "1"(from), "2"(to), "0"(size/4)	\
  17.358 +			: "memory");				\
  17.359 +		break;						\
  17.360 +	case 2:							\
  17.361 +		__asm__ __volatile__(				\
  17.362 +			"0:	rep; movsl\n"			\
  17.363 +			"1:	movsw\n"			\
  17.364 +			"2:\n"					\
  17.365 +			".section .fixup,\"ax\"\n"		\
  17.366 +			"3:	shl $2,%0\n"			\
  17.367 +			"4:	addl $2,%0\n"			\
  17.368 +			"	jmp 2b\n"			\
  17.369 +			".previous\n"				\
  17.370 +			".section __ex_table,\"a\"\n"		\
  17.371 +			"	.align 4\n"			\
  17.372 +			"	.long 0b,3b\n"			\
  17.373 +			"	.long 1b,4b\n"			\
  17.374 +			".previous"				\
  17.375 +			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  17.376 +			: "1"(from), "2"(to), "0"(size/4)	\
  17.377 +			: "memory");				\
  17.378 +		break;						\
  17.379 +	case 3:							\
  17.380 +		__asm__ __volatile__(				\
  17.381 +			"0:	rep; movsl\n"			\
  17.382 +			"1:	movsw\n"			\
  17.383 +			"2:	movsb\n"			\
  17.384 +			"3:\n"					\
  17.385 +			".section .fixup,\"ax\"\n"		\
  17.386 +			"4:	shl $2,%0\n"			\
  17.387 +			"5:	addl $2,%0\n"			\
  17.388 +			"6:	incl %0\n"			\
  17.389 +			"	jmp 3b\n"			\
  17.390 +			".previous\n"				\
  17.391 +			".section __ex_table,\"a\"\n"		\
  17.392 +			"	.align 4\n"			\
  17.393 +			"	.long 0b,4b\n"			\
  17.394 +			"	.long 1b,5b\n"			\
  17.395 +			"	.long 2b,6b\n"			\
  17.396 +			".previous"				\
  17.397 +			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  17.398 +			: "1"(from), "2"(to), "0"(size/4)	\
  17.399 +			: "memory");				\
  17.400 +		break;						\
  17.401 +	}							\
  17.402 +} while (0)
  17.403 +
  17.404 +/* Optimize just a little bit when we know the size of the move. */
  17.405 +#define __constant_copy_user_zeroing(to, from, size)		\
  17.406 +do {								\
  17.407 +	int __d0, __d1;						\
  17.408 +	switch (size & 3) {					\
  17.409 +	default:						\
  17.410 +		__asm__ __volatile__(				\
  17.411 +			"0:	rep; movsl\n"			\
  17.412 +			"1:\n"					\
  17.413 +			".section .fixup,\"ax\"\n"		\
  17.414 +			"2:	pushl %0\n"			\
  17.415 +			"	pushl %%eax\n"			\
  17.416 +			"	xorl %%eax,%%eax\n"		\
  17.417 +			"	rep; stosl\n"			\
  17.418 +			"	popl %%eax\n"			\
  17.419 +			"	popl %0\n"			\
  17.420 +			"	shl $2,%0\n"			\
  17.421 +			"	jmp 1b\n"			\
  17.422 +			".previous\n"				\
  17.423 +			".section __ex_table,\"a\"\n"		\
  17.424 +			"	.align 4\n"			\
  17.425 +			"	.long 0b,2b\n"			\
  17.426 +			".previous"				\
  17.427 +			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  17.428 +			: "1"(from), "2"(to), "0"(size/4)	\
  17.429 +			: "memory");				\
  17.430 +		break;						\
  17.431 +	case 1:							\
  17.432 +		__asm__ __volatile__(				\
  17.433 +			"0:	rep; movsl\n"			\
  17.434 +			"1:	movsb\n"			\
  17.435 +			"2:\n"					\
  17.436 +			".section .fixup,\"ax\"\n"		\
  17.437 +			"3:	pushl %0\n"			\
  17.438 +			"	pushl %%eax\n"			\
  17.439 +			"	xorl %%eax,%%eax\n"		\
  17.440 +			"	rep; stosl\n"			\
  17.441 +			"	stosb\n"			\
  17.442 +			"	popl %%eax\n"			\
  17.443 +			"	popl %0\n"			\
  17.444 +			"	shl $2,%0\n"			\
  17.445 +			"	incl %0\n"			\
  17.446 +			"	jmp 2b\n"			\
  17.447 +			"4:	pushl %%eax\n"			\
  17.448 +			"	xorl %%eax,%%eax\n"		\
  17.449 +			"	stosb\n"			\
  17.450 +			"	popl %%eax\n"			\
  17.451 +			"	incl %0\n"			\
  17.452 +			"	jmp 2b\n"			\
  17.453 +			".previous\n"				\
  17.454 +			".section __ex_table,\"a\"\n"		\
  17.455 +			"	.align 4\n"			\
  17.456 +			"	.long 0b,3b\n"			\
  17.457 +			"	.long 1b,4b\n"			\
  17.458 +			".previous"				\
  17.459 +			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  17.460 +			: "1"(from), "2"(to), "0"(size/4)	\
  17.461 +			: "memory");				\
  17.462 +		break;						\
  17.463 +	case 2:							\
  17.464 +		__asm__ __volatile__(				\
  17.465 +			"0:	rep; movsl\n"			\
  17.466 +			"1:	movsw\n"			\
  17.467 +			"2:\n"					\
  17.468 +			".section .fixup,\"ax\"\n"		\
  17.469 +			"3:	pushl %0\n"			\
  17.470 +			"	pushl %%eax\n"			\
  17.471 +			"	xorl %%eax,%%eax\n"		\
  17.472 +			"	rep; stosl\n"			\
  17.473 +			"	stosw\n"			\
  17.474 +			"	popl %%eax\n"			\
  17.475 +			"	popl %0\n"			\
  17.476 +			"	shl $2,%0\n"			\
  17.477 +			"	addl $2,%0\n"			\
  17.478 +			"	jmp 2b\n"			\
  17.479 +			"4:	pushl %%eax\n"			\
  17.480 +			"	xorl %%eax,%%eax\n"		\
  17.481 +			"	stosw\n"			\
  17.482 +			"	popl %%eax\n"			\
  17.483 +			"	addl $2,%0\n"			\
  17.484 +			"	jmp 2b\n"			\
  17.485 +			".previous\n"				\
  17.486 +			".section __ex_table,\"a\"\n"		\
  17.487 +			"	.align 4\n"			\
  17.488 +			"	.long 0b,3b\n"			\
  17.489 +			"	.long 1b,4b\n"			\
  17.490 +			".previous"				\
  17.491 +			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  17.492 +			: "1"(from), "2"(to), "0"(size/4)	\
  17.493 +			: "memory");				\
  17.494 +		break;						\
  17.495 +	case 3:							\
  17.496 +		__asm__ __volatile__(				\
  17.497 +			"0:	rep; movsl\n"			\
  17.498 +			"1:	movsw\n"			\
  17.499 +			"2:	movsb\n"			\
  17.500 +			"3:\n"					\
  17.501 +			".section .fixup,\"ax\"\n"		\
  17.502 +			"4:	pushl %0\n"			\
  17.503 +			"	pushl %%eax\n"			\
  17.504 +			"	xorl %%eax,%%eax\n"		\
  17.505 +			"	rep; stosl\n"			\
  17.506 +			"	stosw\n"			\
  17.507 +			"	stosb\n"			\
  17.508 +			"	popl %%eax\n"			\
  17.509 +			"	popl %0\n"			\
  17.510 +			"	shl $2,%0\n"			\
  17.511 +			"	addl $3,%0\n"			\
  17.512 +			"	jmp 2b\n"			\
  17.513 +			"5:	pushl %%eax\n"			\
  17.514 +			"	xorl %%eax,%%eax\n"		\
  17.515 +			"	stosw\n"			\
  17.516 +			"	stosb\n"			\
  17.517 +			"	popl %%eax\n"			\
  17.518 +			"	addl $3,%0\n"			\
  17.519 +			"	jmp 2b\n"			\
  17.520 +			"6:	pushl %%eax\n"			\
  17.521 +			"	xorl %%eax,%%eax\n"		\
  17.522 +			"	stosb\n"			\
  17.523 +			"	popl %%eax\n"			\
  17.524 +			"	incl %0\n"			\
  17.525 +			"	jmp 3b\n"			\
  17.526 +			".previous\n"				\
  17.527 +			".section __ex_table,\"a\"\n"		\
  17.528 +			"	.align 4\n"			\
  17.529 +			"	.long 0b,4b\n"			\
  17.530 +			"	.long 1b,5b\n"			\
  17.531 +			"	.long 2b,6b\n"			\
  17.532 +			".previous"				\
  17.533 +			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
  17.534 +			: "1"(from), "2"(to), "0"(size/4)	\
  17.535 +			: "memory");				\
  17.536 +		break;						\
  17.537 +	}							\
  17.538 +} while (0)
  17.539 +
  17.540 +unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
  17.541 +unsigned long __generic_copy_from_user(void *, const void *, unsigned long);
  17.542 +
  17.543 +static inline unsigned long
  17.544 +__constant_copy_to_user(void *to, const void *from, unsigned long n)
  17.545 +{
  17.546 +	prefetch(from);
  17.547 +	if (access_ok(VERIFY_WRITE, to, n))
  17.548 +		__constant_copy_user(to,from,n);
  17.549 +	return n;
  17.550 +}
  17.551 +
  17.552 +static inline unsigned long
  17.553 +__constant_copy_from_user(void *to, const void *from, unsigned long n)
  17.554 +{
  17.555 +	if (access_ok(VERIFY_READ, from, n))
  17.556 +		__constant_copy_user_zeroing(to,from,n);
  17.557 +	else
  17.558 +		memset(to, 0, n);
  17.559 +	return n;
  17.560 +}
  17.561 +
  17.562 +static inline unsigned long
  17.563 +__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
  17.564 +{
  17.565 +	__constant_copy_user(to,from,n);
  17.566 +	return n;
  17.567 +}
  17.568 +
  17.569 +static inline unsigned long
  17.570 +__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
  17.571 +{
  17.572 +	__constant_copy_user_zeroing(to,from,n);
  17.573 +	return n;
  17.574 +}
  17.575 +
  17.576 +#define copy_to_user(to,from,n)				\
  17.577 +	(__builtin_constant_p(n) ?			\
  17.578 +	 __constant_copy_to_user((to),(from),(n)) :	\
  17.579 +	 __generic_copy_to_user((to),(from),(n)))
  17.580 +
  17.581 +#define copy_from_user(to,from,n)			\
  17.582 +	(__builtin_constant_p(n) ?			\
  17.583 +	 __constant_copy_from_user((to),(from),(n)) :	\
  17.584 +	 __generic_copy_from_user((to),(from),(n)))
  17.585 +
  17.586 +#define __copy_to_user(to,from,n)			\
  17.587 +	(__builtin_constant_p(n) ?			\
  17.588 +	 __constant_copy_to_user_nocheck((to),(from),(n)) :	\
  17.589 +	 __generic_copy_to_user_nocheck((to),(from),(n)))
  17.590 +
  17.591 +#define __copy_from_user(to,from,n)			\
  17.592 +	(__builtin_constant_p(n) ?			\
  17.593 +	 __constant_copy_from_user_nocheck((to),(from),(n)) :	\
  17.594 +	 __generic_copy_from_user_nocheck((to),(from),(n)))
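
Since n is tested with __builtin_constant_p, a compile-time-constant size selects the unrolled __constant_* body while a runtime size falls back to the rep-movs generic routine; both expansions are transparent to the caller. A hypothetical example of the two outcomes:

    struct sketch_msg { int id; int payload[4]; };

    static int sketch_send(void *uptr, struct sketch_msg *m, unsigned long n)
    {
        /* sizeof(*m) is constant: expands to __constant_copy_to_user(). */
        if (copy_to_user(uptr, m, sizeof(*m)))
            return -EFAULT;
        /* n is a runtime value: expands to __generic_copy_to_user(). */
        return copy_to_user(uptr, m, n) ? -EFAULT : 0;
    }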
  17.595 +
  17.596 +long strncpy_from_user(char *dst, const char *src, long count);
  17.597 +long __strncpy_from_user(char *dst, const char *src, long count);
  17.598 +#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
  17.599 +long strnlen_user(const char *str, long n);
  17.600 +unsigned long clear_user(void *mem, unsigned long len);
  17.601 +unsigned long __clear_user(void *mem, unsigned long len);
  17.602 +
  17.603 +#endif /* __i386_UACCESS_H */
    18.1 --- a/xen/include/asm-x86/x86_64/processor.h	Tue Jun 15 12:29:06 2004 +0000
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,463 +0,0 @@
    18.4 -/*
    18.5 - * include/asm-x86_64/processor.h
    18.6 - *
    18.7 - * Copyright (C) 1994 Linus Torvalds
    18.8 - */
    18.9 -
   18.10 -#ifndef __ASM_X86_64_PROCESSOR_H
   18.11 -#define __ASM_X86_64_PROCESSOR_H
   18.12 -
   18.13 -#include <asm/page.h>
   18.14 -#include <asm/types.h>
   18.15 -#include <asm/cpufeature.h>
   18.16 -#include <asm/desc.h>
   18.17 -#include <xen/config.h>
   18.18 -#include <hypervisor-ifs/hypervisor-if.h>
   18.19 -
   18.20 -struct task_struct;
   18.21 -
   18.22 -#define TF_MASK		0x00000100
   18.23 -#define IF_MASK		0x00000200
   18.24 -#define IOPL_MASK	0x00003000
   18.25 -#define NT_MASK		0x00004000
   18.26 -#define VM_MASK		0x00020000
   18.27 -#define AC_MASK		0x00040000
   18.28 -#define VIF_MASK	0x00080000	/* virtual interrupt flag */
   18.29 -#define VIP_MASK	0x00100000	/* virtual interrupt pending */
   18.30 -#define ID_MASK		0x00200000
   18.31 -
   18.32 -/*
   18.33 - * Default implementation of macro that returns current
   18.34 - * instruction pointer ("program counter").
   18.35 - */
   18.36 -#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
   18.37 -
   18.38 -/*
   18.39 - *  CPU type and hardware bug flags. Kept separately for each CPU.
   18.40 - *  Members of this structure are referenced in head.S, so think twice
   18.41 - *  before touching them. [mj]
   18.42 - */
   18.43 -
   18.44 -struct cpuinfo_x86 {
   18.45 -	__u8	x86;		/* CPU family */
   18.46 -	__u8	x86_vendor;	/* CPU vendor */
   18.47 -	__u8	x86_model;
   18.48 -	__u8	x86_mask;
   18.49 -	int	cpuid_level;	/* Maximum supported CPUID level, -1=no CPUID */
   18.50 -	__u32	x86_capability[NCAPINTS];
   18.51 -	char	x86_vendor_id[16];
   18.52 -	char	x86_model_id[64];
   18.53 -	int 	x86_cache_size;  /* in KB - valid for CPUS which support this
   18.54 -				    call  */
   18.55 -	int	x86_clflush_size;
   18.56 -	int	x86_tlbsize;	/* number of 4K pages in DTLB/ITLB combined(in pages)*/
   18.57 -        __u8    x86_virt_bits, x86_phys_bits;
   18.58 -        __u32   x86_power; 
   18.59 -	unsigned long loops_per_jiffy;
   18.60 -} ____cacheline_aligned;
   18.61 -
   18.62 -#define X86_VENDOR_INTEL 0
   18.63 -#define X86_VENDOR_CYRIX 1
   18.64 -#define X86_VENDOR_AMD 2
   18.65 -#define X86_VENDOR_UMC 3
   18.66 -#define X86_VENDOR_NEXGEN 4
   18.67 -#define X86_VENDOR_CENTAUR 5
   18.68 -#define X86_VENDOR_RISE 6
   18.69 -#define X86_VENDOR_TRANSMETA 7
   18.70 -#define X86_VENDOR_UNKNOWN 0xff
   18.71 -
   18.72 -/*
   18.73 - * capabilities of CPUs
   18.74 - */
   18.75 -
   18.76 -extern struct cpuinfo_x86 boot_cpu_data;
   18.77 -extern struct tss_struct init_tss[NR_CPUS];
   18.78 -
   18.79 -#ifdef CONFIG_SMP
   18.80 -extern struct cpuinfo_x86 cpu_data[];
   18.81 -#define current_cpu_data cpu_data[smp_processor_id()]
   18.82 -#else
   18.83 -#define cpu_data (&boot_cpu_data)
   18.84 -#define current_cpu_data boot_cpu_data
   18.85 -#endif
   18.86 -
   18.87 -#define cpu_has_pge 1
   18.88 -#define cpu_has_pse 1
   18.89 -#define cpu_has_pae 1
   18.90 -#define cpu_has_tsc 1
   18.91 -#define cpu_has_de 1
   18.92 -#define cpu_has_vme 1
   18.93 -#define cpu_has_fxsr 1
   18.94 -#define cpu_has_xmm 1
   18.95 -#define cpu_has_apic (test_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability))
   18.96 -
   18.97 -extern char ignore_irq13;
   18.98 -
   18.99 -extern void identify_cpu(struct cpuinfo_x86 *);
  18.100 -extern void print_cpu_info(struct cpuinfo_x86 *);
  18.101 -extern void dodgy_tsc(void);
  18.102 -
  18.103 -/*
  18.104 - * EFLAGS bits
  18.105 - */
  18.106 -#define X86_EFLAGS_CF	0x00000001 /* Carry Flag */
  18.107 -#define X86_EFLAGS_PF	0x00000004 /* Parity Flag */
   18.108 -#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
  18.109 -#define X86_EFLAGS_ZF	0x00000040 /* Zero Flag */
  18.110 -#define X86_EFLAGS_SF	0x00000080 /* Sign Flag */
  18.111 -#define X86_EFLAGS_TF	0x00000100 /* Trap Flag */
  18.112 -#define X86_EFLAGS_IF	0x00000200 /* Interrupt Flag */
  18.113 -#define X86_EFLAGS_DF	0x00000400 /* Direction Flag */
  18.114 -#define X86_EFLAGS_OF	0x00000800 /* Overflow Flag */
  18.115 -#define X86_EFLAGS_IOPL	0x00003000 /* IOPL mask */
  18.116 -#define X86_EFLAGS_NT	0x00004000 /* Nested Task */
  18.117 -#define X86_EFLAGS_RF	0x00010000 /* Resume Flag */
  18.118 -#define X86_EFLAGS_VM	0x00020000 /* Virtual Mode */
  18.119 -#define X86_EFLAGS_AC	0x00040000 /* Alignment Check */
  18.120 -#define X86_EFLAGS_VIF	0x00080000 /* Virtual Interrupt Flag */
  18.121 -#define X86_EFLAGS_VIP	0x00100000 /* Virtual Interrupt Pending */
  18.122 -#define X86_EFLAGS_ID	0x00200000 /* CPUID detection flag */
  18.123 -
  18.124 -/*
  18.125 - *	Generic CPUID function
  18.126 - * 	FIXME: This really belongs to msr.h
  18.127 - */
  18.128 -extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
  18.129 -{
  18.130 -	__asm__("cpuid"
  18.131 -		: "=a" (*eax),
  18.132 -		  "=b" (*ebx),
  18.133 -		  "=c" (*ecx),
  18.134 -		  "=d" (*edx)
  18.135 -		: "0" (op));
  18.136 -}
  18.137 -
  18.138 -/*
  18.139 - * CPUID functions returning a single datum
  18.140 - */
  18.141 -extern inline unsigned int cpuid_eax(unsigned int op)
  18.142 -{
  18.143 -	unsigned int eax;
  18.144 -
  18.145 -	__asm__("cpuid"
  18.146 -		: "=a" (eax)
  18.147 -		: "0" (op)
  18.148 -		: "bx", "cx", "dx");
  18.149 -	return eax;
  18.150 -}
  18.151 -extern inline unsigned int cpuid_ebx(unsigned int op)
  18.152 -{
  18.153 -	unsigned int eax, ebx;
  18.154 -
  18.155 -	__asm__("cpuid"
  18.156 -		: "=a" (eax), "=b" (ebx)
  18.157 -		: "0" (op)
  18.158 -		: "cx", "dx" );
  18.159 -	return ebx;
  18.160 -}
  18.161 -extern inline unsigned int cpuid_ecx(unsigned int op)
  18.162 -{
  18.163 -	unsigned int eax, ecx;
  18.164 -
  18.165 -	__asm__("cpuid"
  18.166 -		: "=a" (eax), "=c" (ecx)
  18.167 -		: "0" (op)
  18.168 -		: "bx", "dx" );
  18.169 -	return ecx;
  18.170 -}
  18.171 -extern inline unsigned int cpuid_edx(unsigned int op)
  18.172 -{
  18.173 -	unsigned int eax, edx;
  18.174 -
  18.175 -	__asm__("cpuid"
  18.176 -		: "=a" (eax), "=d" (edx)
  18.177 -		: "0" (op)
  18.178 -		: "bx", "cx");
  18.179 -	return edx;
  18.180 -}
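
For reference, the four single-register variants were typically used where only one output mattered; the full cpuid() helper returns all four. A hypothetical vendor-string read using it (assumes memcpy() is available in this context):

    /* Hypothetical: leaf 0 returns the vendor string in ebx, edx, ecx. */
    static void sketch_read_vendor(char buf[13])
    {
        int eax, ebx, ecx, edx;
        cpuid(0, &eax, &ebx, &ecx, &edx);
        memcpy(buf + 0, &ebx, 4);
        memcpy(buf + 4, &edx, 4);
        memcpy(buf + 8, &ecx, 4);
        buf[12] = '\0';
    }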
  18.181 -
  18.182 -
  18.183 -/*
  18.184 - * Intel CPU flags in CR0
  18.185 - */
  18.186 -#define X86_CR0_PE              0x00000001 /* Enable Protected Mode    (RW) */
  18.187 -#define X86_CR0_MP              0x00000002 /* Monitor Coprocessor      (RW) */
  18.188 -#define X86_CR0_EM              0x00000004 /* Require FPU Emulation    (RO) */
  18.189 -#define X86_CR0_TS              0x00000008 /* Task Switched            (RW) */
  18.190 -#define X86_CR0_NE              0x00000020 /* Numeric Error Reporting  (RW) */
  18.191 -#define X86_CR0_WP              0x00010000 /* Supervisor Write Protect (RW) */
  18.192 -#define X86_CR0_AM              0x00040000 /* Alignment Checking       (RW) */
  18.193 -#define X86_CR0_NW              0x20000000 /* Not Write-Through        (RW) */
  18.194 -#define X86_CR0_CD              0x40000000 /* Cache Disable            (RW) */
  18.195 -#define X86_CR0_PG              0x80000000 /* Paging                   (RW) */
  18.196 -
  18.197 -#define read_cr0() ({ \
  18.198 -	unsigned long __dummy; \
  18.199 -	__asm__( \
  18.200 -		"movq %%cr0,%0\n\t" \
  18.201 -		:"=r" (__dummy)); \
  18.202 -	__dummy; \
  18.203 -})
  18.204 -
  18.205 -#define write_cr0(x) \
  18.206 -	__asm__("movq %0,%%cr0": :"r" (x));
  18.207 -
  18.208 -
  18.209 -
  18.210 -/*
  18.211 - * Intel CPU features in CR4
  18.212 - */
  18.213 -#define X86_CR4_VME		0x0001	/* enable vm86 extensions */
  18.214 -#define X86_CR4_PVI		0x0002	/* virtual interrupts flag enable */
  18.215 -#define X86_CR4_TSD		0x0004	/* disable time stamp at ipl 3 */
  18.216 -#define X86_CR4_DE		0x0008	/* enable debugging extensions */
  18.217 -#define X86_CR4_PSE		0x0010	/* enable page size extensions */
  18.218 -#define X86_CR4_PAE		0x0020	/* enable physical address extensions */
  18.219 -#define X86_CR4_MCE		0x0040	/* Machine check enable */
  18.220 -#define X86_CR4_PGE		0x0080	/* enable global pages */
  18.221 -#define X86_CR4_PCE		0x0100	/* enable performance counters at ipl 3 */
  18.222 -#define X86_CR4_OSFXSR		0x0200	/* enable fast FPU save and restore */
  18.223 -#define X86_CR4_OSXMMEXCPT	0x0400	/* enable unmasked SSE exceptions */
  18.224 -
  18.225 -/*
  18.226 - * Save the cr4 feature set we're using (ie
  18.227 - * Pentium 4MB enable and PPro Global page
  18.228 - * enable), so that any CPU's that boot up
  18.229 - * after us can get the correct flags.
  18.230 - */
  18.231 -extern unsigned long mmu_cr4_features;
  18.232 -
  18.233 -static inline void set_in_cr4 (unsigned long mask)
  18.234 -{
  18.235 -	mmu_cr4_features |= mask;
  18.236 -	__asm__("movq %%cr4,%%rax\n\t"
  18.237 -		"orq %0,%%rax\n\t"
  18.238 -		"movq %%rax,%%cr4\n"
  18.239 -		: : "irg" (mask)
  18.240 -		:"ax");
  18.241 -}
  18.242 -
  18.243 -static inline void clear_in_cr4 (unsigned long mask)
  18.244 -{
  18.245 -	mmu_cr4_features &= ~mask;
  18.246 -	__asm__("movq %%cr4,%%rax\n\t"
  18.247 -		"andq %0,%%rax\n\t"
  18.248 -		"movq %%rax,%%cr4\n"
  18.249 -		: : "irg" (~mask)
  18.250 -		:"ax");
  18.251 -}
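
set_in_cr4()/clear_in_cr4() keep mmu_cr4_features in sync with the live register so CPUs brought up later inherit the same feature set. A hypothetical boot-time call:

    /* Hypothetical: enable global pages and record the bit for later CPUs. */
    static void sketch_enable_pge(void)
    {
        set_in_cr4(X86_CR4_PGE);
    }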
  18.252 -
  18.253 -/*
  18.254 - *      Cyrix CPU configuration register indexes
  18.255 - */
  18.256 -#define CX86_CCR0 0xc0
  18.257 -#define CX86_CCR1 0xc1
  18.258 -#define CX86_CCR2 0xc2
  18.259 -#define CX86_CCR3 0xc3
  18.260 -#define CX86_CCR4 0xe8
  18.261 -#define CX86_CCR5 0xe9
  18.262 -#define CX86_CCR6 0xea
  18.263 -#define CX86_CCR7 0xeb
  18.264 -#define CX86_DIR0 0xfe
  18.265 -#define CX86_DIR1 0xff
  18.266 -#define CX86_ARR_BASE 0xc4
  18.267 -#define CX86_RCR_BASE 0xdc
  18.268 -
  18.269 -/*
  18.270 - *      Cyrix CPU indexed register access macros
  18.271 - */
  18.272 -
  18.273 -#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
  18.274 -
  18.275 -#define setCx86(reg, data) do { \
  18.276 -	outb((reg), 0x22); \
  18.277 -	outb((data), 0x23); \
  18.278 -} while (0)
  18.279 -
  18.280 -/*
  18.281 - * Bus types
  18.282 - */
  18.283 -#define EISA_bus 0
  18.284 -#define MCA_bus 0
  18.285 -#define MCA_bus__is_a_macro
  18.286 -
  18.287 -
  18.288 -/*
  18.289 - * User space process size: 512GB - 1GB (default).
  18.290 - */
  18.291 -#define TASK_SIZE	(0x0000007fc0000000)
  18.292 -
  18.293 -/* This decides where the kernel will search for a free chunk of vm
  18.294 - * space during mmap's.
  18.295 - */
  18.296 -#define TASK_UNMAPPED_32 0xa0000000
  18.297 -#define TASK_UNMAPPED_64 (TASK_SIZE/3) 
  18.298 -#define TASK_UNMAPPED_BASE	\
  18.299 -	((current->thread.flags & THREAD_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)  
  18.300 -
  18.301 -/*
  18.302 - * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
  18.303 - */
  18.304 -#define IO_BITMAP_SIZE	32
  18.305 -#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
  18.306 -#define INVALID_IO_BITMAP_OFFSET 0x8000
  18.307 -
  18.308 -struct i387_fxsave_struct {
  18.309 -	u16	cwd;
  18.310 -	u16	swd;
  18.311 -	u16	twd;
  18.312 -	u16	fop;
  18.313 -	u64	rip;
  18.314 -	u64	rdp; 
  18.315 -	u32	mxcsr;
  18.316 -	u32	mxcsr_mask;
  18.317 -	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
  18.318 -	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 128 bytes */
  18.319 -	u32	padding[24];
  18.320 -} __attribute__ ((aligned (16)));
  18.321 -
  18.322 -union i387_union {
  18.323 -	struct i387_fxsave_struct	fxsave;
  18.324 -};
  18.325 -
  18.326 -typedef struct {
  18.327 -	unsigned long seg;
  18.328 -} mm_segment_t;
  18.329 -
  18.330 -struct tss_struct {
  18.331 -	unsigned short back_link,__blh;
  18.332 -/*	u32 reserved1; */
  18.333 -	u64 rsp0;	
  18.334 -	u64 rsp1;
  18.335 -	u64 rsp2;
  18.336 -	u64 reserved2;
  18.337 -	u64 ist[7];
  18.338 -	u32 reserved3;
  18.339 -	u32 reserved4;
  18.340 -	u16 reserved5;
  18.341 -	u16 io_map_base;
  18.342 -	u32 io_bitmap[IO_BITMAP_SIZE];
  18.343 -} __attribute__((packed)) ____cacheline_aligned;
  18.344 -
  18.345 -struct thread_struct {
  18.346 -	unsigned long	guestos_sp;
  18.347 -	unsigned long	guestos_ss;
  18.348 -	unsigned long	rip;
  18.349 -	unsigned long	rsp;
  18.350 -	unsigned long 	userrsp;	/* Copy from PDA */ 
  18.351 -	unsigned long	fs;
  18.352 -	unsigned long	gs;
  18.353 -	unsigned short	es, ds, fsindex, gsindex;	
  18.354 -	enum { 
  18.355 -		THREAD_IA32 = 0x0001,
  18.356 -	} flags;
  18.357 -/* Hardware debugging registers */
  18.358 -	unsigned long	debugreg[8];  /* %%db0-7 debug registers */
  18.359 -/* floating point info */
  18.360 -	union i387_union	i387;
  18.361 -/* Trap info. */
  18.362 -	trap_info_t		traps[256];
  18.363 -};
  18.364 -
  18.365 -#define IDT_ENTRIES 256
  18.366 -extern struct gate_struct idt_table[];
  18.367 -extern struct gate_struct *idt_tables[];
  18.368 -
  18.369 -#define INIT_THREAD  {						\
  18.370 -	0, 0,		      		       			\
  18.371 -	0, 0, 0, 0,						\
  18.372 -	0, 0, 0, 0,						\
  18.373 -	0,			/* flags */			\
  18.374 -	{ [0 ... 7] = 0 },	/* debugging registers */	\
  18.375 -	{ { 0, }, },		/* 387 state */			\
  18.376 -	{ {0} }			/* io permissions */		\
  18.377 -}
  18.378 -
  18.379 -#define INIT_TSS  {						\
  18.380 -	0,0, /* back_link, __blh */				\
  18.381 -	0, /* rsp0 */						\
  18.382 -	0, 0, /* rsp1, rsp2 */					\
  18.383 -	0, /* reserved */					\
  18.384 -	{ [0 ... 6] = 0 }, /* ist[] */				\
  18.385 -	0,0, /* reserved */					\
  18.386 -	0, INVALID_IO_BITMAP_OFFSET, /* trace, bitmap */	\
  18.387 -	{~0, } /* ioperm */					\
  18.388 -}
  18.389 -
  18.390 -struct mm_struct {
  18.391 -    /*
  18.392 -     * Every domain has a L1 pagetable of its own. Per-domain mappings
  18.393 -     * are put in this table (eg. the current GDT is mapped here).
  18.394 -     */
  18.395 -    l1_pgentry_t *perdomain_pt;
  18.396 -    pagetable_t  pagetable;
  18.397 -    /* Current LDT details. */
  18.398 -    unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
  18.399 -    /* Next entry is passed to LGDT on domain switch. */
  18.400 -    char gdt[10];
  18.401 -};
  18.402 -
  18.403 -#define IDLE0_MM                                                    \
  18.404 -{                                                                   \
  18.405 -    perdomain_pt: 0,                                                \
  18.406 -    pagetable:   mk_pagetable(__pa(idle_pg_table))                  \
  18.407 -}
  18.408 -
  18.409 -/* Convenient accessor for mm.gdt. */
  18.410 -#define SET_GDT_ENTRIES(_p, _e) ((*(u16 *)((_p)->mm.gdt + 0)) = (_e))
  18.411 -#define SET_GDT_ADDRESS(_p, _a) ((*(u64 *)((_p)->mm.gdt + 2)) = (_a))
  18.412 -#define GET_GDT_ENTRIES(_p)     ((*(u16 *)((_p)->mm.gdt + 0)))
  18.413 -#define GET_GDT_ADDRESS(_p)     ((*(u64 *)((_p)->mm.gdt + 2)))
  18.414 -
  18.415 -long set_gdt(struct task_struct *p, 
  18.416 -             unsigned long *frames, 
  18.417 -             unsigned int entries);
  18.418 -
  18.419 -long set_debugreg(struct task_struct *p, int reg, unsigned long value);
  18.420 -
  18.421 -struct microcode {
  18.422 -    unsigned int hdrver;
  18.423 -    unsigned int rev;
  18.424 -    unsigned int date;
  18.425 -    unsigned int sig;
  18.426 -    unsigned int cksum;
  18.427 -    unsigned int ldrver;
  18.428 -    unsigned int pf;
  18.429 -    unsigned int reserved[5];
  18.430 -    unsigned int bits[500];
  18.431 -};
  18.432 -
  18.433 -/* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
  18.434 -#define MICROCODE_IOCFREE	_IO('6',0)
  18.435 -
  18.436 -/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
  18.437 -static inline void rep_nop(void)
  18.438 -{
  18.439 -    __asm__ __volatile__("rep;nop");
  18.440 -}
  18.441 -
  18.442 -#define cpu_relax()	rep_nop()
  18.443 -
  18.444 -#define init_task	(init_task_union.task)
  18.445 -#define init_stack	(init_task_union.stack)
  18.446 -
  18.447 -/* Avoid speculative execution by the CPU */
  18.448 -extern inline void sync_core(void)
  18.449 -{ 
  18.450 -	int tmp;
  18.451 -	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
  18.452 -} 
  18.453 -
  18.454 -#define cpu_has_fpu 1
  18.455 -
  18.456 -#define ARCH_HAS_PREFETCH
  18.457 -#define ARCH_HAS_PREFETCHW
  18.458 -#define ARCH_HAS_SPINLOCK_PREFETCH
  18.459 -
  18.460 -#define prefetch(x) __builtin_prefetch((x),0)
  18.461 -#define prefetchw(x) __builtin_prefetch((x),1)
  18.462 -#define spin_lock_prefetch(x)  prefetchw(x)
  18.463 -#define cpu_relax()   rep_nop()
  18.464 -
  18.465 -
  18.466 -#endif /* __ASM_X86_64_PROCESSOR_H */
    19.1 --- a/xen/include/asm-x86/x86_64/uaccess.h	Tue Jun 15 12:29:06 2004 +0000
    19.2 +++ b/xen/include/asm-x86/x86_64/uaccess.h	Tue Jun 15 15:28:22 2004 +0000
    19.3 @@ -103,16 +103,11 @@ extern void __get_user_8(void);
    19.4  ({	long __val_gu;							\
    19.5  	int __ret_gu=1;							\
    19.6  	switch(sizeof (*(ptr))) {					\
    19.7 -+	case 1:  __ret_gu=copy_from_user(&__val_gu,ptr,1);break;		\
    19.8 -+	case 2:  __ret_gu=copy_from_user(&__val_gu,ptr,2);break;		\
    19.9 -+	case 4:  __ret_gu=copy_from_user(&__val_gu,ptr,4);break;		\
   19.10 -+	case 8:  __ret_gu=copy_from_user(&__val_gu,ptr,8);break;		\
   19.11 -+	default: __ret_gu=copy_from_user(&__val_gu,ptr,sizeof(*(ptr)));break;\
   19.12 -	/*case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;*/	\
   19.13 -	/*case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;*/	\
   19.14 -	/*case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;*/	\
   19.15 -	/*case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;*/	\
   19.16 -	/*default: __get_user_bad(); break;*/				\
   19.17 +	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;	\
   19.18 +	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;	\
   19.19 +	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;	\
   19.20 +	case 8:  __get_user_x(8,__ret_gu,__val_gu,ptr); break;	\
   19.21 +	default: __get_user_bad(); break;				\
   19.22  	}								\
   19.23  	(x) = (__typeof__(*(ptr)))__val_gu;				\
   19.24  	__ret_gu;							\
    20.1 --- a/xen/include/xen/mm.h	Tue Jun 15 12:29:06 2004 +0000
    20.2 +++ b/xen/include/xen/mm.h	Tue Jun 15 15:28:22 2004 +0000
    20.3 @@ -313,7 +313,11 @@ int check_descriptor(unsigned long a, un
    20.4   * contiguous (or near contiguous) physical memory.
    20.5   */
    20.6  #undef  machine_to_phys_mapping
    20.7 +#ifdef __x86_64__
    20.8 +extern unsigned long *machine_to_phys_mapping;
    20.9 +#else
   20.10  #define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
   20.11 +#endif
   20.12  
   20.13  /* Part of the domain API. */
   20.14  int do_mmu_update(mmu_update_t *updates, int count, int *success_count);
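
On both builds, callers index machine_to_phys_mapping[] by machine frame number to recover the guest-physical frame; the x86_64 change only makes the table base a boot-time variable instead of a fixed virtual address. A hypothetical lookup helper:

    /* Hypothetical M2P lookup over the table declared above. */
    static inline unsigned long sketch_machine_to_phys(unsigned long mfn)
    {
        return machine_to_phys_mapping[mfn];
    }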