ia64/xen-unstable

changeset 3607:fd1dd0663b09

bitkeeper revision 1.1159.212.68 (42001e4d1AQiGV2pdPTNrs2AU2LjsQ)

Merge pb001.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into pb001.cl.cam.ac.uk:/auto/groups/xeno/users/iap10/xeno-clone/xen-unstable.bk
author iap10@pb001.cl.cam.ac.uk
date Wed Feb 02 00:26:53 2005 +0000 (2005-02-02)
parents 281346e5fc97 e6af5d8f8b39
children d49c0626928e 49e44c44570c beb0887c54bc
files .rootkeys BitKeeper/etc/logging_ok xen/arch/x86/boot/x86_64.S xen/arch/x86/memory.c xen/arch/x86/traps.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/traps.c xen/include/asm-x86/desc.h xen/include/asm-x86/regs.h xen/include/asm-x86/x86_64/regs.h
line diff
     1.1 --- a/.rootkeys	Wed Feb 02 00:22:15 2005 +0000
     1.2 +++ b/.rootkeys	Wed Feb 02 00:26:53 2005 +0000
     1.3 @@ -901,11 +901,13 @@ 3e32af9aRnYGl4GMOaDKp7JdfhOGhg xen/arch/
     1.4  3ddb79bcecupHj56ZbTa3B0FxDowMg xen/arch/x86/x86_32/entry.S
     1.5  3ddb79bcHwuCQDjBICDTSis52hWguw xen/arch/x86/x86_32/mm.c
     1.6  40f92331jfOlE7MfKwpdkEb1CEf23g xen/arch/x86/x86_32/seg_fixup.c
     1.7 +42000d3ckiFc1qxa4AWqsd0t3lxuyw xen/arch/x86/x86_32/traps.c
     1.8  3ddb79bc4nTpGQOe6_-MbyZzkhlhFQ xen/arch/x86/x86_32/usercopy.c
     1.9  3ddb79bcOMCu9-5mKpjIh5d0qqBDPg xen/arch/x86/x86_32/xen.lds
    1.10  41bf1717Ty3hwN3E9swdu8QfnvGqww xen/arch/x86/x86_64/asm-offsets.c
    1.11  40e96d3aLDI-nViMuYneD7VKYlZrVg xen/arch/x86/x86_64/entry.S
    1.12  41bf1717XhPz_dNT5OKSjgmbFuWBuA xen/arch/x86/x86_64/mm.c
    1.13 +42000d3cMb8o1WuFBXC07c8i3lPZBw xen/arch/x86/x86_64/traps.c
    1.14  40e96d3ahBTZqbTViInnq0lM03vs7A xen/arch/x86/x86_64/usercopy.c
    1.15  40e96d3akN3Hu_J5Bk-WXD8OGscrYQ xen/arch/x86/x86_64/xen.lds
    1.16  3ddb79bdff-gj-jFGKjOejeHLqL8Lg xen/common/Makefile
     2.1 --- a/BitKeeper/etc/logging_ok	Wed Feb 02 00:22:15 2005 +0000
     2.2 +++ b/BitKeeper/etc/logging_ok	Wed Feb 02 00:26:53 2005 +0000
     2.3 @@ -21,6 +21,7 @@ harry@dory.(none)
     2.4  iap10@freefall.cl.cam.ac.uk
     2.5  iap10@labyrinth.cl.cam.ac.uk
     2.6  iap10@nidd.cl.cam.ac.uk
     2.7 +iap10@pb001.cl.cam.ac.uk
     2.8  iap10@pb007.cl.cam.ac.uk
     2.9  iap10@striker.cl.cam.ac.uk
    2.10  iap10@tetris.cl.cam.ac.uk
     3.1 --- a/xen/arch/x86/boot/x86_64.S	Wed Feb 02 00:22:15 2005 +0000
     3.2 +++ b/xen/arch/x86/boot/x86_64.S	Wed Feb 02 00:26:53 2005 +0000
     3.3 @@ -241,26 +241,17 @@ ENTRY(cpu0_stack)    # Initial stack is 
     3.4  ENTRY(stext)
     3.5  ENTRY(_stext)
     3.6  
     3.7 -.globl ret_from_intr, copy_to_user, set_intr_gate, die
     3.8 +.globl switch_to, ret_from_intr, do_iopl
     3.9 +switch_to:
    3.10  ret_from_intr:
    3.11 -copy_to_user:
    3.12 -set_intr_gate:
    3.13 -die:
    3.14 -.globl copy_from_user, show_registers, do_iopl
    3.15 +do_iopl: 
    3.16 +.globl copy_from_user, copy_to_user, copy_user_generic, new_thread
    3.17  copy_from_user: 
    3.18 -show_registers: 
    3.19 -do_iopl: 
    3.20 -.globl idt_table, copy_user_generic, idt_tables, new_thread
    3.21 -idt_table:
    3.22 +copy_to_user:
    3.23  copy_user_generic:
    3.24 -idt_tables:
    3.25  new_thread:
    3.26 -.globl switch_to, __get_user_1, __get_user_4, __get_user_8, trap_init
    3.27 -switch_to:
    3.28 +.globl __get_user_1, __get_user_4, __get_user_8
    3.29  __get_user_1:
    3.30  __get_user_4:
    3.31  __get_user_8:
    3.32 -trap_init: 
    3.33 -.globl set_debugreg
    3.34 -set_debugreg:
    3.35          
     4.1 --- a/xen/arch/x86/memory.c	Wed Feb 02 00:22:15 2005 +0000
     4.2 +++ b/xen/arch/x86/memory.c	Wed Feb 02 00:26:53 2005 +0000
     4.3 @@ -168,6 +168,7 @@ void __init init_frametable(void)
     4.4  
     4.5  void arch_init_memory(void)
     4.6  {
     4.7 +#ifdef __i386__
     4.8      unsigned long i;
     4.9  
    4.10      /*
    4.11 @@ -219,6 +220,7 @@ void arch_init_memory(void)
    4.12          frame_table[m2p_start_mfn+i].u.inuse.type_info = PGT_gdt_page | 1;
    4.13          frame_table[m2p_start_mfn+i].u.inuse.domain    = dom_xen;
    4.14      }
    4.15 +#endif
    4.16  }
    4.17  
    4.18  static void __invalidate_shadow_ldt(struct exec_domain *d)
     5.1 --- a/xen/arch/x86/traps.c	Wed Feb 02 00:22:15 2005 +0000
     5.2 +++ b/xen/arch/x86/traps.c	Wed Feb 02 00:26:53 2005 +0000
     5.3 @@ -66,12 +66,6 @@ char opt_nmi[10] = "fatal";
     5.4  #endif
     5.5  string_param("nmi", opt_nmi);
     5.6  
     5.7 -#define GUEST_FAULT(_r) (likely(VM86_MODE(_r) || !RING_0(_r)))
     5.8 -
     5.9 -#define DOUBLEFAULT_STACK_SIZE 1024
    5.10 -static struct tss_struct doublefault_tss;
    5.11 -static unsigned char doublefault_stack[DOUBLEFAULT_STACK_SIZE];
    5.12 -
    5.13  asmlinkage int hypercall(void);
    5.14  
    5.15  /* Master table, and the one used by CPU0. */
    5.16 @@ -99,116 +93,6 @@ asmlinkage void alignment_check(void);
    5.17  asmlinkage void spurious_interrupt_bug(void);
    5.18  asmlinkage void machine_check(void);
    5.19  
    5.20 -int kstack_depth_to_print = 8*20;
    5.21 -
    5.22 -static inline int kernel_text_address(unsigned long addr)
    5.23 -{
    5.24 -    if (addr >= (unsigned long) &_stext &&
    5.25 -        addr <= (unsigned long) &_etext)
    5.26 -        return 1;
    5.27 -    return 0;
    5.28 -
    5.29 -}
    5.30 -
    5.31 -void show_guest_stack(void)
    5.32 -{
    5.33 -    int i;
    5.34 -    execution_context_t *ec = get_execution_context();
    5.35 -    unsigned long *stack = (unsigned long *)ec->esp;
    5.36 -    printk("Guest EIP is %lx\n",ec->eip);
    5.37 -
    5.38 -    for ( i = 0; i < kstack_depth_to_print; i++ )
    5.39 -    {
    5.40 -        if ( ((long)stack & (STACK_SIZE-1)) == 0 )
    5.41 -            break;
    5.42 -        if ( i && ((i % 8) == 0) )
    5.43 -            printk("\n       ");
    5.44 -            printk("%08lx ", *stack++);            
    5.45 -    }
    5.46 -    printk("\n");
    5.47 -    
    5.48 -}
    5.49 -
    5.50 -void show_trace(unsigned long *esp)
    5.51 -{
    5.52 -    unsigned long *stack, addr;
    5.53 -    int i;
    5.54 -
    5.55 -    printk("Call Trace from ESP=%p: ", esp);
    5.56 -    stack = esp;
    5.57 -    i = 0;
    5.58 -    while (((long) stack & (STACK_SIZE-1)) != 0) {
    5.59 -        addr = *stack++;
    5.60 -        if (kernel_text_address(addr)) {
    5.61 -            if (i && ((i % 6) == 0))
    5.62 -                printk("\n   ");
    5.63 -            printk("[<%08lx>] ", addr);
    5.64 -            i++;
    5.65 -        }
    5.66 -    }
    5.67 -    printk("\n");
    5.68 -}
    5.69 -
    5.70 -void show_stack(unsigned long *esp)
    5.71 -{
    5.72 -    unsigned long *stack;
    5.73 -    int i;
    5.74 -
    5.75 -    printk("Stack trace from ESP=%p:\n", esp);
    5.76 -
    5.77 -    stack = esp;
    5.78 -    for ( i = 0; i < kstack_depth_to_print; i++ )
    5.79 -    {
    5.80 -        if ( ((long)stack & (STACK_SIZE-1)) == 0 )
    5.81 -            break;
    5.82 -        if ( i && ((i % 8) == 0) )
    5.83 -            printk("\n       ");
    5.84 -        if ( kernel_text_address(*stack) )
    5.85 -            printk("[%08lx] ", *stack++);
    5.86 -        else
    5.87 -            printk("%08lx ", *stack++);            
    5.88 -    }
    5.89 -    printk("\n");
    5.90 -
    5.91 -    show_trace( esp );
    5.92 -}
    5.93 -
    5.94 -void show_registers(struct xen_regs *regs)
    5.95 -{
    5.96 -    unsigned long esp;
    5.97 -    unsigned short ss, ds, es, fs, gs;
    5.98 -
    5.99 -    if ( GUEST_FAULT(regs) )
   5.100 -    {
   5.101 -        esp = regs->esp;
   5.102 -        ss  = regs->ss & 0xffff;
   5.103 -        ds  = regs->ds & 0xffff;
   5.104 -        es  = regs->es & 0xffff;
   5.105 -        fs  = regs->fs & 0xffff;
   5.106 -        gs  = regs->gs & 0xffff;
   5.107 -    }
   5.108 -    else
   5.109 -    {
   5.110 -        esp = (unsigned long)(&regs->esp);
   5.111 -        ss  = __HYPERVISOR_DS;
   5.112 -        ds  = __HYPERVISOR_DS;
   5.113 -        es  = __HYPERVISOR_DS;
   5.114 -        fs  = __HYPERVISOR_DS;
   5.115 -        gs  = __HYPERVISOR_DS;
   5.116 -    }
   5.117 -
   5.118 -    printk("CPU:    %d\nEIP:    %04lx:[<%08lx>]      \nEFLAGS: %08lx\n",
   5.119 -           smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags);
   5.120 -    printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
   5.121 -           regs->eax, regs->ebx, regs->ecx, regs->edx);
   5.122 -    printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
   5.123 -           regs->esi, regs->edi, regs->ebp, esp);
   5.124 -    printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
   5.125 -           ds, es, fs, gs, ss);
   5.126 -
   5.127 -    show_stack((unsigned long *)&regs->esp);
   5.128 -} 
   5.129 -
   5.130  /*
   5.131   * This is called for faults at very unexpected times (e.g., when interrupts
   5.132   * are disabled). In such situations we can't do much that is safe. We try to
   5.133 @@ -231,7 +115,7 @@ asmlinkage void fatal_trap(int trapnr, s
   5.134  
   5.135      if ( trapnr == TRAP_page_fault )
   5.136      {
   5.137 -        __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (cr2) : );
   5.138 +        __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (cr2) : );
   5.139          printk("Faulting linear address might be %08lx\n", cr2);
   5.140      }
   5.141  
   5.142 @@ -344,38 +228,6 @@ asmlinkage int do_int3(struct xen_regs *
   5.143      return 0;
   5.144  }
   5.145  
   5.146 -asmlinkage void do_double_fault(void)
   5.147 -{
   5.148 -    struct tss_struct *tss = &doublefault_tss;
   5.149 -    unsigned int cpu = ((tss->back_link>>3)-__FIRST_TSS_ENTRY)>>1;
   5.150 -
   5.151 -    /* Disable the NMI watchdog. It's useless now. */
   5.152 -    watchdog_on = 0;
   5.153 -
   5.154 -    /* Find information saved during fault and dump it to the console. */
   5.155 -    tss = &init_tss[cpu];
   5.156 -    printk("CPU:    %d\nEIP:    %04x:[<%08x>]      \nEFLAGS: %08x\n",
   5.157 -           cpu, tss->cs, tss->eip, tss->eflags);
   5.158 -    printk("CR3:    %08x\n", tss->__cr3);
   5.159 -    printk("eax: %08x   ebx: %08x   ecx: %08x   edx: %08x\n",
   5.160 -           tss->eax, tss->ebx, tss->ecx, tss->edx);
   5.161 -    printk("esi: %08x   edi: %08x   ebp: %08x   esp: %08x\n",
   5.162 -           tss->esi, tss->edi, tss->ebp, tss->esp);
   5.163 -    printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
   5.164 -           tss->ds, tss->es, tss->fs, tss->gs, tss->ss);
   5.165 -    printk("************************************\n");
   5.166 -    printk("CPU%d DOUBLE FAULT -- system shutdown\n", cpu);
   5.167 -    printk("System needs manual reset.\n");
   5.168 -    printk("************************************\n");
   5.169 -
   5.170 -    /* Lock up the console to prevent spurious output from other CPUs. */
   5.171 -    console_force_lock();
   5.172 -
   5.173 -    /* Wait for manual reset. */
   5.174 -    for ( ; ; )
   5.175 -        __asm__ __volatile__ ( "hlt" );
   5.176 -}
   5.177 -
   5.178  asmlinkage void do_machine_check(struct xen_regs *regs)
   5.179  {
   5.180      fatal_trap(TRAP_machine_check, regs);
   5.181 @@ -408,7 +260,7 @@ asmlinkage int do_page_fault(struct xen_
   5.182      int cpu = ed->processor;
   5.183      int ret;
   5.184  
   5.185 -    __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
   5.186 +    __asm__ __volatile__ ("mov %%cr2,%0" : "=r" (addr) : );
   5.187  
   5.188      DEBUGGER_trap_entry(TRAP_page_fault, regs);
   5.189  
   5.190 @@ -477,6 +329,7 @@ asmlinkage int do_page_fault(struct xen_
   5.191  
   5.192      DEBUGGER_trap_fatal(TRAP_page_fault, regs);
   5.193  
   5.194 +#ifdef __i386__
   5.195      if ( addr >= PAGE_OFFSET )
   5.196      {
   5.197          unsigned long page;
   5.198 @@ -493,6 +346,7 @@ asmlinkage int do_page_fault(struct xen_
   5.199              printk(" -- POSSIBLY AN ACCESS TO FREED MEMORY? --\n");
   5.200  #endif
   5.201      }
   5.202 +#endif /* __i386__ */
   5.203  
   5.204      show_registers(regs);
   5.205      panic("CPU%d FATAL PAGE FAULT\n"
   5.206 @@ -542,7 +396,7 @@ static int emulate_privileged_op(struct 
   5.207          eip += 1;
   5.208          if ( (opcode & 0xc0) != 0xc0 )
   5.209              goto fail;
   5.210 -        reg = decode_reg(regs, opcode);
   5.211 +        reg = decode_reg(regs, opcode & 7);
   5.212          switch ( (opcode >> 3) & 7 )
   5.213          {
   5.214          case 0: /* Read CR0 */
   5.215 @@ -570,7 +424,7 @@ static int emulate_privileged_op(struct 
   5.216          eip += 1;
   5.217          if ( (opcode & 0xc0) != 0xc0 )
   5.218              goto fail;
   5.219 -        reg = decode_reg(regs, opcode);
   5.220 +        reg = decode_reg(regs, opcode & 7);
   5.221          switch ( (opcode >> 3) & 7 )
   5.222          {
   5.223          case 0: /* Write CR0 */
   5.224 @@ -629,7 +483,6 @@ static int emulate_privileged_op(struct 
   5.225  asmlinkage int do_general_protection(struct xen_regs *regs)
   5.226  {
   5.227      struct exec_domain *ed = current;
   5.228 -    struct domain *d = ed->domain;
   5.229      struct trap_bounce *tb = &ed->thread.trap_bounce;
   5.230      trap_info_t *ti;
   5.231      unsigned long fixup;
   5.232 @@ -681,7 +534,7 @@ asmlinkage int do_general_protection(str
   5.233          return 0;
   5.234  
   5.235  #if defined(__i386__)
   5.236 -    if ( VM_ASSIST(d, VMASST_TYPE_4gb_segments) && 
   5.237 +    if ( VM_ASSIST(ed->domain, VMASST_TYPE_4gb_segments) && 
   5.238           (regs->error_code == 0) && 
   5.239           gpf_emulate_4gb(regs) )
   5.240          return 0;
   5.241 @@ -791,19 +644,19 @@ asmlinkage int math_state_restore(struct
   5.242  
   5.243  asmlinkage int do_debug(struct xen_regs *regs)
   5.244  {
   5.245 -    unsigned int condition;
   5.246 +    unsigned long condition;
   5.247      struct exec_domain *d = current;
   5.248      struct trap_bounce *tb = &d->thread.trap_bounce;
   5.249  
   5.250      DEBUGGER_trap_entry(TRAP_debug, regs);
   5.251  
   5.252 -    __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
   5.253 +    __asm__ __volatile__("mov %%db6,%0" : "=r" (condition));
   5.254  
   5.255      /* Mask out spurious debug traps due to lazy DR7 setting */
   5.256      if ( (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) &&
   5.257           (d->thread.debugreg[7] == 0) )
   5.258      {
   5.259 -        __asm__("movl %0,%%db7" : : "r" (0));
   5.260 +        __asm__("mov %0,%%db7" : : "r" (0UL));
   5.261          goto out;
   5.262      }
   5.263  
   5.264 @@ -836,30 +689,17 @@ asmlinkage int do_spurious_interrupt_bug
   5.265      return EXCRET_not_a_fault;
   5.266  }
   5.267  
   5.268 -#define _set_gate(gate_addr,type,dpl,addr) \
   5.269 -do { \
   5.270 -  int __d0, __d1; \
   5.271 -  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
   5.272 - "movw %4,%%dx\n\t" \
   5.273 - "movl %%eax,%0\n\t" \
   5.274 - "movl %%edx,%1" \
   5.275 - :"=m" (*((long *) (gate_addr))), \
   5.276 -  "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
   5.277 - :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
   5.278 -  "3" ((char *) (addr)),"2" (__HYPERVISOR_CS << 16)); \
   5.279 -} while (0)
   5.280 -
   5.281  void set_intr_gate(unsigned int n, void *addr)
   5.282  {
   5.283      _set_gate(idt_table+n,14,0,addr);
   5.284  }
   5.285  
   5.286 -static void __init set_system_gate(unsigned int n, void *addr)
   5.287 +void set_system_gate(unsigned int n, void *addr)
   5.288  {
   5.289      _set_gate(idt_table+n,14,3,addr);
   5.290  }
   5.291  
   5.292 -static void set_task_gate(unsigned int n, unsigned int sel)
   5.293 +void set_task_gate(unsigned int n, unsigned int sel)
   5.294  {
   5.295      idt_table[n].a = sel << 16;
   5.296      idt_table[n].b = 0x8500;
   5.297 @@ -875,17 +715,6 @@ static void set_task_gate(unsigned int n
   5.298   *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
   5.299    ((limit) & 0x0ffff); }
   5.300  
   5.301 -#define _set_tssldt_desc(n,addr,limit,type) \
   5.302 -__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
   5.303 - "movw %%ax,2(%2)\n\t" \
   5.304 - "rorl $16,%%eax\n\t" \
   5.305 - "movb %%al,4(%2)\n\t" \
   5.306 - "movb %4,5(%2)\n\t" \
   5.307 - "movb $0,6(%2)\n\t" \
   5.308 - "movb %%ah,7(%2)\n\t" \
   5.309 - "rorl $16,%%eax" \
   5.310 - : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
   5.311 -
   5.312  void set_tss_desc(unsigned int n, void *addr)
   5.313  {
   5.314      _set_tssldt_desc(
   5.315 @@ -897,25 +726,10 @@ void set_tss_desc(unsigned int n, void *
   5.316  
   5.317  void __init trap_init(void)
   5.318  {
   5.319 -    /*
   5.320 -     * Make a separate task for double faults. This will get us debug output if
   5.321 -     * we blow the kernel stack.
   5.322 -     */
   5.323 -    struct tss_struct *tss = &doublefault_tss;
   5.324 -    memset(tss, 0, sizeof(*tss));
   5.325 -    tss->ds     = __HYPERVISOR_DS;
   5.326 -    tss->es     = __HYPERVISOR_DS;
   5.327 -    tss->ss     = __HYPERVISOR_DS;
   5.328 -    tss->esp    = (unsigned long)
   5.329 -        &doublefault_stack[DOUBLEFAULT_STACK_SIZE];
   5.330 -    tss->__cr3  = __pa(idle_pg_table);
   5.331 -    tss->cs     = __HYPERVISOR_CS;
   5.332 -    tss->eip    = (unsigned long)do_double_fault;
   5.333 -    tss->eflags = 2;
   5.334 -    tss->bitmap = IOBMP_INVALID_OFFSET;
   5.335 -    _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY,
   5.336 -                     (int)tss, 235, 0x89);
   5.337 +    extern void doublefault_init(void);
   5.338 +    doublefault_init();
   5.339  
   5.340 +#ifdef __i386__
   5.341      /*
   5.342       * Note that interrupt gates are always used, rather than trap gates. We 
   5.343       * must have interrupts disabled until DS/ES/FS/GS are saved because the 
   5.344 @@ -948,6 +762,7 @@ void __init trap_init(void)
   5.345  
   5.346      /* Only ring 1 can access Xen services. */
   5.347      _set_gate(idt_table+HYPERCALL_VECTOR,14,1,&hypercall);
   5.348 +#endif
   5.349  
   5.350      /* CPU0 uses the master IDT. */
   5.351      idt_tables[0] = idt_table;
   5.352 @@ -1015,57 +830,6 @@ long do_set_callbacks(unsigned long even
   5.353  }
   5.354  
   5.355  
   5.356 -long set_fast_trap(struct exec_domain *p, int idx)
   5.357 -{
   5.358 -    trap_info_t *ti;
   5.359 -
   5.360 -    /* Index 0 is special: it disables fast traps. */
   5.361 -    if ( idx == 0 )
   5.362 -    {
   5.363 -        if ( p == current )
   5.364 -            CLEAR_FAST_TRAP(&p->thread);
   5.365 -        SET_DEFAULT_FAST_TRAP(&p->thread);
   5.366 -        return 0;
   5.367 -    }
   5.368 -
   5.369 -    /*
   5.370 -     * We only fast-trap vectors 0x20-0x2f, and vector 0x80.
   5.371 -     * The former range is used by Windows and MS-DOS.
   5.372 -     * Vector 0x80 is used by Linux and the BSD variants.
   5.373 -     */
   5.374 -    if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) 
   5.375 -        return -1;
   5.376 -
   5.377 -    ti = p->thread.traps + idx;
   5.378 -
   5.379 -    /*
   5.380 -     * We can't virtualise interrupt gates, as there's no way to get
   5.381 -     * the CPU to automatically clear the events_mask variable.
   5.382 -     */
   5.383 -    if ( TI_GET_IF(ti) )
   5.384 -        return -1;
   5.385 -
   5.386 -    if ( p == current )
   5.387 -        CLEAR_FAST_TRAP(&p->thread);
   5.388 -
   5.389 -    p->thread.fast_trap_idx    = idx;
   5.390 -    p->thread.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
   5.391 -    p->thread.fast_trap_desc.b = 
   5.392 -        (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13;
   5.393 -
   5.394 -    if ( p == current )
   5.395 -        SET_FAST_TRAP(&p->thread);
   5.396 -
   5.397 -    return 0;
   5.398 -}
   5.399 -
   5.400 -
   5.401 -long do_set_fast_trap(int idx)
   5.402 -{
   5.403 -    return set_fast_trap(current, idx);
   5.404 -}
   5.405 -
   5.406 -
   5.407  long do_fpu_taskswitch(void)
   5.408  {
   5.409      set_bit(EDF_GUEST_STTS, &current->ed_flags);
   5.410 @@ -1083,22 +847,22 @@ long set_debugreg(struct exec_domain *p,
   5.411      case 0: 
   5.412          if ( value > (PAGE_OFFSET-4) ) return -EPERM;
   5.413          if ( p == current ) 
   5.414 -            __asm__ ( "movl %0, %%db0" : : "r" (value) );
   5.415 +            __asm__ ( "mov %0, %%db0" : : "r" (value) );
   5.416          break;
   5.417      case 1: 
   5.418          if ( value > (PAGE_OFFSET-4) ) return -EPERM;
   5.419          if ( p == current ) 
   5.420 -            __asm__ ( "movl %0, %%db1" : : "r" (value) );
   5.421 +            __asm__ ( "mov %0, %%db1" : : "r" (value) );
   5.422          break;
   5.423      case 2: 
   5.424          if ( value > (PAGE_OFFSET-4) ) return -EPERM;
   5.425          if ( p == current ) 
   5.426 -            __asm__ ( "movl %0, %%db2" : : "r" (value) );
   5.427 +            __asm__ ( "mov %0, %%db2" : : "r" (value) );
   5.428          break;
   5.429      case 3:
   5.430          if ( value > (PAGE_OFFSET-4) ) return -EPERM;
   5.431          if ( p == current ) 
   5.432 -            __asm__ ( "movl %0, %%db3" : : "r" (value) );
   5.433 +            __asm__ ( "mov %0, %%db3" : : "r" (value) );
   5.434          break;
   5.435      case 6:
   5.436          /*
   5.437 @@ -1108,7 +872,7 @@ long set_debugreg(struct exec_domain *p,
   5.438          value &= 0xffffefff; /* reserved bits => 0 */
   5.439          value |= 0xffff0ff0; /* reserved bits => 1 */
   5.440          if ( p == current ) 
   5.441 -            __asm__ ( "movl %0, %%db6" : : "r" (value) );
   5.442 +            __asm__ ( "mov %0, %%db6" : : "r" (value) );
   5.443          break;
   5.444      case 7:
   5.445          /*
   5.446 @@ -1129,7 +893,7 @@ long set_debugreg(struct exec_domain *p,
   5.447                  if ( ((value >> (i+16)) & 3) == 2 ) return -EPERM;
   5.448          }
   5.449          if ( p == current ) 
   5.450 -            __asm__ ( "movl %0, %%db7" : : "r" (value) );
   5.451 +            __asm__ ( "mov %0, %%db7" : : "r" (value) );
   5.452          break;
   5.453      default:
   5.454          return -EINVAL;
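
The `opcode & 7` fix in the emulate_privileged_op() hunks above passes only the low three bits of the ModRM byte -- the r/m field naming the general-purpose register operand -- to decode_reg(), instead of the whole byte. A minimal C sketch (not part of this changeset; helper names are illustrative) of how that byte splits into fields for the 0x0f 0x20 / 0x0f 0x22 mov-from/to-CR forms:

#include <stdio.h>
#include <stdint.h>

static unsigned int modrm_mod(uint8_t m) { return (m >> 6) & 3; }  /* must be 3: register form */
static unsigned int modrm_reg(uint8_t m) { return (m >> 3) & 7; }  /* control-register number  */
static unsigned int modrm_rm (uint8_t m) { return  m       & 7; }  /* GP register operand      */

int main(void)
{
    uint8_t m = 0xd9;  /* example byte: mod=3, reg=3 (%cr3), rm=1 (%ecx/%rcx) */
    printf("mod=%u cr=%u gpreg=%u\n", modrm_mod(m), modrm_reg(m), modrm_rm(m));
    return 0;
}
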
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/xen/arch/x86/x86_32/traps.c	Wed Feb 02 00:26:53 2005 +0000
     6.3 @@ -0,0 +1,226 @@
     6.4 +
     6.5 +#include <xen/config.h>
     6.6 +#include <xen/init.h>
     6.7 +#include <xen/sched.h>
     6.8 +#include <xen/lib.h>
     6.9 +#include <xen/console.h>
    6.10 +#include <xen/mm.h>
    6.11 +#include <xen/irq.h>
    6.12 +
    6.13 +static int kstack_depth_to_print = 8*20;
    6.14 +
    6.15 +static inline int kernel_text_address(unsigned long addr)
    6.16 +{
    6.17 +    if (addr >= (unsigned long) &_stext &&
    6.18 +        addr <= (unsigned long) &_etext)
    6.19 +        return 1;
    6.20 +    return 0;
    6.21 +
    6.22 +}
    6.23 +
    6.24 +void show_guest_stack(void)
    6.25 +{
    6.26 +    int i;
    6.27 +    execution_context_t *ec = get_execution_context();
    6.28 +    unsigned long *stack = (unsigned long *)ec->esp;
    6.29 +    printk("Guest EIP is %lx\n",ec->eip);
    6.30 +
    6.31 +    for ( i = 0; i < kstack_depth_to_print; i++ )
    6.32 +    {
    6.33 +        if ( ((long)stack & (STACK_SIZE-1)) == 0 )
    6.34 +            break;
    6.35 +        if ( i && ((i % 8) == 0) )
    6.36 +            printk("\n       ");
    6.37 +            printk("%08lx ", *stack++);            
    6.38 +    }
    6.39 +    printk("\n");
    6.40 +    
    6.41 +}
    6.42 +
    6.43 +void show_trace(unsigned long *esp)
    6.44 +{
    6.45 +    unsigned long *stack, addr;
    6.46 +    int i;
    6.47 +
    6.48 +    printk("Call Trace from ESP=%p: ", esp);
    6.49 +    stack = esp;
    6.50 +    i = 0;
    6.51 +    while (((long) stack & (STACK_SIZE-1)) != 0) {
    6.52 +        addr = *stack++;
    6.53 +        if (kernel_text_address(addr)) {
    6.54 +            if (i && ((i % 6) == 0))
    6.55 +                printk("\n   ");
    6.56 +            printk("[<%08lx>] ", addr);
    6.57 +            i++;
    6.58 +        }
    6.59 +    }
    6.60 +    printk("\n");
    6.61 +}
    6.62 +
    6.63 +void show_stack(unsigned long *esp)
    6.64 +{
    6.65 +    unsigned long *stack;
    6.66 +    int i;
    6.67 +
    6.68 +    printk("Stack trace from ESP=%p:\n", esp);
    6.69 +
    6.70 +    stack = esp;
    6.71 +    for ( i = 0; i < kstack_depth_to_print; i++ )
    6.72 +    {
    6.73 +        if ( ((long)stack & (STACK_SIZE-1)) == 0 )
    6.74 +            break;
    6.75 +        if ( i && ((i % 8) == 0) )
    6.76 +            printk("\n       ");
    6.77 +        if ( kernel_text_address(*stack) )
    6.78 +            printk("[%08lx] ", *stack++);
    6.79 +        else
    6.80 +            printk("%08lx ", *stack++);            
    6.81 +    }
    6.82 +    printk("\n");
    6.83 +
    6.84 +    show_trace( esp );
    6.85 +}
    6.86 +
    6.87 +void show_registers(struct xen_regs *regs)
    6.88 +{
    6.89 +    unsigned long esp;
    6.90 +    unsigned short ss, ds, es, fs, gs;
    6.91 +
    6.92 +    if ( GUEST_FAULT(regs) )
    6.93 +    {
    6.94 +        esp = regs->esp;
    6.95 +        ss  = regs->ss & 0xffff;
    6.96 +        ds  = regs->ds & 0xffff;
    6.97 +        es  = regs->es & 0xffff;
    6.98 +        fs  = regs->fs & 0xffff;
    6.99 +        gs  = regs->gs & 0xffff;
   6.100 +    }
   6.101 +    else
   6.102 +    {
   6.103 +        esp = (unsigned long)(&regs->esp);
   6.104 +        ss  = __HYPERVISOR_DS;
   6.105 +        ds  = __HYPERVISOR_DS;
   6.106 +        es  = __HYPERVISOR_DS;
   6.107 +        fs  = __HYPERVISOR_DS;
   6.108 +        gs  = __HYPERVISOR_DS;
   6.109 +    }
   6.110 +
   6.111 +    printk("CPU:    %d\nEIP:    %04lx:[<%08lx>]      \nEFLAGS: %08lx\n",
   6.112 +           smp_processor_id(), 0xffff & regs->cs, regs->eip, regs->eflags);
   6.113 +    printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
   6.114 +           regs->eax, regs->ebx, regs->ecx, regs->edx);
   6.115 +    printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
   6.116 +           regs->esi, regs->edi, regs->ebp, esp);
   6.117 +    printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
   6.118 +           ds, es, fs, gs, ss);
   6.119 +
   6.120 +    show_stack((unsigned long *)&regs->esp);
   6.121 +} 
   6.122 +
   6.123 +#define DOUBLEFAULT_STACK_SIZE 1024
   6.124 +static struct tss_struct doublefault_tss;
   6.125 +static unsigned char doublefault_stack[DOUBLEFAULT_STACK_SIZE];
   6.126 +
   6.127 +asmlinkage void do_double_fault(void)
   6.128 +{
   6.129 +    struct tss_struct *tss = &doublefault_tss;
   6.130 +    unsigned int cpu = ((tss->back_link>>3)-__FIRST_TSS_ENTRY)>>1;
   6.131 +
   6.132 +    /* Disable the NMI watchdog. It's useless now. */
   6.133 +    watchdog_on = 0;
   6.134 +
   6.135 +    /* Find information saved during fault and dump it to the console. */
   6.136 +    tss = &init_tss[cpu];
   6.137 +    printk("CPU:    %d\nEIP:    %04x:[<%08x>]      \nEFLAGS: %08x\n",
   6.138 +           cpu, tss->cs, tss->eip, tss->eflags);
   6.139 +    printk("CR3:    %08x\n", tss->__cr3);
   6.140 +    printk("eax: %08x   ebx: %08x   ecx: %08x   edx: %08x\n",
   6.141 +           tss->eax, tss->ebx, tss->ecx, tss->edx);
   6.142 +    printk("esi: %08x   edi: %08x   ebp: %08x   esp: %08x\n",
   6.143 +           tss->esi, tss->edi, tss->ebp, tss->esp);
   6.144 +    printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
   6.145 +           tss->ds, tss->es, tss->fs, tss->gs, tss->ss);
   6.146 +    printk("************************************\n");
   6.147 +    printk("CPU%d DOUBLE FAULT -- system shutdown\n", cpu);
   6.148 +    printk("System needs manual reset.\n");
   6.149 +    printk("************************************\n");
   6.150 +
   6.151 +    /* Lock up the console to prevent spurious output from other CPUs. */
   6.152 +    console_force_lock();
   6.153 +
   6.154 +    /* Wait for manual reset. */
   6.155 +    for ( ; ; )
   6.156 +        __asm__ __volatile__ ( "hlt" );
   6.157 +}
   6.158 +
   6.159 +void __init doublefault_init(void)
   6.160 +{
   6.161 +    /*
   6.162 +     * Make a separate task for double faults. This will get us debug output if
   6.163 +     * we blow the kernel stack.
   6.164 +     */
   6.165 +    struct tss_struct *tss = &doublefault_tss;
   6.166 +    memset(tss, 0, sizeof(*tss));
   6.167 +    tss->ds     = __HYPERVISOR_DS;
   6.168 +    tss->es     = __HYPERVISOR_DS;
   6.169 +    tss->ss     = __HYPERVISOR_DS;
   6.170 +    tss->esp    = (unsigned long)
   6.171 +        &doublefault_stack[DOUBLEFAULT_STACK_SIZE];
   6.172 +    tss->__cr3  = __pa(idle_pg_table);
   6.173 +    tss->cs     = __HYPERVISOR_CS;
   6.174 +    tss->eip    = (unsigned long)do_double_fault;
   6.175 +    tss->eflags = 2;
   6.176 +    tss->bitmap = IOBMP_INVALID_OFFSET;
   6.177 +    _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY,
   6.178 +                     (int)tss, 235, 0x89);
   6.179 +}
   6.180 +
   6.181 +long set_fast_trap(struct exec_domain *p, int idx)
   6.182 +{
   6.183 +    trap_info_t *ti;
   6.184 +
   6.185 +    /* Index 0 is special: it disables fast traps. */
   6.186 +    if ( idx == 0 )
   6.187 +    {
   6.188 +        if ( p == current )
   6.189 +            CLEAR_FAST_TRAP(&p->thread);
   6.190 +        SET_DEFAULT_FAST_TRAP(&p->thread);
   6.191 +        return 0;
   6.192 +    }
   6.193 +
   6.194 +    /*
   6.195 +     * We only fast-trap vectors 0x20-0x2f, and vector 0x80.
   6.196 +     * The former range is used by Windows and MS-DOS.
   6.197 +     * Vector 0x80 is used by Linux and the BSD variants.
   6.198 +     */
   6.199 +    if ( (idx != 0x80) && ((idx < 0x20) || (idx > 0x2f)) ) 
   6.200 +        return -1;
   6.201 +
   6.202 +    ti = p->thread.traps + idx;
   6.203 +
   6.204 +    /*
   6.205 +     * We can't virtualise interrupt gates, as there's no way to get
   6.206 +     * the CPU to automatically clear the events_mask variable.
   6.207 +     */
   6.208 +    if ( TI_GET_IF(ti) )
   6.209 +        return -1;
   6.210 +
   6.211 +    if ( p == current )
   6.212 +        CLEAR_FAST_TRAP(&p->thread);
   6.213 +
   6.214 +    p->thread.fast_trap_idx    = idx;
   6.215 +    p->thread.fast_trap_desc.a = (ti->cs << 16) | (ti->address & 0xffff);
   6.216 +    p->thread.fast_trap_desc.b = 
   6.217 +        (ti->address & 0xffff0000) | 0x8f00 | (TI_GET_DPL(ti)&3)<<13;
   6.218 +
   6.219 +    if ( p == current )
   6.220 +        SET_FAST_TRAP(&p->thread);
   6.221 +
   6.222 +    return 0;
   6.223 +}
   6.224 +
   6.225 +
   6.226 +long do_set_fast_trap(int idx)
   6.227 +{
   6.228 +    return set_fast_trap(current, idx);
   6.229 +}
     7.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.2 +++ b/xen/arch/x86/x86_64/traps.c	Wed Feb 02 00:26:53 2005 +0000
     7.3 @@ -0,0 +1,127 @@
     7.4 +
     7.5 +#include <xen/config.h>
     7.6 +#include <xen/init.h>
     7.7 +#include <xen/sched.h>
     7.8 +#include <xen/lib.h>
     7.9 +#include <xen/errno.h>
    7.10 +#include <xen/mm.h>
    7.11 +#include <xen/irq.h>
    7.12 +
    7.13 +static int kstack_depth_to_print = 8*20;
    7.14 +
    7.15 +static inline int kernel_text_address(unsigned long addr)
    7.16 +{
    7.17 +    if (addr >= (unsigned long) &_stext &&
    7.18 +        addr <= (unsigned long) &_etext)
    7.19 +        return 1;
    7.20 +    return 0;
    7.21 +
    7.22 +}
    7.23 +
    7.24 +void show_guest_stack(void)
    7.25 +{
    7.26 +    int i;
    7.27 +    execution_context_t *ec = get_execution_context();
    7.28 +    unsigned long *stack = (unsigned long *)ec->rsp;
    7.29 +    printk("Guest RIP is %lx\n", ec->rip);
    7.30 +
    7.31 +    for ( i = 0; i < kstack_depth_to_print; i++ )
    7.32 +    {
    7.33 +        if ( ((long)stack & (STACK_SIZE-1)) == 0 )
    7.34 +            break;
    7.35 +        if ( i && ((i % 8) == 0) )
    7.36 +            printk("\n       ");
    7.37 +            printk("%08lx ", *stack++);            
    7.38 +    }
    7.39 +    printk("\n");
    7.40 +    
    7.41 +}
    7.42 +
    7.43 +void show_trace(unsigned long *rsp)
    7.44 +{
    7.45 +    unsigned long *stack, addr;
    7.46 +    int i;
    7.47 +
    7.48 +    printk("Call Trace from RSP=%p: ", rsp);
    7.49 +    stack = rsp;
    7.50 +    i = 0;
    7.51 +    while (((long) stack & (STACK_SIZE-1)) != 0) {
    7.52 +        addr = *stack++;
    7.53 +        if (kernel_text_address(addr)) {
    7.54 +            if (i && ((i % 6) == 0))
    7.55 +                printk("\n   ");
    7.56 +            printk("[<%08lx>] ", addr);
    7.57 +            i++;
    7.58 +        }
    7.59 +    }
    7.60 +    printk("\n");
    7.61 +}
    7.62 +
    7.63 +void show_stack(unsigned long *rsp)
    7.64 +{
    7.65 +    unsigned long *stack;
    7.66 +    int i;
    7.67 +
    7.68 +    printk("Stack trace from RSP=%p:\n", rsp);
    7.69 +
    7.70 +    stack = rsp;
    7.71 +    for ( i = 0; i < kstack_depth_to_print; i++ )
    7.72 +    {
    7.73 +        if ( ((long)stack & (STACK_SIZE-1)) == 0 )
    7.74 +            break;
    7.75 +        if ( i && ((i % 8) == 0) )
    7.76 +            printk("\n       ");
    7.77 +        if ( kernel_text_address(*stack) )
    7.78 +            printk("[%08lx] ", *stack++);
    7.79 +        else
    7.80 +            printk("%08lx ", *stack++);            
    7.81 +    }
    7.82 +    printk("\n");
    7.83 +
    7.84 +    show_trace(rsp);
    7.85 +}
    7.86 +
    7.87 +void show_registers(struct xen_regs *regs)
    7.88 +{
    7.89 +    printk("CPU:    %d\nEIP:    %04lx:[<%08lx>]      \nEFLAGS: %08lx\n",
    7.90 +           smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags);
    7.91 +    printk("rax: %08lx   rbx: %08lx   rcx: %08lx   rdx: %08lx\n",
    7.92 +           regs->rax, regs->rbx, regs->rcx, regs->rdx);
    7.93 +    printk("rsi: %08lx   rdi: %08lx   rbp: %08lx   rsp: %08lx   ss: %04x\n",
    7.94 +           regs->rsi, regs->rdi, regs->rbp, regs->rsp, regs->ss);
    7.95 +    printk("r8:  %08lx   r9:  %08lx   r10: %08lx   r11: %08lx\n",
    7.96 +           regs->r8,  regs->r9,  regs->r10, regs->r11);
    7.97 +    printk("r12: %08lx   r13: %08lx   r14: %08lx   r15: %08lx\n",
    7.98 +           regs->r12, regs->r13, regs->r14, regs->r15);
    7.99 +
   7.100 +    show_stack((unsigned long *)regs->rsp);
   7.101 +} 
   7.102 +
   7.103 +void __init doublefault_init(void)
   7.104 +{
   7.105 +}
   7.106 +
   7.107 +void *decode_reg(struct xen_regs *regs, u8 b)
   7.108 +{
   7.109 +    switch ( b )
   7.110 +    {
   7.111 +    case  0: return &regs->rax;
   7.112 +    case  1: return &regs->rcx;
   7.113 +    case  2: return &regs->rdx;
   7.114 +    case  3: return &regs->rbx;
   7.115 +    case  4: return &regs->rsp;
   7.116 +    case  5: return &regs->rbp;
   7.117 +    case  6: return &regs->rsi;
   7.118 +    case  7: return &regs->rdi;
   7.119 +    case  8: return &regs->r8;
   7.120 +    case  9: return &regs->r9;
   7.121 +    case 10: return &regs->r10;
   7.122 +    case 11: return &regs->r11;
   7.123 +    case 12: return &regs->r12;
   7.124 +    case 13: return &regs->r13;
   7.125 +    case 14: return &regs->r14;
   7.126 +    case 15: return &regs->r15;
   7.127 +    }
   7.128 +
   7.129 +    return NULL;
   7.130 +}
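
decode_reg() above returns a pointer into struct xen_regs, so a caller such as emulate_privileged_op() can read or write whichever guest register the instruction named. A stand-alone C sketch of that pattern (not Xen code; the register block is cut down to four entries and the surrounding emulation is hypothetical):

#include <stdio.h>
#include <stdint.h>

struct mini_regs { uint64_t rax, rcx, rdx, rbx; };   /* stand-in for struct xen_regs */

/* Return a pointer to the register selected by the low bits of b. */
static uint64_t *decode_reg_sketch(struct mini_regs *r, uint8_t b)
{
    switch ( b & 3 )   /* only four registers in this cut-down sketch */
    {
    case 0:  return &r->rax;
    case 1:  return &r->rcx;
    case 2:  return &r->rdx;
    default: return &r->rbx;
    }
}

int main(void)
{
    struct mini_regs r = { 0, 0, 0, 0 };
    uint8_t modrm = 0xd9;                              /* mod=3, reg=3, rm=1 (%rcx) */
    uint64_t *dst = decode_reg_sketch(&r, modrm & 7);  /* mask to the r/m field, as in the patch */
    *dst = 0x1000;                                     /* e.g. the value just read from %cr3 */
    printf("rcx = %#llx\n", (unsigned long long)r.rcx);
    return 0;
}
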
     8.1 --- a/xen/include/asm-x86/desc.h	Wed Feb 02 00:22:15 2005 +0000
     8.2 +++ b/xen/include/asm-x86/desc.h	Wed Feb 02 00:26:53 2005 +0000
     8.3 @@ -1,5 +1,6 @@
     8.4  #ifndef __ARCH_DESC_H
     8.5  #define __ARCH_DESC_H
     8.6 +#ifndef __ASSEMBLY__
     8.7  
     8.8  #define LDT_ENTRY_SIZE 8
     8.9  
    8.10 @@ -25,7 +26,6 @@
    8.11        (((_s)>>3) >  LAST_RESERVED_GDT_ENTRY) ||                            \
    8.12        ((_s)&4)) &&                                                         \
    8.13       (((_s)&3) == 1))
    8.14 -#define VALID_CODESEL(_s) ((_s) == FLAT_RING1_CS || VALID_SEL(_s))
    8.15  
    8.16  /* These are bitmasks for the high 32 bits of a descriptor table entry. */
    8.17  #define _SEGMENT_TYPE    (15<< 8)
    8.18 @@ -38,17 +38,51 @@
    8.19  #define _SEGMENT_DB      ( 1<<22) /* 16- or 32-bit segment */
    8.20  #define _SEGMENT_G       ( 1<<23) /* Granularity */
    8.21  
    8.22 -#ifndef __ASSEMBLY__
    8.23  struct desc_struct {
    8.24      u32 a, b;
    8.25  };
    8.26  
    8.27  #if defined(__x86_64__)
    8.28 +
    8.29 +#define VALID_CODESEL(_s) ((_s) == FLAT_RING3_CS64 || VALID_SEL(_s))
    8.30 +
    8.31  typedef struct {
    8.32      u64 a, b;
    8.33  } idt_entry_t;
    8.34 +
    8.35 +#define _set_gate(gate_addr,type,dpl,addr) ((void)0)
    8.36 +#define _set_tssldt_desc(n,addr,limit,type) ((void)0)
    8.37 +
    8.38  #elif defined(__i386__)
    8.39 +
    8.40 +#define VALID_CODESEL(_s) ((_s) == FLAT_RING1_CS || VALID_SEL(_s))
    8.41 +
    8.42  typedef struct desc_struct idt_entry_t;
    8.43 +
    8.44 +#define _set_gate(gate_addr,type,dpl,addr) \
    8.45 +do { \
    8.46 +  int __d0, __d1; \
    8.47 +  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
    8.48 + "movw %4,%%dx\n\t" \
    8.49 + "movl %%eax,%0\n\t" \
    8.50 + "movl %%edx,%1" \
    8.51 + :"=m" (*((long *) (gate_addr))), \
    8.52 +  "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
    8.53 + :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
    8.54 +  "3" ((char *) (addr)),"2" (__HYPERVISOR_CS << 16)); \
    8.55 +} while (0)
    8.56 +
    8.57 +#define _set_tssldt_desc(n,addr,limit,type) \
    8.58 +__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
    8.59 + "movw %%ax,2(%2)\n\t" \
    8.60 + "rorl $16,%%eax\n\t" \
    8.61 + "movb %%al,4(%2)\n\t" \
    8.62 + "movb %4,5(%2)\n\t" \
    8.63 + "movb $0,6(%2)\n\t" \
    8.64 + "movb %%ah,7(%2)\n\t" \
    8.65 + "rorl $16,%%eax" \
    8.66 + : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
    8.67 +
    8.68  #endif
    8.69  
    8.70  extern struct desc_struct gdt_table[];
    8.71 @@ -64,8 +98,9 @@ struct Xgt_desc_struct {
    8.72  #define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2))
    8.73  
    8.74  extern void set_intr_gate(unsigned int irq, void * addr);
    8.75 +extern void set_system_gate(unsigned int n, void *addr);
    8.76 +extern void set_task_gate(unsigned int n, unsigned int sel);
    8.77  extern void set_tss_desc(unsigned int n, void *addr);
    8.78  
    8.79  #endif /* !__ASSEMBLY__ */
    8.80 -
    8.81 -#endif
    8.82 +#endif /* __ARCH_DESC_H */
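
The _set_gate() assembly that moved into the __i386__ branch above packs a selector, handler address, gate type and DPL into the two 32-bit words of an IDT entry. A plain-C sketch of the same encoding (not the Xen macro; the 0xe008 selector standing in for __HYPERVISOR_CS is an assumption):

#include <stdio.h>
#include <stdint.h>

/* Build the two words of an i386 interrupt/trap gate descriptor. */
static void build_gate(uint32_t *a, uint32_t *b,
                       uint16_t sel, uint32_t addr,
                       unsigned int type, unsigned int dpl)
{
    *a = ((uint32_t)sel << 16) | (addr & 0xffffu);                    /* selector : offset[15:0]      */
    *b = (addr & 0xffff0000u) | 0x8000u | (dpl << 13) | (type << 8);  /* offset[31:16] : P, DPL, type */
}

int main(void)
{
    uint32_t a, b;
    build_gate(&a, &b, 0xe008, 0x12345678u, 14, 0);  /* type 14 = interrupt gate, DPL 0, as in set_intr_gate() */
    printf("gate words: b=%08x a=%08x\n", b, a);
    return 0;
}
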
     9.1 --- a/xen/include/asm-x86/regs.h	Wed Feb 02 00:22:15 2005 +0000
     9.2 +++ b/xen/include/asm-x86/regs.h	Wed Feb 02 00:26:53 2005 +0000
     9.3 @@ -31,4 +31,6 @@ enum EFLAGS {
     9.4      EF_ID   = 0x00200000,   /* id */
     9.5  };
     9.6  
     9.7 +#define GUEST_FAULT(_r) (likely(VM86_MODE(_r) || !RING_0(_r)))
     9.8 +
     9.9  #endif /* __X86_REGS_H__ */
    10.1 --- a/xen/include/asm-x86/x86_64/regs.h	Wed Feb 02 00:22:15 2005 +0000
    10.2 +++ b/xen/include/asm-x86/x86_64/regs.h	Wed Feb 02 00:26:53 2005 +0000
    10.3 @@ -11,18 +11,19 @@ struct xen_regs
    10.4      u64 r12;
    10.5      u64 rbp;
    10.6      u64 rbx;
    10.7 +    /* NB. Above here is C callee-saves. */
    10.8      u64 r11;
    10.9      u64 r10;	
   10.10      u64 r9;
   10.11      u64 r8;
   10.12 -    u64 rax;
   10.13 -    u64 rcx;
   10.14 -    u64 rdx;
   10.15 -    u64 rsi;
   10.16 -    u64 rdi;
   10.17 +    union { u64 rax; u32 eax; } __attribute__ ((packed));
   10.18 +    union { u64 rcx; u32 ecx; } __attribute__ ((packed));
   10.19 +    union { u64 rdx; u32 edx; } __attribute__ ((packed));
   10.20 +    union { u64 rsi; u32 esi; } __attribute__ ((packed));
   10.21 +    union { u64 rdi; u32 edi; } __attribute__ ((packed));
   10.22      u32 error_code;
   10.23      u32 entry_vector;
   10.24 -    u64 rip;
   10.25 +    union { u64 rip; u64 eip; } __attribute__ ((packed));
   10.26      u64 cs;
   10.27      u64 eflags;
   10.28      u64 rsp;
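
The unions added to the x86_64 struct xen_regs above let the 32-bit register names alias the low half of the 64-bit slots, so code shared with the i386 build can keep writing regs->eax, regs->eip and so on. A small C sketch of that aliasing on a little-endian machine (not Xen code; only two registers shown, and the 32-bit aliases are declared u32 here purely for illustration):

#include <stdio.h>
#include <stdint.h>

struct regs_sketch {
    union { uint64_t rax; uint32_t eax; };  /* anonymous union: eax aliases the low 32 bits of rax */
    union { uint64_t rip; uint32_t eip; };
};

int main(void)
{
    struct regs_sketch r;
    r.rax = 0x1122334455667788ULL;
    r.rip = 0x0000000000401000ULL;
    printf("rax=%016llx eax=%08x eip=%08x\n",
           (unsigned long long)r.rax, r.eax, r.eip);  /* eax prints 55667788 on little-endian */
    return 0;
}
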