ia64/xen-unstable

changeset 357:c9a8a90c8a96

bitkeeper revision 1.165 (3e9ae27fgvCRUGxAZJ70ClZ8aKWQkw)

Many files:
Virtualised the GDT. Also some progress in virtualising the LDT.
author kaf24@scramble.cl.cam.ac.uk
date Mon Apr 14 16:31:59 2003 +0000 (2003-04-14)
parents 1a214a999de6
children 88dba60f48b3 8d23d3ad0f20
files tools/domain_builder/dom_builder.c xen/arch/i386/boot/boot.S xen/arch/i386/entry.S xen/arch/i386/ioremap.c xen/arch/i386/mm.c xen/arch/i386/process.c xen/arch/i386/traps.c xen/common/domain.c xen/include/asm-i386/desc.h xen/include/asm-i386/processor.h xen/include/asm-i386/ptrace.h xen/include/hypervisor-ifs/hypervisor-if.h xen/include/xeno/config.h xen/include/xeno/sched.h xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/setup.c xenolinux-2.4.21-pre4-sparse/include/asm-xeno/hypervisor.h xenolinux-2.4.21-pre4-sparse/include/asm-xeno/page.h xenolinux-2.4.21-pre4-sparse/include/asm-xeno/segment.h
line diff
     1.1 --- a/tools/domain_builder/dom_builder.c	Thu Apr 10 11:06:11 2003 +0000
     1.2 +++ b/tools/domain_builder/dom_builder.c	Mon Apr 14 16:31:59 2003 +0000
     1.3 @@ -23,8 +23,13 @@
     1.4  #define GUEST_SIG   "XenoGues"
     1.5  #define SIG_LEN    8
     1.6  
     1.7 -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
     1.8 -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
     1.9 +/*
    1.10 + * NB. No ring-3 access in initial guestOS pagetables. Note that we allow
    1.11 + * ring-3 privileges in the page directories, so that the guestOS may later
    1.12 + * decide to share a 4MB region with applications.
    1.13 + */
    1.14 +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
    1.15 +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
    1.16  
    1.17  /* standardized error reporting function */
    1.18  static void dberr(char *msg)
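
The protection-flag change above is easier to read with the bits spelled out. Below is a standalone C sketch (not part of the patch; the flag values are the standard x86 ones): dropping _PAGE_USER from L1_PROT only locks out ring 3, because the paging unit treats CPL 0-2, including the ring-1 guest kernel, as supervisor. Keeping _PAGE_USER in the directory entry means the guest can later open a 4MB region to applications just by setting the user bit on individual L1 entries.

    #include <stdio.h>

    /* Standard x86 page-table entry flag bits. */
    #define _PAGE_PRESENT  0x001
    #define _PAGE_RW       0x002
    #define _PAGE_USER     0x004
    #define _PAGE_ACCESSED 0x020
    #define _PAGE_DIRTY    0x040

    /* The new initial-pagetable protections from this patch. */
    #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
    #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)

    /* CPL-3 access is permitted only if both the directory entry and the
     * table entry have the user bit set. */
    static int user_accessible(unsigned long l2e, unsigned long l1e)
    {
        return (l2e & _PAGE_USER) && (l1e & _PAGE_USER);
    }

    int main(void)
    {
        printf("initial mapping user-accessible: %d\n",
               user_accessible(L2_PROT, L1_PROT));              /* 0 */
        printf("after guest sets user bit in L1: %d\n",
               user_accessible(L2_PROT, L1_PROT | _PAGE_USER)); /* 1 */
        return 0;
    }
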
     2.1 --- a/xen/arch/i386/boot/boot.S	Thu Apr 10 11:06:11 2003 +0000
     2.2 +++ b/xen/arch/i386/boot/boot.S	Mon Apr 14 16:31:59 2003 +0000
     2.3 @@ -208,28 +208,28 @@ SYMBOL_NAME(idt):
     2.4  
     2.5          .word   0
     2.6  gdt_descr:
     2.7 -	.word	256*8-1
     2.8 +	.word	(2*NR_CPUS+8)*8-1
     2.9  SYMBOL_NAME(gdt):       
    2.10          .long   SYMBOL_NAME(gdt_table)	/* gdt base */
    2.11  
    2.12          .word   0
    2.13  nopaging_gdt_descr:
    2.14 -        .word   256*8-1
    2.15 +        .word   (2*NR_CPUS+8)*8-1
    2.16          .long   SYMBOL_NAME(gdt_table)-__PAGE_OFFSET
    2.17          
    2.18          ALIGN
    2.19  /* NB. Rings != 0 get access up to 0xFC400000. This allows access to the */
    2.20  /*     machine->physical mapping table. Ring 0 can access all memory.    */
    2.21  ENTRY(gdt_table)
    2.22 -        .quad 0x0000000000000000     /* NULL descriptor */
    2.23 -        .quad 0x0000000000000000     /* not used */
    2.24 -        .quad 0x00cfba000000c3ff     /* 0x11 ring 1 3.95GB code at 0x0 */
    2.25 -        .quad 0x00cfb2000000c3ff     /* 0x19 ring 1 3.95GB data at 0x0 */
    2.26 -        .quad 0x00cffa000000c3ff     /* 0x23 ring 3 3.95GB code at 0x0 */
    2.27 -        .quad 0x00cff2000000c3ff     /* 0x2b ring 3 3.95GB data at 0x0 */
    2.28 -        .quad 0x00cf9a000000ffff     /* 0x30 ring 0 4.00GB code at 0x0 */
    2.29 -        .quad 0x00cf92000000ffff     /* 0x38 ring 0 4.00GB data at 0x0 */
    2.30 -        .fill NR_CPUS,8,0             /* space for TSS's */
    2.31 +        .quad 0x0000000000000000     /* 0x0000 NULL descriptor */
    2.32 +        .quad 0x00cf9a000000ffff     /* 0x0008 ring 0 4.00GB code at 0x0 */
    2.33 +        .quad 0x00cf92000000ffff     /* 0x0010 ring 0 4.00GB data at 0x0 */
    2.34 +        .quad 0x00cfba000000c3ff     /* 0x0019 ring 1 3.95GB code at 0x0 */
    2.35 +        .quad 0x00cfb2000000c3ff     /* 0x0021 ring 1 3.95GB data at 0x0 */
    2.36 +        .quad 0x00cffa000000c3ff     /* 0x002b ring 3 3.95GB code at 0x0 */
    2.37 +        .quad 0x00cff2000000c3ff     /* 0x0033 ring 3 3.95GB data at 0x0 */
    2.38 +        .quad 0x0000000000000000     /* unused                           */
    2.39 +        .fill 2*NR_CPUS,8,0          /* space for TSS and LDT per CPU    */
    2.40  
    2.41  # The following adds 12kB to the kernel file size.
    2.42          .org 0x1000
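
The reshuffled gdt_table puts Xen's ring-0 segments first and the guest-visible flat segments after them; the hex values in the comments are the selectors, i.e. the descriptor byte offset with the requestor privilege level folded into the low two bits. A standalone sketch (assuming only the standard x86 selector format) that decodes the new values:

    #include <stdio.h>

    /* Selector layout: bits 15..3 = table index, bit 2 = TI (LDT), bits 1..0 = RPL. */
    static void decode(unsigned int sel)
    {
        printf("sel %#06x -> GDT index %u, TI %u, RPL %u\n",
               sel, sel >> 3, (sel >> 2) & 1, sel & 3);
    }

    int main(void)
    {
        decode(0x0008);  /* __HYPERVISOR_CS: index 1, ring 0        */
        decode(0x0010);  /* __HYPERVISOR_DS: index 2, ring 0        */
        decode(0x0019);  /* FLAT_RING1_CS:   index 3, RPL 1 (guest) */
        decode(0x0021);  /* FLAT_RING1_DS:   index 4, RPL 1         */
        decode(0x002b);  /* FLAT_RING3_CS:   index 5, RPL 3 (apps)  */
        decode(0x0033);  /* FLAT_RING3_DS:   index 6, RPL 3         */
        return 0;
    }
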
     3.1 --- a/xen/arch/i386/entry.S	Thu Apr 10 11:06:11 2003 +0000
     3.2 +++ b/xen/arch/i386/entry.S	Mon Apr 14 16:31:59 2003 +0000
     3.3 @@ -36,10 +36,8 @@
     3.4   * in that it means we don't have to do messy GDT/LDT lookups to find
     3.5   * out which the privilege-level of the return code-selector. That code
     3.6   * would just be a hassle to write, and would need to account for running
     3.7 - * off the end of the GDT/LDT, for example. The event callback has quite
     3.8 - * a constrained callback method: the guest OS provides a linear address
     3.9 - * which we call back to using the hard-coded __GUEST_CS descriptor (which
    3.10 - * is a ring 1 descriptor). For IDT callbacks, we check that the provided
    3.11 + * off the end of the GDT/LDT, for example. For all callbacks we check
    3.12 + * that the provided
    3.13   * return CS is not == __HYPERVISOR_{CS,DS}. Apart from that we're safe as
    3.14   * don't allow a guest OS to install ring-0 privileges into the GDT/LDT.
    3.15   * It's up to the guest OS to ensure all returns via the IDT are to ring 1.
    3.16 @@ -105,12 +103,14 @@ STATE           =  4
    3.17  HYP_EVENTS      =  8
    3.18  DOMAIN          = 12        
    3.19  SHARED_INFO     = 16
    3.20 +EVENT_SEL       = 20
    3.21 +EVENT_ADDR      = 24
    3.22 +FAILSAFE_SEL    = 28
    3.23 +FAILSAFE_ADDR   = 32
    3.24  
    3.25  /* Offsets in shared_info_t */
    3.26  EVENTS          =  0
    3.27  EVENTS_ENABLE   =  4
    3.28 -EVENT_ADDR      =  8
    3.29 -FAILSAFE_ADDR   = 12
    3.30  
    3.31  /* Offsets in guest_trap_bounce */
    3.32  GTB_ERROR_CODE  =  0
    3.33 @@ -290,14 +290,14 @@ test_all_events:
    3.34          /* Prevent unnecessary reentry of event callback (stack overflow!) */
    3.35          xorl %ecx,%ecx
    3.36          movl %ecx,EVENTS_ENABLE(%eax)      
    3.37 -/* %eax == shared_info, %ebx == task_struct */
    3.38 -process_guest_events:   
    3.39 +/*process_guest_events:*/
    3.40          mov  PROCESSOR(%ebx),%edx
    3.41          shl  $4,%edx                    # sizeof(guest_trap_bounce) == 16
    3.42          lea  guest_trap_bounce(%edx),%edx
    3.43 -        movl EVENT_ADDR(%eax),%eax
    3.44 +        movl EVENT_ADDR(%ebx),%eax
    3.45          movl %eax,GTB_EIP(%edx)
    3.46 -        movw $__GUEST_CS,GTB_CS(%edx)
    3.47 +        movl EVENT_SEL(%ebx),%eax
    3.48 +        movw %ax,GTB_CS(%edx)
    3.49          call create_bounce_frame
    3.50          jmp  restore_all
    3.51  
    3.52 @@ -319,10 +319,10 @@ failsafe_callback:
    3.53          mov  PROCESSOR(%ebx),%eax
    3.54          shl  $4,%eax
    3.55          lea  guest_trap_bounce(%eax),%edx
    3.56 -        movl SHARED_INFO(%ebx),%eax
    3.57 -        movl FAILSAFE_ADDR(%eax),%eax
    3.58 +        movl FAILSAFE_ADDR(%ebx),%eax
    3.59          movl %eax,GTB_EIP(%edx)
    3.60 -        movw $__GUEST_CS,GTB_CS(%edx)
    3.61 +        movl FAILSAFE_SEL(%ebx),%eax
    3.62 +        movw %ax,GTB_CS(%edx)
    3.63          call create_bounce_frame
    3.64          subl $8,%esi                 # add DS/ES to failsafe stack frame
    3.65          movl DS(%esp),%eax
    3.66 @@ -590,7 +590,7 @@ ENTRY(hypervisor_call_table)
    3.67          .long SYMBOL_NAME(do_console_write)
    3.68          .long SYMBOL_NAME(do_set_gdt)
    3.69          .long SYMBOL_NAME(do_stack_switch)
    3.70 -        .long SYMBOL_NAME(do_ldt_switch)
    3.71 +        .long SYMBOL_NAME(do_set_callbacks)
    3.72          .long SYMBOL_NAME(do_net_update)
    3.73          .long SYMBOL_NAME(do_fpu_taskswitch)
    3.74          .long SYMBOL_NAME(do_yield)
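
For reference, this is the shape of the frame that create_bounce_frame leaves on the guest's ring-1 stack for the two callbacks, following the description added to sched.h later in this patch. A sketch for illustration only; the struct and member names are invented here.

    /* Ordinary event callback: exactly an x86 hardware-interrupt frame,
     * lowest address (the guest's stack pointer) first. */
    struct event_frame {
        unsigned long eip;      /* return EIP                            */
        unsigned long cs;       /* return CS (guest-registered selector) */
        unsigned long eflags;
        /* unsigned long oldesp, oldss;  -- the bracketed fields in the
         * sched.h comment, present when an outer ring was interrupted */
    };

    /* Failsafe callback: the same frame with saved %ds and %es prepended. */
    struct failsafe_frame {
        unsigned long ds, es;
        unsigned long eip;
        unsigned long cs;
        unsigned long eflags;
        /* unsigned long oldesp, oldss; */
    };
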
     4.1 --- a/xen/arch/i386/ioremap.c	Thu Apr 10 11:06:11 2003 +0000
     4.2 +++ b/xen/arch/i386/ioremap.c	Mon Apr 14 16:31:59 2003 +0000
     4.3 @@ -15,9 +15,6 @@
     4.4  
     4.5  static unsigned long remap_base = 0;
     4.6  
     4.7 -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
     4.8 -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY)
     4.9 -
    4.10  #define PAGE_ALIGN(addr)    (((addr)+PAGE_SIZE-1)&PAGE_MASK)
    4.11  
    4.12  static void new_l2e(l2_pgentry_t *pl2e)
    4.13 @@ -25,7 +22,7 @@ static void new_l2e(l2_pgentry_t *pl2e)
    4.14      l1_pgentry_t *pl1e = (l1_pgentry_t *)get_free_page(GFP_KERNEL);
    4.15      if ( !pl1e ) BUG();
    4.16      clear_page(pl1e);
    4.17 -    *pl2e = mk_l2_pgentry(__pa(pl1e)|L2_PROT);
    4.18 +    *pl2e = mk_l2_pgentry(__pa(pl1e)|__PAGE_HYPERVISOR);
    4.19  }
    4.20  
    4.21  
    4.22 @@ -89,7 +86,7 @@ void * __ioremap(unsigned long phys_addr
    4.23      for ( ; ; ) 
    4.24      {
    4.25          if ( !l1_pgentry_empty(*pl1e) ) BUG();
    4.26 -        *pl1e++ = mk_l1_pgentry((phys_addr+cur)|L1_PROT|flags);
    4.27 +        *pl1e++ = mk_l1_pgentry((phys_addr+cur)|PAGE_HYPERVISOR|flags);
    4.28          cur += PAGE_SIZE;
    4.29          if ( cur == size ) break;
    4.30          if ( !((unsigned long)pl1e & (PAGE_SIZE-1)) )
     5.1 --- a/xen/arch/i386/mm.c	Thu Apr 10 11:06:11 2003 +0000
     5.2 +++ b/xen/arch/i386/mm.c	Mon Apr 14 16:31:59 2003 +0000
     5.3 @@ -5,6 +5,7 @@
     5.4  #include <asm/page.h>
     5.5  #include <asm/pgalloc.h>
     5.6  #include <asm/fixmap.h>
     5.7 +#include <asm/domain_page.h>
     5.8  
     5.9  static inline void set_pte_phys (unsigned long vaddr,
    5.10                                   l1_pgentry_t entry)
    5.11 @@ -114,31 +115,193 @@ long do_stack_switch(unsigned long ss, u
    5.12  }
    5.13  
    5.14  
    5.15 -long do_ldt_switch(unsigned long ldts)
    5.16 +/* Returns TRUE if given descriptor is valid for GDT or LDT. */
    5.17 +static int check_descriptor(unsigned long a, unsigned long b)
    5.18  {
    5.19 -    unsigned long *ptabent;
    5.20 +    unsigned long base, limit;
    5.21 +
    5.22 +    /* A not-present descriptor will always fault, so is safe. */
    5.23 +    if ( !(a & _SEGMENT_P) ) 
    5.24 +        goto good;
    5.25 +
    5.26 +    /*
    5.27 +     * We don't allow a DPL of zero. There is no legitimate reason for 
    5.28 +     * specifying DPL==0, and it gets rather dangerous if we also accept call 
    5.29 +     * gates (consider a call gate pointing at another guestos descriptor with 
    5.30 +     * DPL 0 -- this would get the OS ring-0 privileges).
    5.31 +     */
    5.32 +    if ( (a & _SEGMENT_DPL) == 0 )
    5.33 +        goto bad;
    5.34  
    5.35 -    ptabent = (unsigned long *)GET_GDT_ADDRESS(current);
    5.36 -    /* Out of range for GDT table? */
    5.37 -    if ( (ldts * 8) > GET_GDT_ENTRIES(current) ) return -1;
    5.38 -    ptabent += ldts * 2; /* 8 bytes per desc == 2 * unsigned long */
    5.39 -    /* Not an LDT entry? (S=0b, type =0010b) */
    5.40 -    if ( ldts && ((*ptabent & 0x00001f00) != 0x00000200) ) return -1;
    5.41 -    current->mm.ldt_sel = ldts;
    5.42 -    __load_LDT(ldts);
    5.43 +    if ( !(a & _SEGMENT_S) )
    5.44 +    {
    5.45 +        /*
    5.46 +         * System segment:
    5.47 +         *  1. Don't allow interrupt or trap gates as they belong in the IDT.
    5.48 +         *  2. Don't allow TSS descriptors or task gates as we don't
    5.49 +         *     virtualise x86 tasks.
    5.50 +         *  3. Don't allow LDT descriptors because they're unnecessary and
    5.51 +         *     I'm uneasy about allowing an LDT page to contain LDT
    5.52 +         *     descriptors. In any case, Xen automatically creates the
    5.53 +         *     required descriptor when reloading the LDT register.
    5.54 +         *  4. We allow call gates but they must not jump to a private segment.
    5.55 +         */
    5.56 +
    5.57 +        /* Disallow everything but call gates. */
    5.58 +        if ( (a & _SEGMENT_TYPE) != 0xc00 )
    5.59 +            goto bad;
    5.60 +
    5.61 +        /* Can't allow far jump to a Xen-private segment. */
    5.62 +        if ( !VALID_CODESEL(b>>16) )
    5.63 +            goto bad;
    5.64  
    5.65 +        /* Reserved bits must be zero. */
    5.66 +        if ( (a & 0xe0) != 0 )
    5.67 +            goto bad;
    5.68 +        
    5.69 +        /* No base/limit check is needed for a call gate. */
    5.70 +        goto good;
    5.71 +    }
    5.72 +    
    5.73 +    /* Check that base/limit do not overlap Xen-private space. */
    5.74 +    base  = (a&(0xff<<24)) | ((a&0xff)<<16) | (b>>16);
    5.75 +    limit = (a&0xf0000) | (b&0xffff);
    5.76 +    limit++; /* We add one because limit is inclusive. */
    5.77 +    if ( (a & _SEGMENT_G) )
    5.78 +        limit <<= 12;
    5.79 +    if ( ((base + limit) <= base) || 
    5.80 +         ((base + limit) >= PAGE_OFFSET) )
    5.81 +        goto bad;
    5.82 +
    5.83 + good:
    5.84 +    return 1;
    5.85 + bad:
    5.86      return 0;
    5.87  }
    5.88  
    5.89  
    5.90 -long do_set_gdt(unsigned long *frame_list, int entries)
    5.91 +long do_set_gdt(unsigned long *frame_list, unsigned int entries)
    5.92  {
    5.93 -    return -ENOSYS;
    5.94 +    /* NB. There are 512 8-byte entries per GDT page. */
    5.95 +    unsigned int i, nr_pages = (entries + 511) / 512;
    5.96 +    unsigned long frames[16], pfn, *gdt_page, flags;
    5.97 +    long ret = -EINVAL;
    5.98 +    struct pfn_info *page;
    5.99 +
   5.100 +    if ( (entries < FIRST_DOMAIN_GDT_ENTRY) || (entries > 8192) ) 
   5.101 +        return -EINVAL;
   5.102 +
   5.103 +    if ( copy_from_user(frames, frame_list, nr_pages * sizeof(unsigned long)) )
   5.104 +        return -EFAULT;
   5.105 +
   5.106 +    spin_lock_irqsave(&current->page_lock, flags);
   5.107 +
   5.108 +    /* Check the new GDT. */
   5.109 +    for ( i = 0; i < nr_pages; i++ )
   5.110 +    {
   5.111 +        if ( frames[i] >= max_page ) 
   5.112 +            goto out;
   5.113 +        
   5.114 +        page = frame_table + frames[i];
   5.115 +        if ( (page->flags & PG_domain_mask) != current->domain )
   5.116 +            goto out;
   5.117 +
   5.118 +        if ( (page->flags & PG_type_mask) != PGT_gdt_page )
   5.119 +        {
   5.120 +            if ( page->type_count != 0 )
   5.121 +                goto out;
   5.122 +
   5.123 +            /* Check all potential GDT entries in the page. */
   5.124 +            gdt_page = map_domain_mem(frames[0] << PAGE_SHIFT);
   5.125 +            for ( i = 0; i < 512; i++ )
   5.126 +                if ( !check_descriptor(gdt_page[i*2], gdt_page[i*2]+1) )
   5.127 +                    goto out;
   5.128 +            unmap_domain_mem(gdt_page);
   5.129 +        }
   5.130 +    }
   5.131 +
   5.132 +    /* Tear down the old GDT. */
   5.133 +    for ( i = 0; i < 16; i++ )
   5.134 +    {
   5.135 +        pfn = l1_pgentry_to_pagenr(current->mm.perdomain_pt[i]);
   5.136 +        current->mm.perdomain_pt[i] = mk_l1_pgentry(0);
   5.137 +        if ( pfn == 0 ) continue;
   5.138 +        page = frame_table + pfn;
   5.139 +        put_page_type(page);
   5.140 +        put_page_tot(page);
   5.141 +    }
   5.142 +
   5.143 +    /* Install the new GDT. */
   5.144 +    for ( i = 0; i < nr_pages; i++ )
   5.145 +    {
   5.146 +        current->mm.perdomain_pt[i] =
   5.147 +            mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
   5.148 +        
   5.149 +        page = frame_table + frames[i];
   5.150 +        page->flags &= ~PG_type_mask;
   5.151 +        page->flags |= PGT_gdt_page;
   5.152 +        get_page_type(page);
   5.153 +        get_page_tot(page);
   5.154 +    }
   5.155 +
   5.156 +    flush_tlb();
   5.157 +
   5.158 +    /* Copy over first entries of the new GDT. */
   5.159 +    memcpy((void *)PERDOMAIN_VIRT_START, gdt_table, FIRST_DOMAIN_GDT_ENTRY*8);
   5.160 +    
   5.161 +    SET_GDT_ADDRESS(current, PERDOMAIN_VIRT_START);
   5.162 +    SET_GDT_ENTRIES(current, (entries*8)-1);
   5.163 +    __asm__ __volatile__ ("lgdt %0" : "=m" (*current->mm.gdt));
   5.164 +
   5.165 +    ret = 0; /* success */
   5.166 +
   5.167 + out:
   5.168 +    spin_unlock_irqrestore(&current->page_lock, flags);
   5.169 +    return ret;
   5.170  }
   5.171  
   5.172  
   5.173  long do_update_descriptor(
   5.174      unsigned long pa, unsigned long word1, unsigned long word2)
   5.175  {
   5.176 -    return -ENOSYS;
   5.177 +    unsigned long *gdt_pent, flags, pfn = pa >> PAGE_SHIFT;
   5.178 +    struct pfn_info *page;
   5.179 +    long ret = -EINVAL;
   5.180 +
   5.181 +    if ( (pa & 7) || (pfn >= max_page) || !check_descriptor(word1, word2) )
   5.182 +        return -EINVAL;
   5.183 +
   5.184 +    spin_lock_irqsave(&current->page_lock, flags);
   5.185 +
   5.186 +    page = frame_table + pfn;
   5.187 +    if ( (page->flags & PG_domain_mask) != current->domain )
   5.188 +        goto out;
   5.189 +
   5.190 +    /* Check if the given frame is in use in an unsafe context. */
   5.191 +    switch ( (page->flags & PG_type_mask) )
   5.192 +    {
   5.193 +    case PGT_gdt_page:
   5.194 +        /* Disallow updates of Xen-private descriptors in the current GDT. */
   5.195 +        if ( (l1_pgentry_to_pagenr(current->mm.perdomain_pt[0]) == pfn) &&
   5.196 +             (((pa&(PAGE_SIZE-1))>>3) < FIRST_DOMAIN_GDT_ENTRY) )
   5.197 +            goto out;
   5.198 +    case PGT_ldt_page:
   5.199 +    case PGT_writeable_page:
   5.200 +        break;
   5.201 +    default:
   5.202 +        if ( page->type_count != 0 )
   5.203 +            goto out;
   5.204 +    }
   5.205 +
   5.206 +    /* All is good so make the update. */
   5.207 +    gdt_pent = map_domain_mem(pa);
   5.208 +    gdt_pent[0] = word1;
   5.209 +    gdt_pent[1] = word2;
   5.210 +    unmap_domain_mem(gdt_pent);
   5.211 +
   5.212 +    ret = 0; /* success */
   5.213 +
   5.214 + out:
   5.215 +    spin_unlock_irqrestore(&current->page_lock, flags);
   5.216 +    return ret;
   5.217  }
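
check_descriptor() accepts a descriptor only if its base/limit range cannot reach Xen's private space above PAGE_OFFSET. The base and limit are scattered across the two descriptor words in the usual x86 way; the standalone sketch below (illustrative only, not hypervisor code) decodes them with that standard layout and checks out against the flat ring-1 code descriptor 0x00cfba000000c3ff from gdt_table:

    #include <stdio.h>

    /* lo = descriptor bytes 0-3, hi = bytes 4-7, as they sit in the table. */
    static void decode_descriptor(unsigned int lo, unsigned int hi)
    {
        unsigned int base, limit;

        base  = (hi & 0xff000000) | ((hi & 0xff) << 16) | (lo >> 16);
        limit = (hi & 0x000f0000) | (lo & 0xffff);

        if (hi & (1u << 23))           /* G bit: limit counted in 4kB units */
            limit = (limit << 12) | 0xfff;

        printf("base=%#010x limit=%#010x dpl=%u\n",
               base, limit, (hi >> 13) & 3);
    }

    int main(void)
    {
        /* 0x00cfba000000c3ff -> base 0, limit 0xfc3fffff (access up to
         * 0xFC400000, matching the boot.S comment), DPL 1. */
        decode_descriptor(0x0000c3ff, 0x00cfba00);
        return 0;
    }
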
     6.1 --- a/xen/arch/i386/process.c	Thu Apr 10 11:06:11 2003 +0000
     6.2 +++ b/xen/arch/i386/process.c	Mon Apr 14 16:31:59 2003 +0000
     6.3 @@ -312,15 +312,15 @@ void new_thread(struct task_struct *p,
     6.4  
     6.5      /*
     6.6       * Initial register values:
     6.7 -     *  DS,ES,FS,GS = __GUEST_DS
     6.8 -     *       CS:EIP = __GUEST_CS:start_pc
     6.9 -     *       SS:ESP = __GUEST_DS:start_stack
    6.10 +     *  DS,ES,FS,GS = FLAT_RING1_DS
    6.11 +     *       CS:EIP = FLAT_RING1_CS:start_pc
    6.12 +     *       SS:ESP = FLAT_RING1_DS:start_stack
    6.13       *          ESI = start_info
    6.14       *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
    6.15       */
    6.16 -    p->thread.fs = p->thread.gs = __GUEST_DS;
    6.17 -    regs->xds = regs->xes = regs->xss = __GUEST_DS;
    6.18 -    regs->xcs = __GUEST_CS;
    6.19 +    p->thread.fs = p->thread.gs = FLAT_RING1_DS;
    6.20 +    regs->xds = regs->xes = regs->xss = FLAT_RING1_DS;
    6.21 +    regs->xcs = FLAT_RING1_CS;
    6.22      regs->eip = start_pc;
    6.23      regs->esp = start_stack;
    6.24      regs->esi = start_info;
    6.25 @@ -395,7 +395,7 @@ void __switch_to(struct task_struct *pre
    6.26  
    6.27      /* Switch GDT and LDT. */
    6.28      __asm__ __volatile__ ("lgdt %0" : "=m" (*next_p->mm.gdt));
    6.29 -    __load_LDT(next_p->mm.ldt_sel);
    6.30 +//    __load_LDT(0);
    6.31  
    6.32      /*
    6.33       * Restore %fs and %gs.
     7.1 --- a/xen/arch/i386/traps.c	Thu Apr 10 11:06:11 2003 +0000
     7.2 +++ b/xen/arch/i386/traps.c	Mon Apr 14 16:31:59 2003 +0000
     7.3 @@ -325,6 +325,7 @@ asmlinkage void do_general_protection(st
     7.4      return;
     7.5  
     7.6   gp_in_kernel:
     7.7 +
     7.8      if ( (fixup = search_exception_table(regs->eip)) != 0 )
     7.9      {
    7.10          regs->eip = fixup;
    7.11 @@ -568,23 +569,38 @@ long do_set_trap_table(trap_info_t *trap
    7.12      trap_info_t cur;
    7.13      trap_info_t *dst = current->thread.traps;
    7.14  
    7.15 -    /*
    7.16 -     * I'm removing the next line, since it seems more intuitive to use this 
    7.17 -     * as an interface to incrementally update a domain's trap table. Clearing 
    7.18 -     * out old entries automatically is rather antisocial!
    7.19 -     */
    7.20 -    /*memset(dst, 0, sizeof(*dst) * 256);*/
    7.21 -
    7.22      for ( ; ; )
    7.23      {
    7.24          if ( copy_from_user(&cur, traps, sizeof(cur)) ) return -EFAULT;
    7.25 -        if ( (cur.cs & 3) == 0 ) return -EPERM;
    7.26 +
    7.27          if ( cur.address == 0 ) break;
    7.28 +
    7.29 +        if ( !VALID_CODESEL(cur.cs) ) return -EPERM;
    7.30 +
    7.31          memcpy(dst+cur.vector, &cur, sizeof(cur));
    7.32          traps++;
    7.33      }
    7.34  
    7.35 -    return(0);
    7.36 +    return 0;
    7.37 +}
    7.38 +
    7.39 +
    7.40 +long do_set_callbacks(unsigned long event_selector,
    7.41 +                      unsigned long event_address,
    7.42 +                      unsigned long failsafe_selector,
    7.43 +                      unsigned long failsafe_address)
    7.44 +{
    7.45 +    struct task_struct *p = current;
    7.46 +
    7.47 +    if ( !VALID_CODESEL(event_selector) || !VALID_CODESEL(failsafe_selector) )
    7.48 +        return -EPERM;
    7.49 +
    7.50 +    p->event_selector    = event_selector;
    7.51 +    p->event_address     = event_address;
    7.52 +    p->failsafe_selector = failsafe_selector;
    7.53 +    p->failsafe_address  = failsafe_address;
    7.54 +
    7.55 +    return 0;
    7.56  }
    7.57  
    7.58  
     8.1 --- a/xen/common/domain.c	Thu Apr 10 11:06:11 2003 +0000
     8.2 +++ b/xen/common/domain.c	Mon Apr 14 16:31:59 2003 +0000
     8.3 @@ -16,8 +16,13 @@
     8.4  #include <asm/msr.h>
     8.5  #include <xeno/blkdev.h>
     8.6  
     8.7 -#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)
     8.8 -#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
     8.9 +/*
    8.10 + * NB. No ring-3 access in initial guestOS pagetables. Note that we allow
    8.11 + * ring-3 privileges in the page directories, so that the guestOS may later
    8.12 + * decide to share a 4MB region with applications.
    8.13 + */
    8.14 +#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
    8.15 +#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
    8.16  
    8.17  rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
    8.18  
    8.19 @@ -47,6 +52,9 @@ struct task_struct *do_newdomain(unsigne
    8.20      memset(p->shared_info, 0, PAGE_SIZE);
    8.21      SHARE_PFN_WITH_DOMAIN(virt_to_page(p->shared_info), dom_id);
    8.22  
    8.23 +    p->mm.perdomain_pt = (l1_pgentry_t *)get_free_page(GFP_KERNEL);
    8.24 +    memset(p->mm.perdomain_pt, 0, PAGE_SIZE);
    8.25 +
    8.26      init_blkdev_info(p);
    8.27  
    8.28      SET_GDT_ENTRIES(p, DEFAULT_GDT_ENTRIES);
    8.29 @@ -224,7 +232,8 @@ void release_task(struct task_struct *p)
    8.30      {
    8.31          destroy_net_vif(p);
    8.32      }
    8.33 -    if ( p->mm.perdomain_pt ) free_page((unsigned long)p->mm.perdomain_pt);
    8.34 +    
    8.35 +    free_page((unsigned long)p->mm.perdomain_pt);
    8.36  
    8.37      destroy_blkdev_info(p);
    8.38  
    8.39 @@ -268,7 +277,7 @@ int final_setup_guestos(struct task_stru
    8.40      net_ring_t *net_ring;
    8.41      net_vif_t *net_vif;
    8.42  
    8.43 -    /* entries 0xe0000000 onwards in page table must contain hypervisor
    8.44 +    /* High entries in page table must contain hypervisor
    8.45       * mem mappings - set them up.
    8.46       */
    8.47      phys_l2tab = meminfo->l2_pgt_addr;
    8.48 @@ -279,7 +288,7 @@ int final_setup_guestos(struct task_stru
    8.49          (ENTRIES_PER_L2_PAGETABLE - DOMAIN_ENTRIES_PER_L2_PAGETABLE) 
    8.50          * sizeof(l2_pgentry_t));
    8.51      l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] = 
    8.52 -        mk_l2_pgentry(__pa(p->mm.perdomain_pt) | PAGE_HYPERVISOR);
    8.53 +        mk_l2_pgentry(__pa(p->mm.perdomain_pt) | __PAGE_HYPERVISOR);
    8.54      p->mm.pagetable = mk_pagetable(phys_l2tab);
    8.55      unmap_domain_mem(l2tab);
    8.56  
     9.1 --- a/xen/include/asm-i386/desc.h	Thu Apr 10 11:06:11 2003 +0000
     9.2 +++ b/xen/include/asm-i386/desc.h	Mon Apr 14 16:31:59 2003 +0000
     9.3 @@ -2,7 +2,24 @@
     9.4  #define __ARCH_DESC_H
     9.5  
     9.6  #define __FIRST_TSS_ENTRY 8
     9.7 -#define __TSS(n) ((n) + __FIRST_TSS_ENTRY)
     9.8 +#define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY+1)
     9.9 +
    9.10 +#define __TSS(n) (((n)<<1) + __FIRST_TSS_ENTRY)
    9.11 +#define __LDT(n) (((n)<<1) + __FIRST_LDT_ENTRY)
    9.12 +
    9.13 +#define load_TR(n)  __asm__ __volatile__ ( "ltr  %%ax" : : "a" (__TSS(n)<<3) )
    9.14 +#define __load_LDT(n) __asm__ __volatile__ ( "lldt %%ax" : : "a" (n) )
    9.15 +
    9.16 +/* Guest OS must provide its own code selectors, or use the one we provide. */
    9.17 +#define VALID_CODESEL(_s) \
    9.18 +    ((((_s)>>2) >= FIRST_DOMAIN_GDT_ENTRY) || ((_s) == FLAT_RING1_CS))
    9.19 +
    9.20 +/* These are bitmasks for the first 32 bits of a descriptor table entry. */
    9.21 +#define _SEGMENT_TYPE    (15<< 8)
    9.22 +#define _SEGMENT_S       ( 1<<12) /* System descriptor (yes iff S==0) */
    9.23 +#define _SEGMENT_DPL     ( 3<<13) /* Descriptor Privilege Level */
    9.24 +#define _SEGMENT_P       ( 1<<15) /* Segment Present */
    9.25 +#define _SEGMENT_G       ( 1<<23) /* Granularity */
    9.26  
    9.27  #ifndef __ASSEMBLY__
    9.28  struct desc_struct {
    9.29 @@ -20,10 +37,6 @@ struct Xgt_desc_struct {
    9.30  #define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2))
    9.31  #define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2))
    9.32  
    9.33 -#define load_TR(n) __asm__ __volatile__("ltr %%ax"::"a" (__TSS(n)<<3))
    9.34 -
    9.35 -#define __load_LDT(n) __asm__ __volatile__("lldt %%ax"::"a" ((n)<<3))
    9.36 -
    9.37  extern void set_intr_gate(unsigned int irq, void * addr);
    9.38  extern void set_tss_desc(unsigned int n, void *addr);
    9.39  
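
With the new macros each CPU owns an interleaved TSS/LDT descriptor pair starting at GDT entry 8, which is why boot.S now programs a GDT limit of (2*NR_CPUS+8)*8-1. A small standalone sketch of the arithmetic:

    #include <stdio.h>

    #define __FIRST_TSS_ENTRY 8
    #define __FIRST_LDT_ENTRY (__FIRST_TSS_ENTRY + 1)
    #define __TSS(n) (((n) << 1) + __FIRST_TSS_ENTRY)
    #define __LDT(n) (((n) << 1) + __FIRST_LDT_ENTRY)

    int main(void)
    {
        int cpu;
        for (cpu = 0; cpu < 4; cpu++)
            printf("cpu%d: TSS entry %2d (selector %#05x), LDT entry %2d (selector %#05x)\n",
                   cpu, __TSS(cpu), __TSS(cpu) << 3, __LDT(cpu), __LDT(cpu) << 3);
        return 0;
    }
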
    10.1 --- a/xen/include/asm-i386/processor.h	Thu Apr 10 11:06:11 2003 +0000
    10.2 +++ b/xen/include/asm-i386/processor.h	Mon Apr 14 16:31:59 2003 +0000
    10.3 @@ -401,17 +401,6 @@ extern struct desc_struct *idt_tables[];
    10.4  	{~0, } /* ioperm */					\
    10.5  }
    10.6  
    10.7 -#define start_thread(regs, new_eip, new_esp) do {		\
    10.8 -	__asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0));	\
    10.9 -	set_fs(USER_DS);					\
   10.10 -	regs->xds = __USER_DS;					\
   10.11 -	regs->xes = __USER_DS;					\
   10.12 -	regs->xss = __USER_DS;					\
   10.13 -	regs->xcs = __USER_CS;					\
   10.14 -	regs->eip = new_eip;					\
   10.15 -	regs->esp = new_esp;					\
   10.16 -} while (0)
   10.17 -
   10.18  /* Forward declaration, a strange C thing */
   10.19  struct task_struct;
   10.20  struct mm_struct;
    11.1 --- a/xen/include/asm-i386/ptrace.h	Thu Apr 10 11:06:11 2003 +0000
    11.2 +++ b/xen/include/asm-i386/ptrace.h	Mon Apr 14 16:31:59 2003 +0000
    11.3 @@ -79,7 +79,6 @@ enum EFLAGS {
    11.4  
    11.5  #ifdef __KERNEL__
    11.6  #define user_mode(regs) ((3 & (regs)->xcs))
    11.7 -#define instruction_pointer(regs) ((regs)->eip)
    11.8  extern void show_regs(struct pt_regs *);
    11.9  #endif
   11.10  
    12.1 --- a/xen/include/hypervisor-ifs/hypervisor-if.h	Thu Apr 10 11:06:11 2003 +0000
    12.2 +++ b/xen/include/hypervisor-ifs/hypervisor-if.h	Mon Apr 14 16:31:59 2003 +0000
    12.3 @@ -10,13 +10,17 @@
    12.4  /*
    12.5   * SEGMENT DESCRIPTOR TABLES
    12.6   */
    12.7 -/* 8 entries, plus a TSS entry for each CPU (up to 32 CPUs). */
    12.8 +/* The first few GDT entries are reserved by Xen. */
    12.9  #define FIRST_DOMAIN_GDT_ENTRY	40
   12.10 -/* These are flat segments for domain bootstrap and fallback. */
   12.11 -#define FLAT_RING1_CS		0x11
   12.12 -#define FLAT_RING1_DS		0x19
   12.13 -#define FLAT_RING3_CS		0x23
   12.14 -#define FLAT_RING3_DS		0x2b
   12.15 +/*
   12.16 + * These flat segments are in the Xen-private section of every GDT. Since 
   12.17 + * these are also present in the initial GDT, many OSes will be able to avoid 
   12.18 + * installing their own GDT.
   12.19 + */
   12.20 +#define FLAT_RING1_CS		0x0019
   12.21 +#define FLAT_RING1_DS		0x0021
   12.22 +#define FLAT_RING3_CS		0x002b
   12.23 +#define FLAT_RING3_DS		0x0033
   12.24  
   12.25  
   12.26  /*
   12.27 @@ -29,7 +33,7 @@
   12.28  #define __HYPERVISOR_console_write	   2
   12.29  #define __HYPERVISOR_set_gdt		   3
   12.30  #define __HYPERVISOR_stack_switch          4
   12.31 -#define __HYPERVISOR_ldt_switch            5
   12.32 +#define __HYPERVISOR_set_callbacks         5
   12.33  #define __HYPERVISOR_net_update		   6
   12.34  #define __HYPERVISOR_fpu_taskswitch	   7
   12.35  #define __HYPERVISOR_yield		   8
   12.36 @@ -97,28 +101,32 @@
   12.37  /*
   12.38   * PAGE UPDATE COMMANDS AND FLAGS
   12.39   * 
   12.40 - * PGREQ_XXX: specified in least-significant bits of 'ptr' field.
   12.41 - * All requests specify relevent PTE or PT address in 'ptr'.
   12.42 + * PGREQ_XXX: specified in least 2 bits of 'ptr' field. These bits are masked
   12.43 + *  off to get the real 'ptr' value.
    12.44 + * All requests specify the relevant machine address in 'ptr'.
   12.45   * Normal requests specify update value in 'value'.
   12.46 - * Extended requests specify command in least 8 bits of 'value'.
   12.47 + * Extended requests specify command in least 8 bits of 'value'. These bits
    12.48 + *  are masked off to get the real 'val' value, except for PGEXT_SET_LDT,
    12.49 + *  which shifts the low bits out instead.
   12.50   */
   12.51  /* A normal page-table update request. */
   12.52 -#define PGREQ_NORMAL		0
   12.53 +#define PGREQ_NORMAL		0 /* does a checked form of '*ptr = val'   */
   12.54  /* Update an entry in the machine->physical mapping table. */
   12.55 -#define PGREQ_MPT_UPDATE	1
   12.56 +#define PGREQ_MPT_UPDATE	1 /* ptr = frame to modify table entry for */
   12.57  /* An extended command. */
   12.58 -#define PGREQ_EXTENDED_COMMAND	2
   12.59 +#define PGREQ_EXTENDED_COMMAND	2 /* least 8 bits of val demux further     */
   12.60  /* DOM0 can make entirely unchecked updates which do not affect refcnts. */
   12.61 -#define PGREQ_UNCHECKED_UPDATE	3
   12.62 -/* Announce a new top-level page table. */
   12.63 -#define PGEXT_PIN_L1_TABLE	0
   12.64 -#define PGEXT_PIN_L2_TABLE	1
   12.65 -#define PGEXT_PIN_L3_TABLE	2
   12.66 -#define PGEXT_PIN_L4_TABLE	3
   12.67 -#define PGEXT_UNPIN_TABLE	4
   12.68 -#define PGEXT_NEW_BASEPTR	5
   12.69 -#define PGEXT_TLB_FLUSH		6
   12.70 -#define PGEXT_INVLPG		7
   12.71 +#define PGREQ_UNCHECKED_UPDATE	3 /* does an unchecked '*ptr = val'        */
   12.72 +/* Extended commands: */
   12.73 +#define PGEXT_PIN_L1_TABLE	0 /* ptr = frame to pin                    */
   12.74 +#define PGEXT_PIN_L2_TABLE	1 /* ptr = frame to pin                    */
   12.75 +#define PGEXT_PIN_L3_TABLE	2 /* ptr = frame to pin                    */
   12.76 +#define PGEXT_PIN_L4_TABLE	3 /* ptr = frame to pin                    */
   12.77 +#define PGEXT_UNPIN_TABLE	4 /* ptr = frame to unpin                  */
   12.78 +#define PGEXT_NEW_BASEPTR	5 /* ptr = new pagetable base to install   */
   12.79 +#define PGEXT_TLB_FLUSH		6 /* ptr = NULL                            */
   12.80 +#define PGEXT_INVLPG		7 /* ptr = NULL ; val = page to invalidate */
   12.81 +#define PGEXT_SET_LDT           8 /* ptr = linear address; val = # entries */
   12.82  #define PGEXT_CMD_MASK	      255
   12.83  #define PGEXT_CMD_SHIFT		8
   12.84  
   12.85 @@ -173,27 +181,6 @@ typedef struct shared_info_st {
   12.86      unsigned long events_enable;
   12.87  
   12.88      /*
   12.89 -     * Address for callbacks hypervisor -> guest OS.
   12.90 -     * Stack frame looks like that of an interrupt.
   12.91 -     * Code segment is the default flat selector.
   12.92 -     * This handler will only be called when events_enable is non-zero.
   12.93 -     */
   12.94 -    unsigned long event_address;
   12.95 -
   12.96 -    /*
   12.97 -     * Hypervisor uses this callback when it takes a fault on behalf of
   12.98 -     * an application. This can happen when returning from interrupts for
   12.99 -     * example: various faults can occur when reloading the segment
  12.100 -     * registers, and executing 'iret'.
  12.101 -     * This callback is provided with an extended stack frame, augmented
  12.102 -     * with saved values for segment registers %ds and %es:
  12.103 -     *	%ds, %es, %eip, %cs, %eflags [, %oldesp, %oldss]
  12.104 -     * Code segment is the default flat selector.
  12.105 -     * FAULTS WHEN CALLING THIS HANDLER WILL TERMINATE THE DOMAIN!!!
  12.106 -     */
  12.107 -    unsigned long failsafe_address;
  12.108 -
  12.109 -    /*
  12.110       * Time: The following abstractions are exposed: System Time, Clock Time,
  12.111       * Domain Virtual Time. Domains can access Cycle counter time directly.
  12.112       * XXX RN: Need something to pass NTP scaling to GuestOS.
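
As a worked example of the encoding rules above, here is how a guest might pack two requests. This is a standalone sketch: the struct and helper names are invented here rather than taken from the interface, and placing the LDT entry count above the command byte is an assumption based on the note that PGEXT_SET_LDT shifts the command bits out rather than masking them.

    #include <stdio.h>

    #define PGREQ_NORMAL            0
    #define PGREQ_EXTENDED_COMMAND  2
    #define PGEXT_SET_LDT           8
    #define PGEXT_CMD_SHIFT         8

    struct upd_req { unsigned long ptr, val; };

    /* Checked '*pte = newval' on the PTE at machine address 'ma'; the
     * request type rides in the two low (alignment) bits of 'ptr'. */
    static struct upd_req normal_update(unsigned long ma, unsigned long newval)
    {
        struct upd_req r = { (ma & ~3UL) | PGREQ_NORMAL, newval };
        return r;
    }

    /* Extended command: install an LDT of 'ents' entries at linear address
     * 'va' (the entry-count placement is this sketch's assumption). */
    static struct upd_req set_ldt(unsigned long va, unsigned int ents)
    {
        struct upd_req r = { (va & ~3UL) | PGREQ_EXTENDED_COMMAND,
                             ((unsigned long)ents << PGEXT_CMD_SHIFT) | PGEXT_SET_LDT };
        return r;
    }

    int main(void)
    {
        struct upd_req r = set_ldt(0xc0100000UL, 32);
        printf("ptr=%#lx val=%#lx\n", r.ptr, r.val);
        return 0;
    }
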
    13.1 --- a/xen/include/xeno/config.h	Thu Apr 10 11:06:11 2003 +0000
    13.2 +++ b/xen/include/xeno/config.h	Mon Apr 14 16:31:59 2003 +0000
    13.3 @@ -120,10 +120,8 @@
    13.4  
    13.5  #define barrier() __asm__ __volatile__("": : :"memory")
    13.6  
    13.7 -#define __HYPERVISOR_CS 0x30
    13.8 -#define __HYPERVISOR_DS 0x38
    13.9 -#define __GUEST_CS      0x11
   13.10 -#define __GUEST_DS      0x19
   13.11 +#define __HYPERVISOR_CS 0x0008
   13.12 +#define __HYPERVISOR_DS 0x0010
   13.13  
   13.14  #define NR_syscalls 256
   13.15  
    14.1 --- a/xen/include/xeno/sched.h	Thu Apr 10 11:06:11 2003 +0000
    14.2 +++ b/xen/include/xeno/sched.h	Mon Apr 14 16:31:59 2003 +0000
    14.3 @@ -27,10 +27,10 @@ struct mm_struct {
    14.4       * Every domain has a L1 pagetable of its own. Per-domain mappings
    14.5       * are put in this table (eg. the current GDT is mapped here).
    14.6       */
    14.7 -    l2_pgentry_t *perdomain_pt;
    14.8 +    l1_pgentry_t *perdomain_pt;
    14.9      pagetable_t  pagetable;
   14.10 -    /* Current LDT selector. */
   14.11 -    unsigned int ldt_sel;
   14.12 +    /* Current LDT descriptor. */
   14.13 +    unsigned long ldt[2];
   14.14      /* Next entry is passed to LGDT on domain switch. */
   14.15      char gdt[6];
   14.16  };
   14.17 @@ -65,18 +65,30 @@ struct task_struct {
   14.18  
   14.19      /*
   14.20       * DO NOT CHANGE THE ORDER OF THE FOLLOWING.
   14.21 -     * There offsets are hardcoded in entry.S
   14.22 +     * Their offsets are hardcoded in entry.S
   14.23       */
   14.24  
   14.25      int processor;               /* 00: current processor */
   14.26      int state;                   /* 04: current run state */
   14.27 -    int hyp_events;              /* 08: pending events */
   14.28 +    int hyp_events;              /* 08: pending intra-Xen events */
   14.29      unsigned int domain;         /* 12: domain id */
   14.30  
   14.31      /* An unsafe pointer into a shared data area. */
   14.32      shared_info_t *shared_info;  /* 16: shared data area */
   14.33  
   14.34      /*
   14.35 +     * Return vectors pushed to us by guest OS.
   14.36 +     * The stack frame for events is exactly that of an x86 hardware interrupt.
   14.37 +     * The stack frame for a failsafe callback is augmented with saved values
   14.38 +     * for segment registers %ds and %es:
   14.39 +     * 	%ds, %es, %eip, %cs, %eflags [, %oldesp, %oldss]
   14.40 +     */
   14.41 +    unsigned long event_selector;    /* 20: entry CS  */
   14.42 +    unsigned long event_address;     /* 24: entry EIP */
   14.43 +    unsigned long failsafe_selector; /* 28: entry CS  */
   14.44 +    unsigned long failsafe_address;  /* 32: entry EIP */
   14.45 +
   14.46 +    /*
   14.47       * From here on things can be added and shuffled without special attention
   14.48       */
   14.49      
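
entry.S addresses these leading task_struct fields by hard-coded byte offsets (PROCESSOR=0 through FAILSAFE_ADDR=32), so the layout above is load-bearing. A standalone sketch, assuming the 32-bit i386 ABI, that fails to compile if the stand-in layout drifts from those offsets:

    #include <stddef.h>

    /* Trimmed stand-in for the start of task_struct; shared_info is a
     * pointer in the real struct, 4 bytes on i386. */
    struct task_struct_head {
        int           processor;          /* PROCESSOR     =  0 */
        int           state;              /* STATE         =  4 */
        int           hyp_events;         /* HYP_EVENTS    =  8 */
        unsigned int  domain;             /* DOMAIN        = 12 */
        void         *shared_info;        /* SHARED_INFO   = 16 */
        unsigned long event_selector;     /* EVENT_SEL     = 20 */
        unsigned long event_address;      /* EVENT_ADDR    = 24 */
        unsigned long failsafe_selector;  /* FAILSAFE_SEL  = 28 */
        unsigned long failsafe_address;   /* FAILSAFE_ADDR = 32 */
    };

    /* Negative array size = compile error if an offset is wrong. */
    typedef char chk_event_sel[offsetof(struct task_struct_head, event_selector)   == 20 ? 1 : -1];
    typedef char chk_failsafe [offsetof(struct task_struct_head, failsafe_address) == 32 ? 1 : -1];
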
    15.1 --- a/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/setup.c	Thu Apr 10 11:06:11 2003 +0000
    15.2 +++ b/xenolinux-2.4.21-pre4-sparse/arch/xeno/kernel/setup.c	Mon Apr 14 16:31:59 2003 +0000
    15.3 @@ -153,10 +153,9 @@ void __init setup_arch(char **cmdline_p)
    15.4      extern unsigned long cpu0_pte_quicklist[];
    15.5      extern unsigned long cpu0_pgd_quicklist[];
    15.6  
    15.7 -    HYPERVISOR_shared_info->event_address    = 
    15.8 -        (unsigned long)hypervisor_callback;
    15.9 -    HYPERVISOR_shared_info->failsafe_address =
   15.10 -        (unsigned long)failsafe_callback;
   15.11 +    HYPERVISOR_set_callbacks(
   15.12 +        __KERNEL_CS, (unsigned long)hypervisor_callback,
   15.13 +        __KERNEL_CS, (unsigned long)failsafe_callback);
   15.14  
   15.15      boot_cpu_data.pgd_quick = cpu0_pgd_quicklist;
   15.16      boot_cpu_data.pte_quick = cpu0_pte_quicklist;
    16.1 --- a/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/hypervisor.h	Thu Apr 10 11:06:11 2003 +0000
    16.2 +++ b/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/hypervisor.h	Mon Apr 14 16:31:59 2003 +0000
    16.3 @@ -195,13 +195,16 @@ static inline int HYPERVISOR_stack_switc
    16.4      return ret;
    16.5  }
    16.6  
    16.7 -static inline int HYPERVISOR_ldt_switch(unsigned long ldts)
    16.8 +static inline int HYPERVISOR_set_callbacks(
    16.9 +    unsigned long event_selector, unsigned long event_address,
   16.10 +    unsigned long failsafe_selector, unsigned long failsafe_address)
   16.11  {
   16.12      int ret;
   16.13      __asm__ __volatile__ (
   16.14          TRAP_INSTR
   16.15 -        : "=a" (ret) : "0" (__HYPERVISOR_ldt_switch),
   16.16 -        "b" (ldts) : "memory" );
   16.17 +        : "=a" (ret) : "0" (__HYPERVISOR_set_callbacks),
   16.18 +        "b" (event_selector), "c" (event_address), 
   16.19 +        "d" (failsafe_selector), "S" (failsafe_address) : "memory" );
   16.20  
   16.21      return ret;
   16.22  }
    17.1 --- a/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/page.h	Thu Apr 10 11:06:11 2003 +0000
    17.2 +++ b/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/page.h	Mon Apr 14 16:31:59 2003 +0000
    17.3 @@ -116,12 +116,6 @@ static inline pmd_t __pmd(unsigned long 
    17.4  
    17.5  #define __PAGE_OFFSET		(0xC0000000)
    17.6  
    17.7 -/*
    17.8 - * This much address space is reserved for vmalloc() and iomap()
    17.9 - * as well as fixmap mappings.
   17.10 - */
   17.11 -#define __VMALLOC_RESERVE	(128 << 20)
   17.12 -
   17.13  #ifndef __ASSEMBLY__
   17.14  
   17.15  /*
   17.16 @@ -162,9 +156,6 @@ static __inline__ int get_order(unsigned
   17.17  #endif /* __ASSEMBLY__ */
   17.18  
   17.19  #define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)
   17.20 -#define VMALLOC_RESERVE		((unsigned long)__VMALLOC_RESERVE)
   17.21 -#define __MAXMEM		(-__PAGE_OFFSET-__VMALLOC_RESERVE)
   17.22 -#define MAXMEM			((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
   17.23  #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
   17.24  #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
   17.25  #define virt_to_page(kaddr)	(mem_map + (__pa(kaddr) >> PAGE_SHIFT))
    18.1 --- a/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/segment.h	Thu Apr 10 11:06:11 2003 +0000
    18.2 +++ b/xenolinux-2.4.21-pre4-sparse/include/asm-xeno/segment.h	Mon Apr 14 16:31:59 2003 +0000
    18.3 @@ -1,10 +1,12 @@
    18.4  #ifndef _ASM_SEGMENT_H
    18.5  #define _ASM_SEGMENT_H
    18.6  
    18.7 -#define __KERNEL_CS	0x11
    18.8 -#define __KERNEL_DS	0x19
    18.9 +#include <asm/hypervisor-ifs/hypervisor-if.h>
   18.10  
   18.11 -#define __USER_CS	0x23
   18.12 -#define __USER_DS	0x2B
   18.13 +#define __KERNEL_CS	FLAT_RING1_CS
   18.14 +#define __KERNEL_DS	FLAT_RING1_DS
   18.15 +
   18.16 +#define __USER_CS	FLAT_RING3_CS
   18.17 +#define __USER_DS	FLAT_RING3_DS
   18.18  
   18.19  #endif