ia64/xen-unstable

changeset 1622:954bece440ef

bitkeeper revision 1.1026.3.1 (40e1966bdTS8WBGJY9WhLRE3t6GBdQ)

More cleanups for x86-64.
author kaf24@scramble.cl.cam.ac.uk
date Tue Jun 29 16:18:51 2004 +0000 (2004-06-29)
parents f9bbf7aa1596
children 489b925b0e22
files .rootkeys xen/arch/x86/boot/boot.S xen/arch/x86/mm.c xen/common/kernel.c xen/common/memory.c xen/common/shadow.c xen/include/asm-x86/config.h xen/include/asm-x86/page.h xen/include/asm-x86/string.h xen/include/asm-x86/types.h xen/include/asm-x86/x86_32/string.h xen/include/asm-x86/x86_64/string.h xen/include/xen/mm.h
     1.1 --- a/.rootkeys	Tue Jun 29 14:01:49 2004 +0000
     1.2 +++ b/.rootkeys	Tue Jun 29 16:18:51 2004 +0000
     1.3 @@ -414,18 +414,20 @@ 3ddb79c2plf7ciNgoNjU-RsbUzawsw xen/inclu
     1.4  3ddb79c3Hgbb2g8CyWLMCK-6_ZVQSQ xen/include/asm-x86/smp.h
     1.5  3ddb79c3jn8ALV_S9W5aeTYUQRKBpg xen/include/asm-x86/smpboot.h
     1.6  3ddb79c3NiyQE2vQnyGiaBnNjBO1rA xen/include/asm-x86/spinlock.h
     1.7 -3e7f358aG11EvMI9VJ4_9hD4LUO7rQ xen/include/asm-x86/string.h
     1.8 +40e1966akOHWvvunCED7x3HPv35QvQ xen/include/asm-x86/string.h
     1.9  3ddb79c3ezddh34MdelJpa5tNR00Dw xen/include/asm-x86/system.h
    1.10  3ddb79c4HugMq7IYGxcQKFBpKwKhzA xen/include/asm-x86/types.h
    1.11  40cf1596saFaHD5DC5zvrSn7CDCWGQ xen/include/asm-x86/uaccess.h
    1.12  3ddb79c2ADvRmdexd9y3AYK9_NTx-Q xen/include/asm-x86/x86_32/current.h
    1.13  3ddb79c3mbqEM7QQr3zVq7NiBNhouA xen/include/asm-x86/x86_32/ptrace.h
    1.14 +3e7f358aG11EvMI9VJ4_9hD4LUO7rQ xen/include/asm-x86/x86_32/string.h
    1.15  3ddb79c3M2n1ROZH6xk3HbyN4CPDqg xen/include/asm-x86/x86_32/uaccess.h
    1.16  404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86/x86_64/current.h
    1.17  404f1b9fl6AQ_a-T1TDK3fuwTPXmHw xen/include/asm-x86/x86_64/desc.h
    1.18  404f1badfXZJZ2sU8sh9PS2EZvd19Q xen/include/asm-x86/x86_64/ldt.h
    1.19  404f1bb1LSCqrMDSfRAti5NdMQPJBQ xen/include/asm-x86/x86_64/page.h
    1.20  404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/ptrace.h
    1.21 +40e1966azOJZfNI6Ilthe6Q-T3Hewg xen/include/asm-x86/x86_64/string.h
    1.22  404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h
    1.23  400304fcmRQmDdFYEzDh0wcBba9alg xen/include/hypervisor-ifs/COPYING
    1.24  404f1bc68SXxmv0zQpXBWGrCzSyp8w xen/include/hypervisor-ifs/arch-x86_32.h
     2.1 --- a/xen/arch/x86/boot/boot.S	Tue Jun 29 14:01:49 2004 +0000
     2.2 +++ b/xen/arch/x86/boot/boot.S	Tue Jun 29 16:18:51 2004 +0000
     2.3 @@ -101,7 +101,7 @@ continue_boot_cpu:
     2.4          mov     0x4(%eax),%eax               /* %eax = mod[mod_count-1]->end */
     2.5          mov     %eax,%ecx
     2.6          sub     %ebx,%ecx                    /* %ecx = byte len of all mods */
     2.7 -        mov     $(MAX_DIRECTMAP_ADDRESS), %edi
     2.8 +        mov     $(DIRECTMAP_PHYS_END), %edi
     2.9          add     %ecx, %edi                   /* %edi = src + length */        
    2.10          shr     $2,%ecx                      /* %ecx = length/4 */
    2.11  1:      sub     $4,%eax                      /* %eax = src, %edi = dst */
    2.12 @@ -117,7 +117,7 @@ skip_dom0_copy:
    2.13  1:      mov     %eax,__PAGE_OFFSET>>20(%edi) /* high mapping */
    2.14          stosl                                /* low mapping */
    2.15          add     $(1<<L2_PAGETABLE_SHIFT),%eax
    2.16 -        cmp     $MAX_DIRECTMAP_ADDRESS+0x1e3,%eax
    2.17 +        cmp     $DIRECTMAP_PHYS_END+0x1e3,%eax
    2.18          jne     1b
    2.19  
    2.20          call    start_paging        
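A note on the two hunks above: both run from boot.S before paging is enabled, so the registers hold physical addresses, and the `0x1e3` in the comparison is the PDE flag bits (present, writable, accessed, dirty, 4MB, global) carried in %eax alongside the frame address. A C rendering of the module-relocation loop, as a sketch only — the variable names are illustrative and the word-move instructions are not all shown in this hunk:

```c
/* Sketch: relocate the multiboot modules to just above the 40MB
 * direct-map boundary, copying 32-bit words from the top down so an
 * overlapping source and destination cannot corrupt each other.
 * All addresses are physical; paging is not yet enabled. */
uint32_t *src   = (uint32_t *)mod_end;                    /* %eax */
uint32_t *dst   = (uint32_t *)(DIRECTMAP_PHYS_END + len); /* %edi */
size_t    words = len / 4;                                /* %ecx after shr $2 */

while ( words-- != 0 )
    *--dst = *--src;     /* matches the sub $4 / store pattern above */
```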
     3.1 --- a/xen/arch/x86/mm.c	Tue Jun 29 14:01:49 2004 +0000
     3.2 +++ b/xen/arch/x86/mm.c	Tue Jun 29 16:18:51 2004 +0000
     3.3 @@ -347,7 +347,7 @@ void *memguard_init(void *heap_start)
     3.4                            PAGE_MASK);
     3.5  
     3.6      /* Memory guarding is incompatible with super pages. */
     3.7 -    for ( i = 0; i < (MAX_XENHEAP_ADDRESS >> L2_PAGETABLE_SHIFT); i++ )
     3.8 +    for ( i = 0; i < (xenheap_phys_end >> L2_PAGETABLE_SHIFT); i++ )
     3.9      {
    3.10          l1 = (l1_pgentry_t *)heap_start;
    3.11          heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
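The superpage-shattering loop in memguard_init() is now bounded by the runtime heap size rather than a build-time constant. A worked instance of the new bound, assuming the i386 defaults from this changeset (non-PAE, 4MB superpages):

```c
/* With opt_xenheap_megabytes = 12 (XENHEAP_DEFAULT_MB on i386):
 *   xenheap_phys_end   = 12 << 20         = 0x00C00000
 *   L2_PAGETABLE_SHIFT = 22               (4MB superpages on x86-32)
 *   iterations         = 0x00C00000 >> 22 = 3
 * i.e. three L1 tables are carved out of heap_start, one per 4MB
 * superpage covering the Xen heap, so individual heap pages can be
 * guarded. */
```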
     4.1 --- a/xen/common/kernel.c	Tue Jun 29 14:01:49 2004 +0000
     4.2 +++ b/xen/common/kernel.c	Tue Jun 29 16:18:51 2004 +0000
     4.3 @@ -27,6 +27,8 @@
     4.4  #include <asm/domain_page.h>
     4.5  #include <hypervisor-ifs/dom0_ops.h>
     4.6  
     4.7 +unsigned long xenheap_phys_end;
     4.8 +
     4.9  kmem_cache_t *domain_struct_cachep;
    4.10  
    4.11  struct e820entry {
    4.12 @@ -69,6 +71,11 @@ char opt_physdev_dom0_hide[200] = "";
    4.13  /*                                    level- or edge-triggered.         */
    4.14  /* Example: 'leveltrigger=4,5,6,20 edgetrigger=21'. */
    4.15  char opt_leveltrigger[30] = "", opt_edgetrigger[30] = "";
    4.16 +/*
    4.17 + * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
    4.18 + * pfn_info table and allocation bitmap.
    4.19 + */
    4.20 +unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
    4.21  
    4.22  static struct {
    4.23      unsigned char *name;
    4.24 @@ -91,6 +98,7 @@ static struct {
    4.25      { "physdev_dom0_hide", OPT_STR,  &opt_physdev_dom0_hide },
    4.26      { "leveltrigger",      OPT_STR,  &opt_leveltrigger },
    4.27      { "edgetrigger",       OPT_STR,  &opt_edgetrigger },
    4.28 +    { "xenheap_megabytes", OPT_UINT, &opt_xenheap_megabytes },
    4.29      { NULL,               0,        NULL     }
    4.30  };
    4.31  
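The new `xenheap_megabytes` entry is the only OPT_UINT visible in this table. A minimal sketch of the kind of loop that consumes such an entry — the real parser and its field names are not shown in this hunk, so the `type` and `var` fields and the function below are assumptions, not the tree's code:

```c
/* Hypothetical parser core: given a name/value pair split out of the
 * Xen command line (e.g. "xenheap_megabytes=24"), find the matching
 * table entry and store the converted value. */
static void set_option(const char *name, const char *value)
{
    int i;
    for ( i = 0; opts[i].name != NULL; i++ )
    {
        if ( strcmp(opts[i].name, name) != 0 )
            continue;
        if ( opts[i].type == OPT_STR )
            strcpy(opts[i].var, value);
        else /* OPT_UINT */
            *(unsigned int *)opts[i].var =
                simple_strtoul(value, NULL, 0);   /* "24" -> 24 */
        break;
    }
}
```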
    4.32 @@ -180,52 +188,78 @@ void cmain(unsigned long magic, multiboo
    4.33          for ( ; ; ) ;
    4.34      }
    4.35  
    4.36 -    max_mem = max_page = (mbi->mem_upper+1024) >> (PAGE_SHIFT - 10);
    4.37 -
    4.38 -    /* The array of pfn_info structures must fit into the reserved area. */
    4.39 -    if ( (sizeof(struct pfn_info) * max_page) >
    4.40 -         (FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START) )
    4.41 +    if ( opt_xenheap_megabytes < 4 )
    4.42      {
    4.43 -        unsigned long new_max =
    4.44 -            (FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START) /
    4.45 -            sizeof(struct pfn_info);
    4.46 -        printk("Truncating available memory to %lu/%luMB\n",
    4.47 -               new_max >> (20 - PAGE_SHIFT), max_page >> (20 - PAGE_SHIFT));
    4.48 -        max_page = new_max;
    4.49 +        printk("Xen heap size is too small to safely continue!\n");
    4.50 +        for ( ; ; ) ;
    4.51      }
    4.52  
    4.53      set_current(&idle0_task);
    4.54  
    4.55 -    init_frametable(max_page);
    4.56 -    printk("Initialised %luMB memory (%lu pages) on a %luMB machine\n",
    4.57 -           max_page >> (20-PAGE_SHIFT), max_page,
    4.58 -	   max_mem  >> (20-PAGE_SHIFT));
    4.59 +    xenheap_phys_end = opt_xenheap_megabytes << 20;
    4.60 +
    4.61 +    max_mem = max_page = (mbi->mem_upper+1024) >> (PAGE_SHIFT - 10);
    4.62 +
    4.63 +#if defined(__i386__)
    4.64  
    4.65 -    initial_images_start = MAX_DIRECTMAP_ADDRESS;
    4.66 +    if ( opt_xenheap_megabytes > XENHEAP_DEFAULT_MB )
    4.67 +    {
    4.68 +        printk("Xen heap size is limited to %dMB - you specified %dMB.\n",
    4.69 +               XENHEAP_DEFAULT_MB, opt_xenheap_megabytes);
    4.70 +        for ( ; ; ) ;
    4.71 +    }
    4.72 +
    4.73 +    ASSERT((sizeof(struct pfn_info) << 20) >
    4.74 +           (FRAMETABLE_VIRT_END - FRAMETABLE_VIRT_START));
    4.75 +
    4.76 +    init_frametable((void *)FRAMETABLE_VIRT_START, max_page);
    4.77 +
    4.78 +    /* Initial images stashed away above DIRECTMAP area in boot.S. */
    4.79 +    initial_images_start = DIRECTMAP_PHYS_END;
    4.80      initial_images_end   = initial_images_start + 
    4.81          (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
    4.82 +
    4.83 +#elif defined(__x86_64__)
    4.84 +
    4.85 +    init_frametable(__va(xenheap_phys_end), max_page);
    4.86 +
    4.87 +    initial_images_start = __pa(frame_table) + frame_table_size;
    4.88 +    initial_images_end   = initial_images_start + 
    4.89 +        (mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
    4.90 +    if ( initial_images_end > (max_page << PAGE_SHIFT) )
    4.91 +    {
    4.92 +        printk("Not enough memory to stash the DOM0 kernel image.\n");
    4.93 +        for ( ; ; ) ;
    4.94 +    }
    4.95 +    memmove(__va(initial_images_start),
    4.96 +            __va(mod[0].mod_start),
    4.97 +            mod[mbi->mods_count-1].mod_end - mod[0].mod_start);
    4.98 +
    4.99 +#endif
   4.100 +
   4.101      dom0_memory_start    = (initial_images_end + ((4<<20)-1)) & ~((4<<20)-1);
   4.102      dom0_memory_end      = dom0_memory_start + (opt_dom0_mem << 10);
   4.103      dom0_memory_end      = (dom0_memory_end + PAGE_SIZE - 1) & PAGE_MASK;
   4.104      
   4.105      /* Cheesy sanity check: enough memory for DOM0 allocation + some slack? */
   4.106 -    if ( (dom0_memory_end + (8<<20)) > (max_page<<PAGE_SHIFT) )
   4.107 -        panic("Not enough memory to craete initial domain!\n");
   4.108 +    if ( (dom0_memory_end + (8<<20)) > (max_page << PAGE_SHIFT) )
   4.109 +    {
   4.110 +        printk("Not enough memory for DOM0 memory reservation.\n");
   4.111 +        for ( ; ; ) ;
   4.112 +    }
   4.113 +
   4.114 +    printk("Initialised %luMB memory (%lu pages) on a %luMB machine\n",
   4.115 +           max_page >> (20-PAGE_SHIFT), max_page,
   4.116 +	   max_mem  >> (20-PAGE_SHIFT));
   4.117  
   4.118      add_to_domain_alloc_list(dom0_memory_end, max_page << PAGE_SHIFT);
   4.119  
   4.120      heap_start = memguard_init(&_end);
   4.121  
   4.122      printk("Xen heap size is %luKB\n", 
   4.123 -	   (MAX_XENHEAP_ADDRESS-__pa(heap_start))/1024 );
   4.124 +	   (xenheap_phys_end-__pa(heap_start))/1024 );
   4.125  
   4.126 -    if ( ((MAX_XENHEAP_ADDRESS-__pa(heap_start))/1024) <= 4096 )
   4.127 -    {
   4.128 -        printk("Xen heap size is too small to safely continue!\n");
   4.129 -        for ( ; ; ) ;
   4.130 -    }
   4.131 -
   4.132 -    init_page_allocator(__pa(heap_start), MAX_XENHEAP_ADDRESS);
   4.133 +    init_page_allocator(__pa(heap_start), xenheap_phys_end);
   4.134   
   4.135      /* Initialise the slab allocator. */
   4.136      kmem_cache_init();
   4.137 @@ -253,8 +287,7 @@ void cmain(unsigned long magic, multiboo
   4.138  
   4.139      /*
   4.140       * We're going to setup domain0 using the module(s) that we stashed safely
   4.141 -     * above our MAX_DIRECTMAP_ADDRESS in boot/boot.S. The second module, if
   4.142 -     * present, is an initrd ramdisk.
   4.143 +     * above our heap. The second module, if present, is an initrd ramdisk.
   4.144       */
   4.145      if ( construct_dom0(new_dom, dom0_memory_start, dom0_memory_end,
   4.146                          (char *)initial_images_start, 
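The net effect of the `#if defined(__i386__)` / `#elif defined(__x86_64__)` split above is two different physical layouts for the boot images. A sketch of the x86-64 arithmetic, restating the code above with one illustrative name (`total_module_bytes` stands in for `mod[mbi->mods_count-1].mod_end - mod[0].mod_start`):

```c
/* x86-64 boot-time layout (sketch of the arithmetic above):
 *
 *   [0, xenheap_phys_end)        Xen heap (default 16MB)
 *   frame_table                  pfn_info array at __va(xenheap_phys_end)
 *   images_start                 dom0 kernel/initrd, memmove'd here
 *   dom0_start                   dom0 allocation, 4MB-aligned
 */
unsigned long xenheap_phys_end = opt_xenheap_megabytes << 20;
unsigned long images_start     = __pa(frame_table) + frame_table_size;
unsigned long images_end       = images_start + total_module_bytes;
unsigned long dom0_start       = (images_end + ((4UL << 20) - 1)) &
                                 ~((4UL << 20) - 1);
```

On i386 the images instead sit at the fixed DIRECTMAP_PHYS_END, where boot.S already stashed them.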
     5.1 --- a/xen/common/memory.c	Tue Jun 29 14:01:49 2004 +0000
     5.2 +++ b/xen/common/memory.c	Tue Jun 29 16:18:51 2004 +0000
     5.3 @@ -144,7 +144,7 @@ static struct {
     5.4  #define GPS (percpu_info[smp_processor_id()].gps ? : current)
     5.5  
     5.6  
     5.7 -void __init init_frametable(unsigned long nr_pages)
     5.8 +void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
     5.9  {
    5.10      unsigned long mfn;
    5.11  
    5.12 @@ -153,22 +153,23 @@ void __init init_frametable(unsigned lon
    5.13      max_page = nr_pages;
    5.14      frame_table_size = nr_pages * sizeof(struct pfn_info);
    5.15      frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
    5.16 -    frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START;
    5.17 +    frame_table = frametable_vstart;
    5.18 +
    5.19 +    if ( (__pa(frame_table) + frame_table_size) > (max_page << PAGE_SHIFT) )
    5.20 +        panic("Not enough memory for frame table - reduce Xen heap size?\n");
    5.21 +
    5.22      memset(frame_table, 0, frame_table_size);
    5.23  
    5.24      spin_lock_init(&free_list_lock);
    5.25      INIT_LIST_HEAD(&free_list);    
    5.26      free_pfns = 0;
    5.27  
    5.28 -    /* initialise to a magic of 0x55555555 so easier to spot bugs later */
    5.29 -    memset( machine_to_phys_mapping, 0x55, 4*1024*1024 );
    5.30 -
    5.31 -    /* The array is sized for a 4GB machine regardless of actuall mem size. 
    5.32 -       This costs 4MB -- may want to fix some day */
    5.33 +    /* Initialise to a magic of 0x55555555 so easier to spot bugs later. */
    5.34 +    memset(machine_to_phys_mapping, 0x55, 4<<20);
    5.35  
    5.36      /* Pin the ownership of the MP table so that DOM0 can map it later. */
    5.37 -    for ( mfn = virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
    5.38 -          mfn < virt_to_phys(&machine_to_phys_mapping[1024*1024])>>PAGE_SHIFT;
    5.39 +    for ( mfn = virt_to_phys(&machine_to_phys_mapping[0<<20])>>PAGE_SHIFT;
    5.40 +          mfn < virt_to_phys(&machine_to_phys_mapping[1<<20])>>PAGE_SHIFT;
    5.41            mfn++ )
    5.42      {
    5.43          frame_table[mfn].count_and_flags = 1 | PGC_allocated;
    5.44 @@ -471,6 +472,7 @@ static int alloc_l2_table(struct pfn_inf
    5.45          if ( unlikely(!get_page_from_l2e(pl2e[i], page_nr)) )
    5.46              goto fail;
    5.47      
    5.48 +#if defined(__i386__)
    5.49      /* Now we add our private high mappings. */
    5.50      memcpy(&pl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
    5.51             &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
    5.52 @@ -480,6 +482,7 @@ static int alloc_l2_table(struct pfn_inf
    5.53      pl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    5.54          mk_l2_pgentry(__pa(page->u.domain->mm.perdomain_pt) | 
    5.55                        __PAGE_HYPERVISOR);
    5.56 +#endif
    5.57  
    5.58      unmap_domain_mem(pl2e);
    5.59      return 1;
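Passing the frame-table base into init_frametable() is what lets the two architectures place it differently; the call sites added in the kernel.c hunk of this changeset are:

```c
/* From kernel.c above: */
#if defined(__i386__)
init_frametable((void *)FRAMETABLE_VIRT_START, max_page); /* fixed window  */
#elif defined(__x86_64__)
init_frametable(__va(xenheap_phys_end), max_page);   /* just above the heap */
#endif
```

The new panic() check guards the x86-64 placement: with the table sitting in ordinary RAM rather than a reserved virtual window, a small machine (or an oversized heap) could otherwise push it past the end of memory.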
     6.1 --- a/xen/common/shadow.c	Tue Jun 29 14:01:49 2004 +0000
     6.2 +++ b/xen/common/shadow.c	Tue Jun 29 16:18:51 2004 +0000
     6.3 @@ -151,7 +151,9 @@ static inline int shadow_page_op( struct
     6.4               PGT_l2_page_table )
     6.5  		{
     6.6  			unsigned long * spl1e = map_domain_mem( spfn<<PAGE_SHIFT );
     6.7 -			memset( spl1e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*spl1e) );
     6.8 +#ifdef __i386__
     6.9 +			memset(spl1e, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*spl1e));
    6.10 +#endif
    6.11  			unmap_domain_mem( spl1e );
    6.12  		}
    6.13      }
    6.14 @@ -574,6 +576,7 @@ unsigned long shadow_l2_table(
    6.15      // we need to do this before the linear map is set up
    6.16      spl2e = (l2_pgentry_t *) map_domain_mem(spfn << PAGE_SHIFT);
    6.17  
    6.18 +#ifdef __i386__
    6.19      // get hypervisor and 2x linear PT mapings installed 
    6.20      memcpy(&spl2e[DOMAIN_ENTRIES_PER_L2_PAGETABLE], 
    6.21             &idle_pg_table[DOMAIN_ENTRIES_PER_L2_PAGETABLE],
    6.22 @@ -585,6 +588,7 @@ unsigned long shadow_l2_table(
    6.23      spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
    6.24          mk_l2_pgentry(__pa(frame_table[gpfn].u.domain->mm.perdomain_pt) | 
    6.25                        __PAGE_HYPERVISOR);
    6.26 +#endif
    6.27  
    6.28      // can't use the linear map as we may not be in the right PT
    6.29      gpl2e = (l2_pgentry_t *) map_domain_mem(gpfn << PAGE_SHIFT);
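Both shadow-code changes guard the "copy the hypervisor's own L2 entries" step, which only makes sense on x86-32. A worked derivation, assuming the usual i386 definition `DOMAIN_ENTRIES_PER_L2_PAGETABLE == HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT`:

```c
/* On x86-32 the guest and Xen share each L2 page table:
 *   HYPERVISOR_VIRT_START           = 0xFC000000  (top 64MB)
 *   L2_PAGETABLE_SHIFT              = 22          (4MB slots)
 *   DOMAIN_ENTRIES_PER_L2_PAGETABLE = 0xFC000000 >> 22 = 1008
 * so entries 1008..1023 of the 1024-entry table belong to Xen and must
 * be spliced into every shadow L2.  On x86-64 the hypervisor mappings
 * hang off higher paging levels, so there is nothing to copy at L2. */
```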
     7.1 --- a/xen/include/asm-x86/config.h	Tue Jun 29 14:01:49 2004 +0000
     7.2 +++ b/xen/include/asm-x86/config.h	Tue Jun 29 16:18:51 2004 +0000
     7.3 @@ -93,6 +93,8 @@ extern void __out_of_line_bug(int line) 
     7.4  
     7.5  #if defined(__x86_64__)
     7.6  
     7.7 +#define XENHEAP_DEFAULT_MB (16)
     7.8 +
     7.9  #define PML4_ENTRY_BITS  39
    7.10  #define PML4_ENTRY_BYTES (1UL<<PML4_ENTRY_BITS)
    7.11  
    7.12 @@ -158,9 +160,8 @@ extern void __out_of_line_bug(int line) 
    7.13  
    7.14  #elif defined(__i386__)
    7.15  
    7.16 -/* The following are machine addresses. */
    7.17 -#define MAX_XENHEAP_ADDRESS   (12*1024*1024)
    7.18 -#define MAX_DIRECTMAP_ADDRESS (40*1024*1024)
    7.19 +#define XENHEAP_DEFAULT_MB (12)
    7.20 +#define DIRECTMAP_PHYS_END (40*1024*1024)
    7.21  
    7.22  /* Hypervisor owns top 64MB of virtual address space. */
    7.23  #define HYPERVISOR_VIRT_START (0xFC000000UL)
    7.24 @@ -173,9 +174,9 @@ extern void __out_of_line_bug(int line) 
    7.25  #define RO_MPT_VIRT_END       (RO_MPT_VIRT_START + (4*1024*1024))
    7.26  /* The virtual addresses for the 40MB direct-map region. */
    7.27  #define DIRECTMAP_VIRT_START  (RO_MPT_VIRT_END)
    7.28 -#define DIRECTMAP_VIRT_END    (DIRECTMAP_VIRT_START + MAX_DIRECTMAP_ADDRESS)
    7.29 +#define DIRECTMAP_VIRT_END    (DIRECTMAP_VIRT_START + DIRECTMAP_PHYS_END)
    7.30  #define XENHEAP_VIRT_START    (DIRECTMAP_VIRT_START)
    7.31 -#define XENHEAP_VIRT_END      (XENHEAP_VIRT_START + MAX_XENHEAP_ADDRESS)
    7.32 +#define XENHEAP_VIRT_END      (XENHEAP_VIRT_START + (XENHEAP_DEFAULT_MB<<20))
    7.33  #define RDWR_MPT_VIRT_START   (XENHEAP_VIRT_END)
    7.34  #define RDWR_MPT_VIRT_END     (RDWR_MPT_VIRT_START + (4*1024*1024))
    7.35  #define FRAMETABLE_VIRT_START (RDWR_MPT_VIRT_END)
    7.36 @@ -207,6 +208,10 @@ extern void __out_of_line_bug(int line) 
    7.37  
    7.38  #endif /* __i386__ */
    7.39  
    7.40 +#ifndef __ASSEMBLY__
    7.41 +extern unsigned long xenheap_phys_end; /* user-configurable */
    7.42 +#endif
    7.43 +
    7.44  #define GDT_VIRT_START        (PERDOMAIN_VIRT_START)
    7.45  #define GDT_VIRT_END          (GDT_VIRT_START + (64*1024))
    7.46  #define LDT_VIRT_START        (GDT_VIRT_END)
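One subtlety of the rename: on i386 the virtual window for the heap is still sized by the compile-time default, while the usable end is now the runtime `xenheap_phys_end` — which is why the kernel.c hunk rejects `opt_xenheap_megabytes > XENHEAP_DEFAULT_MB` on i386. A symbolic sketch of the resulting relations (the absolute bases are defined earlier in this header):

```c
/* i386 direct-map region after this change (sketch):
 *   DIRECTMAP_VIRT_END = DIRECTMAP_VIRT_START + DIRECTMAP_PHYS_END (40MB)
 *   XENHEAP_VIRT_START = DIRECTMAP_VIRT_START
 *   XENHEAP_VIRT_END   = XENHEAP_VIRT_START + (XENHEAP_DEFAULT_MB << 20)
 *
 * Runtime constraints enforced in cmain() (see the kernel.c hunk):
 *   4 <= opt_xenheap_megabytes                  (both arches)
 *   opt_xenheap_megabytes <= XENHEAP_DEFAULT_MB (i386 only)
 *   xenheap_phys_end = opt_xenheap_megabytes << 20
 */
```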
     8.1 --- a/xen/include/asm-x86/page.h	Tue Jun 29 14:01:49 2004 +0000
     8.2 +++ b/xen/include/asm-x86/page.h	Tue Jun 29 16:18:51 2004 +0000
     8.3 @@ -95,12 +95,12 @@ typedef struct { unsigned long pt_lo; } 
     8.4  extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE];
     8.5  extern void paging_init(void);
     8.6  
     8.7 -#define __flush_tlb()                                    \
     8.8 -    do {                                                 \
     8.9 -        __asm__ __volatile__ (                           \
    8.10 -            "movl %%cr3, %%eax; movl %%eax, %%cr3"       \
    8.11 -            : : : "memory", "eax" );                     \
    8.12 -        tlb_clocktick();                                 \
    8.13 +#define __flush_tlb()                                             \
    8.14 +    do {                                                          \
    8.15 +        __asm__ __volatile__ (                                    \
    8.16 +            "mov %%cr3, %%"__OP"ax; mov %%"__OP"ax, %%cr3"        \
    8.17 +            : : : "memory", __OP"ax" );                           \
    8.18 +        tlb_clocktick();                                          \
    8.19      } while ( 0 )
    8.20  
    8.21  /* Flush global pages as well. */
    8.22 @@ -108,14 +108,14 @@ extern void paging_init(void);
    8.23  #define __pge_off()                                                     \
    8.24          do {                                                            \
    8.25                  __asm__ __volatile__(                                   \
    8.26 -                        "movl %0, %%cr4;  # turn off PGE     "          \
    8.27 +                        "mov %0, %%cr4;  # turn off PGE     "           \
    8.28                          :: "r" (mmu_cr4_features & ~X86_CR4_PGE));      \
    8.29          } while (0)
    8.30  
    8.31  #define __pge_on()                                                      \
    8.32          do {                                                            \
    8.33                  __asm__ __volatile__(                                   \
    8.34 -                        "movl %0, %%cr4;  # turn off PGE     "          \
    8.35 +                        "mov %0, %%cr4;  # turn off PGE     "           \
    8.36                          :: "r" (mmu_cr4_features));                     \
    8.37          } while (0)
    8.38  
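The movl-to-mov change, together with the `__OP` macro, makes these snippets assemble for both word sizes. Assuming `__OP` is the per-arch register-name prefix ("e" on x86-32, "r" on x86-64), the flush expands as follows:

```c
/* Expansion of __flush_tlb()'s asm template (sketch):
 *   x86-32:  mov %cr3, %eax ; mov %eax, %cr3   (clobbers eax)
 *   x86-64:  mov %cr3, %rax ; mov %rax, %cr3   (clobbers rax)
 * Rewriting CR3 flushes all non-global TLB entries; __pge_off() and
 * __pge_on() below toggle CR4.PGE when global entries must go too. */
```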
     9.1 --- a/xen/include/asm-x86/string.h	Tue Jun 29 14:01:49 2004 +0000
     9.2 +++ b/xen/include/asm-x86/string.h	Tue Jun 29 16:18:51 2004 +0000
     9.3 @@ -1,485 +1,5 @@
     9.4 -#ifndef _I386_STRING_H_
     9.5 -#define _I386_STRING_H_
     9.6 -
     9.7 -#include <xen/config.h>
     9.8 -
     9.9 -/*
    9.10 - * This string-include defines all string functions as inline
    9.11 - * functions. Use gcc. It also assumes ds=es=data space, this should be
    9.12 - * normal. Most of the string-functions are rather heavily hand-optimized,
    9.13 - * see especially strtok,strstr,str[c]spn. They should work, but are not
    9.14 - * very easy to understand. Everything is done entirely within the register
    9.15 - * set, making the functions fast and clean. String instructions have been
    9.16 - * used through-out, making for "slightly" unclear code :-)
    9.17 - *
    9.18 - *		NO Copyright (C) 1991, 1992 Linus Torvalds,
    9.19 - *		consider these trivial functions to be PD.
    9.20 - */
    9.21 -
    9.22 -
    9.23 -#define __HAVE_ARCH_STRCPY
    9.24 -static inline char * strcpy(char * dest,const char *src)
    9.25 -{
    9.26 -int d0, d1, d2;
    9.27 -__asm__ __volatile__(
    9.28 -	"1:\tlodsb\n\t"
    9.29 -	"stosb\n\t"
    9.30 -	"testb %%al,%%al\n\t"
    9.31 -	"jne 1b"
    9.32 -	: "=&S" (d0), "=&D" (d1), "=&a" (d2)
    9.33 -	:"0" (src),"1" (dest) : "memory");
    9.34 -return dest;
    9.35 -}
    9.36 -
    9.37 -#define __HAVE_ARCH_STRNCPY
    9.38 -static inline char * strncpy(char * dest,const char *src,size_t count)
    9.39 -{
    9.40 -int d0, d1, d2, d3;
    9.41 -__asm__ __volatile__(
    9.42 -	"1:\tdecl %2\n\t"
    9.43 -	"js 2f\n\t"
    9.44 -	"lodsb\n\t"
    9.45 -	"stosb\n\t"
    9.46 -	"testb %%al,%%al\n\t"
    9.47 -	"jne 1b\n\t"
    9.48 -	"rep\n\t"
    9.49 -	"stosb\n"
    9.50 -	"2:"
    9.51 -	: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
    9.52 -	:"0" (src),"1" (dest),"2" (count) : "memory");
    9.53 -return dest;
    9.54 -}
    9.55 -
    9.56 -#define __HAVE_ARCH_STRCAT
    9.57 -static inline char * strcat(char * dest,const char * src)
    9.58 -{
    9.59 -int d0, d1, d2, d3;
    9.60 -__asm__ __volatile__(
    9.61 -	"repne\n\t"
    9.62 -	"scasb\n\t"
    9.63 -	"decl %1\n"
    9.64 -	"1:\tlodsb\n\t"
    9.65 -	"stosb\n\t"
    9.66 -	"testb %%al,%%al\n\t"
    9.67 -	"jne 1b"
    9.68 -	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
    9.69 -	: "0" (src), "1" (dest), "2" (0), "3" (0xffffffff):"memory");
    9.70 -return dest;
    9.71 -}
    9.72 -
    9.73 -#define __HAVE_ARCH_STRNCAT
    9.74 -static inline char * strncat(char * dest,const char * src,size_t count)
    9.75 -{
    9.76 -int d0, d1, d2, d3;
    9.77 -__asm__ __volatile__(
    9.78 -	"repne\n\t"
    9.79 -	"scasb\n\t"
    9.80 -	"decl %1\n\t"
    9.81 -	"movl %8,%3\n"
    9.82 -	"1:\tdecl %3\n\t"
    9.83 -	"js 2f\n\t"
    9.84 -	"lodsb\n\t"
    9.85 -	"stosb\n\t"
    9.86 -	"testb %%al,%%al\n\t"
    9.87 -	"jne 1b\n"
    9.88 -	"2:\txorl %2,%2\n\t"
    9.89 -	"stosb"
    9.90 -	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
    9.91 -	: "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
    9.92 -	: "memory");
    9.93 -return dest;
    9.94 -}
    9.95 -
    9.96 -#define __HAVE_ARCH_STRCMP
    9.97 -static inline int strcmp(const char * cs,const char * ct)
    9.98 -{
    9.99 -int d0, d1;
   9.100 -register int __res;
   9.101 -__asm__ __volatile__(
   9.102 -	"1:\tlodsb\n\t"
   9.103 -	"scasb\n\t"
   9.104 -	"jne 2f\n\t"
   9.105 -	"testb %%al,%%al\n\t"
   9.106 -	"jne 1b\n\t"
   9.107 -	"xorl %%eax,%%eax\n\t"
   9.108 -	"jmp 3f\n"
   9.109 -	"2:\tsbbl %%eax,%%eax\n\t"
   9.110 -	"orb $1,%%al\n"
   9.111 -	"3:"
   9.112 -	:"=a" (__res), "=&S" (d0), "=&D" (d1)
   9.113 -		     :"1" (cs),"2" (ct));
   9.114 -return __res;
   9.115 -}
   9.116 -
   9.117 -#define __HAVE_ARCH_STRNCMP
   9.118 -static inline int strncmp(const char * cs,const char * ct,size_t count)
   9.119 -{
   9.120 -register int __res;
   9.121 -int d0, d1, d2;
   9.122 -__asm__ __volatile__(
   9.123 -	"1:\tdecl %3\n\t"
   9.124 -	"js 2f\n\t"
   9.125 -	"lodsb\n\t"
   9.126 -	"scasb\n\t"
   9.127 -	"jne 3f\n\t"
   9.128 -	"testb %%al,%%al\n\t"
   9.129 -	"jne 1b\n"
   9.130 -	"2:\txorl %%eax,%%eax\n\t"
   9.131 -	"jmp 4f\n"
   9.132 -	"3:\tsbbl %%eax,%%eax\n\t"
   9.133 -	"orb $1,%%al\n"
   9.134 -	"4:"
   9.135 -		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
   9.136 -		     :"1" (cs),"2" (ct),"3" (count));
   9.137 -return __res;
   9.138 -}
   9.139 -
   9.140 -#define __HAVE_ARCH_STRCHR
   9.141 -static inline char * strchr(const char * s, int c)
   9.142 -{
   9.143 -int d0;
   9.144 -register char * __res;
   9.145 -__asm__ __volatile__(
   9.146 -	"movb %%al,%%ah\n"
   9.147 -	"1:\tlodsb\n\t"
   9.148 -	"cmpb %%ah,%%al\n\t"
   9.149 -	"je 2f\n\t"
   9.150 -	"testb %%al,%%al\n\t"
   9.151 -	"jne 1b\n\t"
   9.152 -	"movl $1,%1\n"
   9.153 -	"2:\tmovl %1,%0\n\t"
   9.154 -	"decl %0"
   9.155 -	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
   9.156 -return __res;
   9.157 -}
   9.158 -
   9.159 -#define __HAVE_ARCH_STRRCHR
   9.160 -static inline char * strrchr(const char * s, int c)
   9.161 -{
   9.162 -int d0, d1;
   9.163 -register char * __res;
   9.164 -__asm__ __volatile__(
   9.165 -	"movb %%al,%%ah\n"
   9.166 -	"1:\tlodsb\n\t"
   9.167 -	"cmpb %%ah,%%al\n\t"
   9.168 -	"jne 2f\n\t"
   9.169 -	"leal -1(%%esi),%0\n"
   9.170 -	"2:\ttestb %%al,%%al\n\t"
   9.171 -	"jne 1b"
   9.172 -	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
   9.173 -return __res;
   9.174 -}
   9.175 -
   9.176 -#define __HAVE_ARCH_STRLEN
   9.177 -static inline size_t strlen(const char * s)
   9.178 -{
   9.179 -int d0;
   9.180 -register int __res;
   9.181 -__asm__ __volatile__(
   9.182 -	"repne\n\t"
   9.183 -	"scasb\n\t"
   9.184 -	"notl %0\n\t"
   9.185 -	"decl %0"
   9.186 -	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff));
   9.187 -return __res;
   9.188 -}
   9.189 -
   9.190 -static inline void * __memcpy(void * to, const void * from, size_t n)
   9.191 -{
   9.192 -int d0, d1, d2;
   9.193 -__asm__ __volatile__(
   9.194 -	"rep ; movsl\n\t"
   9.195 -	"testb $2,%b4\n\t"
   9.196 -	"je 1f\n\t"
   9.197 -	"movsw\n"
   9.198 -	"1:\ttestb $1,%b4\n\t"
   9.199 -	"je 2f\n\t"
   9.200 -	"movsb\n"
   9.201 -	"2:"
   9.202 -	: "=&c" (d0), "=&D" (d1), "=&S" (d2)
   9.203 -	:"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
   9.204 -	: "memory");
   9.205 -return (to);
   9.206 -}
   9.207 -
   9.208 -/*
   9.209 - * This looks horribly ugly, but the compiler can optimize it totally,
   9.210 - * as the count is constant.
   9.211 - */
   9.212 -static inline void * __constant_memcpy(void * to, const void * from, size_t n)
   9.213 -{
   9.214 -	switch (n) {
   9.215 -		case 0:
   9.216 -			return to;
   9.217 -		case 1:
   9.218 -			*(unsigned char *)to = *(const unsigned char *)from;
   9.219 -			return to;
   9.220 -		case 2:
   9.221 -			*(unsigned short *)to = *(const unsigned short *)from;
   9.222 -			return to;
   9.223 -		case 3:
   9.224 -			*(unsigned short *)to = *(const unsigned short *)from;
   9.225 -			*(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
   9.226 -			return to;
   9.227 -		case 4:
   9.228 -			*(unsigned long *)to = *(const unsigned long *)from;
   9.229 -			return to;
   9.230 -		case 6:	/* for Ethernet addresses */
   9.231 -			*(unsigned long *)to = *(const unsigned long *)from;
   9.232 -			*(2+(unsigned short *)to) = *(2+(const unsigned short *)from);
   9.233 -			return to;
   9.234 -		case 8:
   9.235 -			*(unsigned long *)to = *(const unsigned long *)from;
   9.236 -			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
   9.237 -			return to;
   9.238 -		case 12:
   9.239 -			*(unsigned long *)to = *(const unsigned long *)from;
   9.240 -			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
   9.241 -			*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
   9.242 -			return to;
   9.243 -		case 16:
   9.244 -			*(unsigned long *)to = *(const unsigned long *)from;
   9.245 -			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
   9.246 -			*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
   9.247 -			*(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
   9.248 -			return to;
   9.249 -		case 20:
   9.250 -			*(unsigned long *)to = *(const unsigned long *)from;
   9.251 -			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
   9.252 -			*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
   9.253 -			*(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
   9.254 -			*(4+(unsigned long *)to) = *(4+(const unsigned long *)from);
   9.255 -			return to;
   9.256 -	}
   9.257 -#define COMMON(x) \
   9.258 -__asm__ __volatile__( \
   9.259 -	"rep ; movsl" \
   9.260 -	x \
   9.261 -	: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
   9.262 -	: "0" (n/4),"1" ((long) to),"2" ((long) from) \
   9.263 -	: "memory");
   9.264 -{
   9.265 -	int d0, d1, d2;
   9.266 -	switch (n % 4) {
   9.267 -		case 0: COMMON(""); return to;
   9.268 -		case 1: COMMON("\n\tmovsb"); return to;
   9.269 -		case 2: COMMON("\n\tmovsw"); return to;
   9.270 -		default: COMMON("\n\tmovsw\n\tmovsb"); return to;
   9.271 -	}
   9.272 -}
   9.273 -  
   9.274 -#undef COMMON
   9.275 -}
   9.276 -
   9.277 -#define __HAVE_ARCH_MEMCPY
   9.278 -
   9.279 -#define memcpy(t, f, n) \
   9.280 -(__builtin_constant_p(n) ? \
   9.281 - __constant_memcpy((t),(f),(n)) : \
   9.282 - __memcpy((t),(f),(n)))
   9.283 -
   9.284 -
   9.285 -/*
   9.286 - * struct_cpy(x,y), copy structure *x into (matching structure) *y.
   9.287 - *
   9.288 - * We get link-time errors if the structure sizes do not match.
   9.289 - * There is no runtime overhead, it's all optimized away at
   9.290 - * compile time.
   9.291 - */
   9.292 -//extern void __struct_cpy_bug (void);
   9.293 -
   9.294 -/*
   9.295 -#define struct_cpy(x,y) 			\
   9.296 -({						\
   9.297 -	if (sizeof(*(x)) != sizeof(*(y))) 	\
   9.298 -		__struct_cpy_bug;		\
   9.299 -	memcpy(x, y, sizeof(*(x)));		\
   9.300 -})
   9.301 -*/
   9.302 -
   9.303 -#define __HAVE_ARCH_MEMMOVE
   9.304 -static inline void * memmove(void * dest,const void * src, size_t n)
   9.305 -{
   9.306 -int d0, d1, d2;
   9.307 -if (dest<src)
   9.308 -__asm__ __volatile__(
   9.309 -	"rep\n\t"
   9.310 -	"movsb"
   9.311 -	: "=&c" (d0), "=&S" (d1), "=&D" (d2)
   9.312 -	:"0" (n),"1" (src),"2" (dest)
   9.313 -	: "memory");
   9.314 -else
   9.315 -__asm__ __volatile__(
   9.316 -	"std\n\t"
   9.317 -	"rep\n\t"
   9.318 -	"movsb\n\t"
   9.319 -	"cld"
   9.320 -	: "=&c" (d0), "=&S" (d1), "=&D" (d2)
   9.321 -	:"0" (n),
   9.322 -	 "1" (n-1+(const char *)src),
   9.323 -	 "2" (n-1+(char *)dest)
   9.324 -	:"memory");
   9.325 -return dest;
   9.326 -}
   9.327 -
   9.328 -#define __HAVE_ARCH_MEMCMP
   9.329 -#define memcmp __builtin_memcmp
   9.330 -
   9.331 -#define __HAVE_ARCH_MEMCHR
   9.332 -static inline void * memchr(const void * cs,int c,size_t count)
   9.333 -{
   9.334 -int d0;
   9.335 -register void * __res;
   9.336 -if (!count)
   9.337 -	return NULL;
   9.338 -__asm__ __volatile__(
   9.339 -	"repne\n\t"
   9.340 -	"scasb\n\t"
   9.341 -	"je 1f\n\t"
   9.342 -	"movl $1,%0\n"
   9.343 -	"1:\tdecl %0"
   9.344 -	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
   9.345 -return __res;
   9.346 -}
   9.347 -
   9.348 -static inline void * __memset_generic(void * s, char c,size_t count)
   9.349 -{
   9.350 -int d0, d1;
   9.351 -__asm__ __volatile__(
   9.352 -	"rep\n\t"
   9.353 -	"stosb"
   9.354 -	: "=&c" (d0), "=&D" (d1)
   9.355 -	:"a" (c),"1" (s),"0" (count)
   9.356 -	:"memory");
   9.357 -return s;
   9.358 -}
   9.359 -
   9.360 -/* we might want to write optimized versions of these later */
   9.361 -#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
   9.362 -
   9.363 -/*
   9.364 - * memset(x,0,y) is a reasonably common thing to do, so we want to fill
   9.365 - * things 32 bits at a time even when we don't know the size of the
   9.366 - * area at compile-time..
   9.367 - */
   9.368 -static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
   9.369 -{
   9.370 -int d0, d1;
   9.371 -__asm__ __volatile__(
   9.372 -	"rep ; stosl\n\t"
   9.373 -	"testb $2,%b3\n\t"
   9.374 -	"je 1f\n\t"
   9.375 -	"stosw\n"
   9.376 -	"1:\ttestb $1,%b3\n\t"
   9.377 -	"je 2f\n\t"
   9.378 -	"stosb\n"
   9.379 -	"2:"
   9.380 -	: "=&c" (d0), "=&D" (d1)
   9.381 -	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
   9.382 -	:"memory");
   9.383 -return (s);	
   9.384 -}
   9.385 -
   9.386 -/* Added by Gertjan van Wingerde to make minix and sysv module work */
   9.387 -#define __HAVE_ARCH_STRNLEN
   9.388 -static inline size_t strnlen(const char * s, size_t count)
   9.389 -{
   9.390 -int d0;
   9.391 -register int __res;
   9.392 -__asm__ __volatile__(
   9.393 -	"movl %2,%0\n\t"
   9.394 -	"jmp 2f\n"
   9.395 -	"1:\tcmpb $0,(%0)\n\t"
   9.396 -	"je 3f\n\t"
   9.397 -	"incl %0\n"
   9.398 -	"2:\tdecl %1\n\t"
   9.399 -	"cmpl $-1,%1\n\t"
   9.400 -	"jne 1b\n"
   9.401 -	"3:\tsubl %2,%0"
   9.402 -	:"=a" (__res), "=&d" (d0)
   9.403 -	:"c" (s),"1" (count));
   9.404 -return __res;
   9.405 -}
   9.406 -/* end of additional stuff */
   9.407 -
   9.408 -//#define __HAVE_ARCH_STRSTR
   9.409 -
   9.410 -//extern char *strstr(const char *cs, const char *ct);
   9.411 -
   9.412 -/*
   9.413 - * This looks horribly ugly, but the compiler can optimize it totally,
   9.414 - * as we by now know that both pattern and count is constant..
   9.415 - */
   9.416 -static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
   9.417 -{
   9.418 -	switch (count) {
   9.419 -		case 0:
   9.420 -			return s;
   9.421 -		case 1:
   9.422 -			*(unsigned char *)s = pattern;
   9.423 -			return s;
   9.424 -		case 2:
   9.425 -			*(unsigned short *)s = pattern;
   9.426 -			return s;
   9.427 -		case 3:
   9.428 -			*(unsigned short *)s = pattern;
   9.429 -			*(2+(unsigned char *)s) = pattern;
   9.430 -			return s;
   9.431 -		case 4:
   9.432 -			*(unsigned long *)s = pattern;
   9.433 -			return s;
   9.434 -	}
   9.435 -#define COMMON(x) \
   9.436 -__asm__  __volatile__( \
   9.437 -	"rep ; stosl" \
   9.438 -	x \
   9.439 -	: "=&c" (d0), "=&D" (d1) \
   9.440 -	: "a" (pattern),"0" (count/4),"1" ((long) s) \
   9.441 -	: "memory")
   9.442 -{
   9.443 -	int d0, d1;
   9.444 -	switch (count % 4) {
   9.445 -		case 0: COMMON(""); return s;
   9.446 -		case 1: COMMON("\n\tstosb"); return s;
   9.447 -		case 2: COMMON("\n\tstosw"); return s;
   9.448 -		default: COMMON("\n\tstosw\n\tstosb"); return s;
   9.449 -	}
   9.450 -}
   9.451 -  
   9.452 -#undef COMMON
   9.453 -}
   9.454 -
   9.455 -#define __constant_c_x_memset(s, c, count) \
   9.456 -(__builtin_constant_p(count) ? \
   9.457 - __constant_c_and_count_memset((s),(c),(count)) : \
   9.458 - __constant_c_memset((s),(c),(count)))
   9.459 -
   9.460 -#define __memset(s, c, count) \
   9.461 -(__builtin_constant_p(count) ? \
   9.462 - __constant_count_memset((s),(c),(count)) : \
   9.463 - __memset_generic((s),(c),(count)))
   9.464 -
   9.465 -#define __HAVE_ARCH_MEMSET
   9.466 -#define memset(s, c, count) \
   9.467 -(__builtin_constant_p(c) ? \
   9.468 - __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
   9.469 - __memset((s),(c),(count)))
   9.470 -
   9.471 -/*
   9.472 - * find the first occurrence of byte 'c', or 1 past the area if none
   9.473 - */
   9.474 -#define __HAVE_ARCH_MEMSCAN
   9.475 -static inline void * memscan(void * addr, int c, size_t size)
   9.476 -{
   9.477 -	if (!size)
   9.478 -		return addr;
   9.479 -	__asm__("repnz; scasb\n\t"
   9.480 -		"jnz 1f\n\t"
   9.481 -		"dec %%edi\n"
   9.482 -		"1:"
   9.483 -		: "=D" (addr), "=c" (size)
   9.484 -		: "0" (addr), "1" (size), "a" (c));
   9.485 -	return addr;
   9.486 -}
   9.487 -
   9.488 +#ifdef __x86_64__
   9.489 +#include <asm/x86_64/string.h>
   9.490 +#else
   9.491 +#include <asm/x86_32/string.h>
   9.492  #endif
    10.1 --- a/xen/include/asm-x86/types.h	Tue Jun 29 14:01:49 2004 +0000
    10.2 +++ b/xen/include/asm-x86/types.h	Tue Jun 29 16:18:51 2004 +0000
    10.3 @@ -3,7 +3,6 @@
    10.4  
    10.5  typedef unsigned short umode_t;
    10.6  
    10.7 -typedef unsigned int size_t;
    10.8  
    10.9  /*
   10.10   * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
   10.11 @@ -44,10 +43,12 @@ typedef unsigned int u32;
   10.12  typedef signed long long s64;
   10.13  typedef unsigned long long u64;
   10.14  #define BITS_PER_LONG 32
   10.15 +typedef unsigned int size_t;
   10.16  #elif defined(__x86_64__)
   10.17  typedef signed long s64;
   10.18  typedef unsigned long u64;
   10.19  #define BITS_PER_LONG 64
   10.20 +typedef unsigned long size_t;
   10.21  #endif
   10.22  
   10.23  /* DMA addresses come in generic and 64-bit flavours.  */
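Moving the `size_t` typedef into the per-word-size blocks keeps it matching the native word on each architecture. An illustrative compile-time check in the C89 style available at the time (not part of this changeset):

```c
/* Fails to compile (negative array size) if size_t is not word-sized. */
typedef char size_t_is_word_sized[sizeof(size_t) == sizeof(long) ? 1 : -1];
```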
    11.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.2 +++ b/xen/include/asm-x86/x86_32/string.h	Tue Jun 29 16:18:51 2004 +0000
    11.3 @@ -0,0 +1,485 @@
    11.4 +#ifndef _I386_STRING_H_
    11.5 +#define _I386_STRING_H_
    11.6 +
    11.7 +#include <xen/config.h>
    11.8 +
    11.9 +/*
   11.10 + * This string-include defines all string functions as inline
   11.11 + * functions. Use gcc. It also assumes ds=es=data space, this should be
   11.12 + * normal. Most of the string-functions are rather heavily hand-optimized,
   11.13 + * see especially strtok,strstr,str[c]spn. They should work, but are not
   11.14 + * very easy to understand. Everything is done entirely within the register
   11.15 + * set, making the functions fast and clean. String instructions have been
   11.16 + * used through-out, making for "slightly" unclear code :-)
   11.17 + *
   11.18 + *		NO Copyright (C) 1991, 1992 Linus Torvalds,
   11.19 + *		consider these trivial functions to be PD.
   11.20 + */
   11.21 +
   11.22 +
   11.23 +#define __HAVE_ARCH_STRCPY
   11.24 +static inline char * strcpy(char * dest,const char *src)
   11.25 +{
   11.26 +int d0, d1, d2;
   11.27 +__asm__ __volatile__(
   11.28 +	"1:\tlodsb\n\t"
   11.29 +	"stosb\n\t"
   11.30 +	"testb %%al,%%al\n\t"
   11.31 +	"jne 1b"
   11.32 +	: "=&S" (d0), "=&D" (d1), "=&a" (d2)
   11.33 +	:"0" (src),"1" (dest) : "memory");
   11.34 +return dest;
   11.35 +}
   11.36 +
   11.37 +#define __HAVE_ARCH_STRNCPY
   11.38 +static inline char * strncpy(char * dest,const char *src,size_t count)
   11.39 +{
   11.40 +int d0, d1, d2, d3;
   11.41 +__asm__ __volatile__(
   11.42 +	"1:\tdecl %2\n\t"
   11.43 +	"js 2f\n\t"
   11.44 +	"lodsb\n\t"
   11.45 +	"stosb\n\t"
   11.46 +	"testb %%al,%%al\n\t"
   11.47 +	"jne 1b\n\t"
   11.48 +	"rep\n\t"
   11.49 +	"stosb\n"
   11.50 +	"2:"
   11.51 +	: "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
   11.52 +	:"0" (src),"1" (dest),"2" (count) : "memory");
   11.53 +return dest;
   11.54 +}
   11.55 +
   11.56 +#define __HAVE_ARCH_STRCAT
   11.57 +static inline char * strcat(char * dest,const char * src)
   11.58 +{
   11.59 +int d0, d1, d2, d3;
   11.60 +__asm__ __volatile__(
   11.61 +	"repne\n\t"
   11.62 +	"scasb\n\t"
   11.63 +	"decl %1\n"
   11.64 +	"1:\tlodsb\n\t"
   11.65 +	"stosb\n\t"
   11.66 +	"testb %%al,%%al\n\t"
   11.67 +	"jne 1b"
   11.68 +	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
   11.69 +	: "0" (src), "1" (dest), "2" (0), "3" (0xffffffff):"memory");
   11.70 +return dest;
   11.71 +}
   11.72 +
   11.73 +#define __HAVE_ARCH_STRNCAT
   11.74 +static inline char * strncat(char * dest,const char * src,size_t count)
   11.75 +{
   11.76 +int d0, d1, d2, d3;
   11.77 +__asm__ __volatile__(
   11.78 +	"repne\n\t"
   11.79 +	"scasb\n\t"
   11.80 +	"decl %1\n\t"
   11.81 +	"movl %8,%3\n"
   11.82 +	"1:\tdecl %3\n\t"
   11.83 +	"js 2f\n\t"
   11.84 +	"lodsb\n\t"
   11.85 +	"stosb\n\t"
   11.86 +	"testb %%al,%%al\n\t"
   11.87 +	"jne 1b\n"
   11.88 +	"2:\txorl %2,%2\n\t"
   11.89 +	"stosb"
   11.90 +	: "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
   11.91 +	: "0" (src),"1" (dest),"2" (0),"3" (0xffffffff), "g" (count)
   11.92 +	: "memory");
   11.93 +return dest;
   11.94 +}
   11.95 +
   11.96 +#define __HAVE_ARCH_STRCMP
   11.97 +static inline int strcmp(const char * cs,const char * ct)
   11.98 +{
   11.99 +int d0, d1;
  11.100 +register int __res;
  11.101 +__asm__ __volatile__(
  11.102 +	"1:\tlodsb\n\t"
  11.103 +	"scasb\n\t"
  11.104 +	"jne 2f\n\t"
  11.105 +	"testb %%al,%%al\n\t"
  11.106 +	"jne 1b\n\t"
  11.107 +	"xorl %%eax,%%eax\n\t"
  11.108 +	"jmp 3f\n"
  11.109 +	"2:\tsbbl %%eax,%%eax\n\t"
  11.110 +	"orb $1,%%al\n"
  11.111 +	"3:"
  11.112 +	:"=a" (__res), "=&S" (d0), "=&D" (d1)
  11.113 +		     :"1" (cs),"2" (ct));
  11.114 +return __res;
  11.115 +}
  11.116 +
  11.117 +#define __HAVE_ARCH_STRNCMP
  11.118 +static inline int strncmp(const char * cs,const char * ct,size_t count)
  11.119 +{
  11.120 +register int __res;
  11.121 +int d0, d1, d2;
  11.122 +__asm__ __volatile__(
  11.123 +	"1:\tdecl %3\n\t"
  11.124 +	"js 2f\n\t"
  11.125 +	"lodsb\n\t"
  11.126 +	"scasb\n\t"
  11.127 +	"jne 3f\n\t"
  11.128 +	"testb %%al,%%al\n\t"
  11.129 +	"jne 1b\n"
  11.130 +	"2:\txorl %%eax,%%eax\n\t"
  11.131 +	"jmp 4f\n"
  11.132 +	"3:\tsbbl %%eax,%%eax\n\t"
  11.133 +	"orb $1,%%al\n"
  11.134 +	"4:"
  11.135 +		     :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
  11.136 +		     :"1" (cs),"2" (ct),"3" (count));
  11.137 +return __res;
  11.138 +}
  11.139 +
  11.140 +#define __HAVE_ARCH_STRCHR
  11.141 +static inline char * strchr(const char * s, int c)
  11.142 +{
  11.143 +int d0;
  11.144 +register char * __res;
  11.145 +__asm__ __volatile__(
  11.146 +	"movb %%al,%%ah\n"
  11.147 +	"1:\tlodsb\n\t"
  11.148 +	"cmpb %%ah,%%al\n\t"
  11.149 +	"je 2f\n\t"
  11.150 +	"testb %%al,%%al\n\t"
  11.151 +	"jne 1b\n\t"
  11.152 +	"movl $1,%1\n"
  11.153 +	"2:\tmovl %1,%0\n\t"
  11.154 +	"decl %0"
  11.155 +	:"=a" (__res), "=&S" (d0) : "1" (s),"0" (c));
  11.156 +return __res;
  11.157 +}
  11.158 +
  11.159 +#define __HAVE_ARCH_STRRCHR
  11.160 +static inline char * strrchr(const char * s, int c)
  11.161 +{
  11.162 +int d0, d1;
  11.163 +register char * __res;
  11.164 +__asm__ __volatile__(
  11.165 +	"movb %%al,%%ah\n"
  11.166 +	"1:\tlodsb\n\t"
  11.167 +	"cmpb %%ah,%%al\n\t"
  11.168 +	"jne 2f\n\t"
  11.169 +	"leal -1(%%esi),%0\n"
  11.170 +	"2:\ttestb %%al,%%al\n\t"
  11.171 +	"jne 1b"
  11.172 +	:"=g" (__res), "=&S" (d0), "=&a" (d1) :"0" (0),"1" (s),"2" (c));
  11.173 +return __res;
  11.174 +}
  11.175 +
  11.176 +#define __HAVE_ARCH_STRLEN
  11.177 +static inline size_t strlen(const char * s)
  11.178 +{
  11.179 +int d0;
  11.180 +register int __res;
  11.181 +__asm__ __volatile__(
  11.182 +	"repne\n\t"
  11.183 +	"scasb\n\t"
  11.184 +	"notl %0\n\t"
  11.185 +	"decl %0"
  11.186 +	:"=c" (__res), "=&D" (d0) :"1" (s),"a" (0), "0" (0xffffffff));
  11.187 +return __res;
  11.188 +}
  11.189 +
  11.190 +static inline void * __memcpy(void * to, const void * from, size_t n)
  11.191 +{
  11.192 +int d0, d1, d2;
  11.193 +__asm__ __volatile__(
  11.194 +	"rep ; movsl\n\t"
  11.195 +	"testb $2,%b4\n\t"
  11.196 +	"je 1f\n\t"
  11.197 +	"movsw\n"
  11.198 +	"1:\ttestb $1,%b4\n\t"
  11.199 +	"je 2f\n\t"
  11.200 +	"movsb\n"
  11.201 +	"2:"
  11.202 +	: "=&c" (d0), "=&D" (d1), "=&S" (d2)
  11.203 +	:"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
  11.204 +	: "memory");
  11.205 +return (to);
  11.206 +}
  11.207 +
  11.208 +/*
  11.209 + * This looks horribly ugly, but the compiler can optimize it totally,
  11.210 + * as the count is constant.
  11.211 + */
  11.212 +static inline void * __constant_memcpy(void * to, const void * from, size_t n)
  11.213 +{
  11.214 +	switch (n) {
  11.215 +		case 0:
  11.216 +			return to;
  11.217 +		case 1:
  11.218 +			*(unsigned char *)to = *(const unsigned char *)from;
  11.219 +			return to;
  11.220 +		case 2:
  11.221 +			*(unsigned short *)to = *(const unsigned short *)from;
  11.222 +			return to;
  11.223 +		case 3:
  11.224 +			*(unsigned short *)to = *(const unsigned short *)from;
  11.225 +			*(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
  11.226 +			return to;
  11.227 +		case 4:
  11.228 +			*(unsigned long *)to = *(const unsigned long *)from;
  11.229 +			return to;
  11.230 +		case 6:	/* for Ethernet addresses */
  11.231 +			*(unsigned long *)to = *(const unsigned long *)from;
  11.232 +			*(2+(unsigned short *)to) = *(2+(const unsigned short *)from);
  11.233 +			return to;
  11.234 +		case 8:
  11.235 +			*(unsigned long *)to = *(const unsigned long *)from;
  11.236 +			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
  11.237 +			return to;
  11.238 +		case 12:
  11.239 +			*(unsigned long *)to = *(const unsigned long *)from;
  11.240 +			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
  11.241 +			*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
  11.242 +			return to;
  11.243 +		case 16:
  11.244 +			*(unsigned long *)to = *(const unsigned long *)from;
  11.245 +			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
  11.246 +			*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
  11.247 +			*(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
  11.248 +			return to;
  11.249 +		case 20:
  11.250 +			*(unsigned long *)to = *(const unsigned long *)from;
  11.251 +			*(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
  11.252 +			*(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
  11.253 +			*(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
  11.254 +			*(4+(unsigned long *)to) = *(4+(const unsigned long *)from);
  11.255 +			return to;
  11.256 +	}
  11.257 +#define COMMON(x) \
  11.258 +__asm__ __volatile__( \
  11.259 +	"rep ; movsl" \
  11.260 +	x \
  11.261 +	: "=&c" (d0), "=&D" (d1), "=&S" (d2) \
  11.262 +	: "0" (n/4),"1" ((long) to),"2" ((long) from) \
  11.263 +	: "memory");
  11.264 +{
  11.265 +	int d0, d1, d2;
  11.266 +	switch (n % 4) {
  11.267 +		case 0: COMMON(""); return to;
  11.268 +		case 1: COMMON("\n\tmovsb"); return to;
  11.269 +		case 2: COMMON("\n\tmovsw"); return to;
  11.270 +		default: COMMON("\n\tmovsw\n\tmovsb"); return to;
  11.271 +	}
  11.272 +}
  11.273 +  
  11.274 +#undef COMMON
  11.275 +}
  11.276 +
  11.277 +#define __HAVE_ARCH_MEMCPY
  11.278 +
  11.279 +#define memcpy(t, f, n) \
  11.280 +(__builtin_constant_p(n) ? \
  11.281 + __constant_memcpy((t),(f),(n)) : \
  11.282 + __memcpy((t),(f),(n)))
  11.283 +
  11.284 +
  11.285 +/*
  11.286 + * struct_cpy(x,y), copy structure *x into (matching structure) *y.
  11.287 + *
  11.288 + * We get link-time errors if the structure sizes do not match.
  11.289 + * There is no runtime overhead, it's all optimized away at
  11.290 + * compile time.
  11.291 + */
  11.292 +//extern void __struct_cpy_bug (void);
  11.293 +
  11.294 +/*
  11.295 +#define struct_cpy(x,y) 			\
  11.296 +({						\
  11.297 +	if (sizeof(*(x)) != sizeof(*(y))) 	\
  11.298 +		__struct_cpy_bug;		\
  11.299 +	memcpy(x, y, sizeof(*(x)));		\
  11.300 +})
  11.301 +*/
  11.302 +
  11.303 +#define __HAVE_ARCH_MEMMOVE
  11.304 +static inline void * memmove(void * dest,const void * src, size_t n)
  11.305 +{
  11.306 +int d0, d1, d2;
  11.307 +if (dest<src)
  11.308 +__asm__ __volatile__(
  11.309 +	"rep\n\t"
  11.310 +	"movsb"
  11.311 +	: "=&c" (d0), "=&S" (d1), "=&D" (d2)
  11.312 +	:"0" (n),"1" (src),"2" (dest)
  11.313 +	: "memory");
  11.314 +else
  11.315 +__asm__ __volatile__(
  11.316 +	"std\n\t"
  11.317 +	"rep\n\t"
  11.318 +	"movsb\n\t"
  11.319 +	"cld"
  11.320 +	: "=&c" (d0), "=&S" (d1), "=&D" (d2)
  11.321 +	:"0" (n),
  11.322 +	 "1" (n-1+(const char *)src),
  11.323 +	 "2" (n-1+(char *)dest)
  11.324 +	:"memory");
  11.325 +return dest;
  11.326 +}
  11.327 +
  11.328 +#define __HAVE_ARCH_MEMCMP
  11.329 +#define memcmp __builtin_memcmp
  11.330 +
  11.331 +#define __HAVE_ARCH_MEMCHR
  11.332 +static inline void * memchr(const void * cs,int c,size_t count)
  11.333 +{
  11.334 +int d0;
  11.335 +register void * __res;
  11.336 +if (!count)
  11.337 +	return NULL;
  11.338 +__asm__ __volatile__(
  11.339 +	"repne\n\t"
  11.340 +	"scasb\n\t"
  11.341 +	"je 1f\n\t"
  11.342 +	"movl $1,%0\n"
  11.343 +	"1:\tdecl %0"
  11.344 +	:"=D" (__res), "=&c" (d0) : "a" (c),"0" (cs),"1" (count));
  11.345 +return __res;
  11.346 +}
  11.347 +
  11.348 +static inline void * __memset_generic(void * s, char c,size_t count)
  11.349 +{
  11.350 +int d0, d1;
  11.351 +__asm__ __volatile__(
  11.352 +	"rep\n\t"
  11.353 +	"stosb"
  11.354 +	: "=&c" (d0), "=&D" (d1)
  11.355 +	:"a" (c),"1" (s),"0" (count)
  11.356 +	:"memory");
  11.357 +return s;
  11.358 +}
  11.359 +
  11.360 +/* we might want to write optimized versions of these later */
  11.361 +#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
  11.362 +
  11.363 +/*
  11.364 + * memset(x,0,y) is a reasonably common thing to do, so we want to fill
  11.365 + * things 32 bits at a time even when we don't know the size of the
  11.366 + * area at compile-time..
  11.367 + */
  11.368 +static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
  11.369 +{
  11.370 +int d0, d1;
  11.371 +__asm__ __volatile__(
  11.372 +	"rep ; stosl\n\t"
  11.373 +	"testb $2,%b3\n\t"
  11.374 +	"je 1f\n\t"
  11.375 +	"stosw\n"
  11.376 +	"1:\ttestb $1,%b3\n\t"
  11.377 +	"je 2f\n\t"
  11.378 +	"stosb\n"
  11.379 +	"2:"
  11.380 +	: "=&c" (d0), "=&D" (d1)
  11.381 +	:"a" (c), "q" (count), "0" (count/4), "1" ((long) s)
  11.382 +	:"memory");
  11.383 +return (s);	
  11.384 +}
  11.385 +
  11.386 +/* Added by Gertjan van Wingerde to make minix and sysv module work */
  11.387 +#define __HAVE_ARCH_STRNLEN
  11.388 +static inline size_t strnlen(const char * s, size_t count)
  11.389 +{
  11.390 +int d0;
  11.391 +register int __res;
  11.392 +__asm__ __volatile__(
  11.393 +	"movl %2,%0\n\t"
  11.394 +	"jmp 2f\n"
  11.395 +	"1:\tcmpb $0,(%0)\n\t"
  11.396 +	"je 3f\n\t"
  11.397 +	"incl %0\n"
  11.398 +	"2:\tdecl %1\n\t"
  11.399 +	"cmpl $-1,%1\n\t"
  11.400 +	"jne 1b\n"
  11.401 +	"3:\tsubl %2,%0"
  11.402 +	:"=a" (__res), "=&d" (d0)
  11.403 +	:"c" (s),"1" (count));
  11.404 +return __res;
  11.405 +}
  11.406 +/* end of additional stuff */
  11.407 +
  11.408 +//#define __HAVE_ARCH_STRSTR
  11.409 +
  11.410 +//extern char *strstr(const char *cs, const char *ct);
  11.411 +
  11.412 +/*
  11.413 + * This looks horribly ugly, but the compiler can optimize it totally,
  11.414 + * as we by now know that both pattern and count is constant..
  11.415 + */
  11.416 +static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
  11.417 +{
  11.418 +	switch (count) {
  11.419 +		case 0:
  11.420 +			return s;
  11.421 +		case 1:
  11.422 +			*(unsigned char *)s = pattern;
  11.423 +			return s;
  11.424 +		case 2:
  11.425 +			*(unsigned short *)s = pattern;
  11.426 +			return s;
  11.427 +		case 3:
  11.428 +			*(unsigned short *)s = pattern;
  11.429 +			*(2+(unsigned char *)s) = pattern;
  11.430 +			return s;
  11.431 +		case 4:
  11.432 +			*(unsigned long *)s = pattern;
  11.433 +			return s;
  11.434 +	}
  11.435 +#define COMMON(x) \
  11.436 +__asm__  __volatile__( \
  11.437 +	"rep ; stosl" \
  11.438 +	x \
  11.439 +	: "=&c" (d0), "=&D" (d1) \
  11.440 +	: "a" (pattern),"0" (count/4),"1" ((long) s) \
  11.441 +	: "memory")
  11.442 +{
  11.443 +	int d0, d1;
  11.444 +	switch (count % 4) {
  11.445 +		case 0: COMMON(""); return s;
  11.446 +		case 1: COMMON("\n\tstosb"); return s;
  11.447 +		case 2: COMMON("\n\tstosw"); return s;
  11.448 +		default: COMMON("\n\tstosw\n\tstosb"); return s;
  11.449 +	}
  11.450 +}
  11.451 +  
  11.452 +#undef COMMON
  11.453 +}
  11.454 +
  11.455 +#define __constant_c_x_memset(s, c, count) \
  11.456 +(__builtin_constant_p(count) ? \
  11.457 + __constant_c_and_count_memset((s),(c),(count)) : \
  11.458 + __constant_c_memset((s),(c),(count)))
  11.459 +
  11.460 +#define __memset(s, c, count) \
  11.461 +(__builtin_constant_p(count) ? \
  11.462 + __constant_count_memset((s),(c),(count)) : \
  11.463 + __memset_generic((s),(c),(count)))
  11.464 +
  11.465 +#define __HAVE_ARCH_MEMSET
  11.466 +#define memset(s, c, count) \
  11.467 +(__builtin_constant_p(c) ? \
  11.468 + __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
  11.469 + __memset((s),(c),(count)))
  11.470 +
  11.471 +/*
  11.472 + * find the first occurrence of byte 'c', or 1 past the area if none
  11.473 + */
  11.474 +#define __HAVE_ARCH_MEMSCAN
  11.475 +static inline void * memscan(void * addr, int c, size_t size)
  11.476 +{
  11.477 +	if (!size)
  11.478 +		return addr;
  11.479 +	__asm__("repnz; scasb\n\t"
  11.480 +		"jnz 1f\n\t"
  11.481 +		"dec %%edi\n"
  11.482 +		"1:"
  11.483 +		: "=D" (addr), "=c" (size)
  11.484 +		: "0" (addr), "1" (size), "a" (c));
  11.485 +	return addr;
  11.486 +}
  11.487 +
  11.488 +#endif
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/xen/include/asm-x86/x86_64/string.h	Tue Jun 29 16:18:51 2004 +0000
    12.3 @@ -0,0 +1,69 @@
    12.4 +#ifndef _X86_64_STRING_H_
    12.5 +#define _X86_64_STRING_H_
    12.6 +
    12.7 +#ifdef __KERNEL__
    12.8 +
    12.9 +#define struct_cpy(x,y) (*(x)=*(y))
   12.10 +
   12.11 +/* Written 2002 by Andi Kleen */ 
   12.12 +
   12.13 +/* Only used for special circumstances. Stolen from i386/string.h */ 
   12.14 +static inline void * __inline_memcpy(void * to, const void * from, size_t n)
   12.15 +{
   12.16 +unsigned long d0, d1, d2;
   12.17 +__asm__ __volatile__(
   12.18 +	"rep ; movsl\n\t"
   12.19 +	"testb $2,%b4\n\t"
   12.20 +	"je 1f\n\t"
   12.21 +	"movsw\n"
   12.22 +	"1:\ttestb $1,%b4\n\t"
   12.23 +	"je 2f\n\t"
   12.24 +	"movsb\n"
   12.25 +	"2:"
   12.26 +	: "=&c" (d0), "=&D" (d1), "=&S" (d2)
   12.27 +	:"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
   12.28 +	: "memory");
   12.29 +return (to);
   12.30 +}
   12.31 +
   12.32 +/* Even with __builtin_ the compiler may decide to use the out of line
   12.33 +   function. */
   12.34 +
   12.35 +#define __HAVE_ARCH_MEMCPY 1
   12.36 +extern void *__memcpy(void *to, const void *from, size_t len); 
   12.37 +#define memcpy(dst,src,len) \
   12.38 +	({ size_t __len = (len);				\
   12.39 +	   void *__ret;						\
   12.40 +	   if (__builtin_constant_p(len) && __len >= 64)	\
   12.41 +		 __ret = __memcpy((dst),(src),__len);		\
   12.42 +	   else							\
   12.43 +		 __ret = __builtin_memcpy((dst),(src),__len);	\
   12.44 +	   __ret; }) 
   12.45 +
   12.46 +
   12.47 +#define __HAVE_ARCH_MEMSET
   12.48 +#define memset __builtin_memset
   12.49 +
   12.50 +#define __HAVE_ARCH_MEMMOVE
   12.51 +void * memmove(void * dest,const void *src,size_t count);
   12.52 +
   12.53 +/* Use C out of line version for memcmp */ 
   12.54 +#define memcmp __builtin_memcmp
   12.55 +int memcmp(const void * cs,const void * ct,size_t count);
   12.56 +
   12.57 +/* out of line string functions use always C versions */ 
   12.58 +#define strlen __builtin_strlen
   12.59 +size_t strlen(const char * s);
   12.60 +
   12.61 +#define strcpy __builtin_strcpy
   12.62 +char * strcpy(char * dest,const char *src);
   12.63 +
   12.64 +#define strcat __builtin_strcat
   12.65 +char * strcat(char * dest, const char * src);
   12.66 +
   12.67 +#define strcmp __builtin_strcmp
   12.68 +int strcmp(const char * cs,const char * ct);
   12.69 +
   12.70 +#endif /* __KERNEL__ */
   12.71 +
   12.72 +#endif
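On x86-64 the policy flips toward the compiler's builtins, with one carve-out for large constant copies. How the memcpy macro above resolves (illustrative):

```c
/* memcpy dispatch on x86-64 (illustrative):
 *   memcpy(dst, src, 16);   constant, < 64  -> __builtin_memcpy, typically
 *                           inlined to a couple of moves;
 *   memcpy(dst, src, 128);  constant, >= 64 -> out-of-line __memcpy;
 *   memcpy(dst, src, n);    non-constant    -> __builtin_memcpy, which the
 *                           compiler may keep inline or emit as a call. */
```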
    13.1 --- a/xen/include/xen/mm.h	Tue Jun 29 14:01:49 2004 +0000
    13.2 +++ b/xen/include/xen/mm.h	Tue Jun 29 16:18:51 2004 +0000
    13.3 @@ -86,7 +86,7 @@ struct pfn_info
    13.4  #define PageSetSlab(page)	((void)0)
    13.5  #define PageClearSlab(page)	((void)0)
    13.6  
    13.7 -#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < MAX_XENHEAP_ADDRESS)
    13.8 +#define IS_XEN_HEAP_FRAME(_pfn) (page_to_phys(_pfn) < xenheap_phys_end)
    13.9  
   13.10  #define SHARE_PFN_WITH_DOMAIN(_pfn, _dom)                                   \
   13.11      do {                                                                    \
   13.12 @@ -104,7 +104,7 @@ extern struct list_head free_list;
   13.13  extern spinlock_t free_list_lock;
   13.14  extern unsigned int free_pfns;
   13.15  extern unsigned long max_page;
   13.16 -void init_frametable(unsigned long nr_pages);
   13.17 +void init_frametable(void *frametable_vstart, unsigned long nr_pages);
   13.18  void add_to_domain_alloc_list(unsigned long ps, unsigned long pe);
   13.19  
   13.20  struct pfn_info *alloc_domain_page(struct domain *p);