ia64/xen-unstable

changeset 95:f72ea5eac57b

bitkeeper revision 1.15.1.7 (3e2b0be3giRAXsN5jAmCONe953mK9w)

page.h, page_alloc.c, mm.c:
Fixed PGEXT_INVLPG, plus a few other cleanups.
author kaf24@labyrinth.cl.cam.ac.uk
date Sun Jan 19 20:34:43 2003 +0000 (2003-01-19)
parents e84c63b9a807
children f7ff141acc2a a8063692097a
files xen-2.4.16/arch/i386/mm.c xen-2.4.16/common/page_alloc.c xen-2.4.16/include/asm-i386/page.h
line diff
     1.1 --- a/xen-2.4.16/arch/i386/mm.c	Sun Jan 19 16:05:41 2003 +0000
     1.2 +++ b/xen-2.4.16/arch/i386/mm.c	Sun Jan 19 20:34:43 2003 +0000
     1.3 @@ -102,7 +102,8 @@ long do_stack_and_ldt_switch(
     1.4  
     1.5      if ( ldts != current->mm.ldt_sel )
     1.6      {
     1.7 -        unsigned long *ptabent = GET_GDT_ADDRESS(current);
     1.8 +        unsigned long *ptabent;
     1.9 +        ptabent = (unsigned long *)GET_GDT_ADDRESS(current);
    1.10          /* Out of range for GDT table? */
    1.11          if ( (ldts * 8) > GET_GDT_ENTRIES(current) ) return -1;
    1.12          ptabent += ldts * 2; /* 8 bytes per desc == 2 * unsigned long */
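
The mm.c hunk only changes how the GDT base is obtained: GET_GDT_ADDRESS(current) is now cast explicitly to unsigned long * in a separate statement, and the descriptor arithmetic that follows is unchanged. A minimal sketch of that arithmetic is below, with gdt_base and gdt_limit standing in for GET_GDT_ADDRESS() and GET_GDT_ENTRIES(); ldt_descriptor and both parameter names are placeholders, not Xen macros.

    /* Locate the 8-byte descriptor selected by ldts, assuming 32-bit x86
     * where one descriptor == two unsigned longs.  Returns NULL when the
     * selector indexes past the end of the table, mirroring the -1 return
     * in do_stack_and_ldt_switch above. */
    static unsigned long *ldt_descriptor(unsigned long gdt_base,
                                         unsigned int  gdt_limit,
                                         unsigned int  ldts)
    {
        unsigned long *ent = (unsigned long *)gdt_base;
        if ( (ldts * 8) > gdt_limit )      /* out of range for GDT table? */
            return NULL;
        return ent + (ldts * 2);           /* 8 bytes per desc == 2 words */
    }
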
     2.1 --- a/xen-2.4.16/common/page_alloc.c	Sun Jan 19 16:05:41 2003 +0000
     2.2 +++ b/xen-2.4.16/common/page_alloc.c	Sun Jan 19 20:34:43 2003 +0000
     2.3 @@ -12,7 +12,7 @@
     2.4  #include <xeno/lib.h>
     2.5  #include <asm/page.h>
     2.6  #include <xeno/spinlock.h>
     2.7 -
     2.8 +#include <xeno/slab.h>
     2.9  
    2.10  static spinlock_t alloc_lock = SPIN_LOCK_UNLOCKED;
    2.11  
    2.12 @@ -102,7 +102,7 @@ struct chunk_tail_st {
    2.13  
    2.14  /* Linked lists of free chunks of different powers-of-two in size. */
    2.15  #define FREELIST_SIZE ((sizeof(void*)<<3)-PAGE_SHIFT)
    2.16 -static chunk_head_t *free_list[FREELIST_SIZE];
    2.17 +static chunk_head_t *free_head[FREELIST_SIZE];
    2.18  static chunk_head_t  free_tail[FREELIST_SIZE];
    2.19  #define FREELIST_EMPTY(_l) ((_l)->next == NULL)
    2.20  
    2.21 @@ -120,8 +120,8 @@ void __init init_page_allocator(unsigned
    2.22  
    2.23      for ( i = 0; i < FREELIST_SIZE; i++ )
    2.24      {
    2.25 -        free_list[i]       = &free_tail[i];
    2.26 -        free_tail[i].pprev = &free_list[i];
    2.27 +        free_head[i]       = &free_tail[i];
    2.28 +        free_tail[i].pprev = &free_head[i];
    2.29          free_tail[i].next  = NULL;
    2.30      }
    2.31  
    2.32 @@ -159,10 +159,10 @@ void __init init_page_allocator(unsigned
    2.33          ct = (chunk_tail_t *)min-1;
    2.34          i -= PAGE_SHIFT;
    2.35          ch->level       = i;
    2.36 -        ch->next        = free_list[i];
    2.37 -        ch->pprev       = &free_list[i];
    2.38 +        ch->next        = free_head[i];
    2.39 +        ch->pprev       = &free_head[i];
    2.40          ch->next->pprev = &ch->next;
    2.41 -        free_list[i]    = ch;
    2.42 +        free_head[i]    = ch;
    2.43          ct->level       = i;
    2.44      }
    2.45  }
    2.46 @@ -182,15 +182,15 @@ retry:
    2.47  
    2.48      /* Find smallest order which can satisfy the request. */
    2.49      for ( i = order; i < FREELIST_SIZE; i++ ) {
    2.50 -	if ( !FREELIST_EMPTY(free_list[i]) ) 
    2.51 +	if ( !FREELIST_EMPTY(free_head[i]) ) 
    2.52  	    break;
    2.53      }
    2.54  
    2.55      if ( i == FREELIST_SIZE ) goto no_memory;
    2.56   
    2.57      /* Unlink a chunk. */
    2.58 -    alloc_ch = free_list[i];
    2.59 -    free_list[i] = alloc_ch->next;
    2.60 +    alloc_ch = free_head[i];
    2.61 +    free_head[i] = alloc_ch->next;
    2.62      alloc_ch->next->pprev = alloc_ch->pprev;
    2.63  
    2.64      /* We may have to break the chunk a number of times. */
    2.65 @@ -203,13 +203,13 @@ retry:
    2.66  
    2.67          /* Create new header for spare chunk. */
    2.68          spare_ch->level = i;
    2.69 -        spare_ch->next  = free_list[i];
    2.70 -        spare_ch->pprev = &free_list[i];
    2.71 +        spare_ch->next  = free_head[i];
    2.72 +        spare_ch->pprev = &free_head[i];
    2.73          spare_ct->level = i;
    2.74  
    2.75          /* Link in the spare chunk. */
    2.76          spare_ch->next->pprev = &spare_ch->next;
    2.77 -        free_list[i] = spare_ch;
    2.78 +        free_head[i] = spare_ch;
    2.79      }
    2.80      
    2.81      map_alloc(__pa(alloc_ch)>>PAGE_SHIFT, 1<<order);
    2.82 @@ -279,10 +279,10 @@ void __free_pages(unsigned long p, int o
    2.83      ct = (chunk_tail_t *)(p+size)-1;
    2.84      ct->level = order;
    2.85      ch->level = order;
    2.86 -    ch->pprev = &free_list[order];
    2.87 -    ch->next  = free_list[order];
    2.88 +    ch->pprev = &free_head[order];
    2.89 +    ch->next  = free_head[order];
    2.90      ch->next->pprev = &ch->next;
    2.91 -    free_list[order] = ch;
    2.92 +    free_head[order] = ch;
    2.93  
    2.94      spin_unlock_irqrestore(&alloc_lock, flags);
    2.95  }
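
Every page_alloc.c hunk is part of the same cleanup: the array of list heads is renamed from free_list to free_head (presumably to read as the counterpart of free_tail and to stay clear of names pulled in by the new <xeno/slab.h> include). The list discipline itself is untouched: free_head[i] points at the first free chunk of order i, free_tail[i] is a sentinel terminator, and each chunk's pprev points at whatever pointer currently references it, i.e. the previous chunk's next field, or free_head[i] itself for the first chunk. A minimal sketch of that linkage follows, under assumed names; NR_LISTS, init_lists, push_chunk and pop_chunk are illustrative, not Xen code.

    /* Free-list layout implied by the diff: one head pointer per order,
     * one sentinel tail node per order, and pprev pointers that address
     * the previous *link* rather than the previous node. */
    typedef struct chunk_head_st chunk_head_t;
    struct chunk_head_st {
        chunk_head_t  *next;
        chunk_head_t **pprev;
        unsigned int   level;
    };

    #define NR_LISTS 20                    /* stands in for FREELIST_SIZE */
    static chunk_head_t *free_head[NR_LISTS];
    static chunk_head_t  free_tail[NR_LISTS];

    static void init_lists(void)
    {
        unsigned int i;
        for ( i = 0; i < NR_LISTS; i++ )
        {
            free_head[i]       = &free_tail[i];
            free_tail[i].pprev = &free_head[i];
            free_tail[i].next  = NULL;     /* NULL next marks the sentinel */
        }
    }

    /* Link a chunk at the front of list i, as __free_pages does. */
    static void push_chunk(chunk_head_t *ch, unsigned int i)
    {
        ch->level       = i;
        ch->next        = free_head[i];
        ch->pprev       = &free_head[i];
        ch->next->pprev = &ch->next;       /* old head now points back at ch */
        free_head[i]    = ch;
    }

    /* Unlink the first chunk of list i, as __get_free_pages does.  The
     * caller is assumed to have checked FREELIST_EMPTY() first, as the
     * real allocator does before unlinking. */
    static chunk_head_t *pop_chunk(unsigned int i)
    {
        chunk_head_t *ch = free_head[i];
        free_head[i] = ch->next;
        ch->next->pprev = ch->pprev;
        return ch;
    }
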
     3.1 --- a/xen-2.4.16/include/asm-i386/page.h	Sun Jan 19 16:05:41 2003 +0000
     3.2 +++ b/xen-2.4.16/include/asm-i386/page.h	Sun Jan 19 20:34:43 2003 +0000
     3.3 @@ -123,8 +123,8 @@ extern void paging_init(void);
     3.4  			: "memory");					\
     3.5  	} while (0)
     3.6  
     3.7 -#define __flush_tlb_one(addr) \
     3.8 -__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))
     3.9 +#define __flush_tlb_one(__addr) \
    3.10 +__asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
    3.11  
    3.12  #endif /* !__ASSEMBLY__ */
    3.13
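
The page.h change is the PGEXT_INVLPG fix named in the commit message: the macro argument is renamed to __addr and, more importantly, parenthesised inside the cast. Without the parentheses, a call such as __flush_tlb_one(a + b) expands its operand to *(char *) a + b: the cast and dereference bind only to a, so the "m" operand is an arithmetic value rather than the byte at a + b, and invlpg is not given the intended address. Presumably the PGEXT_INVLPG path passes exactly such a compound expression. A short sketch of the difference; flush_two and its parameters are illustrative, not Xen code.

    /* The fixed macro, identical to the + side of the diff above. */
    #define __flush_tlb_one(__addr) \
    __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))

    /* With the old definition, the second call expanded its operand to
     * *(char *) base + off, i.e. the cast covered only 'base'. */
    static inline void flush_two(unsigned long base, unsigned long off)
    {
        __flush_tlb_one(base);        /* fine with either definition     */
        __flush_tlb_one(base + off);  /* correct only with the new macro */
    }
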