ia64/xen-unstable

changeset 66:b5d2cff54653

bitkeeper revision 1.7.3.41 (3e0304e3ruV7OqrhZfYs29ld8-OyuA)

Keir's get_unmapped_area fixes, plus a few of my own
author lynx@idefix.cl.cam.ac.uk
date Fri Dec 20 11:54:11 2002 +0000 (2002-12-20)
parents 654b3fe81bb9
children df03ff4a14a1
files xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h
line diff
     1.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Fri Dec 20 10:48:30 2002 +0000
     1.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/drivers/dom0/dom0_memory.c	Fri Dec 20 11:54:11 2002 +0000
     1.3 @@ -20,18 +20,14 @@
     1.4  #define MAP_CONT    0
     1.5  #define MAP_DISCONT 1
     1.6  
     1.7 +extern struct list_head * find_direct(struct list_head *, unsigned long);
     1.8 +
     1.9  /* now, this is grimm, kmalloc seems to have problems allocating small mem
    1.10   * blocks, so i have decided to use fixed (a bit) larger blocks... this needs
    1.11   * to be traced down but no time now.
    1.12   */
    1.13  #define KMALLOC_SIZE	128
    1.14  
    1.15 -/*
    1.16 - * maps a range of physical memory into the requested pages. the old
    1.17 - * mappings are removed. any references to nonexistent pages results
    1.18 - * in null mappings (currently treated as "copy-on-access")
    1.19 - */
    1.20 -
    1.21  /* bd240: functions below perform direct mapping to the real physical pages needed for
    1.22   * mapping various hypervisor specific structures needed in dom0 userspace by various
    1.23   * management applications such as domain builder etc.
    1.24 @@ -169,6 +165,7 @@ unsigned long direct_mmap(unsigned long 
    1.25                  pgprot_t prot, int flag, int tot_pages)
    1.26  {
    1.27      direct_mmap_node_t * dmmap;
    1.28 +    struct list_head * entry;
    1.29      unsigned long addr;
    1.30      int ret = 0;
    1.31      
    1.32 @@ -184,12 +181,19 @@ unsigned long direct_mmap(unsigned long 
    1.33          goto out;
    1.34      }
    1.35  
    1.36 -    /* add node on the list of directly mapped areas */ 
    1.37 +    /* add node on the list of directly mapped areas, make sure the
    1.38 +     * list remains sorted.
    1.39 +     */
    1.40      //dmmap = (direct_mmap_node_t *)kmalloc(GFP_KERNEL, sizeof(direct_mmap_node_t));
    1.41 +    dmmap = (direct_mmap_node_t *)kmalloc(KMALLOC_SIZE, GFP_KERNEL);
    1.42 -
    1.43 -    dmmap->addr = addr;
    1.44 -    list_add(&dmmap->list, &current->mm->context.direct_list);
    1.45 +    dmmap->vm_start = addr;
    1.46 +    dmmap->vm_end = addr + size;
    1.47 +    entry = find_direct(&current->mm->context.direct_list, addr);
    1.48 +    if(entry != &current->mm->context.direct_list){
    1.49 +        list_add_tail(&dmmap->list, entry);
    1.50 +    } else {
    1.51 +        list_add(&dmmap->list, &current->mm->context.direct_list);
    1.52 +    }
    1.53  
    1.54      /* and perform the mapping */
    1.55      if(flag == MAP_DISCONT){
     2.1 --- a/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c	Fri Dec 20 10:48:30 2002 +0000
     2.2 +++ b/xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c	Fri Dec 20 11:54:11 2002 +0000
     2.3 @@ -14,15 +14,13 @@
     2.4  #include <asm/uaccess.h>
     2.5  #include <asm/pgalloc.h>
     2.6  
     2.7 +/*
     2.8  static int direct_mapped(unsigned long addr)
     2.9  {
    2.10      direct_mmap_node_t * node;
    2.11      struct list_head * curr;
    2.12      struct list_head * direct_list = &current->mm->context.direct_list;
    2.13  
    2.14 -    /* now, this loop is going to make things slow, maybe should think
    2.15 -     * of a better way to implement it, maybe without list_head
    2.16 -     */
    2.17      curr = direct_list->next;
    2.18      while(curr != direct_list){
    2.19          node = list_entry(curr, direct_mmap_node_t, list);
    2.20 @@ -36,7 +34,8 @@ static int direct_mapped(unsigned long a
    2.21  
    2.22      return 1;
    2.23  }
    2.24 -
    2.25 +*/
    2.26 +/*
    2.27  unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
    2.28  {
    2.29  	struct vm_area_struct *vma;
    2.30 @@ -54,15 +53,9 @@ unsigned long arch_get_unmapped_area(str
    2.31  	addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
    2.32  
    2.33  	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
    2.34 -		/* At this point:  (!vma || addr < vma->vm_end). */
    2.35  		if (TASK_SIZE - len < addr)
    2.36  			return -ENOMEM;
    2.37          
    2.38 -        /* here we check whether the vma is big enough and we also check
    2.39 -         * whether it has already been direct mapped, in which case it
    2.40 -         * is not available. this is the only difference to generic
    2.41 -         * arch_get_unmapped_area. 
    2.42 -         */
    2.43  		if(current->pid > 100){
    2.44  		printk(KERN_ALERT "bd240 debug: gua: vm addr found %lx\n", addr);
    2.45  			printk(KERN_ALERT "bd240 debug: gua: first condition %d, %lx, %lx\n",vma, addr + len, vma->vm_start);
    2.46 @@ -74,3 +67,72 @@ unsigned long arch_get_unmapped_area(str
    2.47          addr = vma->vm_end;
    2.48  	}
    2.49  }
    2.50 +*/
     2.51 +struct list_head *find_direct(struct list_head *list, unsigned long addr)
     2.52 +{
     2.53 +    struct list_head * curr; direct_mmap_node_t * node;
     2.54 +    for ( curr = list->next; curr != list; curr = curr->next )
     2.55 +    {
     2.56 +        node = list_entry(curr, direct_mmap_node_t, list);
     2.57 +        if( node->vm_start > addr ) break;
     2.58 +    }
     2.59 +    return curr;
     2.60 +}
    2.61 +
    2.62 +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long
    2.63 +addr, unsigned long len, unsigned long pgoff, unsigned long flags)
    2.64 +{
    2.65 +    struct vm_area_struct *vma;
    2.66 +    direct_mmap_node_t * node;
    2.67 +    struct list_head * curr;
    2.68 +    struct list_head * direct_list = &current->mm->context.direct_list;
    2.69 +
    2.70 +    if (len > TASK_SIZE)
    2.71 +        return -ENOMEM;
    2.72 +
    2.73 +    if ( addr )
    2.74 +    {
    2.75 +        addr = PAGE_ALIGN(addr);
    2.76 +        vma = find_vma(current->mm, addr);
    2.77 +        curr = find_direct(direct_list, addr);
    2.78 +        node = list_entry(curr, direct_mmap_node_t, list);
    2.79 +        if ( (TASK_SIZE - len >= addr) &&
    2.80 +             (!vma || addr + len <= vma->vm_start) &&
    2.81 +             ((curr == direct_list) || addr + len <= node->vm_start) )
    2.82 +            return addr;
    2.83 +    }
    2.84 +
    2.85 +    addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
    2.86 +
    2.87 +
    2.88 +    /* Find first VMA and direct_map nodes with vm_start > addr */
    2.89 +    vma  = find_vma(current->mm, addr);
    2.90 +    curr = find_direct(direct_list, addr);
    2.91 +    node = list_entry(curr, direct_mmap_node_t, list);
    2.92 +
    2.93 +    for ( ; ; )
    2.94 +    {
    2.95 +        if ( TASK_SIZE - len < addr ) return -ENOMEM;
    2.96 +
    2.97 +        if ( vma && (vma->vm_start < node->vm_start) )
    2.98 +        {
    2.99 +            /* Do we fit before VMA node? */
   2.100 +            if ( addr + len <= vma->vm_start ) return addr;
   2.101 +            addr = vma->vm_end;
   2.102 +            vma = vma->vm_next;
   2.103 +        }
   2.104 +        else if ( curr != direct_list )
   2.105 +        {
   2.106 +            /* Do we fit before direct_map node? */
   2.107 +            if ( addr + len <= node->vm_start) return addr;
   2.108 +            addr = node->vm_end;
   2.109 +            curr = curr->next;
   2.110 +            node = list_entry(curr, direct_mmap_node_t, list);
   2.111 +        }
   2.112 +        else
   2.113 +        {
   2.114 +            /* Reached end of VMA and direct_map lists */
   2.115 +            return addr;
   2.116 +        }
   2.117 +    }
   2.118 +}
     3.1 --- a/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h	Fri Dec 20 10:48:30 2002 +0000
     3.2 +++ b/xenolinux-2.4.16-sparse/include/asm-xeno/mmu.h	Fri Dec 20 11:54:11 2002 +0000
     3.3 @@ -7,7 +7,8 @@
     3.4  /* describes dirrectly mapped vma nodes */
     3.5  typedef struct {
     3.6      struct list_head list;
     3.7 -    unsigned long addr;
     3.8 +    unsigned long vm_start;
     3.9 +    unsigned long vm_end;
    3.10  } direct_mmap_node_t;
    3.11  
    3.12  /*