ia64/xen-unstable

xenolinux-2.4.16-sparse/arch/xeno/mm/get_unmapped_area.c @ 76:ddd3b8b09fea

bitkeeper revision 1.7.3.51 (3e10162at5jecFjh-WeMXPorgJzMcQ)

get_unmapped_area fix
author lynx@idefix.cl.cam.ac.uk
date Mon Dec 30 09:47:22 2002 +0000 (2002-12-30)
parents f30aedb6c2ac
children 024aa2a06c1e
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
/*
static int direct_mapped(unsigned long addr)
{
    direct_mmap_node_t * node;
    struct list_head * curr;
    struct list_head * direct_list = &current->mm->context.direct_list;

    curr = direct_list->next;
    while ( curr != direct_list )
    {
        node = list_entry(curr, direct_mmap_node_t, list);
        if ( node->addr == addr )
            break;
        curr = curr->next;
    }

    if ( curr == direct_list )
        return 0;

    return 1;
}
*/
/*
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
    unsigned long len, unsigned long pgoff, unsigned long flags)
{
    struct vm_area_struct *vma;

    if ( len > TASK_SIZE )
        return -ENOMEM;

    if ( addr )
    {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(current->mm, addr);
        if ( TASK_SIZE - len >= addr &&
             (!vma || addr + len <= vma->vm_start) )
            return addr;
    }
    addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);

    for ( vma = find_vma(current->mm, addr); ; vma = vma->vm_next )
    {
        if ( TASK_SIZE - len < addr )
            return -ENOMEM;

        if ( current->pid > 100 )
        {
            printk(KERN_ALERT "bd240 debug: gua: vm addr found %lx\n", addr);
            printk(KERN_ALERT "bd240 debug: gua: first condition %d, %lx, %lx\n", vma, addr + len, vma->vm_start);
            printk(KERN_ALERT "bd240 debug: gua: second condition %d\n", direct_mapped(addr));
        }
        if ( (!vma || addr + len <= vma->vm_start) && !direct_mapped(addr) )
            return addr;

        addr = vma->vm_end;
    }
}
*/
/* Return the first direct_list node whose region starts at or above addr,
 * or the list head itself if no such node exists. */
struct list_head *find_direct(struct list_head *list, unsigned long addr)
{
    struct list_head * curr;
    struct list_head * direct_list = &current->mm->context.direct_list;
    direct_mmap_node_t * node;

    for ( curr = direct_list->next; curr != direct_list; curr = curr->next )
    {
        node = list_entry(curr, direct_mmap_node_t, list);
        if ( node->vm_start >= addr ) break;
    }

    return curr;
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
                                     unsigned long len, unsigned long pgoff,
                                     unsigned long flags)
{
    struct vm_area_struct *vma;
    direct_mmap_node_t * node;
    struct list_head * curr;
    struct list_head * direct_list = &current->mm->context.direct_list;

    if ( len > TASK_SIZE )
        return -ENOMEM;

    /* If a hint was given, honour it only if it collides with neither an
     * existing VMA nor a direct-mapped region. */
    if ( addr )
    {
        addr = PAGE_ALIGN(addr);
        vma = find_vma(current->mm, addr);
        curr = find_direct(direct_list, addr);
        node = list_entry(curr, direct_mmap_node_t, list);
        if ( (TASK_SIZE - len >= addr) &&
             (!vma || addr + len <= vma->vm_start) &&
             ((curr == direct_list) || addr + len <= node->vm_start) )
            return addr;
    }

    addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);

    /* Find first VMA and direct_map nodes with vm_start > addr */
    vma = find_vma(current->mm, addr);
    curr = find_direct(direct_list, addr);
    node = list_entry(curr, direct_mmap_node_t, list);

    /* Walk the VMA list and the direct_list in parallel, stepping past
     * whichever mapping starts first, until a large enough gap is found. */
    for ( ; ; )
    {
        if ( TASK_SIZE - len < addr ) return -ENOMEM;

        if ( vma && ((curr == direct_list) || (vma->vm_start < node->vm_start)) )
        {
            /* Do we fit before VMA node? */
            if ( addr + len <= vma->vm_start ) return addr;
            addr = vma->vm_end;
            vma = vma->vm_next;
        }
        else if ( curr != direct_list )
        {
            /* Do we fit before direct_map node? */
            if ( addr + len <= node->vm_start ) return addr;
            addr = node->vm_end;
            curr = curr->next;
            node = list_entry(curr, direct_mmap_node_t, list);
        }
        else
        {
            /* !vma && curr == direct_list */
            return addr;
        }
    }
}
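
For reference, here is a minimal sketch of the node type this file relies on. The real direct_mmap_node_t is defined elsewhere in the xenolinux-2.4.16-sparse tree; the struct tag and field layout below are assumptions inferred purely from how find_direct() and arch_get_unmapped_area() use the type (the commented-out direct_mapped() additionally references an addr field), not the actual definition.

/*
 * Illustrative sketch only -- not the real definition.  Each node is
 * assumed to describe one direct-mapped region [vm_start, vm_end) hanging
 * off mm->context.direct_list, with the list assumed to be kept sorted by
 * vm_start so that find_direct() can stop at the first node at or beyond
 * the search address and the allocator walk above can advance through it
 * in address order.
 */
typedef struct direct_mmap_node {
    struct list_head list;      /* linkage on mm->context.direct_list    */
    unsigned long    vm_start;  /* first address of the mapped region    */
    unsigned long    vm_end;    /* first address past the mapped region  */
} direct_mmap_node_t;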