ia64/xen-unstable

view linux-2.4.29-xen-sparse/mm/mprotect.c @ 3602:9a9c5a491401

bitkeeper revision 1.1159.235.1 (42000d3dwcPyT8aY4VIPYGCfCAJuQQ)

More x86/64. Status: traps.c now included in the build, but actual building
of IDT doesn't happen, and we need some sort of entry.S. More page-table
building required so that arch_init_memory() can work. And there is something
odd with MP-table parsing; I currently suspect that __init sections are
causing problems.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@viper.(none)
date Tue Feb 01 23:14:05 2005 +0000 (2005-02-01)
parents 610068179f96
children 0a4b76b6b5a0
/*
 *  linux/mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

static inline void change_pte_range(pmd_t * pmd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (pte_present(*pte)) {
			pte_t entry;

			/* Avoid an SMP race with hardware updated dirty/clean
			 * bits by wiping the pte and then setting the new pte
			 * into place.
			 */
			entry = ptep_get_and_clear(pte);
			set_pte(pte, pte_modify(entry, newprot));
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

static inline void change_pmd_range(pgd_t * pgd, unsigned long address,
	unsigned long size, pgprot_t newprot)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*pgd))
		return;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		return;
	}
	pmd = pmd_offset(pgd, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		change_pte_range(pmd, address, end - address, newprot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
}

static void change_protection(unsigned long start, unsigned long end, pgprot_t newprot)
{
	pgd_t *dir;
	unsigned long beg = start;

	dir = pgd_offset(current->mm, start);
	flush_cache_range(current->mm, beg, end);
	if (start >= end)
		BUG();
	spin_lock(&current->mm->page_table_lock);
	do {
		change_pmd_range(dir, start, end - start, newprot);
		start = (start + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (start && (start < end));
	spin_unlock(&current->mm->page_table_lock);
	flush_tlb_range(current->mm, beg, end);
	return;
}

static inline int mprotect_fixup_all(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * prev = *pprev;
	struct mm_struct * mm = vma->vm_mm;

	if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
	    !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
		spin_lock(&mm->page_table_lock);
		prev->vm_end = vma->vm_end;
		__vma_unlink(mm, vma, prev);
		spin_unlock(&mm->page_table_lock);

		kmem_cache_free(vm_area_cachep, vma);
		mm->map_count--;

		return 0;
	}

	spin_lock(&mm->page_table_lock);
	vma->vm_flags = newflags;
	vma->vm_page_prot = prot;
	spin_unlock(&mm->page_table_lock);

	*pprev = vma;

	return 0;
}

static inline int mprotect_fixup_start(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n, * prev = *pprev;

	*pprev = vma;

	if (prev && prev->vm_end == vma->vm_start && can_vma_merge(prev, newflags) &&
	    !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
		spin_lock(&vma->vm_mm->page_table_lock);
		prev->vm_end = end;
		vma->vm_start = end;
		spin_unlock(&vma->vm_mm->page_table_lock);

		return 0;
	}
	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	n->vm_end = end;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	n->vm_page_prot = prot;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_start = end;
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);

	return 0;
}

static inline int mprotect_fixup_end(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	unsigned long start,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!n)
		return -ENOMEM;
	*n = *vma;
	n->vm_start = start;
	n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	n->vm_page_prot = prot;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_end = start;
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);

	*pprev = n;

	return 0;
}

static inline int mprotect_fixup_middle(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	unsigned long start, unsigned long end,
	int newflags, pgprot_t prot)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -ENOMEM;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -ENOMEM;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	right->vm_start = end;
	right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
	left->vm_raend = 0;
	right->vm_raend = 0;
	if (vma->vm_file)
		atomic_add(2, &vma->vm_file->f_count);
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_raend = 0;
	vma->vm_page_prot = prot;
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_flags = newflags;
	__insert_vm_struct(current->mm, left);
	__insert_vm_struct(current->mm, right);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);

	*pprev = right;

	return 0;
}

static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	pgprot_t newprot;
	int error;

	if (newflags == vma->vm_flags) {
		*pprev = vma;
		return 0;
	}
	newprot = protection_map[newflags & 0xf];
	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			error = mprotect_fixup_all(vma, pprev, newflags, newprot);
		else
			error = mprotect_fixup_start(vma, pprev, end, newflags, newprot);
	} else if (end == vma->vm_end)
		error = mprotect_fixup_end(vma, pprev, start, newflags, newprot);
	else
		error = mprotect_fixup_middle(vma, pprev, start, end, newflags, newprot);

	if (error)
		return error;

	change_protection(start, end, newprot);
	return 0;
}

asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next, * prev;
	int error = -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -ENOMEM;
	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
		return -EINVAL;
	if (end == start)
		return 0;

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma || vma->vm_start > start)
		goto out;

#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
	/* mprotect() unsupported for I/O mappings in Xenolinux. */
	error = -EINVAL;
	if (vma->vm_flags & VM_IO)
		goto out;
#endif

	for (nstart = start ; ; ) {
		unsigned int newflags;
		int last = 0;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = prot | (vma->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
		if ((newflags & ~(newflags >> 4)) & 0xf) {
			error = -EACCES;
			goto out;
		}

		if (vma->vm_end > end) {
			error = mprotect_fixup(vma, &prev, nstart, end, newflags);
			goto out;
		}
		if (vma->vm_end == end)
			last = 1;

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		if (last)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
	if (next && prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags) &&
	    !prev->vm_file && !(prev->vm_flags & VM_SHARED)) {
		spin_lock(&prev->vm_mm->page_table_lock);
		prev->vm_end = next->vm_end;
		__vma_unlink(prev->vm_mm, next, prev);
		spin_unlock(&prev->vm_mm->page_table_lock);

		kmem_cache_free(vm_area_cachep, next);
		prev->vm_mm->map_count--;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
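
For reference, a minimal userspace sketch (not part of this file) that exercises the paths above: protecting the middle pages of an anonymous mapping takes the mprotect_fixup_middle() VMA-split path, and a non-page-aligned start is rejected with EINVAL by the first check in sys_mprotect(). Everything used here is standard POSIX/libc; nothing Xen-specific is assumed. Note also that on a CONFIG_XEN_PRIVILEGED_GUEST build, mprotect() on a VM_IO mapping returns EINVAL per the #ifdef block above.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        /* Anonymous read/write mapping covering four pages (one VMA). */
        char *buf = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* Make the middle two pages read-only: splits the VMA into three
         * (left, middle, right), i.e. the mprotect_fixup_middle() case. */
        if (mprotect(buf + page, 2 * page, PROT_READ) != 0)
            perror("mprotect (middle)");

        /* start is not page-aligned, so sys_mprotect() returns EINVAL. */
        if (mprotect(buf + 1, page, PROT_READ) != 0)
            printf("unaligned start rejected: %s\n", strerror(errno));

        munmap(buf, 4 * page);
        return 0;
    }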