ia64/linux-2.6.18-xen.hg: arch/m68k/kernel/sys_m68k.c @ 452:c7ed6fe5dca0

/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
#include <asm/page.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long __user * fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
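
/*
 * For comparison: the "traditional" scheme alluded to above is
 * returning both descriptors directly in registers, as some other
 * Linux ports (e.g. MIPS, SPARC) do; m68k instead copies both fds
 * out through the single user pointer.
 */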

/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file * file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system calls.
 * Linux/m68k cloned Linux/i386, which could not originally handle
 * more than 4 system call parameters, so these system calls use a
 * memory block for parameter passing.
 */

struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	int error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;

	error = -EINVAL;
	if (a.offset & ~PAGE_MASK)
		goto out;

	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
	return error;
}
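
/*
 * Illustrative only (not in the original source): a userspace wrapper
 * for this old entry point packs all six arguments into one
 * mmap_arg_struct and passes a single pointer, roughly
 *
 *	struct mmap_arg_struct a = {
 *		.addr = 0, .len = length,
 *		.prot = PROT_READ | PROT_WRITE,
 *		.flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *		.fd = -1, .offset = 0,
 *	};
 *	ret = syscall(SYS_mmap, &a);
 *
 * while sys_mmap2() above takes the arguments individually, with the
 * offset already expressed in pages.
 */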

#if 0
struct mmap_arg_struct64 {
	__u32 addr;
	__u32 len;
	__u32 prot;
	__u32 flags;
	__u64 offset; /* 64 bits */
	__u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
	int error = -EFAULT;
	struct file * file = NULL;
	struct mmap_arg_struct64 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	if ((long)a.offset & ~PAGE_MASK)
		return -EINVAL;

	pgoff = a.offset >> PAGE_SHIFT;
	if ((a.offset >> PAGE_SHIFT) != pgoff)
		return -EINVAL;

	if (!(a.flags & MAP_ANONYMOUS)) {
		error = -EBADF;
		file = fget(a.fd);
		if (!file)
			goto out;
	}
	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}
#endif

struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

asmlinkage int old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -ENOSYS;
		}
	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}
	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
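
/*
 * Illustration (not from this file): the C library hides this
 * multiplexing.  A semop(3) call such as
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	semop(semid, &op, 1);
 *
 * typically enters the kernel as sys_ipc(SEMOP, semid, 1, 0, &op, 0)
 * and is dispatched to sys_semop() by the first switch above.
 */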

/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
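
/*
 * "ptestr (An)" makes the 68040 MMU search the translation tables for
 * a read access to the given address and latch the result in %mmusr;
 * MMU_R_040 is the resident bit.  The macro therefore yields the
 * physical page address for a mapped page and 0 for an unmapped one,
 * which is what the flush loops below test for.
 */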

static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

#define virt_to_phys_060(vaddr)					\
({								\
	unsigned long paddr;					\
	__asm__ __volatile__ (".chip 68060\n\t"			\
			      "plpar (%0)\n\t"			\
			      ".chip 68k"			\
			      : "=a" (paddr)			\
			      : "0" (vaddr));			\
	(paddr); /* XXX */					\
})
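
/*
 * "plpar (An)" is the 68060 "load physical address (read)"
 * instruction; it translates the logical address in the address
 * register to a physical address in place.  The callers below treat a
 * result of 0 as "page not mapped", which is presumably what the XXX
 * above is about.
 */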

static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope)
	{
	case FLUSH_SCOPE_ALL:
		switch (cache)
		{
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range. */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;)
			{
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--)
		{
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len)
			{
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;

				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary. */
				for (;;)
				{
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			}
			else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		addr &= PAGE_MASK;	/* Workaround for bug in some
					   revisions of the 68060 */
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
		{
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache)
			{
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow. */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}

	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */

		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
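
/*
 * Illustrative userspace usage (not part of this file): a JIT that
 * has just written instructions into a buffer would push them out of
 * the data cache and discard stale instruction-cache lines with
 * something like
 *
 *	cacheflush((unsigned long) buf, FLUSH_SCOPE_LINE,
 *		   FLUSH_CACHE_BOTH, buflen);
 *
 * Note that for large lengths the code above silently widens the
 * requested scope to a page or whole-cache flush.
 */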

asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}