ia64/linux-2.6.18-xen.hg

view arch/sparc64/kernel/sys_sparc.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently if the balloon driver is unable to increase the guest's
reservation it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host and therefore it is desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up and therefore there is temporary memory
pressure while things stabilise. You would not expect a well-behaved
toolstack to ask a domain to balloon to more than its allocation, nor
would you expect it to deliberately over-commit memory by setting
balloon targets which exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), then we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100
parents 831230e53067
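
The retry behaviour this patch describes amounts to the loop below. This is a minimal sketch, assuming hypothetical names (balloon_stats, increase_reservation(), balloon_timer) rather than the driver's actual identifiers:

    /* Hypothetical sketch of the balloon-up retry loop described above. */
    static void balloon_process(void)
    {
            long credit = balloon_stats.target_pages - balloon_stats.current_pages;

            if (credit > 0)
                    /* Keep whatever pages Xen grants, even on partial
                     * success; there is no "hard limit" to record. */
                    balloon_stats.current_pages += increase_reservation(credit);

            /* Not at target yet (e.g. host memory pressure)?  Retry on
             * a timer, exactly as when decreasing the reservation. */
            if (balloon_stats.current_pages != balloon_stats.target_pages)
                    mod_timer(&balloon_timer, jiffies + HZ);
    }

On partial success current_pages still advances, so the timer simply re-fires until the target is met or changed.
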
line source
/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
 * linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/ipc.h>
#include <asm/utrap.h>
#include <asm/perfctr.h>
#include <asm/a.out.h>

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
        return PAGE_SIZE;
}

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
        unsigned long va_exclude_start, va_exclude_end;

        va_exclude_start = VA_EXCLUDE_START;
        va_exclude_end = VA_EXCLUDE_END;

        if (unlikely(len >= va_exclude_start))
                return 1;

        if (unlikely((addr + len) < addr))
                return 1;

        if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
                     ((addr + len) >= va_exclude_start &&
                      (addr + len) < va_exclude_end)))
                return 1;

        return 0;
}

/* Does start,end straddle the VA-space hole? */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
        unsigned long va_exclude_start, va_exclude_end;

        va_exclude_start = VA_EXCLUDE_START;
        va_exclude_end = VA_EXCLUDE_END;

        if (likely(start < va_exclude_start && end < va_exclude_start))
                return 0;

        if (likely(start >= va_exclude_end && end >= va_exclude_end))
                return 0;

        return 1;
}

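/* Illustrative note (not in the original source): with the constants
 * above, 1UL << 32 is 4GB, so the excluded region runs from
 * VA_EXCLUDE_START = 0x0000080000000000 - 4GB = 0x000007ff00000000
 * up to VA_EXCLUDE_END = 0xfffff80000000000 + 4GB = 0xfffff80100000000,
 * i.e. the hardware VA hole plus a 4GB guard band on either side.
 */
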
/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOUR_ALIGN(unsigned long addr,
                                         unsigned long pgoff)
{
        unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        if (base + off <= addr)
                return base + off;
        return base - off;
}

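/* Worked example (illustrative only; assumes SHMLBA == 0x4000 and 8K
 * pages for the sake of concrete numbers): COLOUR_ALIGN(0x10042000, 3)
 * rounds the address up to an SHMLBA boundary (0x10044000), then adds
 * the offset's colour bits ((3 << 13) & 0x3fff == 0x2000), returning
 * 0x10046000 so the mapping shares a D-cache colour with the page
 * cache copy of the same file offset.
 */
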
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct * vma;
        unsigned long task_size = TASK_SIZE;
        unsigned long start_addr;
        int do_color_align;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
        if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        task_size -= len;

full_search:
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (addr < VA_EXCLUDE_START &&
                    (addr + len) >= VA_EXCLUDE_START) {
                        addr = VA_EXCLUDE_END;
                        vma = find_vma(mm, VA_EXCLUDE_END);
                }
                if (unlikely(task_size < addr)) {
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                               const unsigned long len, const unsigned long pgoff,
                               const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long task_size = STACK_TOP32;
        unsigned long addr = addr0;
        int do_color_align;

        /* This should only ever run for 32-bit processes. */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > task_size))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_color_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

                addr = base + len;
        }

        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
                }
        }

        if (unlikely(mm->mmap_base < len))
                goto bottomup;

        addr = mm->mmap_base-len;
        if (do_color_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (likely(!vma || addr+len <= vma->vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (likely(len < vma->vm_start));

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        unsigned long align_goal, addr = -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Ok, don't mess with it. */
                return get_unmapped_area(NULL, addr, len, pgoff, flags);
        }
        flags &= ~MAP_SHARED;

        align_goal = PAGE_SIZE;
        if (len >= (4UL * 1024 * 1024))
                align_goal = (4UL * 1024 * 1024);
        else if (len >= (512UL * 1024))
                align_goal = (512UL * 1024);
        else if (len >= (64UL * 1024))
                align_goal = (64UL * 1024);

        do {
                addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
                if (!(addr & ~PAGE_MASK)) {
                        addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
                        break;
                }

                if (align_goal == (4UL * 1024 * 1024))
                        align_goal = (512UL * 1024);
                else if (align_goal == (512UL * 1024))
                        align_goal = (64UL * 1024);
                else
                        align_goal = PAGE_SIZE;
        } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

        /* Mapping is smaller than 64K or larger areas could not
         * be obtained.
         */
        if (addr & ~PAGE_MASK)
                addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);

        return addr;
}

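/* Illustrative note (not in the original source): the loop above
 * over-allocates by (align_goal - PAGE_SIZE) so that some address in
 * the returned window is guaranteed to be align_goal-aligned, then
 * rounds up to that boundary.  E.g. for a 1MB framebuffer mapping with
 * 8K pages, align_goal is 512K: the search asks for 1MB + 504K and
 * rounds the result up to the next 512K boundary, falling back to
 * smaller goals (64K, then PAGE_SIZE) if the larger window is not
 * available.
 */
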
/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                if (test_thread_flag(TIF_32BIT))
                        random_factor &= ((1 * 1024 * 1024) - 1);
                else
                        random_factor = ((random_factor << PAGE_SHIFT) &
                                         0xffffffffUL);
        }

        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (!test_thread_flag(TIF_32BIT) ||
            (current->personality & ADDR_COMPAT_LAYOUT) ||
            current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
            sysctl_legacy_va_layout) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                /* We know it's 32-bit */
                unsigned long task_size = STACK_TOP32;
                unsigned long gap;

                gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
                if (gap < 128 * 1024 * 1024)
                        gap = 128 * 1024 * 1024;
                if (gap > (task_size / 6 * 5))
                        gap = (task_size / 6 * 5);

                mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}

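/* Worked example (illustrative only): a 32-bit task with the common
 * 8MB RLIMIT_STACK gets its gap clamped up to 128MB, so with no
 * randomization mmap_base = PAGE_ALIGN(STACK_TOP32 - 128MB) and the
 * topdown allocator is used; an unlimited stack rlimit instead forces
 * the legacy bottom-up layout above.
 */
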
asmlinkage unsigned long sparc_brk(unsigned long brk)
{
        /* People could try to be nasty and use ta 0x6d in 32bit programs */
        if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
                return current->mm->brk;

        if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
                return current->mm->brk;

        return sys_brk(brk);
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage long sparc_pipe(struct pt_regs *regs)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (error)
                goto out;
        regs->u_regs[UREG_I1] = fd[1];
        error = fd[0];
out:
        return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
                        unsigned long third, void __user *ptr, long fifth)
{
        int err;

        /* No need for backward compatibility. We can start fresh... */
        /* SEMTIMEDOP sorts after SEMCTL in linux/ipc.h, so compare
         * against it here; testing call <= SEMCTL would make the
         * SEMTIMEDOP case below unreachable.
         */
        if (call <= SEMTIMEDOP) {
                switch (call) {
                case SEMOP:
                        err = sys_semtimedop(first, ptr,
                                             (unsigned)second, NULL);
                        goto out;
                case SEMTIMEDOP:
                        err = sys_semtimedop(first, ptr, (unsigned)second,
                                (const struct timespec __user *) fifth);
                        goto out;
                case SEMGET:
                        err = sys_semget(first, (int)second, (int)third);
                        goto out;
                case SEMCTL: {
                        union semun fourth;
                        err = -EINVAL;
                        if (!ptr)
                                goto out;
                        err = -EFAULT;
                        if (get_user(fourth.__pad,
                                     (void __user * __user *) ptr))
                                goto out;
                        err = sys_semctl(first, (int)second | IPC_64,
                                         (int)third, fourth);
                        goto out;
                }
                default:
                        err = -ENOSYS;
                        goto out;
                };
        }
        if (call <= MSGCTL) {
                switch (call) {
                case MSGSND:
                        err = sys_msgsnd(first, ptr, (size_t)second,
                                         (int)third);
                        goto out;
                case MSGRCV:
                        err = sys_msgrcv(first, ptr, (size_t)second, fifth,
                                         (int)third);
                        goto out;
                case MSGGET:
                        err = sys_msgget((key_t)first, (int)second);
                        goto out;
                case MSGCTL:
                        err = sys_msgctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                };
        }
        if (call <= SHMCTL) {
                switch (call) {
                case SHMAT: {
                        ulong raddr;
                        err = do_shmat(first, ptr, (int)second, &raddr);
                        if (!err) {
                                if (put_user(raddr,
                                             (ulong __user *) third))
                                        err = -EFAULT;
                        }
                        goto out;
                }
                case SHMDT:
                        err = sys_shmdt(ptr);
                        goto out;
                case SHMGET:
                        err = sys_shmget(first, (size_t)second, (int)third);
                        goto out;
                case SHMCTL:
                        err = sys_shmctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                };
        } else {
                err = -ENOSYS;
        }
out:
        return err;
}

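/* Illustrative note (not in the original source): the IPC call numbers
 * from linux/ipc.h group naturally by range -- SEMOP..SEMTIMEDOP are
 * 1..4, MSGSND..MSGCTL are 11..14 and SHMAT..SHMCTL are 21..24 --
 * which is what the cascaded range checks above rely on.
 */
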
asmlinkage long sparc64_newuname(struct new_utsname __user *name)
{
        int ret = sys_newuname(name);

        if (current->personality == PER_LINUX32 && !ret) {
                ret = (copy_to_user(name->machine, "sparc\0\0", 8)
                       ? -EFAULT : 0);
        }
        return ret;
}

asmlinkage long sparc64_personality(unsigned long personality)
{
        int ret;

        if (current->personality == PER_LINUX32 &&
            personality == PER_LINUX)
                personality = PER_LINUX32;
        ret = sys_personality(personality);
        if (ret == PER_LINUX32)
                ret = PER_LINUX;

        return ret;
}

int sparc64_mmap_check(unsigned long addr, unsigned long len,
                       unsigned long flags)
{
        if (test_thread_flag(TIF_32BIT)) {
                if (len >= STACK_TOP32)
                        return -EINVAL;

                if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
                        return -EINVAL;
        } else {
                if (len >= VA_EXCLUDE_START)
                        return -EINVAL;

                if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
                        return -EINVAL;
        }

        return 0;
}

/* Linux version of mmap */
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long off)
{
        struct file * file = NULL;
        unsigned long retval = -EBADF;

        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }
        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        len = PAGE_ALIGN(len);

        down_write(&current->mm->mmap_sem);
        retval = do_mmap(file, addr, len, prot, flags, off);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return retval;
}

asmlinkage long sys64_munmap(unsigned long addr, size_t len)
{
        long ret;

        if (invalid_64bit_range(addr, len))
                return -EINVAL;

        down_write(&current->mm->mmap_sem);
        ret = do_munmap(current->mm, addr, len);
        up_write(&current->mm->mmap_sem);
        return ret;
}

extern unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr);

asmlinkage unsigned long sys64_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;

        if (test_thread_flag(TIF_32BIT))
                goto out;
        if (unlikely(new_len >= VA_EXCLUDE_START))
                goto out;
        if (unlikely(invalid_64bit_range(addr, old_len)))
                goto out;

        down_write(&current->mm->mmap_sem);
        if (flags & MREMAP_FIXED) {
                if (invalid_64bit_range(new_addr, new_len))
                        goto out_sem;
        } else if (invalid_64bit_range(addr, new_len)) {
                unsigned long map_flags = 0;
                struct file *file = NULL;

                ret = -ENOMEM;
                if (!(flags & MREMAP_MAYMOVE))
                        goto out_sem;

                vma = find_vma(current->mm, addr);
                if (vma) {
                        if (vma->vm_flags & VM_SHARED)
                                map_flags |= MAP_SHARED;
                        file = vma->vm_file;
                }

                /* MREMAP_FIXED checked above. */
                new_addr = get_unmapped_area(file, addr, new_len,
                                             vma ? vma->vm_pgoff : 0,
                                             map_flags);
                ret = new_addr;
                if (new_addr & ~PAGE_MASK)
                        goto out_sem;
                flags |= MREMAP_FIXED;
        }
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
        up_write(&current->mm->mmap_sem);
out:
        return ret;
}

/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
        static int count;

        /* Don't make the system unusable if something gets stuck */
        if (count++ > 5)
                return -ENOSYS;

        printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
        show_regs (regs);
#endif

        return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
        siginfo_t info;

        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = (void __user *)regs->tpc;
        info.si_trapno = 0;
        force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

asmlinkage long sys_getdomainname(char __user *name, int len)
{
        int nlen, err;

        if (len < 0)
                return -EINVAL;

        down_read(&uts_sem);

        nlen = strlen(system_utsname.domainname) + 1;
        err = -EINVAL;
        if (nlen > len)
                goto out;

        err = -EFAULT;
        if (!copy_to_user(name, system_utsname.domainname, nlen))
                err = 0;

out:
        up_read(&uts_sem);
        return err;
}

asmlinkage long solaris_syscall(struct pt_regs *regs)
{
        static int count;

        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
        if (++count <= 5) {
                printk ("For Solaris binary emulation you need the solaris module loaded\n");
                show_regs (regs);
        }
        send_sig(SIGSEGV, current, 1);

        return -ENOSYS;
}

#ifndef CONFIG_SUNOS_EMUL
asmlinkage long sunos_syscall(struct pt_regs *regs)
{
        static int count;

        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
        if (++count <= 20)
                printk ("SunOS binary emulation not compiled in\n");
        force_sig(SIGSEGV, current);

        return -ENOSYS;
}
#endif

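/* Illustrative note (not in the original source): in this scheme
 * utraps[0] serves as a reference count on the per-thread utrap table,
 * which is why sys_utrap_install below clones the table (copy-on-write
 * style) before modifying it whenever the count is above 1.
 */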
asmlinkage long sys_utrap_install(utrap_entry_t type,
                                  utrap_handler_t new_p,
                                  utrap_handler_t new_d,
                                  utrap_handler_t __user *old_p,
                                  utrap_handler_t __user *old_d)
{
        if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
                return -EINVAL;
        if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
                if (old_p) {
                        if (!current_thread_info()->utraps) {
                                if (put_user(NULL, old_p))
                                        return -EFAULT;
                        } else {
                                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                                        return -EFAULT;
                        }
                }
                if (old_d) {
                        if (put_user(NULL, old_d))
                                return -EFAULT;
                }
                return 0;
        }
        if (!current_thread_info()->utraps) {
                current_thread_info()->utraps =
                        kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
                if (!current_thread_info()->utraps)
                        return -ENOMEM;
                current_thread_info()->utraps[0] = 1;
        } else {
                if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
                    current_thread_info()->utraps[0] > 1) {
                        long *p = current_thread_info()->utraps;

                        current_thread_info()->utraps =
                                kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
                                        GFP_KERNEL);
                        if (!current_thread_info()->utraps) {
                                current_thread_info()->utraps = p;
                                return -ENOMEM;
                        }
                        p[0]--;
                        current_thread_info()->utraps[0] = 1;
                        memcpy(current_thread_info()->utraps+1, p+1,
                               UT_TRAP_INSTRUCTION_31*sizeof(long));
                }
        }
        if (old_p) {
                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                        return -EFAULT;
        }
        if (old_d) {
                if (put_user(NULL, old_d))
                        return -EFAULT;
        }
        current_thread_info()->utraps[type] = (long)new_p;

        return 0;
}

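/* Illustrative note (not in the original source): the SPARC V9 TSTATE
 * register encodes the memory model in a two-bit MM field at bit 14,
 * with 0 = TSO, 1 = PSO and 2 = RMO, which is why models >= 3 are
 * rejected below and the chosen model is shifted left by 14.
 */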
long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
{
        if (model >= 3)
                return -EINVAL;
        regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
        return 0;
}

asmlinkage long sys_rt_sigaction(int sig,
                                 const struct sigaction __user *act,
                                 struct sigaction __user *oact,
                                 void __user *restorer,
                                 size_t sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (act) {
                new_ka.ka_restorer = restorer;
                if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
                        return -EFAULT;
        }

        return ret;
}

/* Invoked by rtrap code to update performance counters in
 * user space.
 */
asmlinkage void update_perfctrs(void)
{
        unsigned long pic, tmp;

        read_pic(pic);
        tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
        __put_user(tmp, current_thread_info()->user_cntd0);
        tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
        __put_user(tmp, current_thread_info()->user_cntd1);
        reset_pic();
}

asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
        int err = 0;

        switch(opcode) {
        case PERFCTR_ON:
                current_thread_info()->pcr_reg = arg2;
                current_thread_info()->user_cntd0 = (u64 __user *) arg0;
                current_thread_info()->user_cntd1 = (u64 __user *) arg1;
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                write_pcr(arg2);
                reset_pic();
                set_thread_flag(TIF_PERFCTR);
                break;

        case PERFCTR_OFF:
                err = -EINVAL;
                if (test_thread_flag(TIF_PERFCTR)) {
                        current_thread_info()->user_cntd0 =
                                current_thread_info()->user_cntd1 = NULL;
                        current_thread_info()->pcr_reg = 0;
                        write_pcr(0);
                        clear_thread_flag(TIF_PERFCTR);
                        err = 0;
                }
                break;

        case PERFCTR_READ: {
                unsigned long pic, tmp;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                read_pic(pic);
                tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
                err |= __put_user(tmp, current_thread_info()->user_cntd0);
                tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
                err |= __put_user(tmp, current_thread_info()->user_cntd1);
                reset_pic();
                break;
        }

        case PERFCTR_CLRPIC:
                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                reset_pic();
                break;

        case PERFCTR_SETPCR: {
                u64 __user *user_pcr = (u64 __user *)arg0;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
                write_pcr(current_thread_info()->pcr_reg);
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                reset_pic();
                break;
        }

        case PERFCTR_GETPCR: {
                u64 __user *user_pcr = (u64 __user *)arg0;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
                break;
        }

        default:
                err = -EINVAL;
                break;
        };
        return err;
}
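
/* Hedged usage sketch (not in the original source): through the
 * perfctr system call a profiler would typically start counting with
 *
 *      u64 cnt0, cnt1;
 *      sys_perfctr(PERFCTR_ON, (unsigned long)&cnt0,
 *                  (unsigned long)&cnt1, pcr_value);
 *
 * then read the running totals via PERFCTR_READ and stop with
 * PERFCTR_OFF; pcr_value selects the events, and the exact PCR
 * encoding is CPU-specific (see asm/perfctr.h).
 */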