ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/x86_64/ia32/syscall32.c @ 6422:e24fd7012ffb

merge?
author cl349@firebug.cl.cam.ac.uk
date Thu Aug 25 10:09:39 2005 +0000 (2005-08-25)
parents 2f20c2fce2c5 603f55eaa690
children 4abd299ef2f6
line source
/* Copyright 2002,2003 Andi Kleen, SuSE Labs */

/* vsyscall handling for 32bit processes. Map a stub page into it
   on demand because 32bit cannot reach the kernel's fixmaps */
6 #include <linux/mm.h>
7 #include <linux/string.h>
8 #include <linux/kernel.h>
9 #include <linux/gfp.h>
10 #include <linux/init.h>
11 #include <linux/stringify.h>
12 #include <linux/security.h>
13 #include <asm/proto.h>
14 #include <asm/tlbflush.h>
15 #include <asm/ia32_unistd.h>
/* Under Xen, 32-bit processes always enter the kernel via int 0x80. */
#define USE_INT80

#ifdef USE_INT80
/* 32bit VDSOs mapped into user space.
 * The prebuilt vsyscall DSO images are embedded into .init.data via
 * .incbin, each delimited by start/end labels so their size can be
 * computed as (end - start). */
asm(".section \".init.data\",\"aw\"\n"
    "syscall32_int80:\n"
    ".incbin \"arch/xen/x86_64/ia32/vsyscall-int80.so\"\n"
    "syscall32_int80_end:\n"
    "syscall32_syscall:\n"
    ".incbin \"arch/xen/x86_64/ia32/vsyscall-syscall.so\"\n"
    "syscall32_syscall_end:\n"
    "syscall32_sysenter:\n"
    ".incbin \"arch/xen/x86_64/ia32/vsyscall-sysenter.so\"\n"
    "syscall32_sysenter_end:\n"
    ".previous");

extern unsigned char syscall32_int80[], syscall32_int80_end[];
#else
/* 32bit VDSOs mapped into user space. */
asm(".section \".init.data\",\"aw\"\n"
    "syscall32_syscall:\n"
    ".incbin \"arch/xen/x86_64/ia32/vsyscall-syscall.so\"\n"
    "syscall32_syscall_end:\n"
    "syscall32_sysenter:\n"
    ".incbin \"arch/xen/x86_64/ia32/vsyscall-sysenter.so\"\n"
    "syscall32_sysenter_end:\n"
    ".previous");

/* -1 = undecided; resolved in syscall32_cpu_init() from the CPU vendor. */
static int use_sysenter = -1;
#endif

extern unsigned char syscall32_syscall[], syscall32_syscall_end[];
extern unsigned char syscall32_sysenter[], syscall32_sysenter_end[];
extern int sysctl_vsyscall32;

/* Single page holding the chosen vsyscall stub; faulted into each
 * 32-bit process at VSYSCALL32_BASE by syscall32_nopage(). */
char *syscall32_page;
54 static struct page *
55 syscall32_nopage(struct vm_area_struct *vma, unsigned long adr, int *type)
56 {
57 struct page *p = virt_to_page(adr - vma->vm_start + syscall32_page);
58 get_page(p);
59 return p;
60 }
/* Prevent VMA merging: the mere presence of a ->close handler keeps
 * the mm code from merging this VMA with its neighbours.  The body is
 * intentionally empty. */
static void syscall32_vma_close(struct vm_area_struct *vma)
{
}

/* VM operations for the 32-bit vsyscall mapping set up in
 * syscall32_setup_pages(). */
static struct vm_operations_struct syscall32_vm_ops = {
	.close = syscall32_vma_close,
	.nopage = syscall32_nopage,
};
72 struct linux_binprm;
74 /* Setup a VMA at program startup for the vsyscall page */
75 int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
76 {
77 int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
78 struct vm_area_struct *vma;
79 struct mm_struct *mm = current->mm;
81 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
82 if (!vma)
83 return -ENOMEM;
84 if (security_vm_enough_memory(npages)) {
85 kmem_cache_free(vm_area_cachep, vma);
86 return -ENOMEM;
87 }
89 memset(vma, 0, sizeof(struct vm_area_struct));
90 /* Could randomize here */
91 vma->vm_start = VSYSCALL32_BASE;
92 vma->vm_end = VSYSCALL32_END;
93 /* MAYWRITE to allow gdb to COW and set breakpoints */
94 vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYEXEC|VM_MAYWRITE;
95 vma->vm_flags |= mm->def_flags;
96 vma->vm_page_prot = protection_map[vma->vm_flags & 7];
97 vma->vm_ops = &syscall32_vm_ops;
98 vma->vm_mm = mm;
100 down_write(&mm->mmap_sem);
101 insert_vm_struct(mm, vma);
102 mm->total_vm += npages;
103 up_write(&mm->mmap_sem);
104 return 0;
105 }
107 static int __init init_syscall32(void)
108 {
109 syscall32_page = (void *)get_zeroed_page(GFP_KERNEL);
110 if (!syscall32_page)
111 panic("Cannot allocate syscall32 page");
113 #ifdef USE_INT80
114 /*
115 * At this point we use int 0x80.
116 */
117 memcpy(syscall32_page, syscall32_int80,
118 syscall32_int80_end - syscall32_int80);
119 #else
121 if (use_sysenter > 0) {
122 memcpy(syscall32_page, syscall32_sysenter,
123 syscall32_sysenter_end - syscall32_sysenter);
124 } else {
125 memcpy(syscall32_page, syscall32_syscall,
126 syscall32_syscall_end - syscall32_syscall);
127 }
128 #endif
129 return 0;
130 }
/*
 * This must be done early in case we have an initrd containing 32-bit
 * binaries (e.g., hotplug). This could be pushed upstream to arch/x86_64.
 */
core_initcall(init_syscall32);
/* May not be __init: called during resume */
/*
 * Per-CPU setup of the compat-syscall entry MSRs.  Skipped entirely
 * under Xen (USE_INT80), where int 0x80 is the only entry path.
 */
void syscall32_cpu_init(void)
{
#ifndef USE_INT80
	/* Decide once (cached in use_sysenter) whether SYSENTER is usable
	   from compat mode; only Intel CPUs support that here. */
	if (use_sysenter < 0)
		use_sysenter = (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL);

	/* Load these always in case some future AMD CPU supports
	   SYSENTER from compat mode too. */
	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)(__KERNEL_CS | 3));
	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);

	/* SYSCALL (compat) entry point; written unconditionally. */
	wrmsrl(MSR_CSTAR, ia32_cstar_target);
#endif
}