ia64/xen-unstable

linux-2.4.27-xen-sparse/arch/xen/kernel/ldt.c @ 2621:9402048e2325

bitkeeper revision 1.1159.1.218 (416a8128OiHXHyk_Sy8FsA0YUQcEnA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xeno.bk-26dom0
into freefall.cl.cam.ac.uk:/local/scratch/cl349/xeno.bk-26dom0

author   cl349@freefall.cl.cam.ac.uk
date     Mon Oct 11 12:48:40 2004 +0000 (2004-10-11)
parents  c326283ef029
children

/*
 * linux/kernel/ldt.c
 *
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
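
/*
 * Xen-specific notes (derived from the code below): LDT pages are made
 * read-only before they are loaded, pending updates are pushed out with
 * flush_page_update_queue(), and individual descriptors are written via
 * HYPERVISOR_update_descriptor() rather than by direct memory stores.
 */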

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>

#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
static void flush_ldt(void *mm)
{
    if (current->active_mm)
        load_LDT(&current->active_mm->context);
}
#endif
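
/*
 * Grow the LDT of 'pc' to hold at least 'mincount' entries, rounded up to
 * a multiple of 512.  Tables that fit in a page come from kmalloc(), larger
 * ones from vmalloc().  When 'reload' is set, the new table is made
 * read-only (as Xen requires for descriptor tables), loaded on this CPU,
 * and, if the mm is active on other CPUs, reloaded there via an IPI.
 */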
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
    void *oldldt;
    void *newldt;
    int oldsize;

    if (mincount <= pc->size)
        return 0;
    oldsize = pc->size;
    mincount = (mincount+511)&(~511);
    if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
        newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
    else
        newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);

    if (!newldt)
        return -ENOMEM;

    if (oldsize)
        memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);

    oldldt = pc->ldt;
    memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
    wmb();
    pc->ldt = newldt;
    pc->size = mincount;
    if (reload) {
        make_pages_readonly(
            pc->ldt,
            (pc->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
        load_LDT(pc);
        flush_page_update_queue();
#ifdef CONFIG_SMP
        if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
            smp_call_function(flush_ldt, 0, 1, 1);
#endif
    }
    wmb();
    if (oldsize) {
        if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
            vfree(oldldt);
        else
            kfree(oldldt);
    }
    return 0;
}
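
/*
 * Duplicate the parent's LDT into a newly created mm (fork path).  The copy
 * is made read-only immediately so that it is already in the state Xen
 * expects before it is ever loaded.
 */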
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
    int err = alloc_ldt(new, old->size, 0);
    if (err < 0) {
        printk(KERN_WARNING "ldt allocation failed\n");
        new->size = 0;
        return err;
    }
    memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
    make_pages_readonly(new->ldt, (new->size*LDT_ENTRY_SIZE)/PAGE_SIZE);
    return 0;
}

/*
 * We do not have to muck with descriptors here; that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
    struct mm_struct *old_mm;
    int retval = 0;

    init_MUTEX(&mm->context.sem);
    mm->context.size = 0;
    old_mm = current->mm;
    if (old_mm && old_mm->context.size > 0) {
        down(&old_mm->context.sem);
        retval = copy_ldt(&mm->context, &old_mm->context);
        up(&old_mm->context.sem);
    }
    return retval;
}

/*
 * No need to lock the mm, as we are its last user.
 * Do not touch the LDT register; we are already
 * in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
    if (mm->context.size) {
        make_pages_writable(
            mm->context.ldt,
            (mm->context.size*LDT_ENTRY_SIZE)/PAGE_SIZE);
        flush_page_update_queue();
        if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
            vfree(mm->context.ldt);
        else
            kfree(mm->context.ldt);
        mm->context.size = 0;
    }
}
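
/*
 * modify_ldt(0): copy up to 'bytecount' bytes of the current LDT to user
 * space, zero-filling any tail beyond the actual table.  Returns the
 * (clamped) byte count, 0 if the mm has no LDT, or -EFAULT on a bad
 * user pointer.
 */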
static int read_ldt(void *ptr, unsigned long bytecount)
{
    int err;
    unsigned long size;
    struct mm_struct *mm = current->mm;

    if (!mm->context.size)
        return 0;
    if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
        bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;

    down(&mm->context.sem);
    size = mm->context.size*LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;

    err = 0;
    if (copy_to_user(ptr, mm->context.ldt, size))
        err = -EFAULT;
    up(&mm->context.sem);
    if (err < 0)
        return err;
    if (size != bytecount) {
        /* zero-fill the rest */
        clear_user(ptr+size, bytecount-size);
    }
    return bytecount;
}
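
/*
 * modify_ldt(2): copy (at most) the first five entries of the default LDT
 * to user space.  Returns the number of bytes copied, or -EFAULT.
 */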
static int read_default_ldt(void *ptr, unsigned long bytecount)
{
    int err;
    unsigned long size;
    void *address;

    err = 0;
    address = &default_ldt[0];
    size = 5*sizeof(struct desc_struct);
    if (size > bytecount)
        size = bytecount;

    err = size;
    if (copy_to_user(ptr, address, size))
        err = -EFAULT;

    return err;
}
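
/*
 * modify_ldt(1)/modify_ldt(0x11): install a single LDT entry described by a
 * struct modify_ldt_ldt_s.  The descriptor words are assembled exactly as on
 * native x86, but instead of storing them into the (read-only) table
 * directly, the slot's physical address is obtained with
 * arbitrary_virt_to_phys() and the update is handed to the hypervisor via
 * HYPERVISOR_update_descriptor().  'oldmode' selects the legacy semantics,
 * in which the 'useable' bit is ignored and a zero base/limit always clears
 * the entry.
 */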
static int write_ldt(void *ptr, unsigned long bytecount, int oldmode)
{
    struct mm_struct *mm = current->mm;
    __u32 entry_1, entry_2, *lp;
    unsigned long phys_lp;
    int error;
    struct modify_ldt_ldt_s ldt_info;

    error = -EINVAL;
    if (bytecount != sizeof(ldt_info))
        goto out;
    error = -EFAULT;
    if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
        goto out;

    error = -EINVAL;
    if (ldt_info.entry_number >= LDT_ENTRIES)
        goto out;
    if (ldt_info.contents == 3) {
        if (oldmode)
            goto out;
        if (ldt_info.seg_not_present == 0)
            goto out;
    }

    down(&mm->context.sem);
    if (ldt_info.entry_number >= mm->context.size) {
        error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
        if (error < 0)
            goto out_unlock;
    }

    lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
    phys_lp = arbitrary_virt_to_phys(lp);

    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (ldt_info.contents == 0 &&
             ldt_info.read_exec_only == 1 &&
             ldt_info.seg_32bit == 0 &&
             ldt_info.limit_in_pages == 0 &&
             ldt_info.seg_not_present == 1 &&
             ldt_info.useable == 0)) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((ldt_info.read_exec_only ^ 1) << 9) |
        (ldt_info.contents << 10) |
        ((ldt_info.seg_not_present ^ 1) << 15) |
        (ldt_info.seg_32bit << 22) |
        (ldt_info.limit_in_pages << 23) |
        0x7000;
    if (!oldmode)
        entry_2 |= (ldt_info.useable << 20);

    /* Install the new entry ... */
install:
    error = HYPERVISOR_update_descriptor(phys_lp, entry_1, entry_2);

out_unlock:
    up(&mm->context.sem);
out:
    return error;
}
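
/*
 * Entry point for the modify_ldt(2) system call:
 *   0    - read the current LDT
 *   1    - write an entry (legacy/oldmode format)
 *   2    - read the default LDT
 *   0x11 - write an entry (current format, honours 'useable')
 */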
asmlinkage int sys_modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    int ret = -ENOSYS;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(ptr, bytecount, 1);
        break;
    case 2:
        ret = read_default_ldt(ptr, bytecount);
        break;
    case 0x11:
        ret = write_ldt(ptr, bytecount, 0);
        break;
    }
    return ret;
}