ia64/xen-unstable: xen/arch/x86/dom0_ops.c @ 3684:2e9105d1c5a6

bitkeeper revision 1.1159.223.73 (42061207JyX53LmlrclgfiWwVqnF7w)

Avoid a large stack frame in arch_do_dom0_op() by dynamically allocating
space for the getmeminfo batch (the DOM0_GETPAGEFRAMEINFO2 handler below).
Signed-off-by: keir.fraser@cl.cam.ac.uk
author   kaf24@viper.(none)
date     Sun Feb 06 12:48:07 2005 +0000
parents  d331c6994d28
children f4eb69e2ad9e 0dc3b8b8c298
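The change applies a standard batching pattern: the request is processed in
fixed-size chunks through one small heap buffer instead of a caller-sized
array on the stack. A minimal standalone sketch of the idea, in plain C with
illustrative names (malloc/memcpy stand in for Xen's xenheap and user-copy
helpers):

    #include <stdlib.h>
    #include <string.h>

    #define BATCH 128  /* entries per pass, as GPF2_BATCH below */

    /* Hypothetical per-entry transform; the real code looks up page types. */
    static unsigned long process_one(unsigned long entry) { return entry; }

    /* Rewrite 'num' entries of 'arr' in place, BATCH at a time, so the
     * scratch space is one small heap block rather than a num-sized
     * array in the caller's stack frame. */
    static int process_batched(unsigned long *arr, int num)
    {
        unsigned long *buf = malloc(BATCH * sizeof(*buf));
        int n, j, k;

        if ( buf == NULL )
            return -1;
        for ( n = 0; n < num; n += k )
        {
            k = ((num - n) > BATCH) ? BATCH : (num - n);
            memcpy(buf, &arr[n], k * sizeof(*buf));  /* copy_from_user */
            for ( j = 0; j < k; j++ )
                buf[j] = process_one(buf[j]);
            memcpy(&arr[n], buf, k * sizeof(*buf));  /* copy_to_user */
        }
        free(buf);
        return 0;
    }
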
/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <public/dom0_ops.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/domain_page.h>
#include <asm/msr.h>
#include <asm/pdb.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <asm/shadow.h>
#include <public/sched_ctl.h>

#include <asm/mtrr.h>
#include "mtrr/mtrr.h"

#define TRC_DOM0OP_ENTER_BASE 0x00020000
#define TRC_DOM0OP_LEAVE_BASE 0x00030000

extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);

static int msr_cpu_mask;
static unsigned long msr_addr;
static unsigned long msr_lo;
static unsigned long msr_hi;
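
/*
 * Run on every CPU via smp_call_function() (plus locally by the caller);
 * each CPU applies the staged MSR access only if its bit is set in
 * msr_cpu_mask.
 */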
static void write_msr_for(void *unused)
{
    if ( ((1 << current->processor) & msr_cpu_mask) )
        wrmsr(msr_addr, msr_lo, msr_hi);
}

static void read_msr_for(void *unused)
{
    if ( ((1 << current->processor) & msr_cpu_mask) )
        rdmsr(msr_addr, msr_lo, msr_hi);
}

long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
{
    long ret = 0;

    if ( !IS_PRIV(current) )
        return -EPERM;

    switch ( op->cmd )
    {
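
    /* Read or write a model-specific register (MSR) on the CPUs in cpu_mask. */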
    case DOM0_MSR:
    {
        if ( op->u.msr.write )
        {
            msr_cpu_mask = op->u.msr.cpu_mask;
            msr_addr = op->u.msr.msr;
            msr_lo = op->u.msr.in1;
            msr_hi = op->u.msr.in2;
            smp_call_function(write_msr_for, NULL, 1, 1);
            write_msr_for(NULL);
        }
        else
        {
            msr_cpu_mask = op->u.msr.cpu_mask;
            msr_addr = op->u.msr.msr;
            smp_call_function(read_msr_for, NULL, 1, 1);
            read_msr_for(NULL);

            op->u.msr.out1 = msr_lo;
            op->u.msr.out2 = msr_hi;
            copy_to_user(u_dom0_op, op, sizeof(*op));
        }
        ret = 0;
    }
    break;
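
    /* Control shadow page-table mode for a domain. */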
    case DOM0_SHADOW_CONTROL:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.shadow_control.domain);
        if ( d != NULL )
        {
            ret = shadow_mode_control(d, &op->u.shadow_control);
            put_domain(d);
            copy_to_user(u_dom0_op, op, sizeof(*op));
        }
    }
    break;
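
    /* MTRR management: add, delete, or read back memory-type ranges. */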
    case DOM0_ADD_MEMTYPE:
    {
        ret = mtrr_add_page(
            op->u.add_memtype.pfn,
            op->u.add_memtype.nr_pfns,
            op->u.add_memtype.type,
            1);
    }
    break;

    case DOM0_DEL_MEMTYPE:
    {
        ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
    }
    break;

    case DOM0_READ_MEMTYPE:
    {
        unsigned long pfn;
        unsigned int nr_pfns;
        mtrr_type type;

        ret = -EINVAL;
        if ( op->u.read_memtype.reg < num_var_ranges )
        {
            mtrr_if->get(op->u.read_memtype.reg, &pfn, &nr_pfns, &type);
            (void)__put_user(pfn, &u_dom0_op->u.read_memtype.pfn);
            (void)__put_user(nr_pfns, &u_dom0_op->u.read_memtype.nr_pfns);
            (void)__put_user(type, &u_dom0_op->u.read_memtype.type);
            ret = 0;
        }
    }
    break;

    case DOM0_MICROCODE:
    {
        extern int microcode_update(void *buf, unsigned long len);
        ret = microcode_update(op->u.microcode.data, op->u.microcode.length);
    }
    break;

    case DOM0_IOPL:
    {
        extern long do_iopl(domid_t, unsigned int);
        ret = do_iopl(op->u.iopl.domain, op->u.iopl.iopl);
    }
    break;
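
    /* Report physical machine parameters: cores, hyperthreads per core,
     * memory, and CPU clock rate. */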
    case DOM0_PHYSINFO:
    {
        dom0_physinfo_t *pi = &op->u.physinfo;

        pi->ht_per_core = opt_noht ? 1 : ht_per_core;
        pi->cores = smp_num_cpus / pi->ht_per_core;
        pi->total_pages = max_page;
        pi->free_pages = avail_domheap_pages();
        pi->cpu_khz = cpu_khz;

        copy_to_user(u_dom0_op, op, sizeof(*op));
        ret = 0;
    }
    break;
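
    /* Query the page-table type of a single frame owned by a domain. */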
    case DOM0_GETPAGEFRAMEINFO:
    {
        struct pfn_info *page;
        unsigned long pfn = op->u.getpageframeinfo.pfn;
        domid_t dom = op->u.getpageframeinfo.domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(pfn >= max_page) ||
             unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        page = &frame_table[pfn];

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            op->u.getpageframeinfo.type = NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    op->u.getpageframeinfo.type = L1TAB;
                    break;
                case PGT_l2_page_table:
                    op->u.getpageframeinfo.type = L2TAB;
                    break;
                case PGT_l3_page_table:
                    op->u.getpageframeinfo.type = L3TAB;
                    break;
                case PGT_l4_page_table:
                    op->u.getpageframeinfo.type = L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        put_domain(d);

        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;
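
    /* Batched variant of GETPAGEFRAMEINFO: type-check an array of MFNs. */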
    case DOM0_GETPAGEFRAMEINFO2:
    {
#define GPF2_BATCH 128
        int n,j;
        int num = op->u.getpageframeinfo2.num;
        domid_t dom = op->u.getpageframeinfo2.domain;
        unsigned long *s_ptr = (unsigned long*) op->u.getpageframeinfo2.array;
        struct domain *d;
        unsigned long *l_arr;
        ret = -ESRCH;

        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            put_domain(d); /* don't leak the reference taken above */
            break;
        }
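
        /*
         * A single xenheap page is the scratch buffer for every batch;
         * this replaces a large on-stack array (the point of this change).
         */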
        l_arr = (unsigned long *)alloc_xenheap_page();
        if ( unlikely(l_arr == NULL) )
        {
            ret = -ENOMEM;
            put_domain(d); /* release the domain if no page was available */
            break;
        }

        ret = 0;
        for( n = 0; n < num; )
        {
            int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);

            if ( copy_from_user(l_arr, &s_ptr[n], k*sizeof(unsigned long)) )
            {
                ret = -EINVAL;
                break;
            }

            for( j = 0; j < k; j++ )
            {
                struct pfn_info *page;
                unsigned long mfn = l_arr[j];

                if ( unlikely(mfn >= max_page) )
                    goto e2_err;

                page = &frame_table[mfn];

                if ( likely(get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= LPINTAB;
                    l_arr[j] |= type;
                    put_page(page);
                }
                else
                {
                e2_err:
                    l_arr[j] |= XTAB;
                }

            }

            if ( copy_to_user(&s_ptr[n], l_arr, k*sizeof(unsigned long)) )
            {
                ret = -EINVAL;
                break;
            }

            n += j;
        }

        free_xenheap_page((unsigned long)l_arr);

        put_domain(d);
    }
    break;

    default:
        ret = -ENOSYS;

    }

    return ret;
}
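
/*
 * Snapshot a domain's full execution context: CPU registers, FPU state,
 * trap and fast-trap settings, LDT/GDT, callbacks, page-table base, and
 * debug registers.
 */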
void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c)
{
    int i;

    c->flags = 0;
    memcpy(&c->cpu_ctxt,
           &d->thread.user_ctxt,
           sizeof(d->thread.user_ctxt));
    if ( test_bit(DF_DONEFPUINIT, &d->flags) )
        c->flags |= ECF_I387_VALID;
    memcpy(&c->fpu_ctxt,
           &d->thread.i387,
           sizeof(d->thread.i387));
    memcpy(&c->trap_ctxt,
           d->thread.traps,
           sizeof(d->thread.traps));
#ifdef ARCH_HAS_FAST_TRAP
    if ( (d->thread.fast_trap_desc.a == 0) &&
         (d->thread.fast_trap_desc.b == 0) )
        c->fast_trap_idx = 0;
    else
        c->fast_trap_idx =
            d->thread.fast_trap_idx;
#endif
    c->ldt_base = d->mm.ldt_base;
    c->ldt_ents = d->mm.ldt_ents;
    c->gdt_ents = 0;
    if ( GET_GDT_ADDRESS(d) == GDT_VIRT_START )
    {
        for ( i = 0; i < 16; i++ )
            c->gdt_frames[i] =
                l1_pgentry_to_pagenr(d->mm.perdomain_pt[i]);
        c->gdt_ents = GET_GDT_ENTRIES(d);
    }
    c->guestos_ss = d->thread.guestos_ss;
    c->guestos_esp = d->thread.guestos_sp;
    c->pt_base =
        pagetable_val(d->mm.pagetable);
    memcpy(c->debugreg,
           d->thread.debugreg,
           sizeof(d->thread.debugreg));
    c->event_callback_cs = d->thread.event_selector;
    c->event_callback_eip = d->thread.event_address;
    c->failsafe_callback_cs = d->thread.failsafe_selector;
    c->failsafe_callback_eip = d->thread.failsafe_address;
}