ia64/xen-unstable: xen/arch/x86/dom0_ops.c @ 10132:9d838b8ceebf

Remove DOM0_PHYSICAL_MEMORY_MAP dom0 op.

The addition of the e820-style memory_op sub-calls makes this operation
obsolete.

Signed-off-by: Ian Campbell <ian.campbell@xensource.com>
author   Ian.Campbell@xensource.com
date     Mon May 22 09:23:21 2006 +0100
parents  4e1b8be54311
children b198bbfeec10
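
The e820-style replacement mentioned above is the XENMEM_machine_memory_map
memory_op sub-call. As a rough sketch of the guest side, assuming the Linux
dom0 environment (HYPERVISOR_memory_op(), set_xen_guest_handle(), E820MAX
and struct e820entry come from the guest's Xen and e820 headers; the names
and layout follow the Linux port, not this file):

    #include <xen/interface/memory.h>   /* struct xen_memory_map */

    static struct e820entry machine_e820[E820MAX];

    /* Ask Xen for the machine (host) E820 map.  Returns the number of
     * entries filled in, or a negative error code. */
    static int fetch_machine_e820(void)
    {
        struct xen_memory_map memmap;
        int rc;

        memmap.nr_entries = E820MAX;    /* buffer capacity on entry */
        set_xen_guest_handle(memmap.buffer, machine_e820);

        rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
        if ( rc < 0 )
            return rc;

        /* On success, nr_entries holds the count actually written. */
        return memmap.nr_entries;
    }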
/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <public/dom0_ops.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <asm/shadow.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/processor.h>
#include <public/sched_ctl.h>

#include <asm/mtrr.h>
#include "cpu/mtrr/mtrr.h"

#define TRC_DOM0OP_ENTER_BASE  0x00020000
#define TRC_DOM0OP_LEAVE_BASE  0x00030000

/*
 * Scratch state for the DOM0_MSR operation: the target MSR, the mask of
 * CPUs it applies to, and the value read or to be written.  Shared with
 * the IPI handlers below.
 */
static int msr_cpu_mask;
static unsigned long msr_addr;
static unsigned long msr_lo;
static unsigned long msr_hi;

/* IPI handler: write the staged value to the MSR if this CPU's bit is set. */
static void write_msr_for(void *unused)
{
    if ( ((1 << smp_processor_id()) & msr_cpu_mask) )
        (void)wrmsr_safe(msr_addr, msr_lo, msr_hi);
}

/* IPI handler: read the MSR into msr_lo/msr_hi if this CPU's bit is set. */
static void read_msr_for(void *unused)
{
    if ( ((1 << smp_processor_id()) & msr_cpu_mask) )
        (void)rdmsr_safe(msr_addr, msr_lo, msr_hi);
}

long arch_do_dom0_op(struct dom0_op *op, XEN_GUEST_HANDLE(dom0_op_t) u_dom0_op)
{
    long ret = 0;

    switch ( op->cmd )
    {
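
    /*
     * DOM0_MSR: read or write a model-specific register on every physical
     * CPU whose bit is set in cpu_mask.  For reads, the value returned is
     * whichever masked CPU stored to msr_lo/msr_hi last.
     */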
    case DOM0_MSR:
    {
        if ( op->u.msr.write )
        {
            msr_cpu_mask = op->u.msr.cpu_mask;
            msr_addr = op->u.msr.msr;
            msr_lo = op->u.msr.in1;
            msr_hi = op->u.msr.in2;
            smp_call_function(write_msr_for, NULL, 1, 1);
            write_msr_for(NULL);
        }
        else
        {
            msr_cpu_mask = op->u.msr.cpu_mask;
            msr_addr = op->u.msr.msr;
            smp_call_function(read_msr_for, NULL, 1, 1);
            read_msr_for(NULL);

            op->u.msr.out1 = msr_lo;
            op->u.msr.out2 = msr_hi;
            copy_to_guest(u_dom0_op, op, 1);
        }
        ret = 0;
    }
    break;
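
    /*
     * DOM0_SHADOW_CONTROL: control a domain's shadow page-table mode
     * (e.g. the dirty-page logging used by live migration).
     */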
    case DOM0_SHADOW_CONTROL:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.shadow_control.domain);
        if ( d != NULL )
        {
            ret = shadow_mode_control(d, &op->u.shadow_control);
            put_domain(d);
            copy_to_guest(u_dom0_op, op, 1);
        }
    }
    break;
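
    /*
     * DOM0_ADD_MEMTYPE: install a variable-range MTRR covering the given
     * machine-frame range.  mtrr_add_page() returns the register index,
     * which is handed back to the caller for a later DOM0_DEL_MEMTYPE.
     */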
    case DOM0_ADD_MEMTYPE:
    {
        ret = mtrr_add_page(
            op->u.add_memtype.mfn,
            op->u.add_memtype.nr_mfns,
            op->u.add_memtype.type,
            1);
        if ( ret > 0 )
        {
            op->u.add_memtype.handle = 0;
            op->u.add_memtype.reg = ret;
            (void)copy_to_guest(u_dom0_op, op, 1);
            ret = 0;
        }
    }
    break;
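
    /*
     * DOM0_DEL_MEMTYPE: remove a variable-range MTRR by register index.
     * Only handles of value zero (as produced by DOM0_ADD_MEMTYPE above)
     * are accepted.
     */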
    case DOM0_DEL_MEMTYPE:
    {
        if ( (op->u.del_memtype.handle == 0) &&
             /* mtrr/main.c otherwise does a lookup */
             ((int)op->u.del_memtype.reg >= 0) )
        {
            ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
            if ( ret > 0 )
                ret = 0;
        }
        else
            ret = -EINVAL;
    }
    break;
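
    /*
     * DOM0_READ_MEMTYPE: report the base frame, extent and type of one
     * variable-range MTRR.
     */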
    case DOM0_READ_MEMTYPE:
    {
        unsigned long mfn;
        unsigned int nr_mfns;
        mtrr_type type;

        ret = -EINVAL;
        if ( op->u.read_memtype.reg < num_var_ranges )
        {
            mtrr_if->get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type);
            op->u.read_memtype.mfn = mfn;
            op->u.read_memtype.nr_mfns = nr_mfns;
            op->u.read_memtype.type = type;
            (void)copy_to_guest(u_dom0_op, op, 1);
            ret = 0;
        }
    }
    break;
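
    /* DOM0_MICROCODE: apply a CPU microcode update supplied by dom0. */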
    case DOM0_MICROCODE:
    {
        extern int microcode_update(void *buf, unsigned long len);
        ret = microcode_update(op->u.microcode.data.p, op->u.microcode.length);
    }
    break;
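
    /*
     * DOM0_IOPORT_PERMISSION: grant or revoke a domain's access to a
     * contiguous range of x86 I/O ports.
     */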
    case DOM0_IOPORT_PERMISSION:
    {
        struct domain *d;
        unsigned int fp = op->u.ioport_permission.first_port;
        unsigned int np = op->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        /* Reject out-of-range requests, guarding against unsigned wrap. */
        if ( ((fp + np) > 65536) || ((fp + np) < fp) )
            break;

        ret = -ESRCH;
        if ( unlikely((d = find_domain_by_id(
            op->u.ioport_permission.domain)) == NULL) )
            break;

        if ( np == 0 )
            ret = 0;
        else if ( op->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        put_domain(d);
    }
    break;
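
    /*
     * DOM0_PHYSINFO: report static details of the physical machine --
     * CPU topology, memory totals, clock speed and capability flags.
     */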
    case DOM0_PHYSINFO:
    {
        dom0_physinfo_t *pi = &op->u.physinfo;

        pi->threads_per_core =
            cpus_weight(cpu_sibling_map[0]);
        pi->cores_per_socket =
            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
        pi->sockets_per_node =
            num_online_cpus() / cpus_weight(cpu_core_map[0]);

        pi->nr_nodes = 1;
        pi->total_pages = total_pages;
        pi->free_pages = avail_domheap_pages();
        pi->cpu_khz = cpu_khz;
        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
        memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
        ret = 0;
        if ( copy_to_guest(u_dom0_op, op, 1) )
            ret = -EFAULT;
    }
    break;
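
    /*
     * DOM0_GETPAGEFRAMEINFO: classify a single machine frame owned by the
     * given domain as a page table of some level, or as an ordinary page.
     */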
    case DOM0_GETPAGEFRAMEINFO:
    {
        struct page_info *page;
        unsigned long mfn = op->u.getpageframeinfo.mfn;
        domid_t dom = op->u.getpageframeinfo.domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            op->u.getpageframeinfo.type = NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    op->u.getpageframeinfo.type = L1TAB;
                    break;
                case PGT_l2_page_table:
                    op->u.getpageframeinfo.type = L2TAB;
                    break;
                case PGT_l3_page_table:
                    op->u.getpageframeinfo.type = L3TAB;
                    break;
                case PGT_l4_page_table:
                    op->u.getpageframeinfo.type = L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        put_domain(d);

        copy_to_guest(u_dom0_op, op, 1);
    }
    break;
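
    /*
     * DOM0_GETPAGEFRAMEINFO2: batched variant of the above.  Each MFN in
     * the guest-supplied array is tagged in place with its page-table type
     * (plus LPINTAB if pinned), or XTAB if the frame is invalid or not
     * owned by the domain.
     */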
    case DOM0_GETPAGEFRAMEINFO2:
    {
#define GPF2_BATCH (PAGE_SIZE / sizeof(unsigned long))
        int n, j;
        int num = op->u.getpageframeinfo2.num;
        domid_t dom = op->u.getpageframeinfo2.domain;
        struct domain *d;
        unsigned long *l_arr;
        ret = -ESRCH;

        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            put_domain(d);
            break;
        }

        /* Bounce page for batched copies to/from the guest array. */
        l_arr = alloc_xenheap_page();
        if ( unlikely(l_arr == NULL) )
        {
            ret = -ENOMEM;
            put_domain(d);
            break;
        }

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = ((num - n) > GPF2_BATCH) ? GPF2_BATCH : (num - n);

            if ( copy_from_guest_offset(l_arr, op->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EINVAL;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = l_arr[j];

                page = mfn_to_page(mfn);

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch ( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= LPINTAB;
                    l_arr[j] |= type;
                    put_page(page);
                }
                else
                    l_arr[j] |= XTAB;
            }

            if ( copy_to_guest_offset(op->u.getpageframeinfo2.array,
                                      n, l_arr, k) )
            {
                ret = -EINVAL;
                break;
            }

            n += k;
        }

        free_xenheap_page(l_arr);

        put_domain(d);
    }
    break;
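
    /*
     * DOM0_GETMEMLIST: copy up to max_pfns MFNs from the head of the
     * domain's page list into a guest buffer.
     */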
    case DOM0_GETMEMLIST:
    {
        int i;
        struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
        unsigned long max_pfns = op->u.getmemlist.max_pfns;
        unsigned long mfn;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = 0;

            spin_lock(&d->page_alloc_lock);
            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                mfn = page_to_mfn(list_entry(
                    list_ent, struct page_info, list));
                if ( copy_to_guest_offset(op->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                list_ent = mfn_to_page(mfn)->list.next;
            }
            spin_unlock(&d->page_alloc_lock);

            op->u.getmemlist.num_pfns = i;
            copy_to_guest(u_dom0_op, op, 1);

            put_domain(d);
        }
    }
    break;
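
    /*
     * DOM0_PLATFORM_QUIRK: dom0 reports platform quirks it discovered
     * during boot, e.g. broken IRQ balancing or the SiS IO-APIC REGSEL
     * erratum.
     */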
    case DOM0_PLATFORM_QUIRK:
    {
        extern int opt_noirqbalance;
        int quirk_id = op->u.platform_quirk.quirk_id;
        switch ( quirk_id )
        {
        case QUIRK_NOIRQBALANCING:
            printk("Platform quirk -- Disabling IRQ balancing/affinity.\n");
            opt_noirqbalance = 1;
            setup_ioapic_dest();
            break;
        case QUIRK_IOAPIC_BAD_REGSEL:
        case QUIRK_IOAPIC_GOOD_REGSEL:
#ifndef sis_apic_bug
            sis_apic_bug = (quirk_id == QUIRK_IOAPIC_BAD_REGSEL);
            DPRINTK("Domain 0 says that IO-APIC REGSEL is %s\n",
                    sis_apic_bug ? "bad" : "good");
#else
            BUG_ON(sis_apic_bug != (quirk_id == QUIRK_IOAPIC_BAD_REGSEL));
#endif
            break;
        default:
            ret = -EINVAL;
            break;
        }
    }
    break;
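
    /*
     * DOM0_HYPERCALL_INIT: populate the given writable machine frame with
     * the hypercall trampoline page for the target domain.
     */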
    case DOM0_HYPERCALL_INIT:
    {
        struct domain *d;
        unsigned long mfn = op->u.hypercall_init.mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely((d = find_domain_by_id(
            op->u.hypercall_init.domain)) == NULL) )
            break;

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            put_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        put_domain(d);
    }
    break;
    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
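
/*
 * Snapshot a VCPU's register and control state into a guest-context
 * structure, folding in HVM state or the virtualised IOPL as appropriate.
 */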
void arch_getdomaininfo_ctxt(
    struct vcpu *v, struct vcpu_guest_context *c)
{
    memcpy(c, &v->arch.guest_context, sizeof(*c));

    if ( hvm_guest(v) )
    {
        hvm_store_cpu_guest_regs(v, &c->user_regs, c->ctrlreg);
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
        c->user_regs.eflags |= v->arch.iopl << 12;
    }

    c->flags = 0;
    if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
        c->flags |= VGCF_I387_VALID;
    if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
        c->flags |= VGCF_IN_KERNEL;
    if ( hvm_guest(v) )
        c->flags |= VGCF_HVM_GUEST;

    c->ctrlreg[3] = pagetable_get_paddr(v->arch.guest_table);

    c->vm_assist = v->domain->vm_assist;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */