ia64/xen-unstable

xen/arch/x86/dom0_ops.c @ 5313:4e868aa7cad7

bitkeeper revision 1.1665.3.1 (42a0cec19GKZHbCwpHO5h-vYt_an-w)

[PATCH] missing put_domain in DOM0_IOPORT_PERMISSION

There appears to be a missing put_domain() in the failure case. The code
doesn't seem to be used by anyone, but anyway...

Signed-off-by: John Levon <levon@movementarian.org>
Signed-off-by: ian@xensource.com
author iap10@freefall.cl.cam.ac.uk
date Fri Jun 03 21:42:25 2005 +0000 (2005-06-03)
parents 8651a99cdc09
children c59632e7ff3e
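
For readers unfamiliar with the convention the patch relies on: find_domain_by_id() takes a reference on the returned domain, and every exit path, including error paths, must drop it with put_domain(). The sketch below only illustrates that shape; DOM0_SOME_OP, op->u.some_op and allocation_failed are placeholders, not code from this changeset.

/*
 * Illustrative sketch of the get/put pairing (placeholders, not the
 * literal hunk): find_domain_by_id() takes a reference that must be
 * released on every path out of the handler.
 */
case DOM0_SOME_OP:
{
    struct domain *d;

    ret = -ESRCH;
    if ( (d = find_domain_by_id(op->u.some_op.domain)) == NULL )
        break;                 /* lookup failed: no reference to drop */

    ret = -ENOMEM;
    if ( allocation_failed )   /* placeholder for any failing setup step */
    {
        put_domain(d);         /* the call this patch adds on the error path */
        break;
    }

    /* ... perform the operation ... */

    ret = 0;
    put_domain(d);             /* success path drops the reference too */
}
break;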
line source
/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <public/dom0_ops.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <asm/shadow.h>
#include <public/sched_ctl.h>

#include <asm/mtrr.h>
#include "mtrr/mtrr.h"

#define TRC_DOM0OP_ENTER_BASE  0x00020000
#define TRC_DOM0OP_LEAVE_BASE  0x00030000

static int msr_cpu_mask;
static unsigned long msr_addr;
static unsigned long msr_lo;
static unsigned long msr_hi;
/* Invoked on every CPU (remotely via smp_call_function() and locally);
 * each CPU applies the pending MSR access only if it is selected in
 * msr_cpu_mask. */
static void write_msr_for(void *unused)
{
    if ( ((1 << current->processor) & msr_cpu_mask) )
        (void)wrmsr_user(msr_addr, msr_lo, msr_hi);
}

static void read_msr_for(void *unused)
{
    if ( ((1 << current->processor) & msr_cpu_mask) )
        (void)rdmsr_user(msr_addr, msr_lo, msr_hi);
}
long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
{
    long ret = 0;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    switch ( op->cmd )
    {

    case DOM0_MSR:
    {
        if ( op->u.msr.write )
        {
            msr_cpu_mask = op->u.msr.cpu_mask;
            msr_addr = op->u.msr.msr;
            msr_lo = op->u.msr.in1;
            msr_hi = op->u.msr.in2;
            smp_call_function(write_msr_for, NULL, 1, 1);
            write_msr_for(NULL);
        }
        else
        {
            msr_cpu_mask = op->u.msr.cpu_mask;
            msr_addr = op->u.msr.msr;
            smp_call_function(read_msr_for, NULL, 1, 1);
            read_msr_for(NULL);

            op->u.msr.out1 = msr_lo;
            op->u.msr.out2 = msr_hi;
            copy_to_user(u_dom0_op, op, sizeof(*op));
        }
        ret = 0;
    }
    break;

    case DOM0_SHADOW_CONTROL:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.shadow_control.domain);
        if ( d != NULL )
        {
            ret = shadow_mode_control(d, &op->u.shadow_control);
            put_domain(d);
            copy_to_user(u_dom0_op, op, sizeof(*op));
        }
    }
    break;

    case DOM0_ADD_MEMTYPE:
    {
        ret = mtrr_add_page(
            op->u.add_memtype.pfn,
            op->u.add_memtype.nr_pfns,
            op->u.add_memtype.type,
            1);
    }
    break;

    case DOM0_DEL_MEMTYPE:
    {
        ret = mtrr_del_page(op->u.del_memtype.reg, 0, 0);
    }
    break;

    case DOM0_READ_MEMTYPE:
    {
        unsigned long pfn;
        unsigned int nr_pfns;
        mtrr_type type;

        ret = -EINVAL;
        if ( op->u.read_memtype.reg < num_var_ranges )
        {
            mtrr_if->get(op->u.read_memtype.reg, &pfn, &nr_pfns, &type);
            (void)__put_user(pfn, &u_dom0_op->u.read_memtype.pfn);
            (void)__put_user(nr_pfns, &u_dom0_op->u.read_memtype.nr_pfns);
            (void)__put_user(type, &u_dom0_op->u.read_memtype.type);
            ret = 0;
        }
    }
    break;

    case DOM0_MICROCODE:
    {
        extern int microcode_update(void *buf, unsigned long len);
        ret = microcode_update(op->u.microcode.data, op->u.microcode.length);
    }
    break;
    case DOM0_IOPORT_PERMISSION:
    {
        struct domain *d;
        unsigned int fp = op->u.ioport_permission.first_port;
        unsigned int np = op->u.ioport_permission.nr_ports;
        unsigned int p;

        ret = -EINVAL;
        if ( (fp + np) >= 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = find_domain_by_id(
            op->u.ioport_permission.domain)) == NULL) )
            break;

        /* Lazily allocate the mask; a set bit means "access denied". */
        ret = -ENOMEM;
        if ( d->arch.iobmp_mask == NULL )
        {
            if ( (d->arch.iobmp_mask = xmalloc_array(
                u8, IOBMP_BYTES)) == NULL )
            {
                put_domain(d);
                break;
            }
            memset(d->arch.iobmp_mask, 0xFF, IOBMP_BYTES);
        }

        ret = 0;
        for ( p = fp; p < (fp + np); p++ )
        {
            if ( op->u.ioport_permission.allow_access )
                clear_bit(p, d->arch.iobmp_mask);
            else
                set_bit(p, d->arch.iobmp_mask);
        }

        put_domain(d);
    }
    break;
    case DOM0_PHYSINFO:
    {
        dom0_physinfo_t *pi = &op->u.physinfo;

        pi->ht_per_core = ht_per_core;
        pi->cores = num_online_cpus() / ht_per_core;
        pi->total_pages = max_page;
        pi->free_pages = avail_domheap_pages();
        pi->cpu_khz = cpu_khz;

        copy_to_user(u_dom0_op, op, sizeof(*op));
        ret = 0;
    }
    break;

    case DOM0_GETPAGEFRAMEINFO:
    {
        struct pfn_info *page;
        unsigned long pfn = op->u.getpageframeinfo.pfn;
        domid_t dom = op->u.getpageframeinfo.domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(pfn >= max_page) ||
             unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        page = &frame_table[pfn];

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            op->u.getpageframeinfo.type = NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    op->u.getpageframeinfo.type = L1TAB;
                    break;
                case PGT_l2_page_table:
                    op->u.getpageframeinfo.type = L2TAB;
                    break;
                case PGT_l3_page_table:
                    op->u.getpageframeinfo.type = L3TAB;
                    break;
                case PGT_l4_page_table:
                    op->u.getpageframeinfo.type = L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        put_domain(d);

        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;
    case DOM0_GETPAGEFRAMEINFO2:
    {
#define GPF2_BATCH 128
        int n, j;
        int num = op->u.getpageframeinfo2.num;
        domid_t dom = op->u.getpageframeinfo2.domain;
        unsigned long *s_ptr = (unsigned long *)op->u.getpageframeinfo2.array;
        struct domain *d;
        unsigned long *l_arr;
        ret = -ESRCH;

        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            put_domain(d); /* drop the reference taken above before bailing out */
            break;
        }
        l_arr = (unsigned long *)alloc_xenheap_page();

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = ((num - n) > GPF2_BATCH) ? GPF2_BATCH : (num - n);

            if ( copy_from_user(l_arr, &s_ptr[n], k * sizeof(unsigned long)) )
            {
                ret = -EINVAL;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct pfn_info *page;
                unsigned long mfn = l_arr[j];

                if ( unlikely(mfn >= max_page) )
                    goto e2_err;

                page = &frame_table[mfn];

                if ( likely(get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch ( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= LPINTAB;
                    l_arr[j] |= type;
                    put_page(page);
                }
                else
                {
                e2_err:
                    l_arr[j] |= XTAB;
                }
            }

            if ( copy_to_user(&s_ptr[n], l_arr, k * sizeof(unsigned long)) )
            {
                ret = -EINVAL;
                break;
            }

            n += j;
        }

        free_xenheap_page((unsigned long)l_arr);

        put_domain(d);
    }
    break;
    case DOM0_GETMEMLIST:
    {
        int i;
        struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
        unsigned long max_pfns = op->u.getmemlist.max_pfns;
        unsigned long pfn;
        unsigned long *buffer = op->u.getmemlist.buffer;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = 0;

            spin_lock(&d->page_alloc_lock);
            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                pfn = list_entry(list_ent, struct pfn_info, list) -
                    frame_table;
                if ( put_user(pfn, buffer) )
                {
                    ret = -EFAULT;
                    break;
                }
                buffer++;
                list_ent = frame_table[pfn].list.next;
            }
            spin_unlock(&d->page_alloc_lock);

            op->u.getmemlist.num_pfns = i;
            copy_to_user(u_dom0_op, op, sizeof(*op));

            put_domain(d);
        }
    }
    break;

    default:
        ret = -ENOSYS;

    }

    return ret;
}
void arch_getdomaininfo_ctxt(
    struct vcpu *v, struct vcpu_guest_context *c)
{
#ifdef __i386__  /* Remove when x86_64 VMX is implemented */
#ifdef CONFIG_VMX
    extern void save_vmx_cpu_user_regs(struct cpu_user_regs *);
#endif
#endif

    memcpy(c, &v->arch.guest_context, sizeof(*c));

    /* IOPL privileges are virtualised -- merge back into returned eflags. */
    BUG_ON((c->user_regs.eflags & EF_IOPL) != 0);
    c->user_regs.eflags |= v->arch.iopl << 12;

#ifdef __i386__
#ifdef CONFIG_VMX
    if ( VMX_DOMAIN(v) )
        save_vmx_cpu_user_regs(&c->user_regs);
#endif
#endif

    c->flags = 0;
    if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
        c->flags |= VGCF_I387_VALID;
    if ( KERNEL_MODE(v, &v->arch.guest_context.user_regs) )
        c->flags |= VGCF_IN_KERNEL;
#ifdef CONFIG_VMX
    if ( VMX_DOMAIN(v) )
        c->flags |= VGCF_VMX_GUEST;
#endif

    c->pt_base = pagetable_get_paddr(v->arch.guest_table);

    c->vm_assist = v->domain->vm_assist;
}