ia64/xen-unstable

view xen/arch/ia64/xen/hypercall.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 4ac315e33f88
children ada944b5e066
line source
1 /*
2 * Hypercall implementations
3 *
4 * Copyright (C) 2005 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <xen/config.h>
10 #include <xen/sched.h>
11 #include <xen/hypercall.h>
12 #include <xen/multicall.h>
13 #include <xen/guest_access.h>
14 #include <xen/mm.h>
16 #include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
17 #include <asm/sal.h> /* FOR struct ia64_sal_retval */
18 #include <asm/fpswa.h> /* FOR struct fpswa_ret_t */
20 #include <asm/vmx_vcpu.h>
21 #include <asm/vcpu.h>
22 #include <asm/dom_fw.h>
23 #include <public/domctl.h>
24 #include <public/sysctl.h>
25 #include <public/event_channel.h>
26 #include <public/memory.h>
27 #include <public/sched.h>
28 #include <xen/irq.h>
29 #include <asm/hw_irq.h>
30 #include <public/physdev.h>
31 #include <xen/domain.h>
32 #include <public/callback.h>
33 #include <xen/event.h>
34 #include <xen/perfc.h>
35 #include <public/arch-ia64/debug_op.h>
37 extern long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg);
38 extern long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg);
40 static IA64FAULT
41 xen_hypercall (struct pt_regs *regs)
42 {
43 uint32_t cmd = (uint32_t)regs->r2;
44 printk("Warning %s should not be called %d\n", __FUNCTION__, cmd);
45 return IA64_NO_FAULT;
46 }
48 static IA64FAULT
49 xen_fast_hypercall (struct pt_regs *regs)
50 {
51 uint32_t cmd = (uint32_t)regs->r2;
52 switch (cmd) {
53 case __HYPERVISOR_ia64_fast_eoi:
54 printk("Warning %s should not be called %d\n",
55 __FUNCTION__, cmd);
56 break;
57 default:
58 regs->r8 = -ENOSYS;
59 }
60 return IA64_NO_FAULT;
61 }
63 long do_pirq_guest_eoi(int pirq)
64 {
65 return pirq_guest_eoi(current->domain, pirq);
66 }
69 static void
70 fw_hypercall_ipi (struct pt_regs *regs)
71 {
72 int cpu = regs->r14;
73 int vector = regs->r15;
74 struct vcpu *targ;
75 struct domain *d = current->domain;
77 /* Be sure the target exists. */
78 if (cpu > MAX_VIRT_CPUS)
79 return;
80 targ = d->vcpu[cpu];
81 if (targ == NULL)
82 return;
84 if (vector == XEN_SAL_BOOT_RENDEZ_VEC
85 && (!targ->is_initialised
86 || test_bit(_VPF_down, &targ->pause_flags))) {
88 /* First start: initialize vpcu. */
89 if (!targ->is_initialised) {
90 if (arch_set_info_guest (targ, NULL) != 0) {
91 printk ("arch_boot_vcpu: failure\n");
92 return;
93 }
94 }
96 /* First or next rendez-vous: set registers. */
97 vcpu_init_regs (targ);
98 vcpu_regs (targ)->cr_iip = d->arch.sal_data->boot_rdv_ip;
99 vcpu_regs (targ)->r1 = d->arch.sal_data->boot_rdv_r1;
100 vcpu_regs (targ)->b0 = FW_HYPERCALL_SAL_RETURN_PADDR;
102 if (test_and_clear_bit(_VPF_down,
103 &targ->pause_flags)) {
104 vcpu_wake(targ);
105 printk(XENLOG_INFO "arch_boot_vcpu: vcpu %d awaken\n",
106 targ->vcpu_id);
107 }
108 else
109 printk ("arch_boot_vcpu: huu, already awaken!\n");
110 }
111 else {
112 int running = targ->is_running;
113 vcpu_pend_interrupt(targ, vector);
114 vcpu_unblock(targ);
115 if (running)
116 smp_send_event_check_cpu(targ->processor);
117 }
118 return;
119 }
/* Return the FPSWA (floating-point software assist) result previously
 * cached in this vcpu's arch state via the PSCBX accessor. */
static fpswa_ret_t
fw_hypercall_fpswa (struct vcpu *v)
{
	return PSCBX(v, fpswa_ret);
}
/*
 * Dispatcher for firmware-style hypercalls (PAL/SAL/EFI emulation and
 * Xen-private firmware calls).  The hypercall index arrives in r2 and
 * is masked with FW_HYPERCALL_NUM_MASK_HIGH; results are written back
 * into the guest's r8-r11 per the firmware calling conventions being
 * emulated.  Returns an IA64FAULT code for the trap handler.
 */
IA64FAULT
ia64_hypercall(struct pt_regs *regs)
{
	struct vcpu *v = current;
	struct sal_ret_values x;
	efi_status_t efi_ret_value;
	fpswa_ret_t fpswa_ret;
	IA64FAULT fault;
	unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;

	perfc_incra(fw_hypercall, index >> 8);
	switch (index) {
	case FW_HYPERCALL_XEN:
		return xen_hypercall(regs);

	case FW_HYPERCALL_XEN_FAST:
		return xen_fast_hypercall(regs);

	case FW_HYPERCALL_PAL_CALL:
		/* PAL procedure number is in r28; PAL_HALT_LIGHT is
		 * special-cased as the guest idle path. */
		//printk("*** PAL hypercall: index=%d\n",regs->r28);
		//FIXME: This should call a C routine
#if 0
		// This is very conservative, but avoids a possible
		// (and deadly) freeze in paravirtualized domains due
		// to a yet-to-be-found bug where pending_interruption
		// is zero when it shouldn't be.  Since PAL is called
		// in the idle loop, this should resolve it
		VCPU(v,pending_interruption) = 1;
#endif
		if (regs->r28 == PAL_HALT_LIGHT) {
			if (vcpu_deliverable_interrupts(v) ||
			    event_pending(v)) {
				/* Don't actually halt: an interrupt/event is
				 * already pending and will be delivered on
				 * the way out of the hypercall. */
				perfc_incr(idle_when_pending);
				vcpu_pend_unspecified_interrupt(v);
				//printk("idle w/int#%d pending!\n",pi);
				//this shouldn't happen, but it apparently does quite a bit!  so don't
				//allow it to happen... i.e. if a domain has an interrupt pending and
				//it tries to halt itself because it thinks it is idle, just return here
				//as deliver_pending_interrupt is called on the way out and will deliver it
			}
			else {
				/* Genuinely idle: arm the halt timer on this
				 * vcpu's current pcpu and block until the
				 * next event, then tear the timer down. */
				perfc_incr(pal_halt_light);
				migrate_timer(&v->arch.hlt_timer,
				              v->processor);
				set_timer(&v->arch.hlt_timer,
				          vcpu_get_next_timer_ns(v));
				do_sched_op_compat(SCHEDOP_block, 0);
				/* do_block only pends a softirq */
				do_softirq();
				stop_timer(&v->arch.hlt_timer);
			}
			/* PAL_HALT_LIGHT reports success with zero values. */
			regs->r8 = 0;
			regs->r9 = 0;
			regs->r10 = 0;
			regs->r11 = 0;
		}
		else {
			struct ia64_pal_retval y;

			/* Calls >= PAL_COPY_PAL are "stacked" convention:
			 * arguments come from the stacked registers
			 * (gr 33-35) rather than static r29-r31. */
			if (regs->r28 >= PAL_COPY_PAL)
				y = xen_pal_emulator
					(regs->r28, vcpu_get_gr (v, 33),
					 vcpu_get_gr (v, 34),
					 vcpu_get_gr (v, 35));
			else
				y = xen_pal_emulator(regs->r28,regs->r29,
				                     regs->r30,regs->r31);
			regs->r8 = y.status; regs->r9 = y.v0;
			regs->r10 = y.v1; regs->r11 = y.v2;
		}
		break;
	case FW_HYPERCALL_SAL_CALL:
		/* SAL takes its 8 arguments from stacked gr 32-39. */
		x = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
		                 vcpu_get_gr(v,34),vcpu_get_gr(v,35),
		                 vcpu_get_gr(v,36),vcpu_get_gr(v,37),
		                 vcpu_get_gr(v,38),vcpu_get_gr(v,39));
		regs->r8 = x.r8; regs->r9 = x.r9;
		regs->r10 = x.r10; regs->r11 = x.r11;
		break;
	case FW_HYPERCALL_SAL_RETURN:
		/* The vcpu returned from the SAL boot rendez-vous: put it
		 * back to sleep unless it was already marked down. */
		if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
			vcpu_sleep_nosync(v);
		break;
	case FW_HYPERCALL_EFI_CALL:
		efi_ret_value = efi_emulator (regs, &fault);
		/* Propagate emulator faults to the trap handler. */
		if (fault != IA64_NO_FAULT) return fault;
		regs->r8 = efi_ret_value;
		break;
	case FW_HYPERCALL_IPI:
		fw_hypercall_ipi (regs);
		break;
	case FW_HYPERCALL_SET_SHARED_INFO_VA:
		regs->r8 = domain_set_shared_info_va (regs->r28);
		break;
	case FW_HYPERCALL_FPSWA:
		fpswa_ret = fw_hypercall_fpswa (v);
		regs->r8 = fpswa_ret.status;
		regs->r9 = fpswa_ret.err0;
		regs->r10 = fpswa_ret.err1;
		regs->r11 = fpswa_ret.err2;
		break;
	case __HYPERVISOR_opt_feature: {
		/* Guest pointer to a xen_ia64_opt_feature struct is passed
		 * in stacked gr 32; copy it in before acting on it. */
		XEN_GUEST_HANDLE(void) arg;
		struct xen_ia64_opt_feature optf;
		set_xen_guest_handle(arg, (void*)(vcpu_get_gr(v, 32)));
		if (copy_from_guest(&optf, arg, 1) == 0)
			regs->r8 = domain_opt_feature(v->domain, &optf);
		else
			regs->r8 = -EFAULT;
		break;
	}
	default:
		printk("unknown ia64 fw hypercall %lx\n", regs->r2);
		regs->r8 = do_ni_hypercall();
	}
	return IA64_NO_FAULT;
}
245 unsigned long hypercall_create_continuation(
246 unsigned int op, const char *format, ...)
247 {
248 struct mc_state *mcs = &this_cpu(mc_state);
249 struct vcpu *v = current;
250 const char *p = format;
251 unsigned long arg;
252 unsigned int i;
253 va_list args;
255 va_start(args, format);
256 if (test_bit(_MCSF_in_multicall, &mcs->flags))
257 panic("PREEMPT happen in multicall\n"); // Not support yet
259 vcpu_set_gr(v, 15, op, 0);
261 for (i = 0; *p != '\0'; i++) {
262 switch ( *p++ )
263 {
264 case 'i':
265 arg = (unsigned long)va_arg(args, unsigned int);
266 break;
267 case 'l':
268 arg = (unsigned long)va_arg(args, unsigned long);
269 break;
270 case 'h':
271 arg = (unsigned long)va_arg(args, void *);
272 break;
273 default:
274 arg = 0;
275 BUG();
276 }
277 vcpu_set_gr(v, 16 + i, arg, 0);
278 }
280 if (i >= 6)
281 panic("Too many args for hypercall continuation\n");
283 // Clean other argument to 0
284 while (i < 6) {
285 vcpu_set_gr(v, 16 + i, 0, 0);
286 i++;
287 }
289 // re-execute break;
290 vcpu_decrement_iip(v);
292 v->arch.hypercall_continuation = 1;
293 va_end(args);
294 return op;
295 }
297 /* Need make this function common */
298 extern int
299 iosapic_guest_read(
300 unsigned long physbase, unsigned int reg, u32 *pval);
301 extern int
302 iosapic_guest_write(
303 unsigned long physbase, unsigned int reg, u32 pval);
/*
 * PHYSDEVOP_* hypercall dispatcher for ia64.  @arg is a guest handle
 * to a per-command structure; each case copies it in, validates, acts,
 * and (where the command returns data) copies it back out.  Returns 0
 * or a negative errno-style value in the usual Xen convention.
 */
long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
	int irq;
	long ret;

	switch ( cmd )
	{
	/* Guest signals end-of-interrupt for a physical IRQ. */
	case PHYSDEVOP_eoi: {
		struct physdev_eoi eoi;
		ret = -EFAULT;
		if ( copy_from_guest(&eoi, arg, 1) != 0 )
			break;
		ret = pirq_guest_eoi(current->domain, eoi.irq);
		break;
	}

	/* Legacy since 0x00030202. */
	case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
		ret = pirq_guest_unmask(current->domain);
		break;
	}

	/* Report whether an IRQ needs an explicit EOI downcall. */
	case PHYSDEVOP_irq_status_query: {
		struct physdev_irq_status_query irq_status_query;
		ret = -EFAULT;
		if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
			break;
		irq = irq_status_query.irq;
		ret = -EINVAL;
		if ( (irq < 0) || (irq >= NR_IRQS) )
			break;
		irq_status_query.flags = 0;
		/* Edge-triggered interrupts don't need an explicit unmask downcall. */
		if ( !strstr(irq_desc[irq_to_vector(irq)].handler->typename, "edge") )
			irq_status_query.flags |= XENIRQSTAT_needs_eoi;
		ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
		break;
	}

	/* Privileged: read an IOSAPIC register on behalf of the guest. */
	case PHYSDEVOP_apic_read: {
		struct physdev_apic apic;
		ret = -EFAULT;
		if ( copy_from_guest(&apic, arg, 1) != 0 )
			break;
		ret = -EPERM;
		if ( !IS_PRIV(current->domain) )
			break;
		ret = iosapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
		/* Copy-back failure overrides the read's return value. */
		if ( copy_to_guest(arg, &apic, 1) != 0 )
			ret = -EFAULT;
		break;
	}

	/* Privileged: write an IOSAPIC register on behalf of the guest. */
	case PHYSDEVOP_apic_write: {
		struct physdev_apic apic;
		ret = -EFAULT;
		if ( copy_from_guest(&apic, arg, 1) != 0 )
			break;
		ret = -EPERM;
		if ( !IS_PRIV(current->domain) )
			break;
		ret = iosapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
		break;
	}

	/* Privileged: bind an interrupt vector to an IRQ. */
	case PHYSDEVOP_alloc_irq_vector: {
		struct physdev_irq irq_op;

		ret = -EFAULT;
		if ( copy_from_guest(&irq_op, arg, 1) != 0 )
			break;

		ret = -EPERM;
		if ( !IS_PRIV(current->domain) )
			break;

		ret = -EINVAL;
		/* NOTE(review): unlike irq_status_query above, there is no
		 * irq < 0 check here — presumably irq_op.irq is unsigned in
		 * the public interface; verify against public/physdev.h. */
		if ( (irq = irq_op.irq) >= NR_IRQS )
			break;

		irq_op.vector = assign_irq_vector(irq);
		ret = copy_to_guest(arg, &irq_op, 1) ? -EFAULT : 0;
		break;
	}

	/* Privileged: release a previously allocated device vector. */
	case PHYSDEVOP_free_irq_vector: {
		struct physdev_irq irq_op;
		int vector;

		ret = -EFAULT;
		if ( copy_from_guest(&irq_op, arg, 1) != 0 )
			break;

		ret = -EPERM;
		if ( !IS_PRIV(current->domain) )
			break;

		ret = -EINVAL;
		vector = irq_op.vector;
		if (vector < IA64_FIRST_DEVICE_VECTOR ||
		    vector > IA64_LAST_DEVICE_VECTOR)
			break;

		/* XXX This should be called, but causes a NAT consumption via the
		 * reboot notifier_call_chain in dom0 if a device is hidden for
		 * a driver domain using pciback.hide= (specifically, hiding function
		 * 1 of a 2 port e1000 card).
		 * free_irq_vector(vector);
		 */
		ret = 0;
		break;
	}

	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
426 static long register_guest_callback(struct callback_register *reg)
427 {
428 long ret = 0;
429 struct vcpu *v = current;
431 if (IS_VMM_ADDRESS(reg->address))
432 return -EINVAL;
434 switch ( reg->type )
435 {
436 case CALLBACKTYPE_event:
437 v->arch.event_callback_ip = reg->address;
438 break;
440 case CALLBACKTYPE_failsafe:
441 v->arch.failsafe_callback_ip = reg->address;
442 break;
444 default:
445 ret = -ENOSYS;
446 break;
447 }
449 return ret;
450 }
/* Unregistering callbacks is not supported on ia64: always fails. */
static long unregister_guest_callback(struct callback_unregister *unreg)
{
	return -EINVAL;
}
457 /* First time to add callback to xen/ia64, so let's just stick to
458 * the newer callback interface.
459 */
460 long do_callback_op(int cmd, XEN_GUEST_HANDLE(void) arg)
461 {
462 long ret;
464 switch ( cmd )
465 {
466 case CALLBACKOP_register:
467 {
468 struct callback_register reg;
470 ret = -EFAULT;
471 if ( copy_from_guest(&reg, arg, 1) )
472 break;
474 ret = register_guest_callback(&reg);
475 }
476 break;
478 case CALLBACKOP_unregister:
479 {
480 struct callback_unregister unreg;
482 ret = -EFAULT;
483 if ( copy_from_guest(&unreg, arg, 1) )
484 break;
486 ret = unregister_guest_callback(&unreg);
487 }
488 break;
490 default:
491 ret = -ENOSYS;
492 break;
493 }
495 return ret;
496 }
/*
 * Privileged debug hypercall: get or set the per-domain debug_flags.
 * @domain identifies the target domain; @u_debug_op is a guest handle
 * to a xen_ia64_debug_op_t carrying (or receiving) the flags.
 * Returns 0 or a negative errno-style value.
 * NOTE(review): ret is declared long but the function returns unsigned
 * long, so negative errnos are returned as large unsigned values — the
 * usual Xen convention, but worth confirming callers expect it.
 */
unsigned long
do_ia64_debug_op(unsigned long cmd, unsigned long domain,
                 XEN_GUEST_HANDLE(xen_ia64_debug_op_t) u_debug_op)
{
	xen_ia64_debug_op_t curop, *op = &curop;
	struct domain *d;
	long ret = 0;

	/* Only privileged (dom0) callers may poke other domains' state. */
	if (!IS_PRIV(current->domain))
		return -EPERM;
	if (copy_from_guest(op, u_debug_op, 1))
		return -EFAULT;
	/* Holds an RCU reference on the domain until rcu_unlock below. */
	d = rcu_lock_domain_by_id(domain);
	if (d == NULL)
		return -ESRCH;

	switch (cmd) {
	case XEN_IA64_DEBUG_OP_SET_FLAGS:
		d->arch.debug_flags = op->flags;
		break;
	case XEN_IA64_DEBUG_OP_GET_FLAGS:
		op->flags = d->arch.debug_flags;
		if (copy_to_guest(u_debug_op, op, 1))
			ret = -EFAULT;
		break;
	default:
		ret = -ENOSYS;
	}
	rcu_unlock_domain(d);
	return ret;
}