ia64/xen-unstable

view xen/arch/ia64/xen/hypercall.c @ 17769:1feb98eb64ef

[IA64] hypervisor needs to turn off psr.i after PAL_HALT_LIGHT

psr.i must be set to 0 on PAL entry and must be unchanged on PAL exit.
But do_block() turns on psr.i.
So we need to turn it back off at the exit of PAL_HALT_LIGHT.

Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Jun 10 15:08:06 2008 +0900 (2008-06-10)
parents 430a036ab261
children 36c274bbc5df
line source
1 /*
2 * Hypercall implementations
3 *
4 * Copyright (C) 2005 Hewlett-Packard Co.
5 * Dan Magenheimer (dan.magenheimer@hp.com)
6 *
7 */
9 #include <xen/config.h>
10 #include <xen/sched.h>
11 #include <xen/hypercall.h>
12 #include <xen/multicall.h>
13 #include <xen/guest_access.h>
14 #include <xen/mm.h>
16 #include <linux/efi.h> /* FOR EFI_UNIMPLEMENTED */
17 #include <asm/sal.h> /* FOR struct ia64_sal_retval */
18 #include <asm/fpswa.h> /* FOR struct fpswa_ret_t */
20 #include <asm/vmx_vcpu.h>
21 #include <asm/vcpu.h>
22 #include <asm/dom_fw.h>
23 #include <public/domctl.h>
24 #include <public/sysctl.h>
25 #include <public/event_channel.h>
26 #include <public/memory.h>
27 #include <public/sched.h>
28 #include <xen/irq.h>
29 #include <asm/hw_irq.h>
30 #include <public/physdev.h>
31 #include <xen/domain.h>
32 #include <public/callback.h>
33 #include <xen/event.h>
34 #include <xen/perfc.h>
35 #include <public/arch-ia64/debug_op.h>
36 #include <asm/sioemu.h>
37 #include <public/arch-ia64/sioemu.h>
39 static IA64FAULT
40 xen_hypercall (struct pt_regs *regs)
41 {
42 uint32_t cmd = (uint32_t)regs->r2;
43 printk("Warning %s should not be called %d\n", __FUNCTION__, cmd);
44 return IA64_NO_FAULT;
45 }
47 static IA64FAULT
48 xen_fast_hypercall (struct pt_regs *regs)
49 {
50 uint32_t cmd = (uint32_t)regs->r2;
51 switch (cmd) {
52 case __HYPERVISOR_ia64_fast_eoi:
53 printk("Warning %s should not be called %d\n",
54 __FUNCTION__, cmd);
55 break;
56 default:
57 regs->r8 = -ENOSYS;
58 }
59 return IA64_NO_FAULT;
60 }
62 long do_pirq_guest_eoi(int pirq)
63 {
64 return pirq_guest_eoi(current->domain, pirq);
65 }
68 static void
69 fw_hypercall_ipi (struct pt_regs *regs)
70 {
71 int cpu = regs->r14;
72 int vector = regs->r15;
73 struct vcpu *targ;
74 struct domain *d = current->domain;
76 /* Be sure the target exists. */
77 if (cpu > MAX_VIRT_CPUS)
78 return;
79 targ = d->vcpu[cpu];
80 if (targ == NULL)
81 return;
83 if (vector == XEN_SAL_BOOT_RENDEZ_VEC
84 && (!targ->is_initialised
85 || test_bit(_VPF_down, &targ->pause_flags))) {
87 /* First start: initialize vpcu. */
88 if (!targ->is_initialised) {
89 if (arch_set_info_guest (targ, NULL) != 0) {
90 printk ("arch_boot_vcpu: failure\n");
91 return;
92 }
93 }
95 /* First or next rendez-vous: set registers. */
96 vcpu_init_regs (targ);
97 vcpu_regs (targ)->cr_iip = d->arch.sal_data->boot_rdv_ip;
98 vcpu_regs (targ)->r1 = d->arch.sal_data->boot_rdv_r1;
99 vcpu_regs (targ)->b0 = FW_HYPERCALL_SAL_RETURN_PADDR;
101 if (test_and_clear_bit(_VPF_down,
102 &targ->pause_flags)) {
103 vcpu_wake(targ);
104 printk(XENLOG_INFO "arch_boot_vcpu: vcpu %d awaken\n",
105 targ->vcpu_id);
106 }
107 else
108 printk ("arch_boot_vcpu: huu, already awaken!\n");
109 }
110 else {
111 int running = targ->is_running;
112 vcpu_pend_interrupt(targ, vector);
113 vcpu_unblock(targ);
114 if (running)
115 smp_send_event_check_cpu(targ->processor);
116 }
117 return;
118 }
120 static fpswa_ret_t
121 fw_hypercall_fpswa (struct vcpu *v)
122 {
123 return PSCBX(v, fpswa_ret);
124 }
126 IA64FAULT
127 ia64_hypercall(struct pt_regs *regs)
128 {
129 struct vcpu *v = current;
130 struct sal_ret_values x;
131 efi_status_t efi_ret_value;
132 fpswa_ret_t fpswa_ret;
133 IA64FAULT fault;
134 unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;
136 perfc_incra(fw_hypercall, index >> 8);
137 switch (index) {
138 case FW_HYPERCALL_XEN:
139 return xen_hypercall(regs);
141 case FW_HYPERCALL_XEN_FAST:
142 return xen_fast_hypercall(regs);
144 case FW_HYPERCALL_PAL_CALL:
145 //printk("*** PAL hypercall: index=%d\n",regs->r28);
146 //FIXME: This should call a C routine
147 #if 0
148 // This is very conservative, but avoids a possible
149 // (and deadly) freeze in paravirtualized domains due
150 // to a yet-to-be-found bug where pending_interruption
151 // is zero when it shouldn't be. Since PAL is called
152 // in the idle loop, this should resolve it
153 VCPU(v,pending_interruption) = 1;
154 #endif
155 if (regs->r28 == PAL_HALT_LIGHT) {
156 if (vcpu_deliverable_interrupts(v) ||
157 event_pending(v)) {
158 perfc_incr(idle_when_pending);
159 vcpu_pend_unspecified_interrupt(v);
160 //printk("idle w/int#%d pending!\n",pi);
161 //this shouldn't happen, but it apparently does quite a bit! so don't
162 //allow it to happen... i.e. if a domain has an interrupt pending and
163 //it tries to halt itself because it thinks it is idle, just return here
164 //as deliver_pending_interrupt is called on the way out and will deliver it
165 }
166 else {
167 perfc_incr(pal_halt_light);
168 migrate_timer(&v->arch.hlt_timer,
169 v->processor);
170 set_timer(&v->arch.hlt_timer,
171 vcpu_get_next_timer_ns(v));
172 do_sched_op_compat(SCHEDOP_block, 0);
173 /* do_block only pends a softirq */
174 do_softirq();
175 stop_timer(&v->arch.hlt_timer);
176 /* do_block() calls
177 * local_event_delivery_enable(),
178 * but PALL CALL must be called with
179 * psr.i = 0 and psr.i is unchanged.
180 * SDM vol.2 Part I 11.10.2
181 * PAL Calling Conventions.
182 */
183 local_event_delivery_disable();
184 }
185 regs->r8 = 0;
186 regs->r9 = 0;
187 regs->r10 = 0;
188 regs->r11 = 0;
189 }
190 else {
191 struct ia64_pal_retval y;
193 if (regs->r28 >= PAL_COPY_PAL)
194 y = xen_pal_emulator
195 (regs->r28, vcpu_get_gr (v, 33),
196 vcpu_get_gr (v, 34),
197 vcpu_get_gr (v, 35));
198 else
199 y = xen_pal_emulator(regs->r28,regs->r29,
200 regs->r30,regs->r31);
201 regs->r8 = y.status; regs->r9 = y.v0;
202 regs->r10 = y.v1; regs->r11 = y.v2;
203 }
204 break;
205 case FW_HYPERCALL_SAL_CALL:
206 x = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
207 vcpu_get_gr(v,34),vcpu_get_gr(v,35),
208 vcpu_get_gr(v,36),vcpu_get_gr(v,37),
209 vcpu_get_gr(v,38),vcpu_get_gr(v,39));
210 regs->r8 = x.r8; regs->r9 = x.r9;
211 regs->r10 = x.r10; regs->r11 = x.r11;
212 break;
213 case FW_HYPERCALL_SAL_RETURN:
214 if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
215 vcpu_sleep_nosync(v);
216 break;
217 case FW_HYPERCALL_EFI_CALL:
218 efi_ret_value = efi_emulator (regs, &fault);
219 if (fault != IA64_NO_FAULT) return fault;
220 regs->r8 = efi_ret_value;
221 break;
222 case FW_HYPERCALL_IPI:
223 fw_hypercall_ipi (regs);
224 break;
225 case FW_HYPERCALL_SET_SHARED_INFO_VA:
226 regs->r8 = domain_set_shared_info_va (regs->r28);
227 break;
228 case FW_HYPERCALL_FPSWA:
229 fpswa_ret = fw_hypercall_fpswa (v);
230 regs->r8 = fpswa_ret.status;
231 regs->r9 = fpswa_ret.err0;
232 regs->r10 = fpswa_ret.err1;
233 regs->r11 = fpswa_ret.err2;
234 break;
235 case __HYPERVISOR_opt_feature:
236 {
237 XEN_GUEST_HANDLE(void) arg;
238 struct xen_ia64_opt_feature optf;
239 set_xen_guest_handle(arg, (void*)(vcpu_get_gr(v, 32)));
240 if (copy_from_guest(&optf, arg, 1) == 0)
241 regs->r8 = domain_opt_feature(v->domain, &optf);
242 else
243 regs->r8 = -EFAULT;
244 break;
245 }
246 case FW_HYPERCALL_SIOEMU:
247 sioemu_hypercall(regs);
248 break;
249 default:
250 printk("unknown ia64 fw hypercall %lx\n", regs->r2);
251 regs->r8 = do_ni_hypercall();
252 }
253 return IA64_NO_FAULT;
254 }
256 unsigned long hypercall_create_continuation(
257 unsigned int op, const char *format, ...)
258 {
259 struct mc_state *mcs = &this_cpu(mc_state);
260 struct vcpu *v = current;
261 const char *p = format;
262 unsigned long arg;
263 unsigned int i;
264 va_list args;
266 va_start(args, format);
267 if (test_bit(_MCSF_in_multicall, &mcs->flags))
268 panic("PREEMPT happen in multicall\n"); // Not support yet
270 vcpu_set_gr(v, 15, op, 0);
272 for (i = 0; *p != '\0'; i++) {
273 switch ( *p++ )
274 {
275 case 'i':
276 arg = (unsigned long)va_arg(args, unsigned int);
277 break;
278 case 'l':
279 arg = (unsigned long)va_arg(args, unsigned long);
280 break;
281 case 'h':
282 arg = (unsigned long)va_arg(args, void *);
283 break;
284 default:
285 arg = 0;
286 BUG();
287 }
288 vcpu_set_gr(v, 16 + i, arg, 0);
289 }
291 if (i >= 6)
292 panic("Too many args for hypercall continuation\n");
294 // Clean other argument to 0
295 while (i < 6) {
296 vcpu_set_gr(v, 16 + i, 0, 0);
297 i++;
298 }
300 // re-execute break;
301 vcpu_decrement_iip(v);
303 v->arch.hypercall_continuation = 1;
304 va_end(args);
305 return op;
306 }
308 /* Need make this function common */
309 extern int
310 iosapic_guest_read(
311 unsigned long physbase, unsigned int reg, u32 *pval);
312 extern int
313 iosapic_guest_write(
314 unsigned long physbase, unsigned int reg, u32 pval);
316 long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
317 {
318 int irq;
319 long ret;
321 switch ( cmd )
322 {
323 case PHYSDEVOP_eoi: {
324 struct physdev_eoi eoi;
325 ret = -EFAULT;
326 if ( copy_from_guest(&eoi, arg, 1) != 0 )
327 break;
328 ret = pirq_guest_eoi(current->domain, eoi.irq);
329 break;
330 }
332 /* Legacy since 0x00030202. */
333 case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
334 ret = pirq_guest_unmask(current->domain);
335 break;
336 }
338 case PHYSDEVOP_irq_status_query: {
339 struct physdev_irq_status_query irq_status_query;
340 ret = -EFAULT;
341 if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
342 break;
343 irq = irq_status_query.irq;
344 ret = -EINVAL;
345 if ( (irq < 0) || (irq >= NR_IRQS) )
346 break;
347 irq_status_query.flags = 0;
348 /* Edge-triggered interrupts don't need an explicit unmask downcall. */
349 if ( !strstr(irq_desc[irq_to_vector(irq)].handler->typename, "edge") )
350 irq_status_query.flags |= XENIRQSTAT_needs_eoi;
351 ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
352 break;
353 }
355 case PHYSDEVOP_apic_read: {
356 struct physdev_apic apic;
357 ret = -EFAULT;
358 if ( copy_from_guest(&apic, arg, 1) != 0 )
359 break;
360 ret = -EPERM;
361 if ( !IS_PRIV(current->domain) )
362 break;
363 ret = iosapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
364 if ( copy_to_guest(arg, &apic, 1) != 0 )
365 ret = -EFAULT;
366 break;
367 }
369 case PHYSDEVOP_apic_write: {
370 struct physdev_apic apic;
371 ret = -EFAULT;
372 if ( copy_from_guest(&apic, arg, 1) != 0 )
373 break;
374 ret = -EPERM;
375 if ( !IS_PRIV(current->domain) )
376 break;
377 ret = iosapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
378 break;
379 }
381 case PHYSDEVOP_alloc_irq_vector: {
382 struct physdev_irq irq_op;
384 ret = -EFAULT;
385 if ( copy_from_guest(&irq_op, arg, 1) != 0 )
386 break;
388 ret = -EPERM;
389 if ( !IS_PRIV(current->domain) )
390 break;
392 ret = -EINVAL;
393 if ( (irq = irq_op.irq) >= NR_IRQS )
394 break;
396 irq_op.vector = assign_irq_vector(irq);
397 ret = copy_to_guest(arg, &irq_op, 1) ? -EFAULT : 0;
398 break;
399 }
401 case PHYSDEVOP_free_irq_vector: {
402 struct physdev_irq irq_op;
403 int vector;
405 ret = -EFAULT;
406 if ( copy_from_guest(&irq_op, arg, 1) != 0 )
407 break;
409 ret = -EPERM;
410 if ( !IS_PRIV(current->domain) )
411 break;
413 ret = -EINVAL;
414 vector = irq_op.vector;
415 if (vector < IA64_FIRST_DEVICE_VECTOR ||
416 vector > IA64_LAST_DEVICE_VECTOR)
417 break;
419 /* XXX This should be called, but causes a NAT consumption via the
420 * reboot notifier_call_chain in dom0 if a device is hidden for
421 * a driver domain using pciback.hide= (specifically, hiding function
422 * 1 of a 2 port e1000 card).
423 * free_irq_vector(vector);
424 */
425 ret = 0;
426 break;
427 }
429 default:
430 ret = -ENOSYS;
431 break;
432 }
434 return ret;
435 }
437 static long register_guest_callback(struct callback_register *reg)
438 {
439 long ret = 0;
440 struct vcpu *v = current;
442 if (IS_VMM_ADDRESS(reg->address))
443 return -EINVAL;
445 switch ( reg->type )
446 {
447 case CALLBACKTYPE_event:
448 v->arch.event_callback_ip = reg->address;
449 break;
451 case CALLBACKTYPE_failsafe:
452 v->arch.failsafe_callback_ip = reg->address;
453 break;
455 default:
456 ret = -ENOSYS;
457 break;
458 }
460 return ret;
461 }
/* Callback unregistration is not supported on ia64. */
static long unregister_guest_callback(struct callback_unregister *unreg)
{
	return -EINVAL;
}
468 /* First time to add callback to xen/ia64, so let's just stick to
469 * the newer callback interface.
470 */
471 long do_callback_op(int cmd, XEN_GUEST_HANDLE(const_void) arg)
472 {
473 long ret;
475 switch ( cmd )
476 {
477 case CALLBACKOP_register:
478 {
479 struct callback_register reg;
481 ret = -EFAULT;
482 if ( copy_from_guest(&reg, arg, 1) )
483 break;
485 ret = register_guest_callback(&reg);
486 }
487 break;
489 case CALLBACKOP_unregister:
490 {
491 struct callback_unregister unreg;
493 ret = -EFAULT;
494 if ( copy_from_guest(&unreg, arg, 1) )
495 break;
497 ret = unregister_guest_callback(&unreg);
498 }
499 break;
501 default:
502 ret = -ENOSYS;
503 break;
504 }
506 return ret;
507 }
509 unsigned long
510 do_ia64_debug_op(unsigned long cmd, unsigned long domain,
511 XEN_GUEST_HANDLE(xen_ia64_debug_op_t) u_debug_op)
512 {
513 xen_ia64_debug_op_t curop, *op = &curop;
514 struct domain *d;
515 long ret = 0;
517 if (copy_from_guest(op, u_debug_op, 1))
518 return -EFAULT;
519 d = rcu_lock_domain_by_id(domain);
520 if (d == NULL)
521 return -ESRCH;
522 if (!IS_PRIV_FOR(current->domain, d)) {
523 ret = -EPERM;
524 goto out;
525 }
527 switch (cmd) {
528 case XEN_IA64_DEBUG_OP_SET_FLAGS:
529 d->arch.debug_flags = op->flags;
530 break;
531 case XEN_IA64_DEBUG_OP_GET_FLAGS:
532 op->flags = d->arch.debug_flags;
533 if (copy_to_guest(u_debug_op, op, 1))
534 ret = -EFAULT;
535 break;
536 default:
537 ret = -ENOSYS;
538 }
539 out:
540 rcu_unlock_domain(d);
541 return ret;
542 }