ia64/xen-unstable: xen/arch/ia64/xen/hypercall.c @ 19402:f02a528d2e56

Xen: use proper device ID to search VT-d unit for ARI and SR-IOV device

The PCIe Alternative Routing-ID Interpretation (ARI) ECN defines the Extended
Function: a function within an ARI Device whose function number is greater
than 7. Intel VT-d spec 1.2, section 8.3.2, specifies that an Extended
Function is under the scope of the same remapping unit as the traditional
function. The hypervisor needs to know whether a function is an Extended
Function so it can find the proper DMAR unit for it.

Section 8.3.3 likewise specifies that an SR-IOV Virtual Function is under the
scope of the same remapping unit as its Physical Function. For the same
reason, the hypervisor also needs to know whether a function is a Virtual
Function and which Physical Function it is associated with.

Signed-off-by: Yu Zhao <yu.zhao@intel.com>
author   Keir Fraser <keir.fraser@citrix.com>
date     Thu Mar 19 10:20:11 2009 +0000
parents  9e3be0660c1e
children 5839491bbf20
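
The new information reaches Xen through the PHYSDEVOP_manage_pci_add_ext
physdev op handled in this file. As a rough sketch of the caller side (not
part of this changeset; it assumes a Linux dom0's HYPERVISOR_physdev_op
wrapper, and the vf_*/pf_* variables are placeholders), dom0 might report an
SR-IOV Virtual Function like this:

    struct physdev_manage_pci_ext manage_pci_ext = {
        .bus   = vf_bus,        /* bus/devfn of the VF itself */
        .devfn = vf_devfn,
        .is_extfn  = 0,         /* not an ARI Extended Function */
        .is_virtfn = 1,         /* an SR-IOV Virtual Function, so Xen can put
                                 * it under the scope of the remapping unit
                                 * of the Physical Function named below */
        .physfn.bus   = pf_bus,
        .physfn.devfn = pf_devfn,
    };
    ret = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add_ext,
                                &manage_pci_ext);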
line source
/*
 * Hypercall implementations
 *
 * Copyright (C) 2005 Hewlett-Packard Co.
 *     Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/sched.h>
#include <xen/hypercall.h>
#include <xen/multicall.h>
#include <xen/guest_access.h>
#include <xen/mm.h>

#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
#include <asm/sal.h>    /* FOR struct ia64_sal_retval */
#include <asm/fpswa.h>  /* FOR struct fpswa_ret_t */

#include <asm/vmx.h>
#include <asm/vmx_vcpu.h>
#include <asm/vcpu.h>
#include <asm/dom_fw.h>
#include <public/domctl.h>
#include <public/sysctl.h>
#include <public/event_channel.h>
#include <public/memory.h>
#include <public/sched.h>
#include <xen/irq.h>
#include <asm/hw_irq.h>
#include <public/physdev.h>
#include <xen/domain.h>
#include <public/callback.h>
#include <xen/event.h>
#include <xen/perfc.h>
#include <public/arch-ia64/debug_op.h>
#include <asm/sioemu.h>
#include <public/arch-ia64/sioemu.h>
#include <xen/pci.h>
static IA64FAULT
xen_hypercall (struct pt_regs *regs)
{
    uint32_t cmd = (uint32_t)regs->r2;
    printk("Warning %s should not be called %d\n", __FUNCTION__, cmd);
    return IA64_NO_FAULT;
}

static IA64FAULT
xen_fast_hypercall (struct pt_regs *regs)
{
    uint32_t cmd = (uint32_t)regs->r2;
    switch (cmd) {
    case __HYPERVISOR_ia64_fast_eoi:
        printk("Warning %s should not be called %d\n",
               __FUNCTION__, cmd);
        break;
    default:
        regs->r8 = -ENOSYS;
    }
    return IA64_NO_FAULT;
}

static long __do_pirq_guest_eoi(struct domain *d, int pirq)
{
    if ( pirq < 0 || pirq >= NR_IRQS )
        return -EINVAL;
    if ( d->arch.pirq_eoi_map )
        evtchn_unmask(d->pirq_to_evtchn[pirq]);
    return pirq_guest_eoi(d, pirq);
}

long do_pirq_guest_eoi(int pirq)
{
    return __do_pirq_guest_eoi(current->domain, pirq);
}
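
/* Deliver a guest firmware IPI: either boot/wake a secondary vcpu via the
 * SAL boot rendezvous vector, or pend the requested interrupt vector on the
 * target vcpu. */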
static void
fw_hypercall_ipi (struct pt_regs *regs)
{
    int cpu = regs->r14;
    int vector = regs->r15;
    struct vcpu *targ;
    struct domain *d = current->domain;

    /* Be sure the target exists. */
    if (cpu > MAX_VIRT_CPUS)
        return;
    targ = d->vcpu[cpu];
    if (targ == NULL)
        return;

    if (vector == XEN_SAL_BOOT_RENDEZ_VEC
        && (!targ->is_initialised
            || test_bit(_VPF_down, &targ->pause_flags))) {

        /* First start: initialize the vcpu. */
        if (!targ->is_initialised) {
            if (arch_set_info_guest (targ, NULL) != 0) {
                printk ("arch_boot_vcpu: failure\n");
                return;
            }
        }

        /* First or next rendez-vous: set registers. */
        vcpu_init_regs (targ);
        vcpu_regs (targ)->cr_iip = d->arch.sal_data->boot_rdv_ip;
        vcpu_regs (targ)->r1 = d->arch.sal_data->boot_rdv_r1;
        vcpu_regs (targ)->b0 = FW_HYPERCALL_SAL_RETURN_PADDR;

        if (test_and_clear_bit(_VPF_down,
                               &targ->pause_flags)) {
            vcpu_wake(targ);
            printk(XENLOG_INFO "arch_boot_vcpu: vcpu %d awaken\n",
                   targ->vcpu_id);
        }
        else
            printk ("arch_boot_vcpu: huu, already awaken!\n");
    }
    else {
        int running = targ->is_running;
        vcpu_pend_interrupt(targ, vector);
        vcpu_unblock(targ);
        if (running)
            smp_send_event_check_cpu(targ->processor);
    }
    return;
}
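
/* Translate a guest physical address of an fpswa argument into an imva and
 * take a reference on the underlying page; arguments that cross a page
 * boundary are rejected. */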
static int
fpswa_get_domain_addr(struct vcpu *v, unsigned long gpaddr, size_t size,
                      void **virt, struct page_info **page, const char *name)
{
    int cross_page_boundary;

    if (gpaddr == 0) {
        *virt = 0;
        return 0;
    }

    cross_page_boundary = (((gpaddr & ~PAGE_MASK) + size) > PAGE_SIZE);
    if (unlikely(cross_page_boundary)) {
        /* this case isn't implemented */
        gdprintk(XENLOG_ERR,
                 "%s: fpswa hypercall is called with "
                 "page crossing argument %s 0x%lx\n",
                 __func__, name, gpaddr);
        return -ENOSYS;
    }

 again:
    *virt = domain_mpa_to_imva(v->domain, gpaddr);
    *page = virt_to_page(*virt);
    if (get_page(*page, current->domain) == 0) {
        if (page_get_owner(*page) != current->domain) {
            *page = NULL;
            return -EFAULT;
        }
        goto again;
    }

    return 0;
}
static fpswa_ret_t
fw_hypercall_fpswa (struct vcpu *v, struct pt_regs *regs)
{
    fpswa_ret_t ret = {-1, 0, 0, 0};
    unsigned long bundle[2] = { regs->r15, regs->r16 };
    fp_state_t fp_state;
    struct page_info *lp_page = NULL;
    struct page_info *lv_page = NULL;
    struct page_info *hp_page = NULL;
    struct page_info *hv_page = NULL;
    XEN_EFI_RR_DECLARE(rr6, rr7);

    if (unlikely(PSCBX(v, fpswa_ret).status != 0 &&
                 PSCBX(v, fpswa_ret).status != IA64_RETRY)) {
        ret = PSCBX(v, fpswa_ret);
        PSCBX(v, fpswa_ret) = (fpswa_ret_t){0, 0, 0, 0};
        return ret;
    }

    if (!fpswa_interface)
        goto error;

    memset(&fp_state, 0, sizeof(fp_state));
    fp_state.bitmask_low64 = regs->r22;
    fp_state.bitmask_high64 = regs->r23;

    /* bit6..bit11 */
    if ((fp_state.bitmask_low64 & 0xfc0) != 0xfc0) {
        /* other cases aren't supported yet */
        gdprintk(XENLOG_ERR, "%s unsupported bitmask_low64 0x%lx\n",
                 __func__, fp_state.bitmask_low64);
        goto error;
    }
    if (regs->r25 == 0)
        /* fp_state.fp_state_low_volatile must be supplied */
        goto error;

    /* eager save/lazy restore fpu: f32...f127 */
    if ((~fp_state.bitmask_low64 & ((1UL << 31) - 1)) != 0 ||
        ~fp_state.bitmask_high64 != 0) {
        if (VMX_DOMAIN(v))
            vmx_lazy_load_fpu(v);
        else
            ia64_lazy_load_fpu(v);
    }

    if (fpswa_get_domain_addr(v, regs->r24,
                              sizeof(fp_state.fp_state_low_preserved),
                              (void*)&fp_state.fp_state_low_preserved,
                              &lp_page, "fp_state_low_preserved") < 0)
        goto error;
    if (fpswa_get_domain_addr(v, regs->r25,
                              sizeof(fp_state.fp_state_low_volatile),
                              (void*)&fp_state.fp_state_low_volatile,
                              &lv_page, "fp_state_low_volatile") < 0)
        goto error;
    if (fpswa_get_domain_addr(v, regs->r26,
                              sizeof(fp_state.fp_state_high_preserved),
                              (void*)&fp_state.fp_state_high_preserved,
                              &hp_page, "fp_state_high_preserved") < 0)
        goto error;
    if (fpswa_get_domain_addr(v, regs->r27,
                              sizeof(fp_state.fp_state_high_volatile),
                              (void*)&fp_state.fp_state_high_volatile,
                              &hv_page, "fp_state_high_volatile") < 0)
        goto error;

    XEN_EFI_RR_ENTER(rr6, rr7);
    ret = (*fpswa_interface->fpswa)(regs->r14,
                                    bundle,
                                    &regs->r17, /* pipsr */
                                    &regs->r18, /* pfsr */
                                    &regs->r19, /* pisr */
                                    &regs->r20, /* ppreds */
                                    &regs->r21, /* pifs */
                                    &fp_state);
    XEN_EFI_RR_LEAVE(rr6, rr7);

 error:
    if (lp_page != NULL)
        put_page(lp_page);
    if (lv_page != NULL)
        put_page(lv_page);
    if (hp_page != NULL)
        put_page(hp_page);
    if (hv_page != NULL)
        put_page(hv_page);
    return ret;
}

static fpswa_ret_t
fw_hypercall_fpswa_error(void)
{
    return (fpswa_ret_t) {-1, 0, 0, 0};
}
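
/*
 * Dispatch firmware hypercalls (PAL/SAL/EFI emulation, IPI, FPSWA, etc.)
 * according to the hypercall number the guest passed in r2.
 */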
IA64FAULT
ia64_hypercall(struct pt_regs *regs)
{
    struct vcpu *v = current;
    struct sal_ret_values x;
    efi_status_t efi_ret_value;
    fpswa_ret_t fpswa_ret;
    IA64FAULT fault;
    unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;

    perfc_incra(fw_hypercall, index >> 8);
    switch (index) {
    case FW_HYPERCALL_XEN:
        return xen_hypercall(regs);

    case FW_HYPERCALL_XEN_FAST:
        return xen_fast_hypercall(regs);

    case FW_HYPERCALL_PAL_CALL:
        //printk("*** PAL hypercall: index=%d\n", regs->r28);
        //FIXME: This should call a C routine
#if 0
        // This is very conservative, but avoids a possible
        // (and deadly) freeze in paravirtualized domains due
        // to a yet-to-be-found bug where pending_interruption
        // is zero when it shouldn't be. Since PAL is called
        // in the idle loop, this should resolve it
        VCPU(v, pending_interruption) = 1;
#endif
        if (regs->r28 == PAL_HALT_LIGHT) {
            if (vcpu_deliverable_interrupts(v) ||
                event_pending(v)) {
                perfc_incr(idle_when_pending);
                vcpu_pend_unspecified_interrupt(v);
                //printk("idle w/int#%d pending!\n", pi);
                //this shouldn't happen, but it apparently does quite a bit! so don't
                //allow it to happen... i.e. if a domain has an interrupt pending and
                //it tries to halt itself because it thinks it is idle, just return here
                //as deliver_pending_interrupt is called on the way out and will deliver it
            }
            else {
                perfc_incr(pal_halt_light);
                migrate_timer(&v->arch.hlt_timer,
                              v->processor);
                set_timer(&v->arch.hlt_timer,
                          vcpu_get_next_timer_ns(v));
                do_sched_op_compat(SCHEDOP_block, 0);
                /* do_block only pends a softirq */
                do_softirq();
                stop_timer(&v->arch.hlt_timer);
                /* do_block() calls
                 * local_event_delivery_enable(),
                 * but PAL CALL must be called with
                 * psr.i = 0 and psr.i is unchanged.
                 * SDM vol.2 Part I 11.10.2
                 * PAL Calling Conventions.
                 */
                local_event_delivery_disable();
            }
            regs->r8 = 0;
            regs->r9 = 0;
            regs->r10 = 0;
            regs->r11 = 0;
        }
        else {
            struct ia64_pal_retval y;

            if (regs->r28 >= PAL_COPY_PAL)
                y = xen_pal_emulator
                    (regs->r28, vcpu_get_gr (v, 33),
                     vcpu_get_gr (v, 34),
                     vcpu_get_gr (v, 35));
            else
                y = xen_pal_emulator(regs->r28, regs->r29,
                                     regs->r30, regs->r31);
            regs->r8 = y.status; regs->r9 = y.v0;
            regs->r10 = y.v1; regs->r11 = y.v2;
        }
        break;
    case FW_HYPERCALL_SAL_CALL:
        x = sal_emulator(vcpu_get_gr(v, 32), vcpu_get_gr(v, 33),
                         vcpu_get_gr(v, 34), vcpu_get_gr(v, 35),
                         vcpu_get_gr(v, 36), vcpu_get_gr(v, 37),
                         vcpu_get_gr(v, 38), vcpu_get_gr(v, 39));
        regs->r8 = x.r8; regs->r9 = x.r9;
        regs->r10 = x.r10; regs->r11 = x.r11;
        break;
    case FW_HYPERCALL_SAL_RETURN:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;
    case FW_HYPERCALL_EFI_CALL:
        efi_ret_value = efi_emulator (regs, &fault);
        if (fault != IA64_NO_FAULT) return fault;
        regs->r8 = efi_ret_value;
        break;
    case FW_HYPERCALL_IPI:
        fw_hypercall_ipi (regs);
        break;
    case FW_HYPERCALL_SET_SHARED_INFO_VA:
        regs->r8 = domain_set_shared_info_va (regs->r28);
        break;
    case FW_HYPERCALL_FPSWA_BASE:
        switch (regs->r2) {
        case FW_HYPERCALL_FPSWA_BROKEN:
            gdprintk(XENLOG_WARNING,
                     "Old fpswa hypercall was called (0x%lx).\n"
                     "Please update your domain builder. ip 0x%lx\n",
                     FW_HYPERCALL_FPSWA_BROKEN, regs->cr_iip);
            fpswa_ret = fw_hypercall_fpswa_error();
            break;
        case FW_HYPERCALL_FPSWA:
            fpswa_ret = fw_hypercall_fpswa(v, regs);
            break;
        default:
            gdprintk(XENLOG_ERR, "unknown fpswa hypercall %lx\n",
                     regs->r2);
            fpswa_ret = fw_hypercall_fpswa_error();
            break;
        }
        regs->r8 = fpswa_ret.status;
        regs->r9 = fpswa_ret.err0;
        regs->r10 = fpswa_ret.err1;
        regs->r11 = fpswa_ret.err2;
        break;
    case __HYPERVISOR_opt_feature:
    {
        XEN_GUEST_HANDLE(void) arg;
        struct xen_ia64_opt_feature optf;
        set_xen_guest_handle(arg, (void*)(vcpu_get_gr(v, 32)));
        if (copy_from_guest(&optf, arg, 1) == 0)
            regs->r8 = domain_opt_feature(v->domain, &optf);
        else
            regs->r8 = -EFAULT;
        break;
    }
    case FW_HYPERCALL_SIOEMU:
        sioemu_hypercall(regs);
        break;
    default:
        printk("unknown ia64 fw hypercall %lx\n", regs->r2);
        regs->r8 = do_ni_hypercall();
    }
    return IA64_NO_FAULT;
}
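
/*
 * Restart the current hypercall: store op and its arguments back into the
 * guest's registers and rewind iip so the hypercall break is re-executed.
 */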
unsigned long hypercall_create_continuation(
    unsigned int op, const char *format, ...)
{
    struct mc_state *mcs = &this_cpu(mc_state);
    struct vcpu *v = current;
    const char *p = format;
    unsigned long arg;
    unsigned int i;
    va_list args;

    va_start(args, format);
    if (test_bit(_MCSF_in_multicall, &mcs->flags))
        panic("PREEMPT happen in multicall\n");  // Not supported yet.

    vcpu_set_gr(v, 15, op, 0);

    for (i = 0; *p != '\0'; i++) {
        switch ( *p++ )
        {
        case 'i':
            arg = (unsigned long)va_arg(args, unsigned int);
            break;
        case 'l':
            arg = (unsigned long)va_arg(args, unsigned long);
            break;
        case 'h':
            arg = (unsigned long)va_arg(args, void *);
            break;
        default:
            arg = 0;
            BUG();
        }
        vcpu_set_gr(v, 16 + i, arg, 0);
    }

    if (i >= 6)
        panic("Too many args for hypercall continuation\n");

    // Clear the remaining arguments to 0.
    while (i < 6) {
        vcpu_set_gr(v, 16 + i, 0, 0);
        i++;
    }

    // Re-execute the break instruction.
    vcpu_decrement_iip(v);

    v->arch.hypercall_continuation = 1;
    va_end(args);
    return op;
}
/* Need to make this function common. */
extern int
iosapic_guest_read(
    unsigned long physbase, unsigned int reg, u32 *pval);
extern int
iosapic_guest_write(
    unsigned long physbase, unsigned int reg, u32 pval);

/*
 * XXX: We don't support MSI for PCI passthrough at present, so the
 * following 2 functions are dummies for now. They shouldn't return
 * -ENOSYS because xend invokes them (the x86 version of them is
 * necessary for x86 Xen); if they returned -ENOSYS, xend would not let
 * us create an IPF HVM guest with assigned devices, so they can
 * return 0 here.
 */
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    return 0;
}

static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
{
    return 0;
}
long do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    int irq;
    long ret;

    switch ( cmd )
    {
    case PHYSDEVOP_eoi: {
        struct physdev_eoi eoi;
        ret = -EFAULT;
        if ( copy_from_guest(&eoi, arg, 1) != 0 )
            break;
        ret = __do_pirq_guest_eoi(current->domain, eoi.irq);
        break;
    }

    case PHYSDEVOP_pirq_eoi_gmfn: {
        struct physdev_pirq_eoi_gmfn info;
        unsigned long mfn;

        BUILD_BUG_ON(NR_IRQS > (PAGE_SIZE * 8));

        ret = -EFAULT;
        if ( copy_from_guest(&info, arg, 1) != 0 )
            break;

        ret = -EINVAL;
        mfn = gmfn_to_mfn(current->domain, info.gmfn);
        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), current->domain) )
            break;

        if ( cmpxchg(&current->domain->arch.pirq_eoi_map_mfn, 0, mfn) != 0 )
        {
            put_page(mfn_to_page(mfn));
            ret = -EBUSY;
            break;
        }

        current->domain->arch.pirq_eoi_map = mfn_to_virt(mfn);
        ret = 0;
        break;
    }

    /* Legacy since 0x00030202. */
    case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
        ret = pirq_guest_unmask(current->domain);
        break;
    }

    case PHYSDEVOP_irq_status_query: {
        struct physdev_irq_status_query irq_status_query;
        ret = -EFAULT;
        if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
            break;
        irq = irq_status_query.irq;
        ret = -EINVAL;
        if ( (irq < 0) || (irq >= NR_IRQS) )
            break;
        irq_status_query.flags = 0;
        /* Edge-triggered interrupts don't need an explicit unmask downcall. */
        if ( !strstr(irq_descp(irq)->handler->typename, "edge") )
            irq_status_query.flags |= XENIRQSTAT_needs_eoi;
        ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
        break;
    }

    case PHYSDEVOP_apic_read: {
        struct physdev_apic apic;
        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;
        ret = iosapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
        if ( copy_to_guest(arg, &apic, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_apic_write: {
        struct physdev_apic apic;
        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;
        ret = iosapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
        break;
    }

    case PHYSDEVOP_alloc_irq_vector: {
        struct physdev_irq irq_op;

        ret = -EFAULT;
        if ( copy_from_guest(&irq_op, arg, 1) != 0 )
            break;

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;

        ret = -EINVAL;
        if ( (irq = irq_op.irq) >= NR_IRQS )
            break;

        irq_op.vector = assign_irq_vector(irq);
        ret = copy_to_guest(arg, &irq_op, 1) ? -EFAULT : 0;
        break;
    }

    case PHYSDEVOP_free_irq_vector: {
        struct physdev_irq irq_op;
        int vector;

        ret = -EFAULT;
        if ( copy_from_guest(&irq_op, arg, 1) != 0 )
            break;

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;

        ret = -EINVAL;
        vector = irq_op.vector;
        if (vector < IA64_FIRST_DEVICE_VECTOR ||
            vector > IA64_LAST_DEVICE_VECTOR)
            break;

        /* XXX This should be called, but causes a NAT consumption via the
         * reboot notifier_call_chain in dom0 if a device is hidden for
         * a driver domain using pciback.hide= (specifically, hiding function
         * 1 of a 2 port e1000 card).
         * free_irq_vector(vector);
         */
        ret = 0;
        break;
    }

    case PHYSDEVOP_map_pirq: {
        struct physdev_map_pirq map;

        ret = -EFAULT;
        if ( copy_from_guest(&map, arg, 1) != 0 )
            break;

        ret = physdev_map_pirq(&map);

        if ( copy_to_guest(arg, &map, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_unmap_pirq: {
        struct physdev_unmap_pirq unmap;

        ret = -EFAULT;
        if ( copy_from_guest(&unmap, arg, 1) != 0 )
            break;

        ret = physdev_unmap_pirq(&unmap);
        break;
    }

    case PHYSDEVOP_manage_pci_add: {
        struct physdev_manage_pci manage_pci;
        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;
        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_add_device(manage_pci.bus, manage_pci.devfn);
        break;
    }

    case PHYSDEVOP_manage_pci_remove: {
        struct physdev_manage_pci manage_pci;
        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;
        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_remove_device(manage_pci.bus, manage_pci.devfn);
        break;
    }

    case PHYSDEVOP_manage_pci_add_ext: {
        struct physdev_manage_pci_ext manage_pci_ext;
        struct pci_dev_info pdev_info;

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;

        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci_ext, arg, 1) != 0 )
            break;

        pdev_info.is_extfn = manage_pci_ext.is_extfn;
        pdev_info.is_virtfn = manage_pci_ext.is_virtfn;
        pdev_info.physfn.bus = manage_pci_ext.physfn.bus;
        pdev_info.physfn.devfn = manage_pci_ext.physfn.devfn;
        ret = pci_add_device_ext(manage_pci_ext.bus,
                                 manage_pci_ext.devfn,
                                 &pdev_info);
        break;
    }

    default:
        ret = -ENOSYS;
        printk("not implemented do_physdev_op: %d\n", cmd);
        break;
    }

    return ret;
}
static long register_guest_callback(struct callback_register *reg)
{
    long ret = 0;
    struct vcpu *v = current;

    if (IS_VMM_ADDRESS(reg->address))
        return -EINVAL;

    switch ( reg->type )
    {
    case CALLBACKTYPE_event:
        v->arch.event_callback_ip = reg->address;
        break;

    case CALLBACKTYPE_failsafe:
        v->arch.failsafe_callback_ip = reg->address;
        break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

static long unregister_guest_callback(struct callback_unregister *unreg)
{
    return -EINVAL;
}

/* This is the first callback support added to xen/ia64, so let's just
 * stick to the newer callback interface.
 */
long do_callback_op(int cmd, XEN_GUEST_HANDLE(const_void) arg)
{
    long ret;

    switch ( cmd )
    {
    case CALLBACKOP_register:
    {
        struct callback_register reg;

        ret = -EFAULT;
        if ( copy_from_guest(&reg, arg, 1) )
            break;

        ret = register_guest_callback(&reg);
    }
    break;

    case CALLBACKOP_unregister:
    {
        struct callback_unregister unreg;

        ret = -EFAULT;
        if ( copy_from_guest(&unreg, arg, 1) )
            break;

        ret = unregister_guest_callback(&unreg);
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
unsigned long
do_ia64_debug_op(unsigned long cmd, unsigned long domain,
                 XEN_GUEST_HANDLE(xen_ia64_debug_op_t) u_debug_op)
{
    xen_ia64_debug_op_t curop, *op = &curop;
    struct domain *d;
    long ret = 0;

    if (copy_from_guest(op, u_debug_op, 1))
        return -EFAULT;
    d = rcu_lock_domain_by_id(domain);
    if (d == NULL)
        return -ESRCH;
    if (!IS_PRIV_FOR(current->domain, d)) {
        ret = -EPERM;
        goto out;
    }

    switch (cmd) {
    case XEN_IA64_DEBUG_OP_SET_FLAGS:
        d->arch.debug_flags = op->flags;
        break;
    case XEN_IA64_DEBUG_OP_GET_FLAGS:
        op->flags = d->arch.debug_flags;
        if (copy_to_guest(u_debug_op, op, 1))
            ret = -EFAULT;
        break;
    default:
        ret = -ENOSYS;
    }
 out:
    rcu_unlock_domain(d);
    return ret;
}