ia64/xen-unstable

view xen/arch/x86/physdev.c @ 18467:65dc37be0443

x86: Remove MSI boot parameter -- now always on.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Sep 10 10:47:46 2008 +0100 (2008-09-10)
parents 7f1c71c6d4c8
children 1eeb3aecea57
line source
2 #include <xen/config.h>
3 #include <xen/init.h>
4 #include <xen/lib.h>
5 #include <xen/types.h>
6 #include <xen/sched.h>
7 #include <xen/irq.h>
8 #include <xen/event.h>
9 #include <xen/guest_access.h>
10 #include <xen/iocap.h>
11 #include <asm/current.h>
12 #include <asm/msi.h>
13 #include <asm/hypercall.h>
14 #include <public/xen.h>
15 #include <public/physdev.h>
16 #include <xsm/xsm.h>
18 #ifndef COMPAT
19 typedef long ret_t;
20 #endif
22 int
23 ioapic_guest_read(
24 unsigned long physbase, unsigned int reg, u32 *pval);
25 int
26 ioapic_guest_write(
27 unsigned long physbase, unsigned int reg, u32 pval);
30 extern struct hw_interrupt_type pci_msi_type;
32 static int get_free_pirq(struct domain *d, int type, int index)
33 {
34 int i;
36 if ( d == NULL )
37 return -EINVAL;
39 ASSERT(spin_is_locked(&d->arch.irq_lock));
41 if ( type == MAP_PIRQ_TYPE_GSI )
42 {
43 for ( i = 16; i < NR_PIRQS; i++ )
44 if ( !d->arch.pirq_vector[i] )
45 break;
46 if ( i == NR_PIRQS )
47 return -ENOSPC;
48 }
49 else
50 {
51 for ( i = NR_PIRQS - 1; i >= 16; i-- )
52 if ( !d->arch.pirq_vector[i] )
53 break;
54 if ( i == 16 )
55 return -ENOSPC;
56 }
58 return i;
59 }
/*
 * Establish the bidirectional pirq <-> vector mapping for domain d and,
 * for MSI mappings, program the device's MSI capability.
 *
 * Caller must hold d->arch.irq_lock (asserted below) and be a
 * privileged domain.  Returns 0 on success or a negative errno value.
 */
61 static int map_domain_pirq(struct domain *d, int pirq, int vector,
62 struct physdev_map_pirq *map)
63 {
64 int ret = 0;
65 int old_vector, old_pirq;
66 struct msi_info msi;
68 if ( d == NULL )
69 return -EINVAL;
71 ASSERT(spin_is_locked(&d->arch.irq_lock));
73 if ( !IS_PRIV(current->domain) )
74 return -EPERM;
/* Range-check both indices before touching the lookup tables. */
76 if ( pirq < 0 || pirq >= NR_PIRQS || vector < 0 || vector >= NR_VECTORS )
77 {
78 gdprintk(XENLOG_G_ERR,
79 "invalid pirq %x or vector %x\n", pirq, vector);
80 return -EINVAL;
81 }
83 old_vector = d->arch.pirq_vector[pirq];
84 old_pirq = d->arch.vector_pirq[vector];
/*
 * Remapping an already-mapped pirq or vector to a *different* partner
 * is an error; the old mapping must be torn down first.  Re-asserting
 * the identical mapping is allowed and falls through.
 */
86 if ( (old_vector && (old_vector != vector) ) ||
87 (old_pirq && (old_pirq != pirq)) )
88 {
89 gdprintk(XENLOG_G_ERR, "remap pirq %x vector %x while not unmap\n",
90 pirq, vector);
91 ret = -EINVAL;
92 goto done;
93 }
/* Grant the domain access to this irq before recording the mapping. */
95 ret = irq_permit_access(d, pirq);
96 if ( ret )
97 {
98 gdprintk(XENLOG_G_ERR, "add irq permit access %x failed\n", pirq);
99 ret = -EINVAL;
100 goto done;
101 }
/* MSI mappings: take over the vector's irq_desc and enable MSI. */
103 if ( map && MAP_PIRQ_TYPE_MSI == map->type )
104 {
105 irq_desc_t *desc;
106 unsigned long flags;
108 desc = &irq_desc[vector];
110 spin_lock_irqsave(&desc->lock, flags);
/* Warn (but proceed) if the vector already has a handler installed. */
111 if ( desc->handler != &no_irq_type )
112 gdprintk(XENLOG_G_ERR, "Map vector %x to msi while it is in use\n",
113 vector);
114 desc->handler = &pci_msi_type;
116 msi.bus = map->bus;
117 msi.devfn = map->devfn;
118 msi.entry_nr = map->entry_nr;
119 msi.table_base = map->table_base;
120 msi.vector = vector;
122 ret = pci_enable_msi(&msi);
124 spin_unlock_irqrestore(&desc->lock, flags);
/*
 * NOTE(review): on pci_enable_msi() failure we bail out without
 * restoring desc->handler to no_irq_type or revoking the
 * irq_permit_access() granted above -- confirm this cleanup is
 * performed elsewhere or intentionally omitted.
 */
125 if ( ret )
126 goto done;
127 }
/* Record the mapping in both directions. */
129 d->arch.pirq_vector[pirq] = vector;
130 d->arch.vector_pirq[vector] = pirq;
132 done:
133 return ret;
134 }
136 /* The pirq should have been unbound before this call. */
/*
 * Tear down the pirq <-> vector mapping created by map_domain_pirq(),
 * disabling MSI at the device if the vector was MSI-driven, and revoke
 * the domain's access to the irq.
 *
 * Caller must hold d->arch.irq_lock (asserted below).
 * Returns 0 on success or a negative errno value.
 */
137 static int unmap_domain_pirq(struct domain *d, int pirq)
138 {
139 unsigned long flags;
140 irq_desc_t *desc;
141 int vector, ret = 0;
143 if ( d == NULL || pirq < 0 || pirq >= NR_PIRQS )
144 return -EINVAL;
/* NOTE(review): returns -EINVAL for an unprivileged caller here,
 * whereas map_domain_pirq() uses -EPERM -- possibly intended as -EPERM. */
146 if ( !IS_PRIV(current->domain) )
147 return -EINVAL;
149 ASSERT(spin_is_locked(&d->arch.irq_lock));
/* Look up the vector currently bound to this pirq; 0 means unmapped. */
151 vector = d->arch.pirq_vector[pirq];
153 if ( !vector )
154 {
155 gdprintk(XENLOG_G_ERR, "domain %X: pirq %x not mapped still\n",
156 d->domain_id, pirq);
157 ret = -EINVAL;
158 goto done;
159 }
161 desc = &irq_desc[vector];
162 spin_lock_irqsave(&desc->lock, flags);
/* If MSI was enabled on this vector, disable it at the device first. */
163 if ( desc->msi_desc )
164 pci_disable_msi(vector);
166 if ( desc->handler == &pci_msi_type )
167 {
168 /* MSI is not shared, so should be released already */
169 BUG_ON(desc->status & IRQ_GUEST);
170 irq_desc[vector].handler = &no_irq_type;
171 }
172 spin_unlock_irqrestore(&desc->lock, flags);
/* Clear both directions of the mapping. */
174 d->arch.pirq_vector[pirq] = d->arch.vector_pirq[vector] = 0;
176 ret = irq_deny_access(d, pirq);
177 if ( ret )
178 gdprintk(XENLOG_G_ERR, "deny irq %x access failed\n", pirq);
180 done:
181 return ret;
182 }
/*
 * PHYSDEVOP_map_pirq handler: translate the guest's request (a GSI or
 * an MSI source) into a physical vector, choose or validate a pirq for
 * it, and install the mapping via map_domain_pirq().  On success the
 * chosen pirq is written back into map->pirq for the caller to copy
 * out to the guest.
 *
 * Only privileged domains may call this.  Returns 0 or -errno.
 */
184 static int physdev_map_pirq(struct physdev_map_pirq *map)
185 {
186 struct domain *d;
187 int vector, pirq, ret = 0;
188 unsigned long flags;
190 if ( !IS_PRIV(current->domain) )
191 return -EPERM;
193 if ( !map )
194 return -EINVAL;
/* Resolve the target domain, taking an RCU reference. */
196 if ( map->domid == DOMID_SELF )
197 d = rcu_lock_domain(current->domain);
198 else
199 d = rcu_lock_domain_by_id(map->domid);
/*
 * NOTE(review): on lookup failure this path still reaches
 * free_domain/rcu_unlock_domain(d) with d == NULL -- verify that
 * rcu_unlock_domain() tolerates a NULL/unlocked domain here.
 */
201 if ( d == NULL )
202 {
203 ret = -ESRCH;
204 goto free_domain;
205 }
/* Translate the mapping request into a physical interrupt vector. */
207 switch ( map->type )
208 {
209 case MAP_PIRQ_TYPE_GSI:
210 if ( map->index >= NR_IRQS )
211 {
212 ret = -EINVAL;
213 gdprintk(XENLOG_G_ERR,
214 "map invalid irq %x\n", map->index);
215 goto free_domain;
216 }
217 vector = IO_APIC_VECTOR(map->index);
218 if ( !vector )
219 {
220 ret = -EINVAL;
221 gdprintk(XENLOG_G_ERR,
222 "map irq with no vector %x\n", map->index);
223 goto free_domain;
224 }
225 break;
226 case MAP_PIRQ_TYPE_MSI:
/* index == -1 asks the hypervisor to allocate a vector itself. */
227 vector = map->index;
228 if ( vector == -1 )
229 vector = assign_irq_vector(AUTO_ASSIGN);
231 if ( vector < 0 || vector >= NR_VECTORS )
232 {
233 ret = -EINVAL;
234 gdprintk(XENLOG_G_ERR,
235 "map_pirq with wrong vector %x\n", map->index);
236 goto free_domain;
237 }
238 break;
239 default:
240 ret = -EINVAL;
241 gdprintk(XENLOG_G_ERR, "wrong map_pirq type %x\n", map->type);
242 goto free_domain;
243 break;
244 }
/* Choose a pirq under the irq_lock: reuse an existing mapping,
 * allocate a free slot, or honor the caller's explicit request. */
246 spin_lock_irqsave(&d->arch.irq_lock, flags);
247 if ( map->pirq == -1 )
248 {
249 if ( d->arch.vector_pirq[vector] )
250 {
/* Vector already mapped: hand back the existing pirq. */
251 gdprintk(XENLOG_G_ERR, "%x %x mapped already%x\n",
252 map->index, map->pirq,
253 d->arch.vector_pirq[vector]);
254 pirq = d->arch.vector_pirq[vector];
255 }
256 else
257 {
258 pirq = get_free_pirq(d, map->type, map->index);
259 if ( pirq < 0 )
260 {
261 ret = pirq;
262 gdprintk(XENLOG_G_ERR, "No free pirq\n");
263 goto done;
264 }
265 }
266 }
267 else
268 {
/* Caller asked for a specific pirq: reject if the vector is
 * already bound to a different one. */
269 if ( d->arch.vector_pirq[vector] &&
270 d->arch.vector_pirq[vector] != map->pirq )
271 {
272 gdprintk(XENLOG_G_ERR, "%x conflict with %x\n",
273 map->index, map->pirq);
274 ret = -EEXIST;
275 goto done;
276 }
277 else
278 pirq = map->pirq;
279 }
/*
 * NOTE(review): if map_domain_pirq() fails after an AUTO_ASSIGN'd MSI
 * vector was allocated above, that vector appears to be leaked --
 * confirm against later upstream revisions.
 */
282 ret = map_domain_pirq(d, pirq, vector, map);
284 if ( !ret )
285 map->pirq = pirq;
286 done:
287 spin_unlock_irqrestore(&d->arch.irq_lock, flags);
288 free_domain:
289 rcu_unlock_domain(d);
290 return ret;
291 }
293 static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
294 {
295 struct domain *d;
296 unsigned long flags;
297 int ret;
299 if ( !IS_PRIV(current->domain) )
300 return -EPERM;
302 if ( unmap->domid == DOMID_SELF )
303 d = rcu_lock_domain(current->domain);
304 else
305 d = rcu_lock_domain_by_id(unmap->domid);
307 if ( d == NULL )
308 return -ESRCH;
310 spin_lock_irqsave(&d->arch.irq_lock, flags);
311 ret = unmap_domain_pirq(d, unmap->pirq);
312 spin_unlock_irqrestore(&d->arch.irq_lock, flags);
314 rcu_unlock_domain(d);
316 return ret;
317 }
/*
 * Top-level PHYSDEVOP hypercall dispatcher.  Copies the per-command
 * argument structure in from guest memory, performs the operation, and
 * (where the command returns data) copies the structure back out.
 *
 * Compiled twice via the COMPAT mechanism (see the ret_t typedef and
 * the #ifndef COMPAT block in the iobitmap case) to serve both native
 * and 32-on-64 guests.
 */
319 ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
320 {
321 int irq;
322 ret_t ret;
323 struct vcpu *v = current;
325 switch ( cmd )
326 {
/* Signal end-of-interrupt for a guest-bound pirq. */
327 case PHYSDEVOP_eoi: {
328 struct physdev_eoi eoi;
329 ret = -EFAULT;
330 if ( copy_from_guest(&eoi, arg, 1) != 0 )
331 break;
332 ret = pirq_guest_eoi(v->domain, eoi.irq);
333 break;
334 }
336 /* Legacy since 0x00030202. */
337 case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
338 ret = pirq_guest_unmask(v->domain);
339 break;
340 }
/* Report whether an irq needs an explicit EOI and whether it is shared. */
342 case PHYSDEVOP_irq_status_query: {
343 struct physdev_irq_status_query irq_status_query;
344 ret = -EFAULT;
345 if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
346 break;
347 irq = irq_status_query.irq;
348 ret = -EINVAL;
349 if ( (irq < 0) || (irq >= NR_IRQS) )
350 break;
351 irq_status_query.flags = 0;
352 if ( pirq_acktype(v->domain, irq) != 0 )
353 irq_status_query.flags |= XENIRQSTAT_needs_eoi;
354 if ( pirq_shared(v->domain, irq) )
355 irq_status_query.flags |= XENIRQSTAT_shared;
356 ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
357 break;
358 }
/* Map a GSI or MSI source to a pirq (privileged; see physdev_map_pirq). */
360 case PHYSDEVOP_map_pirq: {
361 struct physdev_map_pirq map;
363 ret = -EFAULT;
364 if ( copy_from_guest(&map, arg, 1) != 0 )
365 break;
367 ret = physdev_map_pirq(&map);
/* Copy back even on failure so the guest sees the (possibly updated) pirq. */
369 if ( copy_to_guest(arg, &map, 1) != 0 )
370 ret = -EFAULT;
371 break;
372 }
/* Undo a previous PHYSDEVOP_map_pirq (privileged). */
374 case PHYSDEVOP_unmap_pirq: {
375 struct physdev_unmap_pirq unmap;
377 ret = -EFAULT;
378 if ( copy_from_guest(&unmap, arg, 1) != 0 )
379 break;
381 ret = physdev_unmap_pirq(&unmap);
382 break;
383 }
/* Read an IO-APIC register on behalf of a privileged guest (XSM-checked). */
385 case PHYSDEVOP_apic_read: {
386 struct physdev_apic apic;
387 ret = -EFAULT;
388 if ( copy_from_guest(&apic, arg, 1) != 0 )
389 break;
390 ret = -EPERM;
391 if ( !IS_PRIV(v->domain) )
392 break;
393 ret = xsm_apic(v->domain, cmd);
394 if ( ret )
395 break;
396 ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
397 if ( copy_to_guest(arg, &apic, 1) != 0 )
398 ret = -EFAULT;
399 break;
400 }
/* Write an IO-APIC register on behalf of a privileged guest (XSM-checked). */
402 case PHYSDEVOP_apic_write: {
403 struct physdev_apic apic;
404 ret = -EFAULT;
405 if ( copy_from_guest(&apic, arg, 1) != 0 )
406 break;
407 ret = -EPERM;
408 if ( !IS_PRIV(v->domain) )
409 break;
410 ret = xsm_apic(v->domain, cmd);
411 if ( ret )
412 break;
413 ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
414 break;
415 }
/* Allocate a vector for an irq and map it into dom0 (privileged). */
417 case PHYSDEVOP_alloc_irq_vector: {
418 struct physdev_irq irq_op;
419 unsigned long flags;
421 ret = -EFAULT;
422 if ( copy_from_guest(&irq_op, arg, 1) != 0 )
423 break;
425 ret = -EPERM;
426 if ( !IS_PRIV(v->domain) )
427 break;
429 ret = xsm_assign_vector(v->domain, irq_op.irq);
430 if ( ret )
431 break;
433 irq = irq_op.irq;
434 ret = -EINVAL;
435 if ( (irq < 0) || (irq >= NR_IRQS) )
436 break;
438 irq_op.vector = assign_irq_vector(irq);
/* The mapping is always installed into dom0, not the calling domain. */
440 spin_lock_irqsave(&dom0->arch.irq_lock, flags);
441 ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector, NULL);
442 spin_unlock_irqrestore(&dom0->arch.irq_lock, flags);
444 if ( copy_to_guest(arg, &irq_op, 1) != 0 )
445 ret = -EFAULT;
446 break;
447 }
/* Set the vcpu's virtual IOPL (0-3). */
449 case PHYSDEVOP_set_iopl: {
450 struct physdev_set_iopl set_iopl;
451 ret = -EFAULT;
452 if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
453 break;
454 ret = -EINVAL;
455 if ( set_iopl.iopl > 3 )
456 break;
457 ret = 0;
458 v->arch.iopl = set_iopl.iopl;
459 break;
460 }
/* Point the vcpu at a guest-supplied I/O-port permission bitmap. */
462 case PHYSDEVOP_set_iobitmap: {
463 struct physdev_set_iobitmap set_iobitmap;
464 ret = -EFAULT;
465 if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
466 break;
467 ret = -EINVAL;
468 if ( !guest_handle_okay(set_iobitmap.bitmap, IOBMP_BYTES) ||
469 (set_iobitmap.nr_ports > 65536) )
470 break;
471 ret = 0;
/* Handle width differs between native and compat guests. */
472 #ifndef COMPAT
473 v->arch.iobmp = set_iobitmap.bitmap;
474 #else
475 guest_from_compat_handle(v->arch.iobmp, set_iobitmap.bitmap);
476 #endif
477 v->arch.iobmp_limit = set_iobitmap.nr_ports;
478 break;
479 }
/* Notify Xen of a hot-added PCI device (privileged). */
481 case PHYSDEVOP_manage_pci_add: {
482 struct physdev_manage_pci manage_pci;
483 ret = -EPERM;
484 if ( !IS_PRIV(v->domain) )
485 break;
486 ret = -EFAULT;
487 if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
488 break;
490 ret = pci_add_device(manage_pci.bus, manage_pci.devfn);
491 break;
492 }
/* Notify Xen of a removed PCI device (privileged). */
494 case PHYSDEVOP_manage_pci_remove: {
495 struct physdev_manage_pci manage_pci;
496 ret = -EPERM;
497 if ( !IS_PRIV(v->domain) )
498 break;
499 ret = -EFAULT;
500 if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
501 break;
503 ret = pci_remove_device(manage_pci.bus, manage_pci.devfn);
504 break;
505 }
507 default:
508 ret = -ENOSYS;
509 break;
510 }
512 return ret;
513 }
515 /*
516 * Local variables:
517 * mode: C
518 * c-set-style: "BSD"
519 * c-basic-offset: 4
520 * tab-width: 4
521 * indent-tabs-mode: nil
522 * End:
523 */