ia64/xen-unstable

view xen/arch/x86/physdev.c @ 19835:edfdeb150f27

Fix build system to detect udev versions newer than 123

udev removed the udevinfo symlink from versions higher than 123, so
Xen's build system could not detect whether udev was present with the
required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100 (2009-06-25)
parents 6705898f768d
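
For context, the detection this message describes amounts to reading the
installed udev version number and choosing between the old udevinfo tool
and its udevadm replacement. Below is a minimal sketch of such a check in
C (hypothetical: the real fix lives in the tools' make/shell build
scripts, and udevadm printing a bare version number is an assumption
here):

    /* Build and run: cc -o udevcheck udevcheck.c && ./udevcheck */
    #include <stdio.h>
    #include <stdlib.h>

    /* Ask udevadm (newer udev) or udevinfo (udev <= 123) for a version. */
    static int udev_version(const char *cmd)
    {
        char buf[64];
        int v = -1;
        FILE *p = popen(cmd, "r");

        if ( p != NULL )
        {
            if ( fgets(buf, sizeof(buf), p) != NULL )
                v = atoi(buf);   /* assumes output starts with the number */
            pclose(p);
        }
        return v;
    }

    int main(void)
    {
        int v = udev_version("udevadm --version 2>/dev/null");

        if ( v <= 0 )  /* old udev: strip the "udevinfo, version " prefix */
            v = udev_version("udevinfo -V 2>/dev/null | tr -dc 0-9");
        if ( v <= 0 )
            printf("udev not found\n");
        else
            printf("udev %d: %s\n", v,
                   v > 123 ? "use udevadm" : "udevinfo available");
        return 0;
    }
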
line source
#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <xen/guest_access.h>
#include <xen/iocap.h>
#include <asm/current.h>
#include <asm/msi.h>
#include <asm/hypercall.h>
#include <public/xen.h>
#include <public/physdev.h>
#include <xsm/xsm.h>
#include <asm/p2m.h>

#ifndef COMPAT
typedef long ret_t;
#endif

int
ioapic_guest_read(
    unsigned long physbase, unsigned int reg, u32 *pval);
int
ioapic_guest_write(
    unsigned long physbase, unsigned int reg, u32 pval);
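
/*
 * Map a GSI or MSI interrupt source into a domain's pirq space on
 * behalf of a privileged caller: validate or allocate the vector,
 * pick or verify the pirq, then bind the two via map_domain_pirq().
 */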
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    struct domain *d;
    int vector, pirq, ret = 0;
    struct msi_info _msi;
    void *map_data = NULL;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( !map )
        return -EINVAL;

    if ( map->domid == DOMID_SELF )
        d = rcu_lock_domain(current->domain);
    else
        d = rcu_lock_domain_by_id(map->domid);

    if ( d == NULL )
    {
        ret = -ESRCH;
        goto free_domain;
    }

    /* Verify or get vector. */
    switch ( map->type )
    {
        case MAP_PIRQ_TYPE_GSI:
            if ( map->index < 0 || map->index >= nr_irqs )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                        d->domain_id, map->index);
                ret = -EINVAL;
                goto free_domain;
            }
            vector = domain_irq_to_vector(current->domain, map->index);
            if ( !vector )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map irq with no vector %d\n",
                        d->domain_id, vector);
                ret = -EINVAL;
                goto free_domain;
            }
            break;

        case MAP_PIRQ_TYPE_MSI:
            vector = map->index;
            if ( vector == -1 )
                vector = assign_irq_vector(AUTO_ASSIGN_IRQ);

            if ( vector < 0 || vector >= NR_VECTORS )
            {
                dprintk(XENLOG_G_ERR, "dom%d: map irq with wrong vector %d\n",
                        d->domain_id, vector);
                ret = -EINVAL;
                goto free_domain;
            }

            _msi.bus = map->bus;
            _msi.devfn = map->devfn;
            _msi.entry_nr = map->entry_nr;
            _msi.table_base = map->table_base;
            _msi.vector = vector;
            map_data = &_msi;
            break;

        default:
            dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                    d->domain_id, map->type);
            ret = -EINVAL;
            goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_vector_to_irq(d, vector);
    if ( map->pirq < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, map->index, map->pirq,
                    pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, map->type, map->index);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != map->pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: vector %d conflicts with irq %d\n",
                    d->domain_id, map->index, map->pirq);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = map->pirq;
    }

    ret = map_domain_pirq(d, pirq, vector, map->type, map_data);
    if ( ret == 0 )
        map->pirq = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
        free_irq_vector(vector);
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
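
/* Undo physdev_map_pirq(): unbind the given pirq from the target domain. */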
static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
{
    struct domain *d;
    int ret;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( unmap->domid == DOMID_SELF )
        d = rcu_lock_domain(current->domain);
    else
        d = rcu_lock_domain_by_id(unmap->domid);

    if ( d == NULL )
        return -ESRCH;

    spin_lock(&pcidevs_lock);
    spin_lock(&d->event_lock);
    ret = unmap_domain_pirq(d, unmap->pirq);
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);

    rcu_unlock_domain(d);

    return ret;
}
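
/*
 * Hypercall entry point for the PHYSDEVOP_* operations: copy the guest
 * argument structure in, dispatch on 'cmd', and copy results back where
 * a sub-operation returns data.
 */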
ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    int irq;
    ret_t ret;
    struct vcpu *v = current;

    switch ( cmd )
    {
    case PHYSDEVOP_eoi: {
        struct physdev_eoi eoi;
        ret = -EFAULT;
        if ( copy_from_guest(&eoi, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( eoi.irq < 0 || eoi.irq >= v->domain->nr_pirqs )
            break;
        if ( v->domain->arch.pirq_eoi_map )
            evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
        ret = pirq_guest_eoi(v->domain, eoi.irq);
        break;
    }
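
    /*
     * Register a guest page as the shared pirq "needs EOI" bitmap, so the
     * guest can check it instead of unconditionally issuing the EOI
     * hypercall: the page is pinned writable and mapped globally.
     */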
    case PHYSDEVOP_pirq_eoi_gmfn: {
        struct physdev_pirq_eoi_gmfn info;
        unsigned long mfn;

        ret = -EFAULT;
        if ( copy_from_guest(&info, arg, 1) != 0 )
            break;

        ret = -EINVAL;
        mfn = gmfn_to_mfn(current->domain, info.gmfn);
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), v->domain,
                                PGT_writable_page) )
            break;

        if ( cmpxchg(&v->domain->arch.pirq_eoi_map_mfn, 0, mfn) != 0 )
        {
            put_page_and_type(mfn_to_page(mfn));
            ret = -EBUSY;
            break;
        }

        v->domain->arch.pirq_eoi_map = map_domain_page_global(mfn);
        if ( v->domain->arch.pirq_eoi_map == NULL )
        {
            v->domain->arch.pirq_eoi_map_mfn = 0;
            put_page_and_type(mfn_to_page(mfn));
            ret = -ENOSPC;
            break;
        }

        ret = 0;
        break;
    }

    /* Legacy since 0x00030202. */
    case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
        ret = pirq_guest_unmask(v->domain);
        break;
    }

    case PHYSDEVOP_irq_status_query: {
        struct physdev_irq_status_query irq_status_query;
        ret = -EFAULT;
        if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
            break;
        irq = irq_status_query.irq;
        ret = -EINVAL;
        if ( (irq < 0) || (irq >= v->domain->nr_pirqs) )
            break;
        irq_status_query.flags = 0;
        /*
         * Even edge-triggered or message-based IRQs can need masking from
         * time to time. If the guest is not dynamically checking for this
         * via the new pirq_eoi_map mechanism, it must conservatively always
         * execute the EOI hypercall. In practice, this only really makes a
         * difference for maskable MSI sources, and if those are supported
         * then dom0 is probably modern anyway.
         */
        irq_status_query.flags |= XENIRQSTAT_needs_eoi;
        if ( pirq_shared(v->domain, irq) )
            irq_status_query.flags |= XENIRQSTAT_shared;
        ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
        break;
    }

    case PHYSDEVOP_map_pirq: {
        struct physdev_map_pirq map;

        ret = -EFAULT;
        if ( copy_from_guest(&map, arg, 1) != 0 )
            break;

        ret = physdev_map_pirq(&map);

        if ( copy_to_guest(arg, &map, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_unmap_pirq: {
        struct physdev_unmap_pirq unmap;

        ret = -EFAULT;
        if ( copy_from_guest(&unmap, arg, 1) != 0 )
            break;

        ret = physdev_unmap_pirq(&unmap);
        break;
    }

    case PHYSDEVOP_apic_read: {
        struct physdev_apic apic;
        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = xsm_apic(v->domain, cmd);
        if ( ret )
            break;
        ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
        if ( copy_to_guest(arg, &apic, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_apic_write: {
        struct physdev_apic apic;
        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = xsm_apic(v->domain, cmd);
        if ( ret )
            break;
        ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
        break;
    }
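
    /* Allocate a vector for a GSI and map it into dom0's pirq space. */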
    case PHYSDEVOP_alloc_irq_vector: {
        struct physdev_irq irq_op;

        ret = -EFAULT;
        if ( copy_from_guest(&irq_op, arg, 1) != 0 )
            break;

        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;

        ret = xsm_assign_vector(v->domain, irq_op.irq);
        if ( ret )
            break;

        irq = irq_op.irq;
        ret = -EINVAL;
        if ( (irq < 0) || (irq >= nr_irqs) )
            break;

        irq_op.vector = assign_irq_vector(irq);

        spin_lock(&pcidevs_lock);
        spin_lock(&dom0->event_lock);
        ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
                              MAP_PIRQ_TYPE_GSI, NULL);
        spin_unlock(&dom0->event_lock);
        spin_unlock(&pcidevs_lock);

        if ( copy_to_guest(arg, &irq_op, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_set_iopl: {
        struct physdev_set_iopl set_iopl;
        ret = -EFAULT;
        if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( set_iopl.iopl > 3 )
            break;
        ret = 0;
        v->arch.iopl = set_iopl.iopl;
        break;
    }

    case PHYSDEVOP_set_iobitmap: {
        struct physdev_set_iobitmap set_iobitmap;
        ret = -EFAULT;
        if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( !guest_handle_okay(set_iobitmap.bitmap, IOBMP_BYTES) ||
             (set_iobitmap.nr_ports > 65536) )
            break;
        ret = 0;
#ifndef COMPAT
        v->arch.iobmp = set_iobitmap.bitmap;
#else
        guest_from_compat_handle(v->arch.iobmp, set_iobitmap.bitmap);
#endif
        v->arch.iobmp_limit = set_iobitmap.nr_ports;
        break;
    }

    case PHYSDEVOP_manage_pci_add: {
        struct physdev_manage_pci manage_pci;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_add_device(manage_pci.bus, manage_pci.devfn);
        break;
    }

    case PHYSDEVOP_manage_pci_remove: {
        struct physdev_manage_pci manage_pci;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_remove_device(manage_pci.bus, manage_pci.devfn);
        break;
    }

    case PHYSDEVOP_manage_pci_add_ext: {
        struct physdev_manage_pci_ext manage_pci_ext;
        struct pci_dev_info pdev_info;

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;

        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci_ext, arg, 1) != 0 )
            break;

        ret = -EINVAL;
        if ( (manage_pci_ext.is_extfn > 1) || (manage_pci_ext.is_virtfn > 1) )
            break;

        pdev_info.is_extfn = manage_pci_ext.is_extfn;
        pdev_info.is_virtfn = manage_pci_ext.is_virtfn;
        pdev_info.physfn.bus = manage_pci_ext.physfn.bus;
        pdev_info.physfn.devfn = manage_pci_ext.physfn.devfn;
        ret = pci_add_device_ext(manage_pci_ext.bus,
                                 manage_pci_ext.devfn,
                                 &pdev_info);
        break;
    }

    case PHYSDEVOP_restore_msi: {
        struct physdev_restore_msi restore_msi;
        struct pci_dev *pdev;

        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;

        ret = -EFAULT;
        if ( copy_from_guest(&restore_msi, arg, 1) != 0 )
            break;

        spin_lock(&pcidevs_lock);
        pdev = pci_get_pdev(restore_msi.bus, restore_msi.devfn);
        ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
        spin_unlock(&pcidevs_lock);
        break;
    }

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */