ia64/xen-unstable

view xen/arch/x86/physdev.c @ 18806:ed8524f4a044

x86: Re-initialise HPET on resume from S3

Signed-off-by: Guanqun Lu <guanqun.lu@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Nov 18 15:55:14 2008 +0000 (2008-11-18)
parents 0033c944318f
children c820bf73a914
line source
2 #include <xen/config.h>
3 #include <xen/init.h>
4 #include <xen/lib.h>
5 #include <xen/types.h>
6 #include <xen/sched.h>
7 #include <xen/irq.h>
8 #include <xen/event.h>
9 #include <xen/guest_access.h>
10 #include <xen/iocap.h>
11 #include <asm/current.h>
12 #include <asm/msi.h>
13 #include <asm/hypercall.h>
14 #include <public/xen.h>
15 #include <public/physdev.h>
16 #include <xsm/xsm.h>
#ifndef COMPAT
/* Native build: hypercall return values are plain longs.  (The COMPAT
 * build of this file supplies its own ret_t definition.) */
typedef long ret_t;
#endif

/*
 * I/O APIC register accessors, defined outside this file; used by the
 * PHYSDEVOP_apic_read / PHYSDEVOP_apic_write handlers in do_physdev_op().
 */
int
ioapic_guest_read(
    unsigned long physbase, unsigned int reg, u32 *pval);
int
ioapic_guest_write(
    unsigned long physbase, unsigned int reg, u32 pval);
/*
 * Map a physical interrupt (GSI or MSI) onto a domain pirq on behalf of a
 * privileged caller.
 *
 * map->type selects the source:
 *   MAP_PIRQ_TYPE_GSI: map->index is the IRQ; its vector must already exist.
 *   MAP_PIRQ_TYPE_MSI: map->index is the vector, or -1 to auto-allocate one.
 * map->pirq < 0 asks for a free pirq to be chosen; otherwise map->pirq is
 * used as-is.  On success map->pirq is updated to the pirq actually used.
 *
 * Returns 0 on success, or a negative errno value.
 */
static int physdev_map_pirq(struct physdev_map_pirq *map)
{
    struct domain *d;
    int vector, pirq, ret = 0;
    struct msi_info _msi;
    void *map_data = NULL;

    /* Only the privileged (control) domain may manage pirq mappings. */
    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( !map )
        return -EINVAL;

    if ( map->domid == DOMID_SELF )
        d = rcu_lock_domain(current->domain);
    else
        d = rcu_lock_domain_by_id(map->domid);

    if ( d == NULL )
    {
        /* NOTE(review): jumps to free_domain with d == NULL; safe only if
         * rcu_unlock_domain() tolerates a NULL argument -- confirm. */
        ret = -ESRCH;
        goto free_domain;
    }

    /* Verify or get vector. */
    switch ( map->type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( map->index < 0 || map->index >= NR_IRQS )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, map->index);
            ret = -EINVAL;
            goto free_domain;
        }
        /* The GSI must already have a vector assigned; we never allocate
         * one here. */
        vector = IO_APIC_VECTOR(map->index);
        if ( !vector )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map irq with no vector %d\n",
                    d->domain_id, vector);
            ret = -EINVAL;
            goto free_domain;
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        /* index == -1 requests auto-allocation of a free vector. */
        vector = map->index;
        if ( vector == -1 )
            vector = assign_irq_vector(AUTO_ASSIGN);

        if ( vector < 0 || vector >= NR_VECTORS )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map irq with wrong vector %d\n",
                    d->domain_id, vector);
            ret = -EINVAL;
            goto free_domain;
        }

        /* Package the MSI description for map_domain_pirq(). */
        _msi.bus = map->bus;
        _msi.devfn = map->devfn;
        _msi.entry_nr = map->entry_nr;
        _msi.table_base = map->table_base;
        _msi.vector = vector;
        map_data = &_msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, map->type);
        ret = -EINVAL;
        goto free_domain;
    }

    /* Verify or get pirq.  d->event_lock serialises pirq table updates. */
    spin_lock(&d->event_lock);
    if ( map->pirq < 0 )
    {
        if ( d->arch.vector_pirq[vector] )
        {
            /* NOTE(review): this dprintk also fires on the benign path where
             * the existing (non-negative) pirq is simply reused below. */
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, map->index, map->pirq,
                    d->arch.vector_pirq[vector]);
            pirq = d->arch.vector_pirq[vector];
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, map->type, map->index);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        /* Caller-specified pirq: reject if the vector is already bound to a
         * different pirq. */
        if ( d->arch.vector_pirq[vector] &&
             d->arch.vector_pirq[vector] != map->pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: vector %d conflicts with irq %d\n",
                    d->domain_id, map->index, map->pirq);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = map->pirq;
    }

    ret = map_domain_pirq(d, pirq, vector, map->type, map_data);
    if ( ret == 0 )
        map->pirq = pirq;

 done:
    spin_unlock(&d->event_lock);
    /* Undo the auto-allocated MSI vector (index == -1 case) on failure. */
    if ( (ret != 0) && (map->type == MAP_PIRQ_TYPE_MSI) && (map->index == -1) )
        free_irq_vector(vector);
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}
156 static int physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
157 {
158 struct domain *d;
159 int ret;
161 if ( !IS_PRIV(current->domain) )
162 return -EPERM;
164 if ( unmap->domid == DOMID_SELF )
165 d = rcu_lock_domain(current->domain);
166 else
167 d = rcu_lock_domain_by_id(unmap->domid);
169 if ( d == NULL )
170 return -ESRCH;
172 spin_lock(&d->event_lock);
173 ret = unmap_domain_pirq(d, unmap->pirq);
174 spin_unlock(&d->event_lock);
176 rcu_unlock_domain(d);
178 return ret;
179 }
/*
 * Top-level PHYSDEVOP hypercall dispatcher.
 *
 * cmd selects the sub-operation; arg is a guest handle to the matching
 * public/physdev.h structure.  Each arm copies the argument in, validates
 * it (privilege/XSM checks where required), performs the operation, and
 * copies results back out where the interface defines output fields.
 *
 * Returns 0 on success or a negative errno value (-EFAULT on guest-copy
 * failure, -EPERM on privilege failure, -ENOSYS for unknown cmd).
 */
ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    int irq;
    ret_t ret;
    struct vcpu *v = current;

    switch ( cmd )
    {
    /* Guest signals end-of-interrupt for a pirq it owns. */
    case PHYSDEVOP_eoi: {
        struct physdev_eoi eoi;
        ret = -EFAULT;
        if ( copy_from_guest(&eoi, arg, 1) != 0 )
            break;
        ret = pirq_guest_eoi(v->domain, eoi.irq);
        break;
    }

    /* Legacy since 0x00030202. */
    case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
        ret = pirq_guest_unmask(v->domain);
        break;
    }

    /* Report per-irq status flags (needs-EOI, shared) back to the guest. */
    case PHYSDEVOP_irq_status_query: {
        struct physdev_irq_status_query irq_status_query;
        ret = -EFAULT;
        if ( copy_from_guest(&irq_status_query, arg, 1) != 0 )
            break;
        irq = irq_status_query.irq;
        ret = -EINVAL;
        if ( (irq < 0) || (irq >= NR_IRQS) )
            break;
        irq_status_query.flags = 0;
        if ( pirq_acktype(v->domain, irq) != 0 )
            irq_status_query.flags |= XENIRQSTAT_needs_eoi;
        if ( pirq_shared(v->domain, irq) )
            irq_status_query.flags |= XENIRQSTAT_shared;
        ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
        break;
    }

    /* Privilege check is done inside physdev_map_pirq().  map is copied
     * back unconditionally since map.pirq is an output field. */
    case PHYSDEVOP_map_pirq: {
        struct physdev_map_pirq map;

        ret = -EFAULT;
        if ( copy_from_guest(&map, arg, 1) != 0 )
            break;

        ret = physdev_map_pirq(&map);

        if ( copy_to_guest(arg, &map, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    case PHYSDEVOP_unmap_pirq: {
        struct physdev_unmap_pirq unmap;

        ret = -EFAULT;
        if ( copy_from_guest(&unmap, arg, 1) != 0 )
            break;

        ret = physdev_unmap_pirq(&unmap);
        break;
    }

    /* Privileged + XSM-checked I/O APIC register read; value copied back. */
    case PHYSDEVOP_apic_read: {
        struct physdev_apic apic;
        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = xsm_apic(v->domain, cmd);
        if ( ret )
            break;
        ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
        if ( copy_to_guest(arg, &apic, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    /* Privileged + XSM-checked I/O APIC register write. */
    case PHYSDEVOP_apic_write: {
        struct physdev_apic apic;
        ret = -EFAULT;
        if ( copy_from_guest(&apic, arg, 1) != 0 )
            break;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = xsm_apic(v->domain, cmd);
        if ( ret )
            break;
        ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
        break;
    }

    /* Allocate a vector for an irq and map it as a dom0 GSI pirq. */
    case PHYSDEVOP_alloc_irq_vector: {
        struct physdev_irq irq_op;

        ret = -EFAULT;
        if ( copy_from_guest(&irq_op, arg, 1) != 0 )
            break;

        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;

        ret = xsm_assign_vector(v->domain, irq_op.irq);
        if ( ret )
            break;

        irq = irq_op.irq;
        ret = -EINVAL;
        if ( (irq < 0) || (irq >= NR_IRQS) )
            break;

        /* NOTE(review): assign_irq_vector() failure (negative return) is
         * not checked here; this appears to rely on map_domain_pirq()
         * rejecting an out-of-range vector -- confirm. */
        irq_op.vector = assign_irq_vector(irq);

        spin_lock(&dom0->event_lock);
        ret = map_domain_pirq(dom0, irq_op.irq, irq_op.vector,
                              MAP_PIRQ_TYPE_GSI, NULL);
        spin_unlock(&dom0->event_lock);

        if ( copy_to_guest(arg, &irq_op, 1) != 0 )
            ret = -EFAULT;
        break;
    }

    /* Set the calling vcpu's virtual I/O privilege level (0-3). */
    case PHYSDEVOP_set_iopl: {
        struct physdev_set_iopl set_iopl;
        ret = -EFAULT;
        if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( set_iopl.iopl > 3 )
            break;
        ret = 0;
        v->arch.iopl = set_iopl.iopl;
        break;
    }

    /* Install the calling vcpu's I/O-port permission bitmap. */
    case PHYSDEVOP_set_iobitmap: {
        struct physdev_set_iobitmap set_iobitmap;
        ret = -EFAULT;
        if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
            break;
        ret = -EINVAL;
        if ( !guest_handle_okay(set_iobitmap.bitmap, IOBMP_BYTES) ||
             (set_iobitmap.nr_ports > 65536) )
            break;
        ret = 0;
#ifndef COMPAT
        v->arch.iobmp = set_iobitmap.bitmap;
#else
        /* Compat guest: translate the 32-bit handle to a native one. */
        guest_from_compat_handle(v->arch.iobmp, set_iobitmap.bitmap);
#endif
        v->arch.iobmp_limit = set_iobitmap.nr_ports;
        break;
    }

    /* Privileged: notify Xen of a PCI device hot-add. */
    case PHYSDEVOP_manage_pci_add: {
        struct physdev_manage_pci manage_pci;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_add_device(manage_pci.bus, manage_pci.devfn);
        break;
    }

    /* Privileged: notify Xen of a PCI device removal. */
    case PHYSDEVOP_manage_pci_remove: {
        struct physdev_manage_pci manage_pci;
        ret = -EPERM;
        if ( !IS_PRIV(v->domain) )
            break;
        ret = -EFAULT;
        if ( copy_from_guest(&manage_pci, arg, 1) != 0 )
            break;

        ret = pci_remove_device(manage_pci.bus, manage_pci.devfn);
        break;
    }

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
377 /*
378 * Local variables:
379 * mode: C
380 * c-set-style: "BSD"
381 * c-basic-offset: 4
382 * tab-width: 4
383 * indent-tabs-mode: nil
384 * End:
385 */