ia64/xen-unstable

unmodified_drivers/linux-2.6/platform-pci/evtchn.c @ 15342:1623f5f5094f

[IA64] Don't try to save nvram on PV domains

Signed-off-by: Alex Williamson <alex.williamson@hp.com>
author	Alex Williamson <alex.williamson@hp.com>
date	Thu Jun 14 15:32:22 2007 -0600

/******************************************************************************
 * evtchn.c
 *
 * A simplified event channel for para-drivers in unmodified linux
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <xen/evtchn.h>
#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include "platform-pci.h"

#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif

void *shared_info_area;

#define is_valid_evtchn(x)	((x) != 0)
#define evtchn_from_irq(x)	(irq_evtchn[x].evtchn)
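
/*
 * irq_evtchn[] below maps each dynamically allocated IRQ to the
 * event-channel port it is bound to, along with its handler and flags;
 * evtchn_to_irq[] is the reverse map from port to IRQ.  The inuse flag
 * is guarded by irq_alloc_lock, everything else by the per-IRQ spinlock.
 */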

static struct {
	spinlock_t lock;
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	void *dev_id;
	int evtchn;
	int close:1; /* close on unbind_from_irqhandler()? */
	int inuse:1;
	int in_handler:1;
} irq_evtchn[256];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1 };

static DEFINE_SPINLOCK(irq_alloc_lock);

static int alloc_xen_irq(void)
{
	static int warned;
	int irq;

	spin_lock(&irq_alloc_lock);

	for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) {
		if (irq_evtchn[irq].inuse)
			continue;
		irq_evtchn[irq].inuse = 1;
		spin_unlock(&irq_alloc_lock);
		return irq;
	}

	if (!warned) {
		warned = 1;
		printk(KERN_WARNING "No available IRQ to bind to: "
		       "increase irq_evtchn[] size in evtchn.c.\n");
	}

	spin_unlock(&irq_alloc_lock);

	return -ENOSPC;
}

static void free_xen_irq(int irq)
{
	spin_lock(&irq_alloc_lock);
	irq_evtchn[irq].inuse = 0;
	spin_unlock(&irq_alloc_lock);
}

int irq_to_evtchn_port(int irq)
{
	return irq_evtchn[irq].evtchn;
}
EXPORT_SYMBOL(irq_to_evtchn_port);

void mask_evtchn(int port)
{
	shared_info_t *s = shared_info_area;
	synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL(mask_evtchn);

void unmask_evtchn(int port)
{
	unsigned int cpu;
	shared_info_t *s = shared_info_area;
	vcpu_info_t *vcpu_info;

	cpu = get_cpu();
	vcpu_info = &s->vcpu_info[cpu];

	/* Slow path (hypercall) if this is a non-local port.  We only
	   ever bind event channels to vcpu 0 in HVM guests. */
	if (unlikely(cpu != 0)) {
		evtchn_unmask_t op = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op);
		put_cpu();
		return;
	}

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'.
	 * Just like a real IO-APIC we 'lose the interrupt edge' if the
	 * channel is masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
		if (!vcpu_info->evtchn_upcall_mask)
			force_evtchn_callback();
	}

	put_cpu();
}
EXPORT_SYMBOL(unmask_evtchn);
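
/*
 * Worked example of the two-level pending bitmap: on a 64-bit guest,
 * port 67 lives at bit 67 % BITS_PER_LONG == 3 of evtchn_pending[1],
 * and the selector bit set by unmask_evtchn() above (and scanned by
 * evtchn_interrupt() below) is 67 / BITS_PER_LONG == 1.
 */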

int bind_listening_port_to_irqhandler(
	unsigned int remote_domain,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err, irq;

	irq = alloc_xen_irq();
	if (irq < 0)
		return irq;

	spin_lock_irq(&irq_evtchn[irq].lock);

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err) {
		spin_unlock_irq(&irq_evtchn[irq].lock);
		free_xen_irq(irq);
		return err;
	}

	irq_evtchn[irq].handler = handler;
	irq_evtchn[irq].dev_id = dev_id;
	irq_evtchn[irq].evtchn = alloc_unbound.port;
	irq_evtchn[irq].close = 1;

	evtchn_to_irq[alloc_unbound.port] = irq;

	unmask_evtchn(alloc_unbound.port);

	spin_unlock_irq(&irq_evtchn[irq].lock);

	return irq;
}
EXPORT_SYMBOL(bind_listening_port_to_irqhandler);
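
/*
 * Usage sketch (hypothetical caller, not part of this file): a para-driver
 * wanting a fresh interdomain port to a backend could do, assuming its own
 * my_handler() and a backend domain id backend_domid:
 *
 *	irq = bind_listening_port_to_irqhandler(backend_domid, my_handler,
 *						0, "my-pv-driver", my_dev);
 *	if (irq < 0)
 *		return irq;	// -ENOSPC once irq_evtchn[] is exhausted
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 *
 * The allocated port can be read back with irq_to_evtchn_port(irq), e.g.
 * to advertise it to the backend.
 */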

int bind_caller_port_to_irqhandler(
	unsigned int caller_port,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq;

	irq = alloc_xen_irq();
	if (irq < 0)
		return irq;

	spin_lock_irq(&irq_evtchn[irq].lock);

	irq_evtchn[irq].handler = handler;
	irq_evtchn[irq].dev_id = dev_id;
	irq_evtchn[irq].evtchn = caller_port;
	irq_evtchn[irq].close = 0;

	evtchn_to_irq[caller_port] = irq;

	unmask_evtchn(caller_port);

	spin_unlock_irq(&irq_evtchn[irq].lock);

	return irq;
}
EXPORT_SYMBOL(bind_caller_port_to_irqhandler);
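
/*
 * Unlike the listening variant above, here the caller already owns
 * caller_port (typically an interdomain port negotiated out of band, e.g.
 * via xenstore), so close is left clear and unbind_from_irqhandler() will
 * not close the port on the caller's behalf.
 */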

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	int evtchn;

	spin_lock_irq(&irq_evtchn[irq].lock);

	evtchn = evtchn_from_irq(irq);

	if (is_valid_evtchn(evtchn)) {
		evtchn_to_irq[evtchn] = -1;
		mask_evtchn(evtchn);
		if (irq_evtchn[irq].close) {
			struct evtchn_close close = { .port = evtchn };
			HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
		}
	}

	irq_evtchn[irq].handler = NULL;
	irq_evtchn[irq].evtchn = 0;

	spin_unlock_irq(&irq_evtchn[irq].lock);

	/* Wait for any concurrent invocation of the handler to drain. */
	while (irq_evtchn[irq].in_handler)
		cpu_relax();

	free_xen_irq(irq);
}
EXPORT_SYMBOL(unbind_from_irqhandler);

void notify_remote_via_irq(int irq)
{
	int evtchn;

	evtchn = evtchn_from_irq(irq);
	if (is_valid_evtchn(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL(notify_remote_via_irq);

static irqreturn_t evtchn_interrupt(int irq, void *dev_id,
				    struct pt_regs *regs)
{
	unsigned int l1i, port;
	/* XXX: All events are bound to vcpu0 but irq may be redirected. */
	int cpu = 0; /*smp_processor_id();*/
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	shared_info_t *s = shared_info_area;
	vcpu_info_t *v = &s->vcpu_info[cpu];
	unsigned long l1, l2;

	v->evtchn_upcall_pending = 0;
	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	l1 = xchg(&v->evtchn_pending_sel, 0);
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);
		while ((l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i])) {
			port = (l1i * BITS_PER_LONG) + __ffs(l2);
			synch_clear_bit(port, &s->evtchn_pending[0]);

			irq = evtchn_to_irq[port];
			if (irq < 0)
				continue;

			spin_lock(&irq_evtchn[irq].lock);
			handler = irq_evtchn[irq].handler;
			dev_id = irq_evtchn[irq].dev_id;
			if (unlikely(handler == NULL)) {
				printk(KERN_WARNING
				       "Xen IRQ%d (port %d) has no handler!\n",
				       irq, port);
				spin_unlock(&irq_evtchn[irq].lock);
				continue;
			}
			irq_evtchn[irq].in_handler = 1;
			spin_unlock(&irq_evtchn[irq].lock);
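
			/*
			 * Run the handler with interrupts enabled so a
			 * long-running handler cannot stall other IRQs;
			 * the in_handler flag set above lets
			 * unbind_from_irqhandler() spin until we are done.
			 */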
			local_irq_enable();
			handler(irq, dev_id, regs);
			local_irq_disable();

			spin_lock(&irq_evtchn[irq].lock);
			irq_evtchn[irq].in_handler = 0;
			spin_unlock(&irq_evtchn[irq].lock);
		}
	}

	return IRQ_HANDLED;
}

void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
EXPORT_SYMBOL(force_evtchn_callback);

void irq_resume(void)
{
	int evtchn, irq;

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) {
		mask_evtchn(evtchn);
		evtchn_to_irq[evtchn] = -1;
	}

	for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
		irq_evtchn[irq].evtchn = 0;
}

int xen_irq_init(struct pci_dev *pdev)
{
	int irq;

	for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
		spin_lock_init(&irq_evtchn[irq].lock);

	return request_irq(pdev->irq, evtchn_interrupt,
			   SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT,
			   "xen-platform-pci", pdev);
}
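
/*
 * Note: the SA_* flags above are the pre-2.6.18 spellings; on kernels where
 * they have been renamed, <xen/platform-compat.h> (included above when
 * HAVE_XEN_PLATFORM_COMPAT_H is set) is expected to map them to their
 * IRQF_* equivalents (SA_SHIRQ -> IRQF_SHARED, etc.).
 */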