ia64/xen-unstable

unmodified_drivers/linux-2.6/platform-pci/evtchn.c @ 16242:3d97c1c1f7c8

pv-on-hvm: fixes for unmodified drivers build and modern Linux

- The adjustments to README and overrides.mk are generic.
- The removal of explicit linux/config.h inclusion should also not
cause any issues.
- The introduction of irq_handler_t should eliminate warnings on
2.6.19+ kernels (I didn't check that they're there, but since the
request_irq prototype changed, I'm sure there's at least one; see
the sketch after this list). However, as a result, changes to the
Linux tree are expected to be required.
- The change setup_xen_features -> xen_setup_features follows the
naming in mainline 2.6.23 but would apparently also require changes
to the Linux tree.
- The changes SA_* -> IRQF_* and pci_module_init ->
pci_register_driver should also not cause issues.
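
As a concrete illustration (a sketch, not the literal patch; 2.6.19+
kernels already define irq_handler_t in <linux/interrupt.h>), the
compatibility typedef for older kernels could look like:

    #include <linux/version.h>
    #include <linux/interrupt.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
    /* Pre-2.6.19 handlers took a third struct pt_regs * argument. */
    typedef irqreturn_t (*irq_handler_t)(int, void *, struct pt_regs *);
    #endif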

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir@xensource.com>
date Thu Oct 25 15:54:19 2007 +0100 (2007-10-25)
parents 6e7ef794cdbc
children 6c218ca2db7e
/******************************************************************************
 * evtchn.c
 *
 * A simplified event channel for para-drivers in unmodified linux
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/version.h>      /* LINUX_VERSION_CODE, KERNEL_VERSION() */
#include <linux/interrupt.h>    /* request_irq(), irqreturn_t */
#include <linux/pci.h>          /* struct pci_dev */
#include <xen/evtchn.h>
#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include "platform-pci.h"

#ifdef HAVE_XEN_PLATFORM_COMPAT_H
#include <xen/platform-compat.h>
#endif

void *shared_info_area;

#define is_valid_evtchn(x)      ((x) != 0)
#define evtchn_from_irq(x)      (irq_evtchn[(x)].evtchn)

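/*
 * Per-IRQ bookkeeping. "IRQs" here are indices into this fixed-size
 * table, allocated dynamically; index 0 is never handed out, and an
 * event channel of 0 doubles as "invalid" (see is_valid_evtchn()).
 */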
static struct {
        spinlock_t lock;
        irq_handler_t handler;
        void *dev_id;
        int evtchn;
        unsigned int close:1; /* close on unbind_from_irqhandler()? */
        unsigned int inuse:1;
        unsigned int in_handler:1;
} irq_evtchn[256];
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1 };

static DEFINE_SPINLOCK(irq_alloc_lock);

static int alloc_xen_irq(void)
{
        static int warned;
        int irq;

        spin_lock(&irq_alloc_lock);

        for (irq = 1; irq < ARRAY_SIZE(irq_evtchn); irq++) {
                if (irq_evtchn[irq].inuse)
                        continue;
                irq_evtchn[irq].inuse = 1;
                spin_unlock(&irq_alloc_lock);
                return irq;
        }

        if (!warned) {
                warned = 1;
                printk(KERN_WARNING "No available IRQ to bind to: "
                       "increase irq_evtchn[] size in evtchn.c.\n");
        }

        spin_unlock(&irq_alloc_lock);

        return -ENOSPC;
}

static void free_xen_irq(int irq)
{
        spin_lock(&irq_alloc_lock);
        irq_evtchn[irq].inuse = 0;
        spin_unlock(&irq_alloc_lock);
}

int irq_to_evtchn_port(int irq)
{
        return irq_evtchn[irq].evtchn;
}
EXPORT_SYMBOL(irq_to_evtchn_port);

void mask_evtchn(int port)
{
        shared_info_t *s = shared_info_area;
        synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL(mask_evtchn);

void unmask_evtchn(int port)
{
        unsigned int cpu;
        shared_info_t *s = shared_info_area;
        vcpu_info_t *vcpu_info;

        cpu = get_cpu();
        vcpu_info = &s->vcpu_info[cpu];

        /* Slow path (hypercall) if this is a non-local port. We only
           ever bind event channels to vcpu 0 in HVM guests. */
        if (unlikely(cpu != 0)) {
                evtchn_unmask_t op = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &op);
                put_cpu();
                return;
        }

        synch_clear_bit(port, &s->evtchn_mask[0]);

        /*
         * The following is basically the equivalent of
         * 'hw_resend_irq'. Just like a real IO-APIC we 'lose the
         * interrupt edge' if the channel is masked.
         */
        if (synch_test_bit(port, &s->evtchn_pending[0]) &&
            !synch_test_and_set_bit(port / BITS_PER_LONG,
                                    &vcpu_info->evtchn_pending_sel)) {
                vcpu_info->evtchn_upcall_pending = 1;
                if (!vcpu_info->evtchn_upcall_mask)
                        force_evtchn_callback();
        }

        put_cpu();
}
EXPORT_SYMBOL(unmask_evtchn);

int bind_listening_port_to_irqhandler(
        unsigned int remote_domain,
        irq_handler_t handler,
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        struct evtchn_alloc_unbound alloc_unbound;
        int err, irq;

        irq = alloc_xen_irq();
        if (irq < 0)
                return irq;

        spin_lock_irq(&irq_evtchn[irq].lock);

        alloc_unbound.dom        = DOMID_SELF;
        alloc_unbound.remote_dom = remote_domain;
        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
                                          &alloc_unbound);
        if (err) {
                spin_unlock_irq(&irq_evtchn[irq].lock);
                free_xen_irq(irq);
                return err;
        }

        irq_evtchn[irq].handler = handler;
        irq_evtchn[irq].dev_id  = dev_id;
        irq_evtchn[irq].evtchn  = alloc_unbound.port;
        irq_evtchn[irq].close   = 1;

        evtchn_to_irq[alloc_unbound.port] = irq;

        unmask_evtchn(alloc_unbound.port);

        spin_unlock_irq(&irq_evtchn[irq].lock);

        return irq;
}
EXPORT_SYMBOL(bind_listening_port_to_irqhandler);

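/*
 * Illustrative use only (not part of this file): a PV-on-HVM frontend
 * binds a listening port for its backend domain and tears it down again
 * on disconnect. The names backend_domid, blkfront_interrupt and info
 * below are hypothetical.
 *
 *      irq = bind_listening_port_to_irqhandler(backend_domid,
 *                                              blkfront_interrupt, 0,
 *                                              "blkif", info);
 *      if (irq < 0)
 *              return irq;     (-ENOSPC when irq_evtchn[] is full)
 *      ...
 *      unbind_from_irqhandler(irq, info);
 */
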
int bind_caller_port_to_irqhandler(
        unsigned int caller_port,
        irq_handler_t handler,
        unsigned long irqflags,
        const char *devname,
        void *dev_id)
{
        int irq;

        irq = alloc_xen_irq();
        if (irq < 0)
                return irq;

        spin_lock_irq(&irq_evtchn[irq].lock);

        irq_evtchn[irq].handler = handler;
        irq_evtchn[irq].dev_id  = dev_id;
        irq_evtchn[irq].evtchn  = caller_port;
        irq_evtchn[irq].close   = 0;

        evtchn_to_irq[caller_port] = irq;

        unmask_evtchn(caller_port);

        spin_unlock_irq(&irq_evtchn[irq].lock);

        return irq;
}
EXPORT_SYMBOL(bind_caller_port_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        int evtchn;

        spin_lock_irq(&irq_evtchn[irq].lock);

        evtchn = evtchn_from_irq(irq);

        if (is_valid_evtchn(evtchn)) {
                evtchn_to_irq[evtchn] = -1;
                mask_evtchn(evtchn);
                if (irq_evtchn[irq].close) {
                        struct evtchn_close close = { .port = evtchn };
                        HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
                }
        }

        irq_evtchn[irq].handler = NULL;
        irq_evtchn[irq].evtchn  = 0;

        spin_unlock_irq(&irq_evtchn[irq].lock);

        /* Wait for any in-flight invocation of the handler to finish
           before the IRQ slot can be reused. */
        while (irq_evtchn[irq].in_handler)
                cpu_relax();

        free_xen_irq(irq);
}
EXPORT_SYMBOL(unbind_from_irqhandler);

void notify_remote_via_irq(int irq)
{
        int evtchn;

        evtchn = evtchn_from_irq(irq);
        if (is_valid_evtchn(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL(notify_remote_via_irq);

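/*
 * Typically called after placing requests on a shared ring, e.g. when
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() indicates the remote end needs
 * a kick.
 */
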
static irqreturn_t evtchn_interrupt(int irq, void *dev_id
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
                                    , struct pt_regs *regs
#else
# define handler(irq, dev_id, regs) handler(irq, dev_id)
#endif
                                    )
{
        unsigned int l1i, port;
        /* XXX: All events are bound to vcpu0 but irq may be redirected. */
        int cpu = 0; /*smp_processor_id();*/
        irq_handler_t handler;
        shared_info_t *s = shared_info_area;
        vcpu_info_t *v = &s->vcpu_info[cpu];
        unsigned long l1, l2;

        v->evtchn_upcall_pending = 0;
        /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
        l1 = xchg(&v->evtchn_pending_sel, 0);
        while (l1 != 0) {
                l1i = __ffs(l1);
                l1 &= ~(1UL << l1i);
                /* Each set bit in the selector word covers one word of
                   the two-level evtchn_pending[] bitmap. */
                while ((l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i])) {
                        port = (l1i * BITS_PER_LONG) + __ffs(l2);
                        synch_clear_bit(port, &s->evtchn_pending[0]);

                        irq = evtchn_to_irq[port];
                        if (irq < 0)
                                continue;

                        spin_lock(&irq_evtchn[irq].lock);
                        handler = irq_evtchn[irq].handler;
                        dev_id  = irq_evtchn[irq].dev_id;
                        if (unlikely(handler == NULL)) {
                                printk(KERN_WARNING
                                       "Xen IRQ%d (port %d) has no handler!\n",
                                       irq, port);
                                spin_unlock(&irq_evtchn[irq].lock);
                                continue;
                        }
                        irq_evtchn[irq].in_handler = 1;
                        spin_unlock(&irq_evtchn[irq].lock);

                        /* Call the handler snapshotted under the lock,
                           with interrupts enabled. */
                        local_irq_enable();
                        handler(irq, dev_id, regs);
                        local_irq_disable();

                        spin_lock(&irq_evtchn[irq].lock);
                        irq_evtchn[irq].in_handler = 0;
                        spin_unlock(&irq_evtchn[irq].lock);
                }
        }

        return IRQ_HANDLED;
}

void force_evtchn_callback(void)
{
        /* A dummy hypercall suffices: pending events are delivered on
           the return path from the hypervisor. */
        (void)HYPERVISOR_xen_version(0, NULL);
}
EXPORT_SYMBOL(force_evtchn_callback);

void irq_resume(void)
{
        int evtchn, irq;

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) {
                mask_evtchn(evtchn);
                evtchn_to_irq[evtchn] = -1;
        }

        for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
                irq_evtchn[irq].evtchn = 0;
}

int xen_irq_init(struct pci_dev *pdev)
{
        int irq;

        for (irq = 0; irq < ARRAY_SIZE(irq_evtchn); irq++)
                spin_lock_init(&irq_evtchn[irq].lock);

        return request_irq(pdev->irq, evtchn_interrupt,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
                           SA_SHIRQ | SA_SAMPLE_RANDOM | SA_INTERRUPT,
#else
                           IRQF_SHARED | IRQF_SAMPLE_RANDOM | IRQF_DISABLED,
#endif
                           "xen-platform-pci", pdev);
}
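
/*
 * xen_irq_init() is expected to be called once from the platform PCI
 * driver's probe path (platform-pci.c), passing the PCI device through
 * which Xen delivers event-channel upcalls to the HVM guest.
 */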