ia64/xen-unstable

linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c @ 8780:ecc8595f2c0b

[IA64] evtchn_ia64 spin lock - fix deadlock

Added spin_unlock() on the error paths so the lock is released when binding fails.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Thu Feb 09 13:48:05 2006 -0700 (2006-02-09)
parents 3b9c2c410b14
children 42a8e3101c6c
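
The fix routes every early exit in the bind_*_to_irqhandler() paths through a
single 'out' label so that irq_mapping_update_lock is released on failure as
well as on success. A minimal sketch of the pattern (the function name is
hypothetical, not part of the file):

	static int evtchn_bind_example(int evtchn)
	{
		int ret = evtchn;

		spin_lock(&irq_mapping_update_lock);
		if (!unbound_irq(evtchn)) {
			ret = -EINVAL;
			goto out;	/* before the fix, an early return here left the lock held */
		}
		/* ... record handler and mark the port opened ... */
	out:
		spin_unlock(&irq_mapping_update_lock);	/* released on every path */
		return ret;
	}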
/* NOTE: This file split off from evtchn.c because there was
   some discussion that the mechanism is sufficiently different.
   It may be possible to merge it back in the future... djm */
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/hw_irq.h>
#include <xen/evtchn.h>

#define MAX_EVTCHN 1024

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(_chn) (((_chn) != 0) && ((_chn) < MAX_EVTCHN))

/* Binding types. Only IRQT_VIRQ and IRQT_EVTCHN are supported on
 * Xen/IA64 for now. - ktian1
 */
enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };

/* Constructor for packed IRQ information. */
#define mk_irq_info(type, index, evtchn) \
	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
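/* Resulting layout of an irq_info[] word (see the accessors below):
 *   bits 31..24  binding type (IRQT_*)
 *   bits 23..16  sub-type index (e.g. the VIRQ number)
 *   bits 15..0   event-channel port
 */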
/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
/* Accessor macros for packed IRQ information. */
#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
#define index_from_irq(irq) ((u8)(irq_info[irq] >> 16))
#define type_from_irq(irq) ((u8)(irq_info[irq] >> 24))

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];

/* Note for Xen/IA64: all event channels are bound to a single physical
 * irq vector, so the event-channel number and the 'irq' number are
 * identical in this context. - ktian1
 */
static struct {
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	void *dev_id;
	char opened;	/* whether the port has been allocated */
} evtchns[MAX_EVTCHN];
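/* evtchns[] is indexed by event-channel port; because irq == evtchn
 * here, it also serves as the per-irq handler table. */
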
/*
 * This lock protects updates to the mapping tables above (irq_info[]
 * and evtchns[]). It does not need to be acquired merely to read them.
 */
static spinlock_t irq_mapping_update_lock;
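
/* Masking and unmasking are bit flips in the shared-info page; no
 * hypercall is needed on these paths (the x86 remote-vcpu slow path
 * is compiled out below). */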
void mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL(mask_evtchn);

void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

#if 0	// FIXME: diverged from x86 evtchn.c
	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
				   .u.unmask.port = port };
		(void)HYPERVISOR_event_channel_op(&op);
		return;
	}
#endif

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
		if (!vcpu_info->evtchn_upcall_mask)
			force_evtchn_callback();
	}
}
EXPORT_SYMBOL(unmask_evtchn);

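/* unbound_irq(e): true iff 'e' is a valid event-channel port that has
 * not yet been opened, i.e. the port is free to bind. */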
#define unbound_irq(e) (VALID_EVTCHN(e) && (!evtchns[(e)].opened))
int bind_virq_to_irqhandler(
	unsigned int virq,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	evtchn_op_t op;
	int evtchn;

	spin_lock(&irq_mapping_update_lock);

	op.cmd = EVTCHNOP_bind_virq;
	op.u.bind_virq.virq = virq;
	op.u.bind_virq.vcpu = cpu;
	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
	evtchn = op.u.bind_virq.port;

	if (!unbound_irq(evtchn)) {
		evtchn = -EINVAL;
		goto out;
	}

	evtchns[evtchn].handler = handler;
	evtchns[evtchn].dev_id = dev_id;
	evtchns[evtchn].opened = 1;
	irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

	unmask_evtchn(evtchn);
out:
	spin_unlock(&irq_mapping_update_lock);
	return evtchn;
}

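/* Hypothetical usage sketch (not part of this file): binding a VIRQ on
 * the current CPU. The handler name and the VIRQ_TIMER choice are
 * illustrative only; the handler runs from evtchn_interrupt() below. */
#if 0
static irqreturn_t example_virq_handler(int irq, void *dev_id,
					struct pt_regs *regs)
{
	return IRQ_HANDLED;
}

static void example_bind_virq(void)
{
	int irq = bind_virq_to_irqhandler(VIRQ_TIMER, smp_processor_id(),
					  example_virq_handler, SA_INTERRUPT,
					  "example", NULL);
	if (irq < 0)
		printk("example: bind_virq failed (%d)\n", irq);
}
#endif
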
int bind_evtchn_to_irqhandler(unsigned int evtchn,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags, const char *devname, void *dev_id)
{
	spin_lock(&irq_mapping_update_lock);

	if (!unbound_irq(evtchn)) {
		evtchn = -EINVAL;
		goto out;
	}

	evtchns[evtchn].handler = handler;
	evtchns[evtchn].dev_id = dev_id;
	evtchns[evtchn].opened = 1;
	irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);

	unmask_evtchn(evtchn);
out:
	spin_unlock(&irq_mapping_update_lock);
	return evtchn;
}

int bind_ipi_to_irqhandler(
	unsigned int ipi,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	printk("%s called, but IPI binding is not supported yet\n",
	       __FUNCTION__);
	while (1);
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	evtchn_op_t op;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (unbound_irq(irq))
		goto out;

	op.cmd = EVTCHNOP_close;
	op.u.close.port = evtchn;
	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);

	switch (type_from_irq(irq)) {
	case IRQT_VIRQ:
		/* Add smp stuff later... */
		break;
	case IRQT_IPI:
		/* Add smp stuff later... */
		break;
	default:
		break;
	}

	mask_evtchn(evtchn);
	evtchns[evtchn].handler = NULL;
	evtchns[evtchn].opened = 0;

out:
	spin_unlock(&irq_mapping_update_lock);
}

void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (!unbound_irq(evtchn))
		notify_remote_via_evtchn(evtchn);
}

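/* Demultiplex pending event channels. Xen maintains a two-level pending
 * bitmap: each set bit in evtchn_pending_sel selects one word of
 * s->evtchn_pending[], and each bit set there (and clear in
 * s->evtchn_mask[]) identifies a pending port. Ports without a
 * registered handler are forwarded to the user-space event-channel
 * driver via evtchn_device_upcall(). */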
irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long l1, l2;
	unsigned int l1i, l2i, port;
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[smp_processor_id()];

	vcpu_info->evtchn_upcall_mask = 1;
	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- xchg acts as one. */
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0) {
			l2i = __ffs(l2);
			l2 &= ~(1UL << l2i);

			port = (l1i * BITS_PER_LONG) + l2i;
			if ((handler = evtchns[port].handler) != NULL) {
				clear_evtchn(port);
				handler(port, evtchns[port].dev_id, regs);
			} else {
				evtchn_device_upcall(port);
			}
		}
	}
	vcpu_info->evtchn_upcall_mask = 0;
	return IRQ_HANDLED;
}

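/* Stub: the commented-out null hypercall is how the x86 port forces
 * delivery of any pending upcall; the IA64 port currently does nothing
 * here. */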
void force_evtchn_callback(void)
{
	//(void)HYPERVISOR_xen_version(0, NULL);
}

static struct irqaction evtchn_irqaction = {
	.handler =	evtchn_interrupt,
	.flags =	SA_INTERRUPT,
	.name =		"xen-event-channel"
};

int evtchn_irq = 0xe9;
void __init evtchn_init(void)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[smp_processor_id()];

#if 0
	int ret;
	irq = assign_irq_vector(AUTO_ASSIGN);
	ret = request_irq(irq, evtchn_interrupt, 0, "xen-event-channel", NULL);
	if (ret < 0)
	{
		printk("xen-event-channel unable to get irq %d (%d)\n", irq, ret);
		return;
	}
#endif
	register_percpu_irq(evtchn_irq, &evtchn_irqaction);

	vcpu_info->arch.evtchn_vector = evtchn_irq;
	printk("xen-event-channel using irq %d\n", evtchn_irq);

	spin_lock_init(&irq_mapping_update_lock);
	memset(evtchns, 0, sizeof(evtchns));
}