ia64/xen-unstable

linux-2.6-xen-sparse/arch/ia64/xen/drivers/evtchn_ia64.c @ 8366:5eb7ca1878bd

summary:  Mirror changes to x86 un/mask_evtchn
author:   djm@kirby.fc.hp.com
date:     Wed Dec 14 13:02:37 2005 -0600
parents:  36ab34f1c31e
children: 9ef5f0ec9df8
/* NOTE: This file was split off from evtchn.c because there was
   some discussion that the mechanism is sufficiently different.
   It may be possible to merge it back in the future... djm */
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/hw_irq.h>
#include <asm-xen/evtchn.h>

#define MAX_EVTCHN 1024

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(_chn) (((_chn) != 0) && ((_chn) < MAX_EVTCHN))

/* Binding types. Only IRQT_VIRQ and IRQT_EVTCHN are currently
 * supported on XEN/IA64. - ktian1
 */
enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };

/* Constructor for packed IRQ information. */
#define mk_irq_info(type, index, evtchn) \
	(((u32)(type) << 24) | ((u32)(index) << 16) | (u32)(evtchn))
/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND	mk_irq_info(IRQT_UNBOUND, 0, 0)
/* Accessor macros for packed IRQ information. */
#define evtchn_from_irq(irq) ((u16)(irq_info[irq]))
#define index_from_irq(irq)  ((u8)(irq_info[irq] >> 16))
#define type_from_irq(irq)   ((u8)(irq_info[irq] >> 24))

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];
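/*
 * Worked example of the packing above (illustrative, not from the
 * original source): binding VIRQ 3 on event channel 10. With
 * IRQT_VIRQ == 2 from the enum above:
 *
 *	mk_irq_info(IRQT_VIRQ, 3, 10)
 *	    == (2u << 24) | (3u << 16) | 10u == 0x0203000a
 *
 * type_from_irq() then recovers 2 (IRQT_VIRQ), index_from_irq()
 * recovers 3, and evtchn_from_irq() recovers 10.
 */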
/* Note for XEN/IA64: all event channels are bound to one physical IRQ
 * vector, so an event-channel number and an 'irq' number are used
 * interchangeably in this file. - ktian1
 */
static struct {
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	void *dev_id;
	char opened;	/* Whether this channel slot is allocated */
} evtchns[MAX_EVTCHN];

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static spinlock_t irq_mapping_update_lock;

void mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL(mask_evtchn);

void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

#if 0 // FIXME: diverged from x86 evtchn.c
	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		evtchn_op_t op = { .cmd = EVTCHNOP_unmask,
				   .u.unmask.port = port };
		(void)HYPERVISOR_event_channel_op(&op);
		return;
	}
#endif

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
		if (!vcpu_info->evtchn_upcall_mask)
			force_evtchn_callback();
	}
}
EXPORT_SYMBOL(unmask_evtchn);
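/*
 * Sketch of the intended usage pattern for the two exports above
 * (hypothetical helper, not part of this file): mask the port around a
 * critical section; unmask_evtchn() then replays any 'edge' that
 * arrived while the port was masked.
 */
#if 0
static void example_masked_section(int port)
{
	mask_evtchn(port);
	/* ... update state shared with the event handler ... */
	unmask_evtchn(port);	/* resends the event if one was lost */
}
#endif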
#define unbound_irq(e) (VALID_EVTCHN(e) && (!evtchns[(e)].opened))

int bind_virq_to_irqhandler(
	unsigned int virq,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	evtchn_op_t op;
	int evtchn;

	spin_lock(&irq_mapping_update_lock);

	op.cmd = EVTCHNOP_bind_virq;
	op.u.bind_virq.virq = virq;
	op.u.bind_virq.vcpu = cpu;
	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);
	evtchn = op.u.bind_virq.port;

	if (!unbound_irq(evtchn)) {
		/* Don't leak the lock on the error path. */
		spin_unlock(&irq_mapping_update_lock);
		return -EINVAL;
	}

	evtchns[evtchn].handler = handler;
	evtchns[evtchn].dev_id = dev_id;
	evtchns[evtchn].opened = 1;
	irq_info[evtchn] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

	unmask_evtchn(evtchn);
	spin_unlock(&irq_mapping_update_lock);
	return evtchn;
}
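/*
 * Hypothetical usage sketch (not part of this file): binding the Xen
 * timer VIRQ on the current CPU. 'example_timer_handler' and
 * 'example_bind_timer' are assumed names; VIRQ_TIMER comes from the Xen
 * public headers.
 */
#if 0
static irqreturn_t example_timer_handler(int irq, void *dev_id,
					 struct pt_regs *regs)
{
	/* ... per-tick work ... */
	return IRQ_HANDLED;
}

static int __init example_bind_timer(void)
{
	int evtchn = bind_virq_to_irqhandler(VIRQ_TIMER, smp_processor_id(),
					     example_timer_handler,
					     SA_INTERRUPT, "timer", NULL);
	return (evtchn < 0) ? evtchn : 0;	/* success returns the port */
}
#endif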
int bind_evtchn_to_irqhandler(unsigned int evtchn,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags, const char *devname, void *dev_id)
{
	spin_lock(&irq_mapping_update_lock);

	if (!unbound_irq(evtchn)) {
		/* Don't leak the lock on the error path. */
		spin_unlock(&irq_mapping_update_lock);
		return -EINVAL;
	}

	evtchns[evtchn].handler = handler;
	evtchns[evtchn].dev_id = dev_id;
	evtchns[evtchn].opened = 1;
	irq_info[evtchn] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);

	unmask_evtchn(evtchn);
	spin_unlock(&irq_mapping_update_lock);
	return evtchn;
}
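/*
 * Hypothetical usage sketch (not part of this file): a frontend driver
 * binding an inter-domain event channel whose port number it obtained
 * from its backend. 'example_ring_handler' and 'example_bind_ring' are
 * assumed names.
 */
#if 0
static irqreturn_t example_ring_handler(int irq, void *dev_id,
					struct pt_regs *regs);

static int example_bind_ring(unsigned int port, void *info)
{
	int rc = bind_evtchn_to_irqhandler(port, example_ring_handler,
					   SA_INTERRUPT, "example-ring", info);
	if (rc < 0)
		printk("bind_evtchn_to_irqhandler failed: %d\n", rc);
	return rc;
}
#endif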
int bind_ipi_to_irqhandler(
	unsigned int ipi,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	/* IPI binding is not supported on XEN/IA64 yet. */
	printk("%s called, but IPI binding is not supported yet\n",
	       __FUNCTION__);
	BUG();
	return -ENOSYS;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	evtchn_op_t op;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (unbound_irq(irq)) {
		/* Don't leak the lock on the error path. */
		spin_unlock(&irq_mapping_update_lock);
		return;
	}

	op.cmd = EVTCHNOP_close;
	op.u.close.port = evtchn;
	BUG_ON(HYPERVISOR_event_channel_op(&op) != 0);

	switch (type_from_irq(irq)) {
	case IRQT_VIRQ:
		/* Add SMP teardown later... */
		break;
	case IRQT_IPI:
		/* Add SMP teardown later... */
		break;
	default:
		break;
	}

	mask_evtchn(evtchn);
	evtchns[evtchn].handler = NULL;
	evtchns[evtchn].opened = 0;

	spin_unlock(&irq_mapping_update_lock);
}

void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	/* Only notify if the channel is valid and actually bound. */
	if (VALID_EVTCHN(evtchn) && evtchns[evtchn].opened)
		notify_remote_via_evtchn(evtchn);
}
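/*
 * Hypothetical sketch (not part of this file): the producer side of a
 * shared ring typically queues a request and then kicks the peer
 * through the channel via notify_remote_via_irq(). 'example_kick_peer'
 * is an assumed name.
 */
#if 0
static void example_kick_peer(int irq)
{
	/* ... place request on shared ring, update producer index ... */
	wmb();	/* make the request visible before the kick */
	notify_remote_via_irq(irq);
}
#endif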
irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long l1, l2;
	unsigned int l1i, l2i, port;
	irqreturn_t (*handler)(int, void *, struct pt_regs *);
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[smp_processor_id()];

	vcpu_info->evtchn_upcall_mask = 1;
	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for an explicit barrier: xchg() is itself a barrier. */
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0) {
			l2i = __ffs(l2);
			l2 &= ~(1UL << l2i);

			port = (l1i * BITS_PER_LONG) + l2i;
			if ((handler = evtchns[port].handler) != NULL) {
				clear_evtchn(port);
				handler(port, evtchns[port].dev_id, regs);
			} else {
				evtchn_device_upcall(port);
			}
		}
	}

	vcpu_info->evtchn_upcall_mask = 0;

	return IRQ_HANDLED;
}
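/*
 * Worked example of the two-level scan above (illustrative, not from
 * the original source): with 64-bit words, selector bit l1i == 1 and
 * pending bit l2i == 5 decode to port = 1 * BITS_PER_LONG + 5 = 69;
 * that port indexes both evtchns[] and the shared-info bitmaps.
 */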
void force_evtchn_callback(void)
{
	/* FIXME: no-op for now. The x86 version forces delivery of any
	 * pending upcall via a null hypercall:
	 *	(void)HYPERVISOR_xen_version(0, NULL);
	 */
}

static struct irqaction evtchn_irqaction = {
	.handler = evtchn_interrupt,
	.flags   = SA_INTERRUPT,
	.name    = "xen-event-channel"
};

int evtchn_irq = 0xe9;

void __init evtchn_init(void)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[smp_processor_id()];

#if 0
	int irq, ret;
	irq = assign_irq_vector(AUTO_ASSIGN);
	ret = request_irq(irq, evtchn_interrupt, 0, "xen-event-channel", NULL);
	if (ret < 0) {
		printk("xen-event-channel unable to get irq %d (%d)\n",
		       irq, ret);
		return;
	}
#endif
	register_percpu_irq(evtchn_irq, &evtchn_irqaction);

	vcpu_info->arch.evtchn_vector = evtchn_irq;
	printk("xen-event-channel using irq %d\n", evtchn_irq);

	spin_lock_init(&irq_mapping_update_lock);
	memset(evtchns, 0, sizeof(evtchns));
}