direct-io.hg

linux-2.6-xen-sparse/drivers/xen/core/evtchn.c @ 10472:fc1c6dfd1807

[LINUX] Transparent virtualization fixes.
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>

author    kaf24@firebug.cl.cam.ac.uk
date      Wed Jun 21 16:54:09 2006 +0100
parents   be05097d5d69
children  c9696012fe05
/******************************************************************************
 * evtchn.c
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
cl349@4087 32
cl349@4087 33 #include <linux/config.h>
cl349@4087 34 #include <linux/module.h>
cl349@4087 35 #include <linux/irq.h>
cl349@4087 36 #include <linux/interrupt.h>
cl349@4087 37 #include <linux/sched.h>
cl349@4087 38 #include <linux/kernel_stat.h>
cl349@4087 39 #include <linux/version.h>
cl349@4087 40 #include <asm/atomic.h>
cl349@4087 41 #include <asm/system.h>
cl349@4087 42 #include <asm/ptrace.h>
kaf24@6760 43 #include <asm/synch_bitops.h>
kaf24@10029 44 #include <xen/evtchn.h>
cl349@8706 45 #include <xen/interface/event_channel.h>
cl349@8706 46 #include <xen/interface/physdev.h>
kaf24@6799 47 #include <asm/hypervisor.h>
kaf24@7399 48 #include <linux/mc146818rtc.h> /* RTC_IRQ */
cl349@4087 49
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ...  NR_EVENT_CHANNELS-1] = -1 };

/* Packed IRQ information: binding type, sub-type index, and event channel. */
static u32 irq_info[NR_IRQS];

/* Binding types. */
enum { IRQT_UNBOUND, IRQT_PIRQ, IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN };

/* Constructor for packed IRQ information. */
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	return ((type << 24) | (index << 16) | evtchn);
}

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int evtchn_from_irq(int irq)
{
	return (u16)(irq_info[irq]);
}

static inline unsigned int index_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 16);
}

static inline unsigned int type_from_irq(int irq)
{
	return (u8)(irq_info[irq] >> 24);
}
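
/*
 * Illustrative layout of the packed irq_info word, as produced by
 * mk_irq_info() above (bits 31-24: binding type; bits 23-16: sub-type
 * index; bits 15-0: event-channel port). For example:
 *
 *	mk_irq_info(IRQT_VIRQ, VIRQ_TIMER, 3)
 *	    == (IRQT_VIRQ << 24) | (VIRQ_TIMER << 16) | 3
 *
 * i.e. "a VIRQ binding for VIRQ_TIMER, currently bound to port 3".
 */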

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping. */
#ifndef NR_IPIS
#define NR_IPIS 1
#endif
DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};

/* Reference counts for bindings to IRQs. */
static int irq_bindcount[NR_IRQS];

/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static unsigned long pirq_needs_eoi[BITS_TO_LONGS(NR_PIRQS)];

#ifdef CONFIG_SMP

static u8 cpu_evtchn[NR_EVENT_CHANNELS];
static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask[cpu][idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
	set_native_irq_info(irq, cpumask_of_cpu(cpu));

	clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
	set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;

	/* By default all event channels notify CPU#0. */
	for (i = 0; i < NR_IRQS; i++)
		set_native_irq_info(i, cpumask_of_cpu(0));

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return 0;
}

#endif

/* Upcall to generic IRQ layer. */
#ifdef CONFIG_X86
extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
void __init xen_init_IRQ(void);
void __init init_IRQ(void)
{
	irq_ctx_init(0);
	xen_init_IRQ();
}
#if defined (__i386__)
static inline void exit_idle(void) {}
#define IRQ_REG orig_eax
#elif defined (__x86_64__)
#include <asm/idle.h>
#define IRQ_REG orig_rax
#endif
#define do_IRQ(irq, regs) do {			\
	(regs)->IRQ_REG = ~(irq);		\
	do_IRQ((regs));				\
} while (0)
#endif

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);

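/*
 * A note on the scan below: pending events are signalled through a
 * two-level hierarchy. Each set bit in the per-VCPU evtchn_pending_sel
 * word selects one word of the shared evtchn_pending[] bitmap, and each
 * set bit in that word that is unmasked (and routed to this CPU) names
 * one pending event-channel port.
 */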
/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long l1, l2;
	unsigned int l1i, l2i, port;
	int irq, cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	vcpu_info->evtchn_upcall_pending = 0;

	/* NB. No need for a barrier here -- XCHG is a barrier on x86. */
	l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
	while (l1 != 0) {
		l1i = __ffs(l1);
		l1 &= ~(1UL << l1i);

		while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
			l2i = __ffs(l2);

			port = (l1i * BITS_PER_LONG) + l2i;
			if ((irq = evtchn_to_irq[port]) != -1)
				do_IRQ(irq, regs);
			else {
				exit_idle();
				evtchn_device_upcall(port);
			}
		}
	}
}

static int find_unbound_irq(void)
{
	int irq;

	/* Only allocate from dynirq range. */
	for (irq = DYNIRQ_BASE; irq < NR_IRQS; irq++)
		if (irq_bindcount[irq] == 0)
			break;

	if (irq == NR_IRQS)
		panic("No available IRQ to bind to: increase NR_IRQS!\n");

	return irq;
}

static int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = evtchn_to_irq[evtchn]) == -1) {
		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		irq = find_unbound_irq();
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	irq_bindcount[irq]++;

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = IRQ_UNBOUND;
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(
	unsigned int evtchn,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
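
/*
 * Typical driver usage (an illustrative sketch only; my_handler, my_dev
 * and the evtchn value are hypothetical):
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id,
 *				      struct pt_regs *regs)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */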

int bind_virq_to_irqhandler(
	unsigned int virq,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(
	unsigned int ipi,
	unsigned int cpu,
	irqreturn_t (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

/* Rebind an evtchn so that it gets delivered to a specific cpu. */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	/* Send future instances of this interrupt to the target vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);
}

static void set_affinity_irq(unsigned irq, cpumask_t dest)
{
	unsigned tcpu = first_cpu(dest);
	rebind_irq_to_cpu(irq, tcpu);
}
cl349@4087 473 /*
cl349@4087 474 * Interface to generic handling in irq.c
cl349@4087 475 */
cl349@4087 476
cl349@4087 477 static unsigned int startup_dynirq(unsigned int irq)
cl349@4087 478 {
kaf24@7699 479 int evtchn = evtchn_from_irq(irq);
cl349@4087 480
kaf24@7125 481 if (VALID_EVTCHN(evtchn))
kaf24@7125 482 unmask_evtchn(evtchn);
kaf24@7123 483 return 0;
cl349@4087 484 }
cl349@4087 485
cl349@4087 486 static void shutdown_dynirq(unsigned int irq)
cl349@4087 487 {
kaf24@7699 488 int evtchn = evtchn_from_irq(irq);
cl349@4087 489
kaf24@7125 490 if (VALID_EVTCHN(evtchn))
kaf24@7125 491 mask_evtchn(evtchn);
cl349@4087 492 }
cl349@4087 493
cl349@4087 494 static void enable_dynirq(unsigned int irq)
cl349@4087 495 {
kaf24@7699 496 int evtchn = evtchn_from_irq(irq);
cl349@4087 497
kaf24@7125 498 if (VALID_EVTCHN(evtchn))
kaf24@7125 499 unmask_evtchn(evtchn);
cl349@4087 500 }
cl349@4087 501
cl349@4087 502 static void disable_dynirq(unsigned int irq)
cl349@4087 503 {
kaf24@7699 504 int evtchn = evtchn_from_irq(irq);
cl349@4087 505
kaf24@7125 506 if (VALID_EVTCHN(evtchn))
kaf24@7125 507 mask_evtchn(evtchn);
cl349@4087 508 }
cl349@4087 509
cl349@4087 510 static void ack_dynirq(unsigned int irq)
cl349@4087 511 {
kaf24@7699 512 int evtchn = evtchn_from_irq(irq);
cl349@4087 513
kaf24@9643 514 move_native_irq(irq);
kaf24@9643 515
kaf24@7125 516 if (VALID_EVTCHN(evtchn)) {
kaf24@7125 517 mask_evtchn(evtchn);
kaf24@7125 518 clear_evtchn(evtchn);
kaf24@7125 519 }
cl349@4087 520 }
cl349@4087 521
cl349@4087 522 static void end_dynirq(unsigned int irq)
cl349@4087 523 {
kaf24@7699 524 int evtchn = evtchn_from_irq(irq);
cl349@4087 525
kaf24@7125 526 if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
kaf24@7123 527 unmask_evtchn(evtchn);
cl349@4087 528 }
cl349@4087 529
cl349@4087 530 static struct hw_interrupt_type dynirq_type = {
kaf24@7123 531 "Dynamic-irq",
kaf24@7123 532 startup_dynirq,
kaf24@7123 533 shutdown_dynirq,
kaf24@7123 534 enable_dynirq,
kaf24@7123 535 disable_dynirq,
kaf24@7123 536 ack_dynirq,
kaf24@7123 537 end_dynirq,
kaf24@7123 538 set_affinity_irq
cl349@4087 539 };
cl349@4087 540
cl349@4087 541 static inline void pirq_unmask_notify(int pirq)
cl349@4087 542 {
kaf24@9889 543 struct physdev_eoi eoi = { .irq = pirq };
kaf24@9889 544 if (unlikely(test_bit(pirq, &pirq_needs_eoi[0])))
kaf24@9889 545 (void)HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
cl349@4087 546 }
cl349@4087 547
cl349@4087 548 static inline void pirq_query_unmask(int pirq)
cl349@4087 549 {
kaf24@9889 550 struct physdev_irq_status_query irq_status;
kaf24@9889 551 irq_status.irq = pirq;
kaf24@9889 552 (void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
kaf24@9889 553 clear_bit(pirq, &pirq_needs_eoi[0]);
kaf24@9889 554 if (irq_status.flags & XENIRQSTAT_needs_eoi)
kaf24@9889 555 set_bit(pirq, &pirq_needs_eoi[0]);
cl349@4087 556 }

/*
 * On startup, if there is no action associated with the IRQ then we are
 * probing. In this case we should not share with others as it will confuse us.
 */
#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)

static unsigned int startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = irq;
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq_to_pirq(irq));

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, evtchn);

 out:
	unmask_evtchn(evtchn);
	pirq_unmask_notify(irq_to_pirq(irq));

	return 0;
}

static void shutdown_pirq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	irq_info[irq] = IRQ_UNBOUND;
}

static void enable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}

static void disable_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);
		clear_evtchn(evtchn);
	}
}

static void end_pirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED)) {
		unmask_evtchn(evtchn);
		pirq_unmask_notify(irq_to_pirq(irq));
	}
}

static struct hw_interrupt_type pirq_type = {
	"Phys-irq",
	startup_pirq,
	shutdown_pirq,
	enable_pirq,
	disable_pirq,
	ack_pirq,
	end_pirq,
	set_affinity_irq
};
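
/*
 * To recap the two hw_interrupt_type implementations above: dynamic IRQs
 * front pure event channels (inter-domain, VIRQ and IPI bindings) and are
 * acked simply by masking and clearing the channel; physical IRQs are
 * real device interrupts routed through Xen, so startup must additionally
 * bind the PIRQ, and unmask may require a PHYSDEVOP_eoi notification
 * (see pirq_needs_eoi above).
 */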

int irq_ignore_unhandled(unsigned int irq)
{
	struct physdev_irq_status_query irq_status = { .irq = irq };

	if (!is_running_on_xen())
		return 0;

	(void)HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status);
	return !!(irq_status.flags & XENIRQSTAT_shared);
}

void resend_irq_on_evtchn(struct hw_interrupt_type *h, unsigned int i)
{
	int evtchn = evtchn_from_irq(i);
	shared_info_t *s = HYPERVISOR_shared_info;
	if (!VALID_EVTCHN(evtchn))
		return;
	BUG_ON(!synch_test_bit(evtchn, &s->evtchn_mask[0]));
	synch_set_bit(evtchn, &s->evtchn_pending[0]);
}

void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

void mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	synch_set_bit(port, &s->evtchn_mask[0]);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();
	vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
		return;
	}

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * The following is basically the equivalent of 'hw_resend_irq'. Just
	 * like a real IO-APIC we 'lose the interrupt edge' if the channel is
	 * masked.
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / BITS_PER_LONG,
				    &vcpu_info->evtchn_pending_sel))
		vcpu_info->evtchn_upcall_pending = 1;
}
EXPORT_SYMBOL_GPL(unmask_evtchn);
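
/*
 * NB. A masked channel still latches evtchn_pending, which is why
 * unmask_evtchn() above must replay the 'lost edge' by re-raising the
 * pending-selector and upcall-pending bits when it finds the port
 * already pending.
 */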

void irq_resume(void)
{
	struct evtchn_bind_virq bind_virq;
	struct evtchn_bind_ipi bind_ipi;
	int cpu, pirq, virq, ipi, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* Check that no PIRQs are still bound. */
	for (pirq = 0; pirq < NR_PIRQS; pirq++)
		BUG_ON(irq_info[pirq_to_irq(pirq)] != IRQ_UNBOUND);

	/* Secondary CPUs must have no VIRQ or IPI bindings. */
	for_each_possible_cpu(cpu) {
		if (cpu == 0)
			continue;
		for (virq = 0; virq < NR_VIRQS; virq++)
			BUG_ON(per_cpu(virq_to_irq, cpu)[virq] != -1);
		for (ipi = 0; ipi < NR_IPIS; ipi++)
			BUG_ON(per_cpu(ipi_to_irq, cpu)[ipi] != -1);
	}

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < NR_IRQS; irq++)
		irq_info[irq] &= ~0xFFFF; /* zap event-channel binding */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	/* Primary CPU: rebind VIRQs automatically. */
	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, 0)[virq]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = 0;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}

	/* Primary CPU: rebind IPIs automatically. */
	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, 0)[ipi]) == -1)
			continue;

		BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = 0;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

void __init xen_init_IRQ(void)
{
	int i;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	/* No IRQ -> event-channel mappings. */
	for (i = 0; i < NR_IRQS; i++)
		irq_info[i] = IRQ_UNBOUND;

	/* Dynamic IRQ space is currently unbound. Zero the refcnts. */
	for (i = 0; i < NR_DYNIRQS; i++) {
		irq_bindcount[dynirq_to_irq(i)] = 0;

		irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[dynirq_to_irq(i)].action = NULL;
		irq_desc[dynirq_to_irq(i)].depth = 1;
		irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
	}

	/* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
	for (i = 0; i < NR_PIRQS; i++) {
		irq_bindcount[pirq_to_irq(i)] = 1;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if ((i == RTC_IRQ) &&
		    !(xen_start_info->flags & SIF_INITDOMAIN))
			continue;
#endif

		irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
		irq_desc[pirq_to_irq(i)].action = NULL;
		irq_desc[pirq_to_irq(i)].depth = 1;
		irq_desc[pirq_to_irq(i)].handler = &pirq_type;
	}
}