ia64/linux-2.6.18-xen.hg

view arch/sparc64/kernel/irq.c @ 897:329ea0ccb344

balloon: try harder to balloon up under memory pressure.

Currently, if the balloon driver is unable to increase the guest's
reservation, it assumes the failure was due to reaching its full
allocation, gives up on the ballooning operation and records the limit
it reached as the "hard limit". The driver will not try again until
the target is set again (even to the same value).

However, it is possible that ballooning has in fact failed due to
memory pressure in the host, and it is therefore desirable to keep
attempting to reach the target in case memory becomes available. The
most likely scenario is that some guests are ballooning down while
others are ballooning up, so there is temporary memory pressure while
things stabilise. You would not expect a well-behaved toolstack to ask
a domain to balloon to more than its allocation, nor would you expect
it to deliberately over-commit memory by setting balloon targets which
exceed the total host memory.

This patch drops the concept of a hard limit and causes the balloon
driver to retry increasing the reservation on a timer in the same
manner as when decreasing the reservation.

Also, if we partially succeed in increasing the reservation
(i.e. receive fewer pages than we asked for), we may as well keep
those pages rather than returning them to Xen.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Fri Jun 05 14:01:20 2009 +0100 (2009-06-05)
parents 831230e53067
children
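The retry behaviour described in this changeset amounts roughly to the
sketch below. It is only an illustration of the policy, not the actual
drivers/xen/balloon.c code: the helper names (current_target(),
current_pages, increase_reservation(), decrease_reservation(),
balloon_timer) are assumptions made for the example.

/* Sketch: move the reservation towards the target and, on any
 * shortfall in either direction, re-arm a timer to retry later
 * instead of recording a "hard limit" and giving up.  A partial
 * increase keeps whatever pages Xen actually granted.
 */
static void balloon_process(void)
{
        long credit = current_target() - current_pages;
        int need_retry = 0;

        if (credit > 0)
                /* increase_reservation() is assumed to keep any pages
                 * it received and to return non-zero only when the
                 * full amount was obtained.
                 */
                need_retry = !increase_reservation(credit);
        else if (credit < 0)
                need_retry = !decrease_reservation(-credit);

        if (need_retry)
                mod_timer(&balloon_timer, jiffies + HZ);
}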
line source
/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered. We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below. The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
 */
struct ino_bucket {
        /* Next handler in per-CPU IRQ worklist. We know that
         * bucket pointers have the high 32-bits clear, so to
         * save space we only store the bits we need.
         */
        /*0x00*/unsigned int irq_chain;

        /* Virtual interrupt number assigned to this INO. */
        /*0x04*/unsigned int virt_irq;
};

#define NUM_IVECS (IMAP_INR + 1)
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

#define __irq_ino(irq) \
        (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data. The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler. If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)

static unsigned int virt_to_real_irq_table[NR_IRQS];
static unsigned char virt_irq_cur = 1;
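
/* Hand out the next unused virtual IRQ number and remember which real
 * IRQ (ino bucket address) it maps to.  Virtual IRQ 0 is reserved as
 * "invalid", and numbers are never returned to the pool.
 */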
static unsigned char virt_irq_alloc(unsigned int real_irq)
{
        unsigned char ent;

        BUILD_BUG_ON(NR_IRQS >= 256);

        ent = virt_irq_cur;
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                return 0;
        }

        virt_irq_cur = ent + 1;
        virt_to_real_irq_table[ent] = real_irq;

        return ent;
}

#if 0 /* Currently unused. */
static unsigned char real_to_virt_irq(unsigned int real_irq)
{
        struct ino_bucket *bucket = __bucket(real_irq);

        return bucket->virt_irq;
}
#endif

static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
        return virt_to_real_irq_table[virt_irq];
}

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, " %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
        return 0;
}

extern unsigned long real_hard_smp_processor_id(void);
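
/* Compute the interrupt target ID to program into an IMAP register for
 * the given cpu, using the encoding required by the bus type: Starfire,
 * JBUS (Jalapeno/Serrano), Safari (other Cheetah/Cheetah+) or plain UPA.
 */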
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}

struct irq_handler_data {
        unsigned long iclr;
        unsigned long imap;

        void (*pre_handler)(unsigned int, void *, void *);
        void *pre_handler_arg1;
        void *pre_handler_arg2;
};

static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
        unsigned int real_irq = virt_to_real_irq(virt_irq);
        struct ino_bucket *bucket = NULL;

        if (likely(real_irq))
                bucket = __bucket(real_irq);

        return bucket;
}
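
/* Pick a target cpu for an interrupt.  On SMP, honour the affinity mask
 * when it names specific cpus (intersected with the online map), and
 * otherwise round-robin across all online cpus; on UP the current cpu
 * is the only choice.
 */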
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
        cpumask_t mask = irq_desc[virt_irq].affinity;
        int cpuid;

        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
        do_round_robin:
                spin_lock_irqsave(&irq_rover_lock, flags);

                while (!cpu_online(irq_rover)) {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                }
                cpuid = irq_rover;
                do {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                } while (!cpu_online(irq_rover));

                spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpumask_t tmp;

                cpus_and(tmp, cpu_online_map, mask);

                if (cpus_empty(tmp))
                        goto do_round_robin;

                cpuid = first_cpu(tmp);
        }

        return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
        return real_hard_smp_processor_id();
}
#endif
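
/* sun4u interrupt chip operations: enable/disable by writing the IMAP
 * register directly, and signal end-of-interrupt by writing ICLR_IDLE
 * to the ICLR register.
 */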
static void sun4u_irq_enable(unsigned int virt_irq)
{
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        if (likely(data)) {
                unsigned long cpuid, imap;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                upa_writel(tid | IMAP_VALID, imap);
        }
}

static void sun4u_irq_disable(unsigned int virt_irq)
{
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        if (likely(data)) {
                unsigned long imap = data->imap;
                u32 tmp = upa_readl(imap);

                tmp &= ~IMAP_VALID;
                upa_writel(tmp, imap);
        }
}

static void sun4u_irq_end(unsigned int virt_irq)
{
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        if (likely(data))
                upa_writel(ICLR_IDLE, data->iclr);
}
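
/* sun4v interrupt chip operations: the same operations as above, but
 * done through hypervisor calls keyed by the sysino rather than by
 * touching IMAP and ICLR registers directly.
 */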
static void sun4v_irq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                err = sun4v_intr_settarget(ino, cpuid);
                if (err != HV_EOK)
                        printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
                               ino, cpuid, err);
                err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk("sun4v_intr_setenabled(%x): err(%d)\n",
                               ino, err);
        }
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk("sun4v_intr_setenabled(%x): "
                               "err(%d)\n", ino, err);
        }
}

static void sun4v_irq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk("sun4v_intr_setstate(%x): "
                               "err(%d)\n", ino, err);
        }
}
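
/* ->ack hook used when a pre-handler has been installed: invoke it with
 * the ino and its two saved arguments before the real handlers run.
 */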
static void run_pre_handler(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        if (likely(data->pre_handler)) {
                data->pre_handler(__irq_ino(__irq(bucket)),
                                  data->pre_handler_arg1,
                                  data->pre_handler_arg2);
        }
}

static struct hw_interrupt_type sun4u_irq = {
        .typename = "sun4u",
        .enable = sun4u_irq_enable,
        .disable = sun4u_irq_disable,
        .end = sun4u_irq_end,
};

static struct hw_interrupt_type sun4u_irq_ack = {
        .typename = "sun4u+ack",
        .enable = sun4u_irq_enable,
        .disable = sun4u_irq_disable,
        .ack = run_pre_handler,
        .end = sun4u_irq_end,
};

static struct hw_interrupt_type sun4v_irq = {
        .typename = "sun4v",
        .enable = sun4v_irq_enable,
        .disable = sun4v_irq_disable,
        .end = sun4v_irq_end,
};

static struct hw_interrupt_type sun4v_irq_ack = {
        .typename = "sun4v+ack",
        .enable = sun4v_irq_enable,
        .disable = sun4v_irq_disable,
        .ack = run_pre_handler,
        .end = sun4v_irq_end,
};

void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        irq_desc_t *desc = irq_desc + virt_irq;
        struct irq_handler_data *data = desc->handler_data;

        data->pre_handler = func;
        data->pre_handler_arg1 = arg1;
        data->pre_handler_arg2 = arg2;

        if (desc->chip == &sun4u_irq_ack ||
            desc->chip == &sun4v_irq_ack)
                return;

        desc->chip = (desc->chip == &sun4u_irq ?
                      &sun4u_irq_ack : &sun4v_irq_ack);
}
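
/* Map a sun4u interrupt source, identified by its ICLR/IMAP registers,
 * to a virtual IRQ, allocating the per-IRQ handler data on first use.
 */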
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        irq_desc_t *desc;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                irq_desc[bucket->virt_irq].chip = &sun4u_irq;
        }

        desc = irq_desc + bucket->virt_irq;
        if (unlikely(desc->handler_data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        desc->handler_data = data;

        data->imap = imap;
        data->iclr = iclr;

out:
        return bucket->virt_irq;
}
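
/* sun4v equivalent of build_irq(): the (devhandle, devino) pair is
 * converted to a sysino by the hypervisor and used to index
 * ivector_table.
 */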
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned long sysino;
        irq_desc_t *desc;

        BUG_ON(tlb_type != hypervisor);

        sysino = sun4v_devino_to_sysino(devhandle, devino);
        bucket = &ivector_table[sysino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                irq_desc[bucket->virt_irq].chip = &sun4v_irq;
        }

        desc = irq_desc + bucket->virt_irq;
        if (unlikely(desc->handler_data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        desc->handler_data = data;

        /* Catch accidental accesses to these things. IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

out:
        return bucket->virt_irq;
}
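
/* Re-inject an interrupt by hand: with interrupts disabled, push the
 * bucket onto this cpu's irq worklist and raise the device softint so
 * that handler_irq() will process it.
 */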
void hw_resend_irq(struct hw_interrupt_type *handler, unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned long pstate;
        unsigned int *ent;

        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
        __asm__ __volatile__("wrpr %0, %1, %%pstate"
                             : : "r" (pstate), "i" (PSTATE_IE));
        ent = irq_work(smp_processor_id());
        bucket->irq_chain = *ent;
        *ent = __irq(bucket);
        set_softint(1 << PIL_DEVICE_IRQ);
        __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

void ack_bad_irq(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = 0xdeadbeef;

        if (bucket)
                ino = bucket - &ivector_table[0];

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
               ino, virt_irq);
}

#ifndef CONFIG_SMP
extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);

void timer_irq(int irq, struct pt_regs *regs)
{
        unsigned long clr_mask = 1 << irq;
        unsigned long tick_mask = tick_ops->softint_mask;

        if (get_softint() & tick_mask) {
                irq = 0;
                clr_mask = tick_mask;
        }
        clear_softint(clr_mask);

        irq_enter();

        kstat_this_cpu.irqs[0]++;
        timer_interrupt(irq, NULL, regs);

        irq_exit();
}
#endif
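
/* Device interrupt entry point: atomically grab this cpu's pending irq
 * worklist (filled in by the vectored trap code in entry.S) and run
 * __do_IRQ() for each bucket on it.
 */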
void handler_irq(int irq, struct pt_regs *regs)
{
        struct ino_bucket *bucket;

        clear_softint(1 << irq);

        irq_enter();

        /* Sliiiick... */
        bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
        while (bucket) {
                struct ino_bucket *next = __bucket(bucket->irq_chain);

                bucket->irq_chain = 0;
                __do_IRQ(bucket->virt_irq, regs);

                bucket = next;
        }

        irq_exit();
}

struct sun5_timer {
        u64 count0;
        u64 limit0;
        u64 count1;
        u64 limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
        struct device_node *dp;
        unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume if node is not present, PROM uses different tick mechanism
         * which we should not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If PROM is really using this, it must be mapped by him. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
         * We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
        " mov 0x40, %%g2\n"
        " ldxa [%%g0] %0, %%g1\n"
        " ldxa [%%g2] %1, %%g1\n"
        " stxa %%g0, [%%g0] %0\n"
        " membar #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}

void init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist = 0;
}
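
/* Tell the hypervisor where one cpu/device mondo or error queue lives
 * and how many entries it has.
 */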
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
{
        unsigned long num_entries = 128;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}

static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
}

static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
{
        void *page;

        if (use_bootmem)
                page = alloc_bootmem_low_pages(PAGE_SIZE);
        else
                page = (void *) get_zeroed_page(GFP_ATOMIC);

        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(page);
}

static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
{
        void *page;

        if (use_bootmem)
                page = alloc_bootmem_low_pages(PAGE_SIZE);
        else
                page = (void *) get_zeroed_page(GFP_ATOMIC);

        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }

        *pa_ptr = __pa(page);
}

static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
#ifdef CONFIG_SMP
        void *page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        if (use_bootmem)
                page = alloc_bootmem_low_pages(PAGE_SIZE);
        else
                page = (void *) get_zeroed_page(GFP_ATOMIC);

        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate and register the mondo and error queues for this cpu. */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
{
        struct trap_per_cpu *tb = &trap_block[cpu];

        if (alloc) {
                alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
                alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
                alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
                alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
                alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);

                init_cpu_send_mondo_info(tb, use_bootmem);
        }

        if (load) {
                if (cpu != hard_smp_processor_id()) {
                        prom_printf("SUN4V: init mondo on cpu %d not %d\n",
                                    cpu, hard_smp_processor_id());
                        prom_halt();
                }
                sun4v_register_mondo_queues(cpu);
        }
}
static struct irqaction timer_irq_action = {
        .name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        map_prom_timers();
        kill_prom_timer();
        memset(&ivector_table[0], 0, sizeof(ivector_table));

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

        /* We need to clear any IRQ's pending in the soft interrupt
         * registers, a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that ivector table is initialized, it is safe
         * to receive IRQ vector traps. We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us. We just ignore them.
         */
        __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
                             "or %%g1, %0, %%g1\n\t"
                             "wrpr %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_desc[0].action = &timer_irq_action;
}