ia64/linux-2.6.18-xen.hg

view include/asm-m32r/smp.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
children
line source
1 #ifndef _ASM_M32R_SMP_H
2 #define _ASM_M32R_SMP_H
4 /* $Id$ */
7 #ifdef CONFIG_SMP
8 #ifndef __ASSEMBLY__
10 #include <linux/cpumask.h>
11 #include <linux/spinlock.h>
12 #include <linux/threads.h>
13 #include <asm/m32r.h>
/*
 * Physical CPU id bitmask handling.
 *
 * physid_mask_t is a fixed-size bitmap of physical CPU ids; the
 * macros below wrap the kernel's generic bit/bitmap operations.
 *
 * NOTE(review): the physids_* bitmap macros pass MAX_APICS as the bit
 * count, but MAX_APICS is not defined anywhere in this header -- it
 * looks inherited from the i386 version of this file; confirm it is
 * defined elsewhere (or that these macros are unused) on m32r.
 */
15 #define PHYSID_ARRAY_SIZE 1
17 struct physid_mask
18 {
19 unsigned long mask[PHYSID_ARRAY_SIZE];
20 };
22 typedef struct physid_mask physid_mask_t;
/* Single-bit operations on one physical id within the mask. */
24 #define physid_set(physid, map) set_bit(physid, (map).mask)
25 #define physid_clear(physid, map) clear_bit(physid, (map).mask)
26 #define physid_isset(physid, map) test_bit(physid, (map).mask)
27 #define physid_test_and_set(physid, map) test_and_set_bit(physid, (map).mask)
/* Whole-mask operations, delegating to the generic bitmap helpers. */
29 #define physids_and(dst, src1, src2) bitmap_and((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
30 #define physids_or(dst, src1, src2) bitmap_or((dst).mask, (src1).mask, (src2).mask, MAX_APICS)
31 #define physids_clear(map) bitmap_zero((map).mask, MAX_APICS)
32 #define physids_complement(dst, src) bitmap_complement((dst).mask,(src).mask, MAX_APICS)
33 #define physids_empty(map) bitmap_empty((map).mask, MAX_APICS)
34 #define physids_equal(map1, map2) bitmap_equal((map1).mask, (map2).mask, MAX_APICS)
35 #define physids_weight(map) bitmap_weight((map).mask, MAX_APICS)
36 #define physids_shift_right(d, s, n) bitmap_shift_right((d).mask, (s).mask, n, MAX_APICS)
37 #define physids_shift_left(d, s, n) bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
/* Collapse the mask to its first word (valid since PHYSID_ARRAY_SIZE == 1). */
38 #define physids_coerce(map) ((map).mask[0])
/* Build a mask whose first word is the given bit pattern. */
40 #define physids_promote(physids) \
41 ({ \
42 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
43 __physid_mask.mask[0] = physids; \
44 __physid_mask; \
45 })
/* Build a mask with exactly one physical id set. */
47 #define physid_mask_of_physid(physid) \
48 ({ \
49 physid_mask_t __physid_mask = PHYSID_MASK_NONE; \
50 physid_set(physid, __physid_mask); \
51 __physid_mask; \
52 })
/* All-ones / all-zeroes initializers for a physid_mask_t. */
54 #define PHYSID_MASK_ALL { {[0 ... PHYSID_ARRAY_SIZE-1] = ~0UL} }
55 #define PHYSID_MASK_NONE { {[0 ... PHYSID_ARRAY_SIZE-1] = 0UL} }
/* Mask of physical CPU ids present in the system. */
57 extern physid_mask_t phys_cpu_present_map;
59 /*
60 * Some lowlevel functions might want to know about
61 * the real CPU ID <-> CPU # mapping.
62 */
/* Per-logical-CPU physical id table; NOTE(review): declared volatile,
 * presumably because it is written during CPU bringup -- confirm
 * against the m32r boot code. */
63 extern volatile int cpu_2_physid[NR_CPUS];
64 #define cpu_to_physid(cpu_id) cpu_2_physid[cpu_id]
/* Logical CPU number of the current task, cached in thread_info. */
66 #define raw_smp_processor_id() (current_thread_info()->cpu)
/* CPU masks maintained by the SMP boot/bringup code. */
68 extern cpumask_t cpu_callout_map;
69 extern cpumask_t cpu_possible_map;
70 extern cpumask_t cpu_present_map;
72 static __inline__ int hard_smp_processor_id(void)
73 {
74 return (int)*(volatile long *)M32R_CPUID_PORTL;
75 }
/*
 * cpu_logical_map - map a kernel CPU number to its logical id.
 *
 * On M32R the two numbering schemes coincide, so this is an
 * identity mapping.
 */
static __inline__ int cpu_logical_map(int cpu)
{
	int logical = cpu;	/* identity: no renumbering on m32r */

	return logical;
}
/*
 * cpu_number_map - map a logical CPU id back to its kernel CPU number.
 *
 * Inverse of cpu_logical_map(); also an identity mapping on M32R.
 */
static __inline__ int cpu_number_map(int cpu)
{
	int number = cpu;	/* identity: no renumbering on m32r */

	return number;
}
87 static __inline__ unsigned int num_booting_cpus(void)
88 {
89 return cpus_weight(cpu_callout_map);
90 }
/* Broadcast the local-timer IPI to other CPUs. */
92 extern void smp_send_timer(void);
/* Send an IPI to the CPUs in the given mask.  NOTE(review): takes a
 * cpumask_t despite the _phys suffix -- verify against the definition
 * in the m32r SMP code. */
93 extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
95 #endif /* not __ASSEMBLY__ */
97 #define NO_PROC_ID (0xff) /* No processor magic marker */
99 #define PROC_CHANGE_PENALTY (15) /* Schedule penalty */
101 /*
102 * M32R-mp IPI
103 */
/* IPI numbers, expressed as offsets from the first IPI interrupt line
 * (M32R_IRQ_IPI0), so RESCHEDULE_IPI is 0, INVALIDATE_TLB_IPI is 1, etc. */
104 #define RESCHEDULE_IPI (M32R_IRQ_IPI0-M32R_IRQ_IPI0)
105 #define INVALIDATE_TLB_IPI (M32R_IRQ_IPI1-M32R_IRQ_IPI0)
106 #define CALL_FUNCTION_IPI (M32R_IRQ_IPI2-M32R_IRQ_IPI0)
107 #define LOCAL_TIMER_IPI (M32R_IRQ_IPI3-M32R_IRQ_IPI0)
108 #define INVALIDATE_CACHE_IPI (M32R_IRQ_IPI4-M32R_IRQ_IPI0)
109 #define CPU_BOOT_IPI (M32R_IRQ_IPI5-M32R_IRQ_IPI0)
/* IPIs are encoded starting at bit IPI_SHIFT; NR_IPIS slots reserved. */
111 #define IPI_SHIFT (0)
112 #define NR_IPIS (8)
116 #endif /* _ASM_M32R_SMP_H */