ia64/linux-2.6.18-xen.hg

view include/asm-v850/system.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
children
line source
1 /*
2 * include/asm-v850/system.h -- Low-level interrupt/thread ops
3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * Written by Miles Bader <miles@gnu.org>
12 */
14 #ifndef __V850_SYSTEM_H__
15 #define __V850_SYSTEM_H__
17 #include <linux/linkage.h>
18 #include <asm/ptrace.h>
/*
 * switch_to(prev,next,last) switches from task `prev' to task `next',
 * doing nothing when they are the same task.  `last' receives the value
 * returned by switch_thread.
 */
struct thread_struct;
extern void *switch_thread (struct thread_struct *last,
			    struct thread_struct *next);
/* Fix: the original expansion used bare `prev'/`next'; macro arguments
   must be parenthesized so non-trivial argument expressions parse
   correctly (CERT PRE01-C).  */
#define switch_to(prev,next,last)					\
  do {									\
	if ((prev) != (next)) {						\
		(last) = switch_thread (&(prev)->thread, &(next)->thread); \
	}								\
  } while (0)
/* Enable/disable interrupts (v850 `ei'/`di' instructions).  */
#define local_irq_enable()	__asm__ __volatile__ ("ei")
#define local_irq_disable()	__asm__ __volatile__ ("di")

/* Read/write the PSW system register to save/restore the flag state.  */
#define local_save_flags(flags) \
  __asm__ __volatile__ ("stsr %1, %0" : "=r" (flags) : "i" (SR_PSW))
#define local_restore_flags(flags) \
  __asm__ __volatile__ ("ldsr %0, %1" :: "r" (flags), "i" (SR_PSW))

/* For spinlocks etc */
#define local_irq_save(flags) \
  do { local_save_flags (flags); local_irq_disable (); } while (0)
/* Fix: the original expansion ended in `;', so the caller's own semicolon
   produced an empty statement -- breaking constructs like
   `if (c) local_irq_restore(f); else ...'.  The semicolon is now supplied
   by the caller, as with every other statement-like macro here.  */
#define local_irq_restore(flags) \
  local_restore_flags (flags)
/* Return nonzero iff interrupts are currently disabled.
   NOTE(review): bit 0x20 of the saved PSW is presumably the
   interrupt-disable (ID) bit -- confirm against the v850 PSW layout.  */
static inline int irqs_disabled (void)
{
	unsigned psw;

	local_save_flags (psw);
	return (psw & 0x20) != 0;
}
/*
 * Force strict CPU ordering.
 * Not really required on v850...
 */
#define nop()			__asm__ __volatile__ ("nop")
/* A compiler-only barrier: the empty asm with a "memory" clobber stops the
   compiler from reordering memory accesses across it.  */
#define mb()			__asm__ __volatile__ ("" ::: "memory")
/* Read/write barriers are the same full compiler barrier here.  */
#define rmb()			mb ()
#define wmb()			mb ()
#define read_barrier_depends()	((void)0)
/* Assign via xchg so the store is ordered like the barrier above.  */
#define set_rmb(var, value)	do { xchg (&var, value); } while (0)
#define set_mb(var, value)	set_rmb (var, value)

/* SMP variants simply alias the uniprocessor barriers.  */
#define smp_mb()		mb ()
#define smp_rmb()		rmb ()
#define smp_wmb()		wmb ()
#define smp_read_barrier_depends()	read_barrier_depends()
/* Atomically exchange the value WITH into *PTR; evaluates to the previous
   contents of *PTR, cast back to the pointed-to type.  */
#define xchg(ptr, with) \
  ((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr))))
/* Test-and-set: store 1 into *PTR and yield the old value.  */
#define tas(ptr) (xchg ((ptr), 1))
/* Exchange WITH into the SIZE-byte object at PTR and return the old value.
   Atomicity is provided by disabling interrupts around the
   read-modify-write sequence.  SIZE must be 1, 2, or 4.  */
static inline unsigned long __xchg (unsigned long with,
				    __volatile__ void *ptr, int size)
{
	/* Fix: tmp is now initialized -- the original left it
	   uninitialized, so an unsupported SIZE returned an indeterminate
	   value (undefined behavior).  */
	unsigned long tmp = 0;
	unsigned long flags;

	local_irq_save (flags);

	switch (size) {
	case 1:
		tmp = *(unsigned char *)ptr;
		*(unsigned char *)ptr = with;
		break;
	case 2:
		tmp = *(unsigned short *)ptr;
		*(unsigned short *)ptr = with;
		break;
	case 4:
		tmp = *(unsigned long *)ptr;
		*(unsigned long *)ptr = with;
		break;
	default:
		/* Unsupported size: leave *ptr untouched, return 0.  */
		break;
	}

	local_irq_restore (flags);

	return tmp;
}
/* No special stack alignment needed on this architecture: identity map.  */
#define arch_align_stack(x)	(x)
110 #endif /* __V850_SYSTEM_H__ */