ia64/xen-unstable: xen/common/kexec.c @ 15647:cc48264ed647

description: Merge
author:      Tim Deegan <Tim.Deegan@xensource.com>
date:        Tue Jul 24 14:53:06 2007 +0100
parents:     8eaee9ef472f
children:    96f64f4c42f0
/******************************************************************************
 * kexec.c - Architecture independent kexec code for Xen
 *
 * Xen port written by:
 * - Simon 'Horms' Horman <horms@verge.net.au>
 * - Magnus Damm <magnus@valinux.co.jp>
 */

#include <xen/lib.h>
#include <xen/ctype.h>
#include <xen/errno.h>
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/types.h>
#include <xen/kexec.h>
#include <xen/keyhandler.h>
#include <public/kexec.h>
#include <xen/cpumask.h>
#include <asm/atomic.h>
#include <xen/spinlock.h>
#include <xen/version.h>
#include <xen/console.h>
#include <public/elfnote.h>

#ifndef COMPAT

typedef long ret_t;

static DEFINE_PER_CPU(void *, crash_notes);

static Elf_Note *xen_crash_note;

static cpumask_t crash_saved_cpus;

static xen_kexec_image_t kexec_image[KEXEC_IMAGE_NR];

#define KEXEC_FLAG_DEFAULT_POS (KEXEC_IMAGE_NR + 0)
#define KEXEC_FLAG_CRASH_POS (KEXEC_IMAGE_NR + 1)
#define KEXEC_FLAG_IN_PROGRESS (KEXEC_IMAGE_NR + 2)
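
/*
 * kexec_flags layout: bits [0, KEXEC_IMAGE_NR) record which image slots
 * currently hold a loaded image, the *_POS bits select which slot of each
 * type's pair is active, and IN_PROGRESS marks that a kexec has started.
 */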
static unsigned long kexec_flags = 0; /* the lowest bits are for KEXEC_IMAGE... */

static spinlock_t kexec_lock = SPIN_LOCK_UNLOCKED;

xen_kexec_reserve_t kexec_crash_area;
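
/*
 * Parse the "crashkernel=<size>[@<offset>]" boot parameter; both values go
 * through parse_size_and_unit(), so the usual size suffixes should be
 * accepted (e.g. "crashkernel=64M@32M").
 */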
static void __init parse_crashkernel(const char *str)
{
    kexec_crash_area.size = parse_size_and_unit(str, &str);
    if ( *str == '@' )
        kexec_crash_area.start = parse_size_and_unit(str+1, NULL);
}
custom_param("crashkernel", parse_crashkernel);
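
/*
 * Claim the kexec for the calling CPU.  Used on both the reboot
 * (KEXEC_TYPE_DEFAULT) and crash paths; a CPU that loses the race
 * spins here forever.
 */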
static void one_cpu_only(void)
{
    /* Only allow the first cpu to continue - force other cpus to spin */
    if ( test_and_set_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags) )
        for ( ; ; ) ;
}

/* Save the registers in the per-cpu crash note buffer. */
void kexec_crash_save_cpu(void)
{
    int cpu = smp_processor_id();
    Elf_Note *note = per_cpu(crash_notes, cpu);
    ELF_Prstatus *prstatus;
    crash_xen_core_t *xencore;

    if ( cpu_test_and_set(cpu, crash_saved_cpus) )
        return;

    prstatus = (ELF_Prstatus *)ELFNOTE_DESC(note);

    note = ELFNOTE_NEXT(note);
    xencore = (crash_xen_core_t *)ELFNOTE_DESC(note);

    elf_core_save_regs(&prstatus->pr_reg, xencore);
}

/* Set up the single Xen-specific-info crash note. */
crash_xen_info_t *kexec_crash_save_info(void)
{
    int cpu = smp_processor_id();
    crash_xen_info_t info;
    crash_xen_info_t *out = (crash_xen_info_t *)ELFNOTE_DESC(xen_crash_note);

    BUG_ON(!cpu_test_and_set(cpu, crash_saved_cpus));

    memset(&info, 0, sizeof(info));
    info.xen_major_version = xen_major_version();
    info.xen_minor_version = xen_minor_version();
    info.xen_extra_version = __pa(xen_extra_version());
    info.xen_changeset = __pa(xen_changeset());
    info.xen_compiler = __pa(xen_compiler());
    info.xen_compile_date = __pa(xen_compile_date());
    info.xen_compile_time = __pa(xen_compile_time());
    info.tainted = tainted;

    /* Copy from guaranteed-aligned local copy to possibly-unaligned dest. */
    memcpy(out, &info, sizeof(info));

    return out;
}
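
/*
 * Crash entry point: if a crash image is loaded, switch the console to
 * synchronous mode, elect a single CPU, save its register state, shut the
 * machine down and jump into the crash kernel.  Returns only if no crash
 * image is loaded.
 */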
void kexec_crash(void)
{
    int pos;

    pos = (test_bit(KEXEC_FLAG_CRASH_POS, &kexec_flags) != 0);
    if ( !test_bit(KEXEC_IMAGE_CRASH_BASE + pos, &kexec_flags) )
        return;

    console_start_sync();

    one_cpu_only();
    kexec_crash_save_cpu();
    machine_crash_shutdown();

    machine_kexec(&kexec_image[KEXEC_IMAGE_CRASH_BASE + pos]);

    BUG();
}
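
/*
 * 'C' debug-key handler.  The second printk is only reached if
 * kexec_crash() returned, i.e. no crash kernel was loaded.
 */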
static void do_crashdump_trigger(unsigned char key)
{
    printk("'%c' pressed -> triggering crashdump\n", key);
    kexec_crash();
    printk(" * no crash kernel loaded!\n");
}

static __init int register_crashdump_trigger(void)
{
    register_keyhandler('C', do_crashdump_trigger, "trigger a crashdump");
    return 0;
}
__initcall(register_crashdump_trigger);
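
/* Fill in an ELF note header and name; the caller writes the descriptor. */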
static void setup_note(Elf_Note *n, const char *name, int type, int descsz)
{
    int l = strlen(name) + 1;
    strlcpy(ELFNOTE_NAME(n), name, l);
    n->namesz = l;
    n->descsz = descsz;
    n->type = type;
}
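
/* Space an ELF note occupies: header plus padded name and descriptor. */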
static int sizeof_note(const char *name, int descsz)
{
    return (sizeof(Elf_Note) +
            ELFNOTE_ALIGN(strlen(name)+1) +
            ELFNOTE_ALIGN(descsz));
}
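
/*
 * The kexec_get_* handlers below are compiled twice; kexec_get() mangles
 * their names so that the compat build (which re-includes this file via
 * compat/kexec.c, presumably with its own definition of this macro) gets
 * distinct symbols.
 */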
#define kexec_get(x) kexec_get_##x

#endif
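
/* Report the machine address range reserved for the crash kernel, if any. */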
static int kexec_get(reserve)(xen_kexec_range_t *range)
{
    if ( kexec_crash_area.size > 0 && kexec_crash_area.start > 0) {
        range->start = kexec_crash_area.start;
        range->size = kexec_crash_area.size;
    }
    else
        range->start = range->size = 0;
    return 0;
}
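
/* Report the machine address range covered by the Xen image and heap. */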
static int kexec_get(xen)(xen_kexec_range_t *range)
{
#ifdef CONFIG_X86_64
    range->start = xenheap_phys_start;
#else
    range->start = virt_to_maddr(_start);
#endif
    range->size = (unsigned long)xenheap_phys_end - (unsigned long)range->start;
    return 0;
}
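
/*
 * Report (and allocate on first use) the crash note buffer for CPU 'nr'.
 * Each CPU gets a CORE (prstatus) note and a Xen crash-regs note; CPU0's
 * buffer also carries the system-wide Xen info note.
 */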
static int kexec_get(cpu)(xen_kexec_range_t *range)
{
    int nr = range->nr;
    int nr_bytes = 0;

    if ( nr < 0 || nr >= num_present_cpus() )
        return -EINVAL;

    nr_bytes += sizeof_note("CORE", sizeof(ELF_Prstatus));
    nr_bytes += sizeof_note("Xen", sizeof(crash_xen_core_t));

    /* The Xen info note is included in CPU0's range. */
    if ( nr == 0 )
        nr_bytes += sizeof_note("Xen", sizeof(crash_xen_info_t));

    if ( per_cpu(crash_notes, nr) == NULL )
    {
        Elf_Note *note;

        note = per_cpu(crash_notes, nr) = xmalloc_bytes(nr_bytes);

        if ( note == NULL )
            return -ENOMEM;

        /* Setup CORE note. */
        setup_note(note, "CORE", NT_PRSTATUS, sizeof(ELF_Prstatus));

        /* Setup Xen CORE note. */
        note = ELFNOTE_NEXT(note);
        setup_note(note, "Xen", XEN_ELFNOTE_CRASH_REGS, sizeof(crash_xen_core_t));

        if (nr == 0)
        {
            /* Setup system wide Xen info note. */
            xen_crash_note = note = ELFNOTE_NEXT(note);
            setup_note(note, "Xen", XEN_ELFNOTE_CRASH_INFO, sizeof(crash_xen_info_t));
        }
    }

    range->start = __pa((unsigned long)per_cpu(crash_notes, nr));
    range->size = nr_bytes;
    return 0;
}
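
/* KEXEC_CMD_kexec_get_range handler: dispatch on the requested range type. */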
static int kexec_get(range)(XEN_GUEST_HANDLE(void) uarg)
{
    xen_kexec_range_t range;
    int ret = -EINVAL;

    if ( unlikely(copy_from_guest(&range, uarg, 1)) )
        return -EFAULT;

    switch ( range.range )
    {
    case KEXEC_RANGE_MA_CRASH:
        ret = kexec_get(reserve)(&range);
        break;
    case KEXEC_RANGE_MA_XEN:
        ret = kexec_get(xen)(&range);
        break;
    case KEXEC_RANGE_MA_CPU:
        ret = kexec_get(cpu)(&range);
        break;
    }

    if ( ret == 0 && unlikely(copy_to_guest(uarg, &range, 1)) )
        return -EFAULT;

    return ret;
}

#ifndef COMPAT
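
/*
 * Map a KEXEC_TYPE_* value to its image slot base and to the flag bit that
 * selects the active slot of the pair.  Returns -1 for unknown types.
 */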
static int kexec_load_get_bits(int type, int *base, int *bit)
{
    switch ( type )
    {
    case KEXEC_TYPE_DEFAULT:
        *base = KEXEC_IMAGE_DEFAULT_BASE;
        *bit = KEXEC_FLAG_DEFAULT_POS;
        break;
    case KEXEC_TYPE_CRASH:
        *base = KEXEC_IMAGE_CRASH_BASE;
        *bit = KEXEC_FLAG_CRASH_POS;
        break;
    default:
        return -1;
    }
    return 0;
}

#endif
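
/*
 * KEXEC_CMD_kexec_load/unload handler.  Each type has two image slots: a
 * new image is loaded into the inactive one and the position bit is then
 * flipped to make it active, so there is no window with no valid image;
 * the previously active image (if any) is unloaded afterwards.
 */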
static int kexec_load_unload(unsigned long op, XEN_GUEST_HANDLE(void) uarg)
{
    xen_kexec_load_t load;
    xen_kexec_image_t *image;
    int base, bit, pos;
    int ret = 0;

    if ( unlikely(copy_from_guest(&load, uarg, 1)) )
        return -EFAULT;

    if ( kexec_load_get_bits(load.type, &base, &bit) )
        return -EINVAL;

    pos = (test_bit(bit, &kexec_flags) != 0);

    /* Load the user data into an unused image */
    if ( op == KEXEC_CMD_kexec_load )
    {
        image = &kexec_image[base + !pos];

        BUG_ON(test_bit((base + !pos), &kexec_flags)); /* must be free */

#ifndef COMPAT
        memcpy(image, &load.image, sizeof(*image));
#else
        XLAT_kexec_image(image, &load.image);
#endif

        if ( !(ret = machine_kexec_load(load.type, base + !pos, image)) )
        {
            /* Set image present bit */
            set_bit((base + !pos), &kexec_flags);

            /* Make new image the active one */
            change_bit(bit, &kexec_flags);
        }
    }

    /* Unload the old image if present and load successful */
    if ( ret == 0 && !test_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags) )
    {
        if ( test_and_clear_bit((base + pos), &kexec_flags) )
        {
            image = &kexec_image[base + pos];
            machine_kexec_unload(load.type, base + pos, image);
        }
    }

    return ret;
}

#ifndef COMPAT
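
/*
 * KEXEC_CMD_kexec handler: jump into a previously loaded image, either via
 * an orderly reboot (KEXEC_TYPE_DEFAULT) or the crash path.  Does not
 * return on success.
 */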
static int kexec_exec(XEN_GUEST_HANDLE(void) uarg)
{
    xen_kexec_exec_t exec;
    xen_kexec_image_t *image;
    int base, bit, pos;

    if ( unlikely(copy_from_guest(&exec, uarg, 1)) )
        return -EFAULT;

    if ( kexec_load_get_bits(exec.type, &base, &bit) )
        return -EINVAL;

    pos = (test_bit(bit, &kexec_flags) != 0);

    /* Only allow kexec/kdump into loaded images */
    if ( !test_bit(base + pos, &kexec_flags) )
        return -ENOENT;

    switch (exec.type)
    {
    case KEXEC_TYPE_DEFAULT:
        image = &kexec_image[base + pos];
        one_cpu_only();
        machine_reboot_kexec(image); /* Does not return */
        break;
    case KEXEC_TYPE_CRASH:
        kexec_crash(); /* Does not return */
        break;
    }

    return -EINVAL; /* never reached */
}

#endif
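
/*
 * Kexec hypercall entry point.  Restricted to privileged domains;
 * load/unload is serialised by kexec_lock and refused once a kexec is in
 * progress.
 */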
ret_t do_kexec_op(unsigned long op, XEN_GUEST_HANDLE(void) uarg)
{
    unsigned long flags;
    int ret = -EINVAL;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    switch ( op )
    {
    case KEXEC_CMD_kexec_get_range:
        ret = kexec_get(range)(uarg);
        break;
    case KEXEC_CMD_kexec_load:
    case KEXEC_CMD_kexec_unload:
        spin_lock_irqsave(&kexec_lock, flags);
        if (!test_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags))
        {
            ret = kexec_load_unload(op, uarg);
        }
        spin_unlock_irqrestore(&kexec_lock, flags);
        break;
    case KEXEC_CMD_kexec:
        ret = kexec_exec(uarg);
        break;
    }

    return ret;
}

#if defined(CONFIG_COMPAT) && !defined(COMPAT)
#include "compat/kexec.c"
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */