ia64/xen-unstable

view xen/arch/ia64/linux-xen/efi.c @ 18095:2fd648307ad1

[IA64] kexec: Map EFI regions into the same place they are mapped into in Linux

Map EFI regions into the same place they are mapped into in Linux.

This is because of an unfortunate problem with the way that EFI interacts
with kexec. The firmware call that maps the EFI runtime regions into
virtual mode (SetVirtualAddressMap()) may only be made once. This means
that after a kexec the EFI regions must be mapped into the same place
that they were mapped into prior to the kexec.

This is not usually a problem when kexecing from xen to xen or from linux
to linux, as the mapping will be the same. However when kexecing from xen
to linux or linux to xen, the mapping is different, and the problem
manifests.

So far Magnus Damm and I have come up with three different ideas for
resolving this problem.

1. Leave the EFI in physical mode
- This is nice and simple
- There is a potential performance hit, but PAL calls are not
made very often, so it shouldn't be a problem
- I have patches to do this, some of which are in the
series that accompany this patch.
- The SGI people tell me that it won't work on SN because
it allows the OS to provide EFI (or SAL?) code.

2. Always map EFI into the space that Linux uses
- Not so simple
- Requires Xen to jump through some hoops
- But leaves Linux unmodified
- But it will break if Linux ever changes its mapping
- This patch series implements this change

3. Always map EFI to some agreed space
- Similar to 2. but less likely to break in the future
- But it requires Xen and Linux to agree on a space to be used
- Requires both Xen and Linux to be modified
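
For concreteness, option 2 as implemented by this series boils down to the
following hunk in efi_enter_virtual_mode() (shown in full in the source
below; the __IA64_EFI_*_OFFSET constants are defined elsewhere in the Xen
tree):

    /* Pin each EFI runtime region at a fixed offset from its physical
     * address, instead of whatever __va() would produce, so the mapping
     * is identical before and after kexec. */
    if (md->attribute & EFI_MEMORY_RUNTIME) {
            if (md->attribute & EFI_MEMORY_WB)
                    md->virt_addr = __IA64_EFI_CACHED_OFFSET | md->phys_addr;
            else if (md->attribute & (EFI_MEMORY_UC|EFI_MEMORY_WC|EFI_MEMORY_WT))
                    md->virt_addr = __IA64_EFI_UNCACHED_OFFSET | md->phys_addr;
    }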

Cc: Isaku Yamahata <yamahata@valinux.co.jp>
Cc: Tristan Gingold <tgingold@free.fr>
Cc: Alex Williamson <alex.williamson@hp.com>
Cc: Aron Griffis <aron@hp.com>
Signed-off-by: Simon Horman <horms@verge.net.au>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Jul 22 12:15:02 2008 +0900 (2008-07-22)
parents 7da7b53b2139
children 853476b12f56
line source
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/efi.h>
#include <linux/kexec.h>

#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca.h>

#define EFI_DEBUG	0
extern efi_status_t efi_call_phys (void *, ...);

#ifdef XEN
/* this should be defined in linux/kernel.h */
extern unsigned long long memparse (char *ptr, char **retptr);
/* this should be defined in linux/efi.h */
//#define EFI_INVALID_TABLE_ADDR (void *)(~0UL)
#endif

struct efi efi;
EXPORT_SYMBOL(efi);
static efi_runtime_services_t *runtime;
#if defined(XEN) && !defined(CONFIG_VIRTUAL_FRAME_TABLE)
// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
static unsigned long mem_limit = ~0UL, max_addr = 0x100000000UL, min_addr = 0UL;
#else
static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
#endif

#define efi_call_virt(f, args...)	(*(f))(args)
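
/*
 * The STUB_* macros below generate one wrapper per EFI runtime service,
 * in two flavours: a "phys" variant that converts pointer arguments to
 * physical addresses before calling firmware in physical mode, and a
 * "virt" variant that passes arguments through unchanged once EFI has
 * been switched into virtual mode.  Each wrapper saves and restores the
 * scratch FP registers around the firmware call.  Under Xen, the
 * XEN_EFI_RR_DECLARE/ENTER/LEAVE hooks (defined elsewhere in the Xen
 * tree) additionally switch region registers rr6/rr7 around the call so
 * that firmware runs under the EFI mapping described in the changeset
 * comment above.
 */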
#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_time_cap_t *atc = NULL; \
        efi_status_t ret; \
        XEN_EFI_RR_DECLARE(rr6, rr7); \
\
        if (tc) \
                atc = adjust_arg(tc); \
        ia64_save_scratch_fpregs(fr); \
        XEN_EFI_RR_ENTER(rr6, rr7); \
        ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
        XEN_EFI_RR_LEAVE(rr6, rr7); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
        XEN_EFI_RR_DECLARE(rr6, rr7); \
\
        ia64_save_scratch_fpregs(fr); \
        XEN_EFI_RR_ENTER(rr6, rr7); \
        ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
        XEN_EFI_RR_LEAVE(rr6, rr7); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
        XEN_EFI_RR_DECLARE(rr6, rr7); \
\
        ia64_save_scratch_fpregs(fr); \
        XEN_EFI_RR_ENTER(rr6, rr7); \
        ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
                                adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
        XEN_EFI_RR_LEAVE(rr6, rr7); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_time_t *atm = NULL; \
        efi_status_t ret; \
        XEN_EFI_RR_DECLARE(rr6, rr7); \
\
        if (tm) \
                atm = adjust_arg(tm); \
        ia64_save_scratch_fpregs(fr); \
        XEN_EFI_RR_ENTER(rr6, rr7); \
        ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
                                enabled, atm); \
        XEN_EFI_RR_LEAVE(rr6, rr7); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
                       unsigned long *data_size, void *data) \
{ \
        struct ia64_fpreg fr[6]; \
        u32 *aattr = NULL; \
        efi_status_t ret; \
        XEN_EFI_RR_DECLARE(rr6, rr7); \
\
        if (attr) \
                aattr = adjust_arg(attr); \
        ia64_save_scratch_fpregs(fr); \
        XEN_EFI_RR_ENTER(rr6, rr7); \
        ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
                                adjust_arg(name), adjust_arg(vendor), aattr, \
                                adjust_arg(data_size), adjust_arg(data)); \
        XEN_EFI_RR_LEAVE(rr6, rr7); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
        XEN_EFI_RR_DECLARE(rr6, rr7); \
\
        ia64_save_scratch_fpregs(fr); \
        XEN_EFI_RR_ENTER(rr6, rr7); \
        ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
                                adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
        XEN_EFI_RR_LEAVE(rr6, rr7); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
                       unsigned long data_size, void *data) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
        XEN_EFI_RR_DECLARE(rr6, rr7); \
\
        ia64_save_scratch_fpregs(fr); \
        XEN_EFI_RR_ENTER(rr6, rr7); \
        ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
                                adjust_arg(name), adjust_arg(vendor), attr, data_size, \
                                adjust_arg(data)); \
        XEN_EFI_RR_LEAVE(rr6, rr7); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_status_t ret; \
        XEN_EFI_RR_DECLARE(rr6, rr7); \
\
        ia64_save_scratch_fpregs(fr); \
        XEN_EFI_RR_ENTER(rr6, rr7); \
        ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
                                __va(runtime->get_next_high_mono_count), adjust_arg(count)); \
        XEN_EFI_RR_LEAVE(rr6, rr7); \
        ia64_load_scratch_fpregs(fr); \
        return ret; \
}

#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
                       unsigned long data_size, efi_char16_t *data) \
{ \
        struct ia64_fpreg fr[6]; \
        efi_char16_t *adata = NULL; \
        XEN_EFI_RR_DECLARE(rr6, rr7); \
\
        if (data) \
                adata = adjust_arg(data); \
\
        ia64_save_scratch_fpregs(fr); \
        XEN_EFI_RR_ENTER(rr6, rr7); \
        efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
                          reset_type, status, data_size, adata); \
        /* should not return, but just in case... */ \
        XEN_EFI_RR_LEAVE(rr6, rr7); \
        ia64_load_scratch_fpregs(fr); \
}
#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

#define id(arg)	arg

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)
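
/*
 * The expansions above define phys_get_time() ... phys_reset_system() and
 * virt_get_time() ... virt_reset_system().  efi_init() initially installs
 * the phys_* set in the global "efi" operations table; once
 * efi_enter_virtual_mode() succeeds, it replaces them with the virt_* set.
 */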
#ifndef XEN
void
efi_gettimeofday (struct timespec *ts)
{
        efi_time_t tm;

        memset(ts, 0, sizeof(*ts));
        if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
                return;

        ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
        ts->tv_nsec = tm.nanosecond;
}
#endif
static int
is_memory_available (efi_memory_desc_t *md)
{
        if (!(md->attribute & EFI_MEMORY_WB))
                return 0;

        switch (md->type) {
        case EFI_LOADER_CODE:
        case EFI_LOADER_DATA:
        case EFI_BOOT_SERVICES_CODE:
        case EFI_BOOT_SERVICES_DATA:
        case EFI_CONVENTIONAL_MEMORY:
                return 1;
        }
        return 0;
}

typedef struct kern_memdesc {
        u64 attribute;
        u64 start;
        u64 num_pages;
} kern_memdesc_t;

static kern_memdesc_t *kern_memmap;

#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)

static inline u64
kmd_end(kern_memdesc_t *kmd)
{
        return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
}

static inline u64
efi_md_end(efi_memory_desc_t *md)
{
        return (md->phys_addr + efi_md_size(md));
}

static inline int
efi_wb(efi_memory_desc_t *md)
{
        return (md->attribute & EFI_MEMORY_WB);
}

static inline int
efi_uc(efi_memory_desc_t *md)
{
        return (md->attribute & EFI_MEMORY_UC);
}
static void
walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
        kern_memdesc_t *k;
        u64 start, end, voff;

        voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
        for (k = kern_memmap; k->start != ~0UL; k++) {
                if (k->attribute != attr)
                        continue;
                start = PAGE_ALIGN(k->start);
                end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
                if (start < end)
                        if ((*callback)(start + voff, end + voff, arg) < 0)
                                return;
        }
}
/*
 * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 * has memory that is available for OS use.
 */
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
        walk(callback, arg, EFI_MEMORY_WB);
}
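
/*
 * Example (hypothetical, for illustration only): a callback that tallies
 * the WB memory handed to the OS.  The callback receives virtual
 * addresses (PAGE_OFFSET-based), and returning a negative value stops
 * the walk:
 *
 *	static int __init count_wb (u64 start, u64 end, void *arg)
 *	{
 *		*(u64 *)arg += end - start;
 *		return 0;
 *	}
 *
 *	u64 total = 0;
 *	efi_memmap_walk(count_wb, &total);
 */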
/*
 * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 * has memory that is available for the uncached allocator.
 */
void
efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
{
        walk(callback, arg, EFI_MEMORY_UC);
}

/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer chapter 11 in ADAG.
 */
#ifdef XEN
static void *
__efi_get_pal_addr (void)
#else
void *
efi_get_pal_addr (void)
#endif
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;
        int pal_code_count = 0;
        u64 vaddr, mask;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (md->type != EFI_PAL_CODE)
                        continue;

                if (++pal_code_count > 1) {
                        printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
                               md->phys_addr);
                        continue;
                }
                /*
                 * The only ITLB entry in region 7 that is used is the one installed by
                 * __start().  That entry covers a 64MB range.
                 */
                mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
                vaddr = PAGE_OFFSET + md->phys_addr;

                /*
                 * We must check that the PAL mapping won't overlap with the kernel
                 * mapping.
                 *
                 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
                 * 256KB and that only one ITR is needed to map it. This implies that the
                 * PAL code is always aligned on its size, i.e., the closest matching page
                 * size supported by the TLB. Therefore PAL code is guaranteed never to
                 * cross a 64MB boundary unless it is bigger than 64MB (very unlikely!).
                 * So for now the following test is enough to determine whether or not
                 * we need a dedicated ITR for the PAL code.
                 */
                if ((vaddr & mask) == (KERNEL_START & mask)) {
                        printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
                               __FUNCTION__);
                        continue;
                }

                if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
                        panic("Woah!  PAL code size bigger than a granule!");

#if EFI_DEBUG
                mask = ~((1 << IA64_GRANULE_SHIFT) - 1);

                printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
                       smp_processor_id(), md->phys_addr,
                       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
                       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
                return __va_efi(md->phys_addr);
        }
        printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
               __FUNCTION__);
        return NULL;
}
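
/*
 * Under Xen the PAL address is looked up once and cached, so later calls
 * to efi_map_pal_code()/efi_unmap_pal_code() (e.g. when each CPU pins
 * PAL code) reuse the cached value instead of rescanning the EFI memory
 * map.
 */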
#ifdef XEN
static void *pal_vaddr = NULL;

void *
efi_get_pal_addr (void)
{
        if (!pal_vaddr)
                pal_vaddr = __efi_get_pal_addr();
        return pal_vaddr;
}
#endif
#ifdef XEN
static void
__efi_unmap_pal_code (void *pal_vaddr)
{
        ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long)pal_vaddr),
                 IA64_GRANULE_SHIFT);
}

void
efi_unmap_pal_code (void)
{
        void *pal_vaddr = efi_get_pal_addr ();
        u64 psr;

        if (!pal_vaddr)
                return;

        /*
         * Cannot write to CRx with PSR.ic=1
         */
        psr = ia64_clear_ic();
        __efi_unmap_pal_code(pal_vaddr);
        ia64_set_psr(psr);	/* restore psr */
        ia64_srlz_i();
}
#endif
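
/*
 * Pin the granule containing PAL code with an instruction TR so that PAL
 * can be called safely in virtual mode.  Under Xen any existing pin is
 * dropped first, which keeps the operation safe to repeat for nested
 * EFI, PAL or SAL calls (see the comment in the function body).
 */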
void
efi_map_pal_code (void)
{
        void *pal_vaddr = efi_get_pal_addr ();
        u64 psr;

        if (!pal_vaddr)
                return;

        /*
         * Cannot write to CRx with PSR.ic=1
         */
        psr = ia64_clear_ic();
#ifdef XEN
        /* pal_vaddr must be unpinned before pinning.
         * This is needed in the case of a nested EFI, PAL or SAL call. */
        __efi_unmap_pal_code(pal_vaddr);
#endif
        ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
                 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
                 IA64_GRANULE_SHIFT);
        ia64_set_psr(psr);	/* restore psr */
        ia64_srlz_i();
}
void __init
efi_init (void)
{
        void *efi_map_start, *efi_map_end;
        efi_config_table_t *config_tables;
        efi_char16_t *c16;
        u64 efi_desc_size;
        char *cp, vendor[100] = "unknown";
        int i;

        /* it's too early to be able to use the standard kernel command line support... */
#ifdef XEN
        extern char saved_command_line[];
        for (cp = saved_command_line; *cp; ) {
#else
        for (cp = boot_command_line; *cp; ) {
#endif
                if (memcmp(cp, "mem=", 4) == 0) {
                        mem_limit = memparse(cp + 4, &cp);
                } else if (memcmp(cp, "max_addr=", 9) == 0) {
                        max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
                } else if (memcmp(cp, "min_addr=", 9) == 0) {
                        min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
                } else {
                        while (*cp != ' ' && *cp)
                                ++cp;
                        while (*cp == ' ')
                                ++cp;
                }
        }
        if (min_addr != 0UL)
                printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
        if (max_addr != ~0UL)
                printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);

        efi.systab = __va(ia64_boot_param->efi_systab);

        /*
         * Verify the EFI Table
         */
        if (efi.systab == NULL)
                panic("Woah! Can't find EFI system table.\n");
        if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                panic("Woah! EFI system table signature incorrect\n");
        if ((efi.systab->hdr.revision ^ EFI_SYSTEM_TABLE_REVISION) >> 16 != 0)
                printk(KERN_WARNING "Warning: EFI system table major version mismatch: "
                       "got %d.%02d, expected %d.%02d\n",
                       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff,
                       EFI_SYSTEM_TABLE_REVISION >> 16, EFI_SYSTEM_TABLE_REVISION & 0xffff);

        config_tables = __va(efi.systab->tables);

        /* Show what we know for posterity */
        c16 = __va(efi.systab->fw_vendor);
        if (c16) {
                for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
                        vendor[i] = *c16++;
                vendor[i] = '\0';
        }

        printk(KERN_INFO "EFI v%u.%.02u by %s:",
               efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);

        efi.mps        = EFI_INVALID_TABLE_ADDR;
        efi.acpi       = EFI_INVALID_TABLE_ADDR;
        efi.acpi20     = EFI_INVALID_TABLE_ADDR;
        efi.smbios     = EFI_INVALID_TABLE_ADDR;
        efi.sal_systab = EFI_INVALID_TABLE_ADDR;
        efi.boot_info  = EFI_INVALID_TABLE_ADDR;
        efi.hcdp       = EFI_INVALID_TABLE_ADDR;
        efi.uga        = EFI_INVALID_TABLE_ADDR;

        for (i = 0; i < (int) efi.systab->nr_tables; i++) {
                if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
                        efi.mps = config_tables[i].table;
                        printk(" MPS=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
                        efi.acpi20 = config_tables[i].table;
                        printk(" ACPI 2.0=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
                        efi.acpi = config_tables[i].table;
                        printk(" ACPI=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
                        efi.smbios = config_tables[i].table;
                        printk(" SMBIOS=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
                        efi.sal_systab = config_tables[i].table;
                        printk(" SALsystab=0x%lx", config_tables[i].table);
                } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
                        efi.hcdp = config_tables[i].table;
                        printk(" HCDP=0x%lx", config_tables[i].table);
                }
        }
        printk("\n");

        runtime = __va(efi.systab->runtime);
        efi.get_time = phys_get_time;
        efi.set_time = phys_set_time;
        efi.get_wakeup_time = phys_get_wakeup_time;
        efi.set_wakeup_time = phys_set_wakeup_time;
        efi.get_variable = phys_get_variable;
        efi.get_next_variable = phys_get_next_variable;
        efi.set_variable = phys_set_variable;
        efi.get_next_high_mono_count = phys_get_next_high_mono_count;
        efi.reset_system = phys_reset_system;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
        /* print EFI memory map: */
        {
                efi_memory_desc_t *md;
                void *p;

                for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
                        md = p;
                        printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
                               i, md->type, md->attribute, md->phys_addr,
                               md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
                               md->num_pages >> (20 - EFI_PAGE_SHIFT));
                }
        }
#endif

#ifndef XEN
        efi_map_pal_code();
#endif
        efi_enter_virtual_mode();
}
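
/*
 * Switch EFI runtime services into virtual mode.  Per the EFI spec,
 * SetVirtualAddressMap() may be called exactly once, so the map chosen
 * here is the one every subsequently kexec'd kernel must live with.
 * That is why the XEN path below pins each runtime region at a fixed
 * offset from its physical address (matching the layout Linux uses)
 * instead of taking whatever __va() would yield, as described in the
 * changeset comment at the top of this page.
 */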
void
efi_enter_virtual_mode (void)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        efi_status_t status;
        u64 efi_desc_size;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
#ifdef XEN
                if (md->attribute & EFI_MEMORY_RUNTIME) {
                        if (md->attribute & EFI_MEMORY_WB)
                                md->virt_addr = __IA64_EFI_CACHED_OFFSET |
                                                md->phys_addr;
                        else if (md->attribute & (EFI_MEMORY_UC|EFI_MEMORY_WC|
                                                  EFI_MEMORY_WT))
                                md->virt_addr = __IA64_EFI_UNCACHED_OFFSET |
                                                md->phys_addr;
                }
#else
                if (md->attribute & EFI_MEMORY_RUNTIME) {
                        /*
                         * Some descriptors have multiple bits set, so the order of
                         * the tests is relevant.
                         */
                        if (md->attribute & EFI_MEMORY_WB) {
                                md->virt_addr = (u64) __va(md->phys_addr);
                        } else if (md->attribute & EFI_MEMORY_UC) {
                                md->virt_addr = (u64) ioremap(md->phys_addr, 0);
                        } else if (md->attribute & EFI_MEMORY_WC) {
#if 0
                                md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
                                                                           | _PAGE_D
                                                                           | _PAGE_MA_WC
                                                                           | _PAGE_PL_0
                                                                           | _PAGE_AR_RW));
#else
                                printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
                                md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
                        } else if (md->attribute & EFI_MEMORY_WT) {
#if 0
                                md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
                                                                           | _PAGE_D | _PAGE_MA_WT
                                                                           | _PAGE_PL_0
                                                                           | _PAGE_AR_RW));
#else
                                printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
                                md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
                        }
                }
#endif
        }

        status = efi_call_phys(__va(runtime->set_virtual_address_map),
                               ia64_boot_param->efi_memmap_size,
                               efi_desc_size, ia64_boot_param->efi_memdesc_version,
                               ia64_boot_param->efi_memmap);
        if (status != EFI_SUCCESS) {
                printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
                       "(status=%lu)\n", status);
                return;
        }

        /*
         * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
         */
        efi.get_time = virt_get_time;
        efi.set_time = virt_set_time;
        efi.get_wakeup_time = virt_get_wakeup_time;
        efi.set_wakeup_time = virt_set_wakeup_time;
        efi.get_variable = virt_get_variable;
        efi.get_next_variable = virt_get_next_variable;
        efi.set_variable = virt_set_variable;
        efi.get_next_high_mono_count = virt_get_next_high_mono_count;
        efi.reset_system = virt_reset_system;
}
/*
 * Walk the EFI memory map looking for the I/O port range.  There can only be one entry of
 * this type, other I/O port ranges should be described via ACPI.
 */
u64
efi_get_iobase (void)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
                        if (md->attribute & EFI_MEMORY_UC)
                                return md->phys_addr;
                }
        }
        return 0;
}
static struct kern_memdesc *
kern_memory_descriptor (unsigned long phys_addr)
{
        struct kern_memdesc *md;

        for (md = kern_memmap; md->start != ~0UL; md++) {
                if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
                        return md;
        }
        return NULL;
}

static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;

                if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
                        return md;
        }
        return NULL;
}
u32
efi_mem_type (unsigned long phys_addr)
{
        efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

        if (md)
                return md->type;
        return 0;
}

u64
efi_mem_attributes (unsigned long phys_addr)
{
        efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

        if (md)
                return md->attribute;
        return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);
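
/*
 * efi_mem_attribute() below differs from efi_mem_attributes() above in
 * that it checks a whole range: it returns the common attributes only if
 * every EFI descriptor covering [phys_addr, phys_addr + size) carries
 * the same attributes (ignoring EFI_MEMORY_RUNTIME), and 0 otherwise.
 */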
u64
efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
        unsigned long end = phys_addr + size;
        efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
        u64 attr;

        if (!md)
                return 0;

        /*
         * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
         * the kernel that firmware needs this region mapped.
         */
        attr = md->attribute & ~EFI_MEMORY_RUNTIME;
        do {
                unsigned long md_end = efi_md_end(md);

                if (end <= md_end)
                        return attr;

                md = efi_memory_descriptor(md_end);
                if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
                        return 0;
        } while (md);
        return 0;
}
u64
kern_mem_attribute (unsigned long phys_addr, unsigned long size)
{
        unsigned long end = phys_addr + size;
        struct kern_memdesc *md;
        u64 attr;

        /*
         * This is a hack for ioremap calls before we set up kern_memmap.
         * Maybe we should do efi_memmap_init() earlier instead.
         */
        if (!kern_memmap) {
                attr = efi_mem_attribute(phys_addr, size);
                if (attr & EFI_MEMORY_WB)
                        return EFI_MEMORY_WB;
                return 0;
        }

        md = kern_memory_descriptor(phys_addr);
        if (!md)
                return 0;

        attr = md->attribute;
        do {
                unsigned long md_end = kmd_end(md);

                if (end <= md_end)
                        return attr;

                md = kern_memory_descriptor(md_end);
                if (!md || md->attribute != attr)
                        return 0;
        } while (md);
        return 0;
}
EXPORT_SYMBOL(kern_mem_attribute);
#ifndef XEN
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
        u64 attr;

        /*
         * /dev/mem reads and writes use copy_to_user(), which implicitly
         * uses a granule-sized kernel identity mapping.  It's really
         * only safe to do this for regions in kern_memmap.  For more
         * details, see Documentation/ia64/aliasing.txt.
         */
        attr = kern_mem_attribute(phys_addr, size);
        if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
                return 1;
        return 0;
}

int
valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
{
        /*
         * MMIO regions are often missing from the EFI memory map.
         * We must allow mmap of them for programs like X, so we
         * currently can't do any useful validation.
         */
        return 1;
}

pgprot_t
phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
                     pgprot_t vma_prot)
{
        unsigned long phys_addr = pfn << PAGE_SHIFT;
        u64 attr;

        /*
         * For /dev/mem mmap, we use user mappings, but if the region is
         * in kern_memmap (and hence may be covered by a kernel mapping),
         * we must use the same attribute as the kernel mapping.
         */
        attr = kern_mem_attribute(phys_addr, size);
        if (attr & EFI_MEMORY_WB)
                return pgprot_cacheable(vma_prot);
        else if (attr & EFI_MEMORY_UC)
                return pgprot_noncached(vma_prot);

        /*
         * Some chipsets don't support UC access to memory.  If
         * WB is supported, we prefer that.
         */
        if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
                return pgprot_cacheable(vma_prot);

        return pgprot_noncached(vma_prot);
}
#endif
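
/*
 * Parse the EFI "ConOut" variable, which holds the console output
 * device-path list, and return 1 only if every path in it ends at a
 * UART node, i.e. the firmware console is serial-only.
 */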
int __init
efi_uart_console_only(void)
{
        efi_status_t status;
        char *s, name[] = "ConOut";
        efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
        efi_char16_t *utf16, name_utf16[32];
        unsigned char data[1024];
        unsigned long size = sizeof(data);
        struct efi_generic_dev_path *hdr, *end_addr;
        int uart = 0;

        /* Convert to UTF-16 */
        utf16 = name_utf16;
        s = name;
        while (*s)
                *utf16++ = *s++ & 0x7f;
        *utf16 = 0;

        status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
        if (status != EFI_SUCCESS) {
                printk(KERN_ERR "No EFI %s variable?\n", name);
                return 0;
        }

        hdr = (struct efi_generic_dev_path *) data;
        end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
        while (hdr < end_addr) {
                if (hdr->type == EFI_DEV_MSG &&
                    hdr->sub_type == EFI_DEV_MSG_UART)
                        uart = 1;
                else if (hdr->type == EFI_DEV_END_PATH ||
                         hdr->type == EFI_DEV_END_PATH2) {
                        if (!uart)
                                return 0;
                        if (hdr->sub_type == EFI_DEV_END_ENTIRE)
                                return 1;
                        uart = 0;
                }
                hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
        }
        printk(KERN_ERR "Malformed %s value\n", name);
        return 0;
}
/*
 * Look for the first granule-aligned memory descriptor that is big
 * enough to hold the EFI memory map.  Make sure this descriptor is at
 * least granule sized so it does not get trimmed.
 */
struct kern_memdesc *
find_memmap_space (void)
{
        u64 contig_low = 0, contig_high = 0;
        u64 as = 0, ae;
        void *efi_map_start, *efi_map_end, *p, *q;
        efi_memory_desc_t *md, *pmd = NULL, *check_md;
        u64 space_needed, efi_desc_size;
        unsigned long total_mem = 0;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        /*
         * Worst case: we need 3 kernel descriptors for each efi descriptor
         * (if every entry has a WB part in the middle, and UC head and tail),
         * plus one for the end marker.
         */
        space_needed = sizeof(kern_memdesc_t) *
                (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);

        for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
                md = p;
                if (!efi_wb(md)) {
                        continue;
                }
                if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
                        contig_low = GRANULEROUNDUP(md->phys_addr);
                        contig_high = efi_md_end(md);
                        for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
                                check_md = q;
                                if (!efi_wb(check_md))
                                        break;
                                if (contig_high != check_md->phys_addr)
                                        break;
                                contig_high = efi_md_end(check_md);
                        }
                        contig_high = GRANULEROUNDDOWN(contig_high);
                }
                if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
                        continue;

                /* Round ends inward to granule boundaries */
                as = max(contig_low, md->phys_addr);
                ae = min(contig_high, efi_md_end(md));

                /* keep within max_addr= and min_addr= command line arg */
                as = max(as, min_addr);
                ae = min(ae, max_addr);
                if (ae <= as)
                        continue;

                /* avoid going over mem= command line arg */
                if (total_mem + (ae - as) > mem_limit)
                        ae -= total_mem + (ae - as) - mem_limit;

                if (ae <= as)
                        continue;

                if (ae - as > space_needed)
                        break;
        }
        if (p >= efi_map_end)
                panic("Can't allocate space for kernel memory descriptors");

        return __va(as);
}
/*
 * Walk the EFI memory map and gather all memory available for the
 * kernel to use.  We can allocate partial granules only if the
 * unavailable parts exist, and are WB.
 */
void
efi_memmap_init(unsigned long *s, unsigned long *e)
{
        struct kern_memdesc *k, *prev = NULL;
        u64 contig_low = 0, contig_high = 0;
        u64 as, ae, lim;
        void *efi_map_start, *efi_map_end, *p, *q;
        efi_memory_desc_t *md, *pmd = NULL, *check_md;
        u64 efi_desc_size;
        unsigned long total_mem = 0;

        k = kern_memmap = find_memmap_space();

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
                md = p;
                if (!efi_wb(md)) {
                        if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
                                           md->type == EFI_BOOT_SERVICES_DATA)) {
                                k->attribute = EFI_MEMORY_UC;
                                k->start = md->phys_addr;
                                k->num_pages = md->num_pages;
                                k++;
                        }
                        continue;
                }
#ifdef XEN
                /* this works around a problem in the ski bootloader */
                if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
                        continue;
#endif
                if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
                        contig_low = GRANULEROUNDUP(md->phys_addr);
                        contig_high = efi_md_end(md);
                        for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
                                check_md = q;
                                if (!efi_wb(check_md))
                                        break;
                                if (contig_high != check_md->phys_addr)
                                        break;
                                contig_high = efi_md_end(check_md);
                        }
                        contig_high = GRANULEROUNDDOWN(contig_high);
                }
                if (!is_memory_available(md))
                        continue;

#ifdef CONFIG_CRASH_DUMP
                /* saved_max_pfn should ignore max_addr= command line arg */
                if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
                        saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
#endif
                /*
                 * Round ends inward to granule boundaries
                 * Give trimmings to uncached allocator
                 */
                if (md->phys_addr < contig_low) {
                        lim = min(efi_md_end(md), contig_low);
                        if (efi_uc(md)) {
                                if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
                                    kmd_end(k-1) == md->phys_addr) {
                                        (k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
                                } else {
                                        k->attribute = EFI_MEMORY_UC;
                                        k->start = md->phys_addr;
                                        k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
                                        k++;
                                }
                        }
                        as = contig_low;
                } else
                        as = md->phys_addr;

                if (efi_md_end(md) > contig_high) {
                        lim = max(md->phys_addr, contig_high);
                        if (efi_uc(md)) {
                                if (lim == md->phys_addr && k > kern_memmap &&
                                    (k-1)->attribute == EFI_MEMORY_UC &&
                                    kmd_end(k-1) == md->phys_addr) {
                                        (k-1)->num_pages += md->num_pages;
                                } else {
                                        k->attribute = EFI_MEMORY_UC;
                                        k->start = lim;
                                        k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
                                        k++;
                                }
                        }
                        ae = contig_high;
                } else
                        ae = efi_md_end(md);

                /* keep within max_addr= and min_addr= command line arg */
                as = max(as, min_addr);
                ae = min(ae, max_addr);
                if (ae <= as)
                        continue;

                /* avoid going over mem= command line arg */
                if (total_mem + (ae - as) > mem_limit)
                        ae -= total_mem + (ae - as) - mem_limit;

                if (ae <= as)
                        continue;
                if (prev && kmd_end(prev) == md->phys_addr) {
                        prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
                        total_mem += ae - as;
                        continue;
                }
                k->attribute = EFI_MEMORY_WB;
                k->start = as;
                k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
                total_mem += ae - as;
                prev = k++;
        }
        k->start = ~0L; /* end-marker */

        /* reserve the memory we are using for kern_memmap */
        *s = (u64)kern_memmap;
        *e = (u64)++k;
}
#ifndef XEN
void
efi_initialize_iomem_resources(struct resource *code_resource,
                               struct resource *data_resource)
{
        struct resource *res;
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;
        char *name;
        unsigned long flags;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        res = NULL;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;

                if (md->num_pages == 0) /* should not happen */
                        continue;

                flags = IORESOURCE_MEM;
                switch (md->type) {

                case EFI_MEMORY_MAPPED_IO:
                case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
                        continue;

                case EFI_LOADER_CODE:
                case EFI_LOADER_DATA:
                case EFI_BOOT_SERVICES_DATA:
                case EFI_BOOT_SERVICES_CODE:
                case EFI_CONVENTIONAL_MEMORY:
                        if (md->attribute & EFI_MEMORY_WP) {
                                name = "System ROM";
                                flags |= IORESOURCE_READONLY;
                        } else {
                                name = "System RAM";
                        }
                        break;

                case EFI_ACPI_MEMORY_NVS:
                        name = "ACPI Non-volatile Storage";
                        flags |= IORESOURCE_BUSY;
                        break;

                case EFI_UNUSABLE_MEMORY:
                        name = "reserved";
                        flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
                        break;

                case EFI_RESERVED_TYPE:
                case EFI_RUNTIME_SERVICES_CODE:
                case EFI_RUNTIME_SERVICES_DATA:
                case EFI_ACPI_RECLAIM_MEMORY:
                default:
                        name = "reserved";
                        flags |= IORESOURCE_BUSY;
                        break;
                }

                if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
                        printk(KERN_ERR "failed to allocate resource for iomem\n");
                        return;
                }

                res->name = name;
                res->start = md->phys_addr;
                res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
                res->flags = flags;

                if (insert_resource(&iomem_resource, res) < 0)
                        kfree(res);
                else {
                        /*
                         * We don't know which region contains
                         * kernel data so we try it repeatedly and
                         * let the resource manager test it.
                         */
                        insert_resource(res, code_resource);
                        insert_resource(res, data_resource);
#ifdef CONFIG_KEXEC
                        insert_resource(res, &efi_memmap_res);
                        insert_resource(res, &boot_param_res);
                        if (crashk_res.end > crashk_res.start)
                                insert_resource(res, &crashk_res);
#endif
                }
        }
}
#endif /* XEN */
#if defined(CONFIG_KEXEC) || defined(XEN)
/*
 * Find a block of memory aligned to 64M, excluding reserved regions.
 * rsvd_regions must be sorted.
 */
unsigned long __init
kdump_find_rsvd_region (unsigned long size,
                        struct rsvd_region *r, int n)
{
        int i;
        u64 start, end;
        u64 alignment = 1UL << _PAGE_SIZE_64M;
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (!efi_wb(md))
                        continue;
                start = ALIGN(md->phys_addr, alignment);
                end = efi_md_end(md);
                for (i = 0; i < n; i++) {
                        if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
                                if (__pa(r[i].start) > start + size)
                                        return start;
                                start = ALIGN(__pa(r[i].end), alignment);
                                if (i < n-1 && __pa(r[i+1].start) < start + size)
                                        continue;
                                else
                                        break;
                        }
                }
                if (end > start + size)
                        return start;
        }

        printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n",
               size);
        return ~0UL;
}
#endif
#ifndef XEN
#ifdef CONFIG_PROC_VMCORE
/* locate the size of the descriptor at a certain address */
unsigned long
vmcore_find_descriptor_size (unsigned long address)
{
        void *efi_map_start, *efi_map_end, *p;
        efi_memory_desc_t *md;
        u64 efi_desc_size;
        unsigned long ret = 0;

        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
        efi_desc_size = ia64_boot_param->efi_memdesc_size;

        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
                md = p;
                if (efi_wb(md) && md->type == EFI_LOADER_DATA
                    && md->phys_addr == address) {
                        ret = efi_md_size(md);
                        break;
                }
        }

        if (ret == 0)
                printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");

        return ret;
}
#endif
#endif /* XEN */