ia64/xen-unstable

view xen/include/asm-ia64/xenpage.h @ 18095:2fd648307ad1

[IA64] kexec: Map EFI regions into the same place they are mapped into in Linux

Map EFI regions into the same place they are mapped into in Linux

This is because of an unfortunate problem with the way that EFI interacts
with Kexec. The call to map the EFI regions may only be made once. This
means that after Kexec the EFI regions must be mapped into the same region
that they were mapped into prior to Kexec.

This is not usually a problem when kexecing from xen to xen or from linux
to linux, as the mapping will be the same. However when kexecing from xen
to linux or linux to xen, the mapping is different, and the problem
manifests.

So far Magnus Damm and I have come up with three different ideas for
resolving this problem.

1. Leave the EFI in physical mode
- This is nice and simple
- There is a potential performance hit, but PAL calls are not
made very often, so it shouldn't be a problem
- I have patches to do this, some of which are in the
series that accompany this patch.
- The SGI people tell me that it won't work on SN because
it allows the OS to provide EFI (or SAL?) code.

2. Always map EFI into the space that Linux uses
- Not so simple
- Requires Xen to jump through some hoops
- But leaves Linux unmodified
- But it will break if Linux ever changes its mapping
- This patch series implements this change

3. Always map EFI to some agreed space
- Similar to 2. but less likely to break in the future
- But it requires Xen and Linux to agree on a space to be used
- Requires both Xen and Linux to be modified

Cc: Isaku Yamahata <yamahata@valinux.co.jp>
Cc: Tristan Gingold <tgingold@free.fr>
Cc: Alex Williamson <alex.williamson@hp.com>
Cc: Aron Griffis <aron@hp.com>
Signed-off-by: Simon Horman <horms@verge.net.au>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Tue Jul 22 12:15:02 2008 +0900 (2008-07-22)
parents 54060aec0dc1
children 7db30bf36b0e
line source
#ifndef _ASM_IA64_XENPAGE_H
#define _ASM_IA64_XENPAGE_H

#ifndef __ASSEMBLY__
/* Undo the generic definitions so the ia64-specific versions below win. */
#undef mfn_valid
#undef page_to_mfn
#undef mfn_to_page
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#undef ia64_mfn_valid
/* Non-zero when the virtual frame table actually maps an entry for pfn. */
extern int ia64_mfn_valid (unsigned long pfn);
/* With a virtual frame table, range check AND ensure the entry is mapped. */
# define mfn_valid(_pfn)	(((_pfn) < max_page) && ia64_mfn_valid(_pfn))
#else
/* Contiguous frame table: a simple range check suffices. */
# define mfn_valid(_pfn)	((_pfn) < max_page)
#endif
/* MFN <-> struct page_info conversion via frame_table indexing. */
# define page_to_mfn(_page)	((unsigned long) ((_page) - frame_table))
# define mfn_to_page(_pfn)	(frame_table + (_pfn))

#include <asm/xensystem.h>
21 static inline unsigned long __virt_to_maddr(unsigned long va)
22 {
23 if (va - KERNEL_START < xenheap_size)
24 return xen_pstart + (va - KERNEL_START);
25 else
26 return (va & ((1UL << 60) - 1));
27 }
/*
 * Virtual -> machine address.  NOTE: the argument is parenthesized so the
 * cast applies to the whole expression (e.g. virt_to_maddr(p + i)), not
 * just its first operand.
 */
#define virt_to_maddr(va)	(__virt_to_maddr((unsigned long)(va)))

#undef page_to_maddr
#undef virt_to_page
/* struct page_info <-> machine address. */
#define page_to_maddr(page)	(page_to_mfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr)	(mfn_to_page(virt_to_maddr(kaddr) >> PAGE_SHIFT))

#define page_to_virt(_page)	maddr_to_virt(page_to_maddr(_page))
#define maddr_to_page(kaddr)	mfn_to_page(((kaddr) >> PAGE_SHIFT))

/* Convert between Xen-heap virtual addresses and machine frame numbers.
 * The mfn argument is parenthesized so expressions using operators of
 * lower precedence than << (e.g. mfn_to_virt(x & mask)) expand correctly. */
#define virt_to_mfn(va)		(virt_to_maddr(va) >> PAGE_SHIFT)
#define mfn_to_virt(mfn)	maddr_to_virt((mfn) << PAGE_SHIFT)

/* Convert between frame number and address formats. */
#define pfn_to_paddr(pfn)	((paddr_t)(pfn) << PAGE_SHIFT)
#define paddr_to_pfn(pa)	((unsigned long)((pa) >> PAGE_SHIFT))
/*
 * Decomposition of a 64-bit ia64 virtual address: the top 4 bits select
 * the region, the low 60 bits are the offset within that region.  The
 * union lets the same value be viewed as fields, an integer, or a pointer.
 */
typedef union xen_va {
	struct {
		unsigned long off : 60;	/* offset within the region */
		unsigned long reg : 4;	/* region number (top nibble) */
	} f;
	unsigned long l;		/* whole address as an integer */
	void *p;			/* whole address as a pointer */
} xen_va;
57 static inline int get_order_from_bytes(paddr_t size)
58 {
59 int order;
60 size = (size-1) >> PAGE_SHIFT;
61 for ( order = 0; size; order++ )
62 size >>= 1;
63 return order;
64 }
/*
 * Smallest order such that (1 << order) >= nr_pages, i.e. the allocation
 * order needed to hold nr_pages pages.
 */
static inline int get_order_from_pages(unsigned long nr_pages)
{
	unsigned long span = nr_pages - 1;
	int order = 0;

	while ( span )
	{
		span >>= 1;
		order++;
	}

	return order;
}
75 static inline int get_order_from_shift(unsigned long shift)
76 {
77 if (shift <= PAGE_SHIFT)
78 return 0;
79 else
80 return shift - PAGE_SHIFT;
81 }
/* Convert an identity-mapped VA to the equivalent Xen VA.  The argument
 * is parenthesized so the cast applies to the whole expression, not just
 * its first operand. */
#define virt_to_xenva(va)	((unsigned long)(va) - PAGE_OFFSET - \
				 xen_pstart + KERNEL_START)
87 static inline u64 pa_clear_uc(u64 paddr)
88 {
89 return (paddr << 1) >> 1;
90 }
#undef __pa
#undef __va
/* Virtual -> machine address (heap vs identity handled by __virt_to_maddr). */
#define __pa(x) (virt_to_maddr(x))
/* Machine -> virtual address: set all 4 region bits (reg = -1) on the VA. */
#define __va(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})

/* It is sometimes very useful to have unsigned long as result. */
#define __va_ul(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;})

/* Map a physical EFI address at the Linux cached offset, so EFI mappings
 * stay in the same place across kexec between Xen and Linux. */
#define __va_efi(x) ((void*)((unsigned long)(x) | __IA64_EFI_CACHED_OFFSET))

#endif
#endif /* _ASM_IA64_XENPAGE_H */