direct-io.hg

view linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/maddr.h @ 11509:2e6c10dc7c0b

[POWERPC][XEN] make sure put_domain() is called in case of allocate_rma() failure

Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author Jimi Xenidis <jimix@watson.ibm.com>
date Tue Sep 12 10:53:46 2006 -0400 (2006-09-12)
parents 9c953e1b6fad
children ade94aa072c5

#ifndef _I386_MADDR_H
#define _I386_MADDR_H

#include <xen/features.h>
#include <xen/interface/xen.h>

/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
#define INVALID_P2M_ENTRY (~0UL)
#define FOREIGN_FRAME_BIT (1UL<<31)
#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)

#ifdef CONFIG_XEN

extern unsigned long *phys_to_machine_mapping;

#undef machine_to_phys_mapping
extern unsigned long *machine_to_phys_mapping;
extern unsigned int machine_to_phys_order;

static inline unsigned long pfn_to_mfn(unsigned long pfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;
        return phys_to_machine_mapping[(unsigned int)(pfn)] &
                ~FOREIGN_FRAME_BIT;
}

static inline int phys_to_machine_mapping_valid(unsigned long pfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 1;
        return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
}

static inline unsigned long mfn_to_pfn(unsigned long mfn)
{
        extern unsigned long max_mapnr;
        unsigned long pfn;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;

        if (unlikely((mfn >> machine_to_phys_order) != 0))
                return max_mapnr;

        /* The array access can fail (e.g., device space beyond end of RAM). */
        asm (
                "1:     movl %1,%0\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     movl %2,%0\n"
                "       jmp  2b\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .align 4\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r" (pfn)
                : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) );

        return pfn;
}
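
/*
 * Illustrative sketch, not part of the original header: ignoring the fault
 * recovery that the __ex_table fixup above provides, mfn_to_pfn() behaves
 * like the plain C below. The asm form is needed because the m2p table
 * access itself may fault for machine frames Xen never mapped for us; the
 * fixup then substitutes max_mapnr, i.e. an invalid PFN. Hypothetical
 * helper name.
 */
static inline unsigned long mfn_to_pfn_no_fixup_sketch(unsigned long mfn)
{
        extern unsigned long max_mapnr;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;
        if (unlikely((mfn >> machine_to_phys_order) != 0))
                return max_mapnr;
        return machine_to_phys_mapping[mfn];    /* may fault; see above */
}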

/*
 * We detect special mappings in one of two ways:
 *  1. If the MFN is an I/O page then Xen will set the m2p entry
 *     to be outside our maximum possible pseudophys range.
 *  2. If the MFN belongs to a different domain then we will certainly
 *     not have MFN in our p2m table. Conversely, if the page is ours,
 *     then we'll have p2m(m2p(MFN))==MFN.
 * If we detect a special mapping then it doesn't have a 'struct page'.
 * We force !pfn_valid() by returning an out-of-range pointer.
 *
 * NB. These checks require that, for any MFN that is not in our reservation,
 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
 *
 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
 *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
 *      require. In all the cases we care about, the FOREIGN_FRAME bit is
 *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
 */
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
        extern unsigned long max_mapnr;
        unsigned long pfn = mfn_to_pfn(mfn);
        if ((pfn < max_mapnr)
            && !xen_feature(XENFEAT_auto_translated_physmap)
            && (phys_to_machine_mapping[pfn] != mfn))
                return max_mapnr; /* force !pfn_valid() */
        return pfn;
}
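
/*
 * Illustrative sketch, not part of the original header: a caller can use
 * mfn_to_local_pfn() to test whether an MFN is backed by one of our own
 * struct pages. An out-of-range result means an I/O or foreign page, for
 * which pfn_valid() is false. Hypothetical helper name.
 */
static inline int mfn_is_local_sketch(unsigned long mfn)
{
        extern unsigned long max_mapnr;

        return mfn_to_local_pfn(mfn) < max_mapnr;
}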

static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        if (xen_feature(XENFEAT_auto_translated_physmap)) {
                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
                return;
        }
        phys_to_machine_mapping[pfn] = mfn;
}
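
/*
 * Illustrative sketch, not part of the original header: per the NB2 note
 * above, an MFN belonging to another domain must be entered into the p2m
 * table with the FOREIGN_FRAME bit set, so that later translations do not
 * mistake it for one of our own frames. Hypothetical helper name.
 */
static inline void set_phys_to_foreign_sketch(unsigned long pfn,
                                              unsigned long foreign_mfn)
{
        set_phys_to_machine(pfn, FOREIGN_FRAME(foreign_mfn));
}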

#else /* !CONFIG_XEN */

#define pfn_to_mfn(pfn) (pfn)
#define mfn_to_pfn(mfn) (mfn)
#define mfn_to_local_pfn(mfn) (mfn)
#define set_phys_to_machine(pfn, mfn) BUG_ON((pfn) != (mfn))
#define phys_to_machine_mapping_valid(pfn) (1)

#endif /* !CONFIG_XEN */

/* Definitions for machine and pseudophysical addresses. */
#ifdef CONFIG_X86_PAE
typedef unsigned long long paddr_t;
typedef unsigned long long maddr_t;
#else
typedef unsigned long paddr_t;
typedef unsigned long maddr_t;
#endif

static inline maddr_t phys_to_machine(paddr_t phys)
{
        maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
        machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
        return machine;
}
static inline paddr_t machine_to_phys(maddr_t machine)
{
        paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
        phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
        return phys;
}
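
/*
 * Illustrative sketch, not part of the original header: only the frame
 * number is translated; the offset within the page is preserved. For
 * example, with PAGE_SHIFT == 12 and a hypothetical p2m entry
 * pfn_to_mfn(0x12345) == 0xabcde, phys_to_machine(0x12345678) yields
 * 0xabcde678, and for a frame in our own reservation machine_to_phys()
 * inverts it again, as the hypothetical helper below demonstrates.
 */
static inline paddr_t phys_round_trip_sketch(paddr_t phys)
{
        /* Identity for addresses whose frame lies in our own reservation. */
        return machine_to_phys(phys_to_machine(phys));
}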

/* VIRT <-> MACHINE conversion */
#define virt_to_machine(v)      (phys_to_machine(__pa(v)))
#define virt_to_mfn(v)          (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
#define mfn_to_virt(m)          (__va(mfn_to_pfn(m) << PAGE_SHIFT))
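
/*
 * Illustrative sketch, not part of the original header: virt_to_mfn() gives
 * the machine frame number of a kernel virtual address, while
 * virt_to_machine() gives the full machine address including the offset
 * within the page, which is the form maddr_t consumers expect. Hypothetical
 * helper name.
 */
static inline maddr_t example_buffer_maddr(void *buf)
{
        /* Same as ((maddr_t)virt_to_mfn(buf) << PAGE_SHIFT) plus the
         * offset of 'buf' within its page. */
        return virt_to_machine(buf);
}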

#ifdef CONFIG_X86_PAE
static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
{
        pte_t pte;

        pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
                        (pgprot_val(pgprot) >> 32);
        pte.pte_high &= (__supported_pte_mask >> 32);
        pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
                        __supported_pte_mask;
        return pte;
}
#else
#define pfn_pte_ma(pfn, prot)   __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif

#define __pte_ma(x)     ((pte_t) { (x) } )
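
/*
 * Illustrative sketch, not part of the original header: pfn_pte_ma() and
 * __pte_ma() build a PTE from a machine frame number directly, with no p2m
 * lookup, which is what is needed when a frame is known only by its MFN
 * (e.g. a frame granted by another domain). Hypothetical helper name.
 */
static inline pte_t example_pte_for_mfn(unsigned long mfn, pgprot_t prot)
{
        return pfn_pte_ma(mfn, prot);
}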

#endif /* _I386_MADDR_H */