ia64/xen-unstable

view xen/include/asm-x86/p2m.h @ 15310:3d5f39c610ad

[XEN] Make common log-dirty paging code and add HAP log-dirty support.
Signed-off-by: Wei Huang <wei.huang2@amd.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Mon Jun 11 14:35:52 2007 +0100 (2007-06-11)
parents 6746873997b5
children 86a154e1ef5d

/******************************************************************************
 * include/asm-x86/p2m.h
 *
 * physical-to-machine mappings for automatically-translated domains.
 *
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_P2M_H
#define _XEN_P2M_H

/* The phys_to_machine_mapping is the reversed mapping of MPT for full
 * virtualization.  It is only used by shadow_mode_translate()==true
 * guests, so we steal the address space that would have normally
 * been used by the read-only MPT map.
 */
#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)

/* Read the current domain's P2M table. */
static inline mfn_t gfn_to_mfn_current(unsigned long gfn)
{
    l1_pgentry_t l1e = l1e_empty();
    int ret;

    if ( gfn > current->domain->arch.p2m.max_mapped_pfn )
        return _mfn(INVALID_MFN);

    /* Don't read off the end of the p2m table */
    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t));

    ret = __copy_from_user(&l1e,
                           &phys_to_machine_mapping[gfn],
                           sizeof(l1e));

    if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
        return _mfn(l1e_get_pfn(l1e));

    return _mfn(INVALID_MFN);
}

/* Read another domain's P2M table, mapping pages as we go */
mfn_t gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);

/* General conversion function from gfn to mfn */
static inline mfn_t gfn_to_mfn(struct domain *d, unsigned long gfn)
{
    if ( !paging_mode_translate(d) )
        return _mfn(gfn);
    if ( likely(current->domain == d) )
        return gfn_to_mfn_current(gfn);
    else
        return gfn_to_mfn_foreign(d, gfn);
}
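
/* Illustrative example (not part of the original header): a typical caller
 * looks up a gfn and checks the result with mfn_valid() before using it.
 * The surrounding control flow and error handling here are hypothetical:
 *
 *     mfn_t mfn = gfn_to_mfn(d, gfn);
 *     if ( !mfn_valid(mfn_x(mfn)) )
 *         return -EINVAL;              // gfn has no RAM behind it
 *     ptr = map_domain_page(mfn_x(mfn));
 *     // ... access the frame's contents through ptr ...
 *     unmap_domain_page(ptr);
 */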

/* General conversion function from mfn to gfn */
static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( paging_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}

/* Compatibility function for HVM code */
static inline unsigned long get_mfn_from_gpfn(unsigned long pfn)
{
    return mfn_x(gfn_to_mfn_current(pfn));
}

/* Is this guest address an mmio one? (i.e. not defined in p2m map) */
static inline int mmio_space(paddr_t gpa)
{
    unsigned long gfn = gpa >> PAGE_SHIFT;
    return !mfn_valid(mfn_x(gfn_to_mfn_current(gfn)));
}
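
/* Illustrative example (not part of the original header): HVM fault-handling
 * paths can use mmio_space() to decide whether a guest-physical access
 * should be forwarded to device emulation instead of being treated as RAM.
 * The helper names below are hypothetical:
 *
 *     if ( mmio_space(gpa) )
 *         example_emulate_mmio(gpa);   // hypothetical: device-model path
 *     else
 *         example_access_ram(gpa);     // hypothetical: ordinary RAM path
 */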

/* Translate the frame number held in an l1e from guest to machine */
static inline l1_pgentry_t
gl1e_to_ml1e(struct domain *d, l1_pgentry_t l1e)
{
    if ( unlikely(paging_mode_translate(d)) )
        l1e = l1e_from_pfn(gmfn_to_mfn(d, l1e_get_pfn(l1e)),
                           l1e_get_flags(l1e));
    return l1e;
}

/* Init the data structures for later use by the p2m code */
void p2m_init(struct domain *d);

/* Allocate a new p2m table for a domain.
 *
 * The alloc_page and free_page functions will be used to get memory to
 * build the p2m, and to release it again at the end of the day.
 *
 * Returns 0 for success or -errno. */
int p2m_alloc_table(struct domain *d,
                    struct page_info * (*alloc_page)(struct domain *d),
                    void (*free_page)(struct domain *d, struct page_info *pg));
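
/* Illustrative example (not part of the original header): the shape of the
 * allocator pair a caller might pass to p2m_alloc_table().  The function
 * names are hypothetical; real callers draw these pages from their own
 * shadow/HAP memory pools rather than straight from the domheap:
 *
 *     static struct page_info *example_alloc_p2m_page(struct domain *d)
 *     {
 *         return alloc_domheap_page(NULL);
 *     }
 *
 *     static void example_free_p2m_page(struct domain *d,
 *                                       struct page_info *pg)
 *     {
 *         free_domheap_page(pg);
 *     }
 *
 *     rc = p2m_alloc_table(d, example_alloc_p2m_page, example_free_p2m_page);
 */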

/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);

/* Add a page to a domain's p2m table */
void guest_physmap_add_page(struct domain *d, unsigned long gfn,
                            unsigned long mfn);

/* Remove a page from a domain's p2m table */
void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                               unsigned long mfn);
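
/* Illustrative example (not part of the original header): a domain-building
 * loop that enters each freshly allocated machine page into the p2m at the
 * next guest frame number.  The loop variables are hypothetical and error
 * handling is omitted:
 *
 *     for ( gfn = 0; gfn < nr_pages; gfn++ )
 *     {
 *         pg = alloc_domheap_page(d);
 *         guest_physmap_add_page(d, gfn, page_to_mfn(pg));
 *     }
 */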

/* Set P2M table l1e flags */
void p2m_set_flags_global(struct domain *d, u32 l1e_flags);

/* Set P2M table l1e flags for a gpa */
int p2m_set_flags(struct domain *d, paddr_t gpa, u32 l1e_flags);
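
/* Illustrative example (not part of the original header): how the log-dirty
 * support added by this changeset can use these two calls.  Removing write
 * access from every p2m entry makes guest writes fault; the fault handler
 * logs the dirty frame and then restores write access to just that page.
 * The exact flag values shown are illustrative:
 *
 *     // enable log-dirty mode: make all guest memory read-only
 *     p2m_set_flags_global(d, _PAGE_PRESENT | _PAGE_USER);
 *
 *     // in the write-fault handler, after marking gpa's frame dirty:
 *     p2m_set_flags(d, gpa, __PAGE_HYPERVISOR | _PAGE_USER);
 */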

#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */