
view xen/include/asm-x86/p2m.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 3d5f39c610ad
children 4633e9604da9
line source
/******************************************************************************
 * include/asm-x86/p2m.h
 *
 * physical-to-machine mappings for automatically-translated domains.
 *
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_P2M_H
#define _XEN_P2M_H

/* The phys_to_machine_mapping is the reverse of the MPT (the
 * machine-to-physical table) for fully virtualized guests. It is only
 * used by shadow_mode_translate()==true guests, so we steal the
 * address space that would otherwise have been used by the read-only
 * MPT map.
 */
#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)

/* Read the current domain's P2M table. */
static inline mfn_t gfn_to_mfn_current(unsigned long gfn)
{
    l1_pgentry_t l1e = l1e_empty();
    int ret;

    if ( gfn > current->domain->arch.p2m.max_mapped_pfn )
        return _mfn(INVALID_MFN);

    /* Don't read off the end of the p2m table */
    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t));

    ret = __copy_from_user(&l1e,
                           &phys_to_machine_mapping[gfn],
                           sizeof(l1e));

    if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
        return _mfn(l1e_get_pfn(l1e));

    return _mfn(INVALID_MFN);
}

/* Read another domain's P2M table, mapping pages as we go */
mfn_t gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);

/* General conversion function from gfn to mfn */
#define gfn_to_mfn(d, g) _gfn_to_mfn((d), (g))
static inline mfn_t _gfn_to_mfn(struct domain *d, unsigned long gfn)
{
    if ( !paging_mode_translate(d) )
        return _mfn(gfn);
    if ( likely(current->domain == d) )
        return gfn_to_mfn_current(gfn);
    else
        return gfn_to_mfn_foreign(d, gfn);
}
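
/* Editor's example (a sketch, not part of the original interface): a
 * hypothetical caller checking whether a guest frame is backed by RAM.
 * A gfn with no p2m entry comes back as INVALID_MFN and fails the
 * mfn_valid() test:
 *
 *     mfn_t mfn = gfn_to_mfn(d, gfn);
 *     if ( !mfn_valid(mfn_x(mfn)) )
 *         return -EINVAL;
 */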

/* General conversion function from mfn to gfn */
static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( paging_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}
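
/* Editor's note (sketch): for ordinary RAM in a translated domain the
 * two conversions are inverses of each other, so a hypothetical sanity
 * check could read:
 *
 *     ASSERT(mfn_to_gfn(d, gfn_to_mfn(d, gfn)) == gfn);
 */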

/* Compatibility function for HVM code */
static inline unsigned long get_mfn_from_gpfn(unsigned long pfn)
{
    return mfn_x(gfn_to_mfn_current(pfn));
}

/* Is this guest address an mmio one? (i.e. not defined in p2m map) */
static inline int mmio_space(paddr_t gpa)
{
    unsigned long gfn = gpa >> PAGE_SHIFT;
    return !mfn_valid(mfn_x(gfn_to_mfn_current(gfn)));
}
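
/* Editor's sketch: a hypothetical HVM access path might use this test
 * to decide whether to emulate the access or touch RAM (both handler
 * names below are hypothetical):
 *
 *     if ( mmio_space(gpa) )
 *         rc = handle_mmio_access(gpa);
 *     else
 *         rc = access_ram_page(gpa);
 */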

/* Translate the frame number held in an l1e from guest to machine */
static inline l1_pgentry_t
gl1e_to_ml1e(struct domain *d, l1_pgentry_t l1e)
{
    if ( unlikely(paging_mode_translate(d)) )
        l1e = l1e_from_pfn(gmfn_to_mfn(d, l1e_get_pfn(l1e)),
                           l1e_get_flags(l1e));
    return l1e;
}

/* Initialise the data structures for later use by the p2m code */
void p2m_init(struct domain *d);

/* Allocate a new p2m table for a domain.
 *
 * The alloc_page and free_page functions will be used to get memory to
 * build the p2m, and to release it again at the end of day.
 *
 * Returns 0 for success or -errno. */
int p2m_alloc_table(struct domain *d,
                    struct page_info * (*alloc_page)(struct domain *d),
                    void (*free_page)(struct domain *d, struct page_info *pg));
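
/* Editor's sketch of a caller wiring in allocator callbacks.  The
 * helper names are hypothetical, and the assumption that the pages may
 * come straight from the Xen domheap is the editor's; real callers use
 * their own page pools:
 *
 *     static struct page_info *my_alloc(struct domain *d)
 *     {
 *         return alloc_domheap_page(NULL);
 *     }
 *     static void my_free(struct domain *d, struct page_info *pg)
 *     {
 *         free_domheap_page(pg);
 *     }
 *
 *     rc = p2m_alloc_table(d, my_alloc, my_free);
 */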

/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);

/* Add a page to a domain's p2m table */
void guest_physmap_add_page(struct domain *d, unsigned long gfn,
                            unsigned long mfn);

/* Remove a page from a domain's p2m table */
void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                               unsigned long mfn);
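
/* Editor's sketch: a hypothetical sequence that moves a page to a new
 * guest address (locking and error handling omitted):
 *
 *     guest_physmap_remove_page(d, old_gfn, mfn);
 *     guest_physmap_add_page(d, new_gfn, mfn);
 */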

/* set P2M table l1e flags */
void p2m_set_flags_global(struct domain *d, u32 l1e_flags);

/* set P2M table l1e flags for a gpa */
int p2m_set_flags(struct domain *d, paddr_t gpa, u32 l1e_flags);
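
/* Editor's sketch: a hypothetical caller could make every p2m entry
 * read-only by omitting _PAGE_RW from the flags; whether this exact
 * flag combination is valid here is the editor's assumption:
 *
 *     p2m_set_flags_global(d, _PAGE_PRESENT | _PAGE_USER);
 */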

#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */