ia64/xen-unstable

annotate xen/include/asm-x86/p2m.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 3d5f39c610ad
children 4633e9604da9
rev   line source
Tim@13909 1 /******************************************************************************
Tim@13909 2 * include/asm-x86/p2m.h
Tim@13909 3 *
Tim@13909 4 * physical-to-machine mappings for automatically-translated domains.
Tim@13909 5 *
Tim@13909 6 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
Tim@13909 7 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
Tim@13909 8 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
Tim@13909 9 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
Tim@13909 10 *
Tim@13909 11 * This program is free software; you can redistribute it and/or modify
Tim@13909 12 * it under the terms of the GNU General Public License as published by
Tim@13909 13 * the Free Software Foundation; either version 2 of the License, or
Tim@13909 14 * (at your option) any later version.
Tim@13909 15 *
Tim@13909 16 * This program is distributed in the hope that it will be useful,
Tim@13909 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
Tim@13909 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Tim@13909 19 * GNU General Public License for more details.
Tim@13909 20 *
Tim@13909 21 * You should have received a copy of the GNU General Public License
Tim@13909 22 * along with this program; if not, write to the Free Software
Tim@13909 23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Tim@13909 24 */
Tim@13909 25
Tim@13909 26 #ifndef _XEN_P2M_H
Tim@13909 27 #define _XEN_P2M_H
Tim@13909 28
Tim@13909 29
Tim@13909 30 /* The phys_to_machine_mapping is the reversed mapping of MPT for full
Tim@13909 31 * virtualization. It is only used by shadow_mode_translate()==true
Tim@13909 32 * guests, so we steal the address space that would have normally
Tim@13909 33 * been used by the read-only MPT map.
Tim@13909 34 */
Tim@13909 35 #define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
Tim@13909 36
Tim@13909 37
Tim@13909 38 /* Read the current domain's P2M table. */
Tim@13909 39 static inline mfn_t gfn_to_mfn_current(unsigned long gfn)
Tim@13909 40 {
Tim@13909 41 l1_pgentry_t l1e = l1e_empty();
Tim@13909 42 int ret;
Tim@13909 43
Tim@13909 44 if ( gfn > current->domain->arch.p2m.max_mapped_pfn )
Tim@13909 45 return _mfn(INVALID_MFN);
Tim@13909 46
Tim@13909 47 /* Don't read off the end of the p2m table */
Tim@13909 48 ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) / sizeof(l1_pgentry_t));
Tim@13909 49
Tim@13909 50 ret = __copy_from_user(&l1e,
Tim@13909 51 &phys_to_machine_mapping[gfn],
Tim@13909 52 sizeof(l1e));
Tim@13909 53
Tim@13909 54 if ( (ret == 0) && (l1e_get_flags(l1e) & _PAGE_PRESENT) )
Tim@13909 55 return _mfn(l1e_get_pfn(l1e));
Tim@13909 56
Tim@13909 57 return _mfn(INVALID_MFN);
Tim@13909 58 }
Tim@13909 59
Tim@13909 60 /* Read another domain's P2M table, mapping pages as we go */
Tim@13909 61 mfn_t gfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
Tim@13909 62
Tim@13909 63 /* General conversion function from gfn to mfn */
Tim@15812 64 #define gfn_to_mfn(d, g) _gfn_to_mfn((d), (g))
Tim@15812 65 static inline mfn_t _gfn_to_mfn(struct domain *d, unsigned long gfn)
Tim@13909 66 {
Tim@13909 67 if ( !paging_mode_translate(d) )
Tim@13909 68 return _mfn(gfn);
Tim@13909 69 if ( likely(current->domain == d) )
Tim@13909 70 return gfn_to_mfn_current(gfn);
Tim@13909 71 else
Tim@13909 72 return gfn_to_mfn_foreign(d, gfn);
Tim@13909 73 }
Tim@13909 74
Tim@13909 75 /* General conversion function from mfn to gfn */
Tim@13909 76 static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
Tim@13909 77 {
Tim@13909 78 if ( paging_mode_translate(d) )
Tim@13909 79 return get_gpfn_from_mfn(mfn_x(mfn));
Tim@13909 80 else
Tim@13909 81 return mfn_x(mfn);
Tim@13909 82 }
Tim@13909 83
Tim@13909 84 /* Compatibility function for HVM code */
Tim@13909 85 static inline unsigned long get_mfn_from_gpfn(unsigned long pfn)
Tim@13909 86 {
Tim@13909 87 return mfn_x(gfn_to_mfn_current(pfn));
Tim@13909 88 }
Tim@13909 89
Tim@13909 90 /* Is this guest address an mmio one? (i.e. not defined in p2m map) */
Tim@13909 91 static inline int mmio_space(paddr_t gpa)
Tim@13909 92 {
Tim@14029 93 unsigned long gfn = gpa >> PAGE_SHIFT;
Tim@13909 94 return !mfn_valid(mfn_x(gfn_to_mfn_current(gfn)));
Tim@13909 95 }
Tim@13909 96
Tim@13909 97 /* Translate the frame number held in an l1e from guest to machine */
Tim@13909 98 static inline l1_pgentry_t
Tim@13909 99 gl1e_to_ml1e(struct domain *d, l1_pgentry_t l1e)
Tim@13909 100 {
Tim@13909 101 if ( unlikely(paging_mode_translate(d)) )
Tim@13909 102 l1e = l1e_from_pfn(gmfn_to_mfn(d, l1e_get_pfn(l1e)),
Tim@13909 103 l1e_get_flags(l1e));
Tim@13909 104 return l1e;
Tim@13909 105 }
Tim@13909 106
Tim@13909 107
Tim@13909 108
Tim@13909 109 /* Init the datastructures for later use by the p2m code */
Tim@13909 110 void p2m_init(struct domain *d);
Tim@13909 111
Tim@13909 112 /* Allocate a new p2m table for a domain.
Tim@13909 113 *
Tim@13909 114 * The alloc_page and free_page functions will be used to get memory to
Tim@13909 115 * build the p2m, and to release it again at the end of day.
Tim@13909 116 *
Tim@13909 117 * Returns 0 for success or -errno. */
Tim@13909 118 int p2m_alloc_table(struct domain *d,
Tim@13909 119 struct page_info * (*alloc_page)(struct domain *d),
Tim@13909 120 void (*free_page)(struct domain *d, struct page_info *pg));
Tim@13909 121
Tim@13909 122 /* Return all the p2m resources to Xen. */
Tim@13909 123 void p2m_teardown(struct domain *d);
Tim@13909 124
Tim@13909 125 /* Add a page to a domain's p2m table */
Tim@13909 126 void guest_physmap_add_page(struct domain *d, unsigned long gfn,
Tim@13909 127 unsigned long mfn);
Tim@13909 128
Tim@13909 129 /* Remove a page from a domain's p2m table */
Tim@13909 130 void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
Tim@13909 131 unsigned long mfn);
Tim@13909 132
Tim@15310 133 /* set P2M table l1e flags */
Tim@15310 134 void p2m_set_flags_global(struct domain *d, u32 l1e_flags);
Tim@15310 135
Tim@15310 136 /* set P2M table l1e flags for a gpa */
Tim@15310 137 int p2m_set_flags(struct domain *d, paddr_t gpa, u32 l1e_flags);
Tim@13909 138
Tim@13909 139 #endif /* _XEN_P2M_H */
Tim@13909 140
Tim@13909 141 /*
Tim@13909 142 * Local variables:
Tim@13909 143 * mode: C
Tim@13909 144 * c-set-style: "BSD"
Tim@13909 145 * c-basic-offset: 4
Tim@13909 146 * indent-tabs-mode: nil
Tim@13909 147 * End:
Tim@13909 148 */