ia64/xen-unstable
changeset 16091:19a843def5fd
More static shadow functions.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
| author | Keir Fraser <keir@xensource.com> |
|---|---|
| date | Thu Oct 11 10:21:08 2007 +0100 (2007-10-11) |
| parents | 8d51b80fcb6f |
| children | 49323c8b8633 |
| files | xen/arch/x86/mm/shadow/common.c xen/arch/x86/mm/shadow/private.h |
line diff
```diff
--- a/xen/arch/x86/mm/shadow/common.c	Thu Oct 11 10:20:45 2007 +0100
+++ b/xen/arch/x86/mm/shadow/common.c	Thu Oct 11 10:21:08 2007 +0100
@@ -667,7 +667,7 @@ int shadow_cmpxchg_guest_entry(struct vc
  * at the same time, which means that to guarantee progress, we must
  * allow for more than ninety allocated pages per vcpu. We round that
  * up to 128 pages, or half a megabyte per vcpu. */
-unsigned int shadow_min_acceptable_pages(struct domain *d)
+static unsigned int shadow_min_acceptable_pages(struct domain *d)
 {
     u32 vcpu_count = 0;
     struct vcpu *v;
@@ -722,7 +722,7 @@ static inline int chunk_is_available(str
 
 /* Dispatcher function: call the per-mode function that will unhook the
  * non-Xen mappings in this top-level shadow mfn */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
+static void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
 {
     struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
     switch ( sp->type )
@@ -1062,7 +1062,7 @@ sh_alloc_p2m_pages(struct domain *d)
 }
 
 // Returns 0 if no memory is available...
-struct page_info *
+static struct page_info *
 shadow_alloc_p2m_page(struct domain *d)
 {
     struct list_head *entry;
@@ -1092,7 +1092,7 @@ shadow_alloc_p2m_page(struct domain *d)
     return pg;
 }
 
-void
+static void
 shadow_free_p2m_page(struct domain *d, struct page_info *pg)
 {
     ASSERT(page_get_owner(pg) == d);
```
```diff
--- a/xen/arch/x86/mm/shadow/private.h	Thu Oct 11 10:20:45 2007 +0100
+++ b/xen/arch/x86/mm/shadow/private.h	Thu Oct 11 10:21:08 2007 +0100
@@ -360,10 +360,6 @@ mfn_t shadow_alloc(struct domain *d,
                     unsigned long backpointer);
 void shadow_free(struct domain *d, mfn_t smfn);
 
-/* Dispatcher function: call the per-mode function that will unhook the
- * non-Xen mappings in this top-level shadow mfn */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn);
-
 /* Install the xen mappings in various flavours of shadow */
 void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);
 void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
@@ -383,10 +379,6 @@ extern int sh_remove_write_access(struct
                                   unsigned int level,
                                   unsigned long fault_addr);
 
-/* Allocate/free functions for passing to the P2M code. */
-struct page_info *shadow_alloc_p2m_page(struct domain *d);
-void shadow_free_p2m_page(struct domain *d, struct page_info *pg);
-
 /* Functions that atomically write PT/P2M entries and update state */
 void shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,
                             l1_pgentry_t *p, mfn_t table_mfn,
```
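For readers less familiar with the idiom, the sketch below (hypothetical file and function names, not code from this changeset) shows what the two halves of the diff accomplish together: the definition in the .c file gains internal linkage via `static`, and the matching prototype is dropped from the shared header so no other translation unit can keep calling it.

```c
/*
 * Sketch only -- hypothetical names, not code from this changeset.
 * Pattern: a helper called from a single translation unit loses its
 * prototype in the shared header (say, util.h) and becomes static.
 */
#include <stddef.h>

/* Previously declared in util.h; the declaration is removed in the same
 * change that adds `static', so only this file can reference it. */
static size_t pages_to_bytes(size_t pages)
{
    return pages * 4096;   /* assume 4 KiB pages for this sketch */
}

/* Still declared in util.h; the only remaining caller of the helper. */
size_t shadow_pool_bytes(size_t vcpus)
{
    return pages_to_bytes(vcpus * 128);
}
```

With internal linkage the compiler can warn about an unused helper and inline it more freely, and any out-of-file caller that the header previously permitted now fails at build time instead of silently depending on the exported symbol, which is why the declarations are removed from private.h in the same change.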