* at the same time, which means that to guarantee progress, we must
* allow for more than ninety allocated pages per vcpu. We round that
* up to 128 pages, or half a megabyte per vcpu. */
-unsigned int shadow_min_acceptable_pages(struct domain *d)
+static unsigned int shadow_min_acceptable_pages(struct domain *d)
{
u32 vcpu_count = 0;
struct vcpu *v;
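For reference, the figure in the comment above works out as follows: with 4 KiB pages, 128 pages per vcpu is 512 KiB, i.e. half a megabyte, and the per-domain minimum scales linearly with the vcpu count. A minimal standalone sketch of that arithmetic (the page size, vcpu count and function name here are illustrative assumptions, not taken from the patch):

    #include <stdio.h>

    #define PAGE_SIZE          4096u  /* assumed 4 KiB pages */
    #define MIN_PAGES_PER_VCPU 128u   /* the rounded-up figure from the comment */

    /* Illustrative only: the minimum acceptable shadow pool size for a
     * domain with vcpu_count vcpus, as described in the comment above. */
    static unsigned int min_acceptable_pages(unsigned int vcpu_count)
    {
        return vcpu_count * MIN_PAGES_PER_VCPU;
    }

    int main(void)
    {
        unsigned int vcpus = 4;  /* hypothetical domain with 4 vcpus */
        unsigned int pages = min_acceptable_pages(vcpus);
        printf("%u vcpus -> %u pages (%u KiB)\n",
               vcpus, pages, pages * PAGE_SIZE / 1024);
        return 0;
    }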
/* Dispatcher function: call the per-mode function that will unhook the
* non-Xen mappings in this top-level shadow mfn */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
+static void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn)
{
struct shadow_page_info *sp = mfn_to_shadow_page(smfn);
switch ( sp->type )
}
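The dispatcher above selects an unhook routine based on the shadow page's recorded type. A rough sketch of the shape of that switch (the type constants and per-level callees are hypothetical stand-ins, not Xen's real identifiers):

    /* Hypothetical sketch of the type-based dispatch: pick the per-mode
     * unhook routine from the top-level shadow's type field. */
    switch ( sp->type )
    {
    case SHADOW_TYPE_L2: unhook_l2_mappings(v, smfn); break; /* 2-level guests */
    case SHADOW_TYPE_L3: unhook_l3_mappings(v, smfn); break; /* 3-level guests */
    case SHADOW_TYPE_L4: unhook_l4_mappings(v, smfn); break; /* 4-level guests */
    default:
        BUG();  /* not a top-level shadow type */
    }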
// Returns 0 if no memory is available...
-struct page_info *
+static struct page_info *
shadow_alloc_p2m_page(struct domain *d)
{
struct list_head *entry;
return pg;
}
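As the comment above notes, shadow_alloc_p2m_page() yields 0 when no memory is available, so callers must test the result before touching the page. A hedged sketch of that calling pattern (the surrounding caller and its error path are hypothetical):

    /* Hypothetical caller: grab a page for the p2m and back off cleanly
     * if the shadow pool is exhausted. */
    struct page_info *pg = shadow_alloc_p2m_page(d);
    if ( pg == NULL )
        return 0;                       /* propagate the allocation failure */
    /* ... use pg; release it later with shadow_free_p2m_page(d, pg) ... */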
-void
+static void
shadow_free_p2m_page(struct domain *d, struct page_info *pg)
{
ASSERT(page_get_owner(pg) == d);
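The header hunks that follow remove the prototypes of the functions made static above. Leaving them in place would not merely be untidy: a file-scope declaration with external linkage followed by a static definition of the same function is rejected by GCC. A minimal standalone illustration of that C rule (the function name is made up, and the snippet is expected to fail to compile):

    /* Illustrative only -- deliberately does not compile. */
    void helper(void);        /* extern-linkage prototype, as a stale header
                               * declaration would supply */

    static void helper(void)  /* internal-linkage definition: GCC reports
                               * "static declaration of 'helper' follows
                               *  non-static declaration" */
    {
    }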
unsigned long backpointer);
void shadow_free(struct domain *d, mfn_t smfn);
-/* Dispatcher function: call the per-mode function that will unhook the
- * non-Xen mappings in this top-level shadow mfn */
-void shadow_unhook_mappings(struct vcpu *v, mfn_t smfn);
-
/* Install the xen mappings in various flavours of shadow */
void sh_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn);
void sh_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn);
unsigned int level,
unsigned long fault_addr);
-/* Allocate/free functions for passing to the P2M code. */
-struct page_info *shadow_alloc_p2m_page(struct domain *d);
-void shadow_free_p2m_page(struct domain *d, struct page_info *pg);
-
/* Functions that atomically write PT/P2M entries and update state */
void shadow_write_p2m_entry(struct vcpu *v, unsigned long gfn,
l1_pgentry_t *p, mfn_t table_mfn,