* Parts of this code are Copyright (c) 2006 by XenSource Inc.
* Parts of this code are Copyright (c) 2006 by Michael A Fetterman
* Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
v, addr, p_data, bytes, sh_ctxt);
}
-static int
+static int
hvm_emulate_cmpxchg(enum x86_segment seg,
unsigned long offset,
void *p_old,
v, offset, p_data, bytes, sh_ctxt);
}
-static int
+static int
pv_emulate_cmpxchg(enum x86_segment seg,
unsigned long offset,
void *p_old,
return &hvm_shadow_emulator_ops;
}
-/* Update an initialized emulation context to prepare for the next
+/* Update an initialized emulation context to prepare for the next
* instruction */
-void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
+void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
struct cpu_user_regs *regs)
{
struct vcpu *v = current;
}
}
}
-
+
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/**************************************************************************/
-/* Out-of-sync shadows. */
+/* Out-of-sync shadows. */
-/* From time to time, we let a shadowed pagetable page go out of sync
- * with its shadow: the guest is allowed to write directly to the page,
+/* From time to time, we let a shadowed pagetable page go out of sync
+ * with its shadow: the guest is allowed to write directly to the page,
* and those writes are not synchronously reflected in the shadow.
- * This lets us avoid many emulations if the guest is writing a lot to a
- * pagetable, but it relaxes a pretty important invariant in the shadow
+ * This lets us avoid many emulations if the guest is writing a lot to a
+ * pagetable, but it relaxes a pretty important invariant in the shadow
* pagetable design. Therefore, some rules:
*
* 1. Only L1 pagetables may go out of sync: any page that is shadowed
* using linear shadow pagetables much less dangerous.
* That means that: (a) unsyncing code needs to check for higher-level
* shadows, and (b) promotion code needs to resync.
- *
+ *
* 2. All shadow operations on a guest page require the page to be brought
* back into sync before proceeding. This must be done under the
* paging lock so that the page is guaranteed to remain synced until
* the operation completes.
*
- * Exceptions to this rule: the pagefault and invlpg handlers may
- * update only one entry on an out-of-sync page without resyncing it.
+ * Exceptions to this rule: the pagefault and invlpg handlers may
+ * update only one entry on an out-of-sync page without resyncing it.
*
* 3. Operations on shadows that do not start from a guest page need to
* be aware that they may be handling an out-of-sync shadow.
*
- * 4. Operations that do not normally take the paging lock (fast-path
- * #PF handler, INVLPG) must fall back to a locking, syncing version
- * if they see an out-of-sync table.
+ * 4. Operations that do not normally take the paging lock (fast-path
+ * #PF handler, INVLPG) must fall back to a locking, syncing version
+ * if they see an out-of-sync table.
*
* 5. Operations corresponding to guest TLB flushes (MOV CR3, INVLPG)
* must explicitly resync all relevant pages or update their
#if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL
-static void sh_oos_audit(struct domain *d)
+static void sh_oos_audit(struct domain *d)
{
int idx, expected_idx, expected_idx_alt;
struct page_info *pg;
struct vcpu *v;
-
- for_each_vcpu(d, v)
+
+ for_each_vcpu(d, v)
{
for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
{
mfn_t *oos = v->arch.paging.shadow.oos;
if ( !mfn_valid(oos[idx]) )
continue;
-
+
expected_idx = mfn_x(oos[idx]) % SHADOW_OOS_PAGES;
expected_idx_alt = ((expected_idx + 1) % SHADOW_OOS_PAGES);
if ( idx != expected_idx && idx != expected_idx_alt )
{
printk("%s: idx %d contains gmfn %lx, expected at %d or %d.\n",
- __func__, idx, mfn_x(oos[idx]),
+ __func__, idx, mfn_x(oos[idx]),
expected_idx, expected_idx_alt);
BUG();
}
#endif
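/* A minimal sketch of rule 2 in the comment block above (not part of this
 * patch): before any shadow operation touches a gmfn, the caller, already
 * holding the paging lock, brings the page back into sync, exactly as the
 * promotion path later in this file does:
 *
 *     ASSERT(paging_locked_by_me(v->domain));
 *     if ( page_is_out_of_sync(page) )
 *         sh_resync(v, gmfn);
 */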
#if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
-void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn)
+void oos_audit_hash_is_present(struct domain *d, mfn_t gmfn)
{
int idx;
struct vcpu *v;
mfn_t *oos;
ASSERT(mfn_is_out_of_sync(gmfn));
-
- for_each_vcpu(d, v)
+
+ for_each_vcpu(d, v)
{
oos = v->arch.paging.shadow.oos;
idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
idx = (idx + 1) % SHADOW_OOS_PAGES;
-
+
if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
return;
}
if ( mfn_x(fixup->smfn[i]) != INVALID_MFN )
{
sh_remove_write_access_from_sl1p(v, gmfn,
- fixup->smfn[i],
+ fixup->smfn[i],
fixup->off[i]);
fixup->smfn[i] = _mfn(INVALID_MFN);
}
struct domain *d = v->domain;
perfc_incr(shadow_oos_fixup_add);
-
- for_each_vcpu(d, v)
+
+ for_each_vcpu(d, v)
{
oos = v->arch.paging.shadow.oos;
oos_fixup = v->arch.paging.shadow.oos_fixup;
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_OOS_FIXUP_EVICT);
/* Reuse this slot and remove current writable mapping. */
- sh_remove_write_access_from_sl1p(v, gmfn,
+ sh_remove_write_access_from_sl1p(v, gmfn,
oos_fixup[idx].smfn[next],
oos_fixup[idx].off[next]);
perfc_incr(shadow_oos_fixup_evict);
case -1:
/* An unfindable writeable typecount has appeared, probably via a
- * grant table entry: can't shoot the mapping, so try to unshadow
+ * grant table entry: can't shoot the mapping, so try to unshadow
* the page. If that doesn't work either, the guest is granting
* its pagetables and must be killed after all.
* This will flush the tlb, so we can return with no worries. */
ASSERT(paging_locked_by_me(v->domain));
ASSERT(mfn_is_out_of_sync(gmfn));
/* Guest page must be shadowed *only* as L1 when out of sync. */
- ASSERT(!(mfn_to_page(gmfn)->shadow_flags & SHF_page_type_mask
+ ASSERT(!(mfn_to_page(gmfn)->shadow_flags & SHF_page_type_mask
& ~SHF_L1_ANY));
ASSERT(!sh_page_has_multiple_shadows(mfn_to_page(gmfn)));
mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
struct oos_fixup *oos_fixup = v->arch.paging.shadow.oos_fixup;
struct oos_fixup fixup = { .next = 0 };
-
+
for (i = 0; i < SHADOW_OOS_FIXUPS; i++ )
fixup.smfn[i] = _mfn(INVALID_MFN);
idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
oidx = idx;
- if ( mfn_valid(oos[idx])
+ if ( mfn_valid(oos[idx])
&& (mfn_x(oos[idx]) % SHADOW_OOS_PAGES) == idx )
{
/* Punt the current occupant into the next slot */
SHADOW_PRINTK("%pv gmfn %lx\n", v, mfn_x(gmfn));
- for_each_vcpu(d, v)
+ for_each_vcpu(d, v)
{
oos = v->arch.paging.shadow.oos;
idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
mfn_t *oos;
mfn_t *oos_snapshot;
struct domain *d = v->domain;
-
- for_each_vcpu(d, v)
+
+ for_each_vcpu(d, v)
{
oos = v->arch.paging.shadow.oos;
oos_snapshot = v->arch.paging.shadow.oos_snapshot;
struct oos_fixup *oos_fixup;
struct domain *d = v->domain;
- for_each_vcpu(d, v)
+ for_each_vcpu(d, v)
{
oos = v->arch.paging.shadow.oos;
oos_fixup = v->arch.paging.shadow.oos_fixup;
idx = mfn_x(gmfn) % SHADOW_OOS_PAGES;
if ( mfn_x(oos[idx]) != mfn_x(gmfn) )
idx = (idx + 1) % SHADOW_OOS_PAGES;
-
+
if ( mfn_x(oos[idx]) == mfn_x(gmfn) )
{
_sh_resync(v, gmfn, &oos_fixup[idx], oos_snapshot[idx]);
goto resync_others;
/* First: resync all of this vcpu's oos pages */
- for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
+ for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
if ( mfn_valid(oos[idx]) )
{
/* Write-protect and sync contents */
/* Second: make all *other* vcpus' oos pages safe. */
for_each_vcpu(v->domain, other)
{
- if ( v == other )
+ if ( v == other )
continue;
oos = other->arch.paging.shadow.oos;
oos_fixup = other->arch.paging.shadow.oos_fixup;
oos_snapshot = other->arch.paging.shadow.oos_snapshot;
- for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
+ for ( idx = 0; idx < SHADOW_OOS_PAGES; idx++ )
{
if ( !mfn_valid(oos[idx]) )
continue;
_sh_resync(other, oos[idx], &oos_fixup[idx], oos_snapshot[idx]);
oos[idx] = _mfn(INVALID_MFN);
}
- }
+ }
}
}
int sh_unsync(struct vcpu *v, mfn_t gmfn)
{
struct page_info *pg;
-
+
ASSERT(paging_locked_by_me(v->domain));
SHADOW_PRINTK("d=%d, v=%d, gmfn=%05lx\n",
v->domain->domain_id, v->vcpu_id, mfn_x(gmfn));
pg = mfn_to_page(gmfn);
-
+
/* Guest page must be shadowed *only* as L1 and *only* once when out
- * of sync. Also, get out now if it's already out of sync.
+ * of sync. Also, get out now if it's already out of sync.
* Also, can't safely unsync if some vcpus have paging disabled.*/
- if ( pg->shadow_flags &
- ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)
+ if ( pg->shadow_flags &
+ ((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)
|| sh_page_has_multiple_shadows(pg)
|| is_pv_domain(v->domain)
|| !v->domain->arch.paging.shadow.oos_active )
ASSERT(mfn_valid(gmfn));
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Is the page already shadowed and out of sync? */
- if ( page_is_out_of_sync(page) )
+ if ( page_is_out_of_sync(page) )
sh_resync(v, gmfn);
#endif
if ( (page->shadow_flags & SHF_page_type_mask) == 0 )
{
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Was the page out of sync? */
- if ( page_is_out_of_sync(page) )
+ if ( page_is_out_of_sync(page) )
{
oos_hash_remove(v, gmfn);
}
-#endif
+#endif
clear_bit(_PGC_page_table, &page->count_info);
}
struct page_info *page = mfn_to_page(gmfn);
paging_mark_dirty(v->domain, mfn_x(gmfn));
-
+
// Determine which types of shadows are affected, and update each.
//
// Always validate L1s before L2s to prevent another cpu with a linear
- // mapping of this gmfn from seeing a walk that results from
+ // mapping of this gmfn from seeing a walk that results from
// using the new L2 value and the old L1 value. (It is OK for such a
// guest to see a walk that uses the old L2 value with the new L1 value,
// as hardware could behave this way if one level of the pagewalk occurs
if ( !(page->count_info & PGC_page_table) )
return 0; /* Not shadowed at all */
- if ( page->shadow_flags & SHF_L1_32 )
+ if ( page->shadow_flags & SHF_L1_32 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 2)
(v, gmfn, entry, size);
- if ( page->shadow_flags & SHF_L2_32 )
+ if ( page->shadow_flags & SHF_L2_32 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 2)
(v, gmfn, entry, size);
- if ( page->shadow_flags & SHF_L1_PAE )
+ if ( page->shadow_flags & SHF_L1_PAE )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 3)
(v, gmfn, entry, size);
- if ( page->shadow_flags & SHF_L2_PAE )
+ if ( page->shadow_flags & SHF_L2_PAE )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 3)
(v, gmfn, entry, size);
- if ( page->shadow_flags & SHF_L2H_PAE )
+ if ( page->shadow_flags & SHF_L2H_PAE )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 3)
(v, gmfn, entry, size);
- if ( page->shadow_flags & SHF_L1_64 )
+ if ( page->shadow_flags & SHF_L1_64 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl1e, 4)
(v, gmfn, entry, size);
- if ( page->shadow_flags & SHF_L2_64 )
+ if ( page->shadow_flags & SHF_L2_64 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2e, 4)
(v, gmfn, entry, size);
- if ( page->shadow_flags & SHF_L2H_64 )
+ if ( page->shadow_flags & SHF_L2H_64 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl2he, 4)
(v, gmfn, entry, size);
- if ( page->shadow_flags & SHF_L3_64 )
+ if ( page->shadow_flags & SHF_L3_64 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl3e, 4)
(v, gmfn, entry, size);
- if ( page->shadow_flags & SHF_L4_64 )
+ if ( page->shadow_flags & SHF_L4_64 )
result |= SHADOW_INTERNAL_NAME(sh_map_and_validate_gl4e, 4)
(v, gmfn, entry, size);
- this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED));
+ this_cpu(trace_shadow_path_flags) |= (result<<(TRCE_SFLAG_SET_CHANGED));
return result;
}
if ( rc & SHADOW_SET_FLUSH )
/* Need to flush TLBs to pick up shadow PT changes */
flush_tlb_mask(d->domain_dirty_cpumask);
- if ( rc & SHADOW_SET_ERROR )
+ if ( rc & SHADOW_SET_ERROR )
{
- /* This page is probably not a pagetable any more: tear it out of the
- * shadows, along with any tables that reference it.
- * Since the validate call above will have made a "safe" (i.e. zero)
- * shadow entry, we can let the domain live even if we can't fully
+ /* This page is probably not a pagetable any more: tear it out of the
+ * shadows, along with any tables that reference it.
+ * Since the validate call above will have made a "safe" (i.e. zero)
+ * shadow entry, we can let the domain live even if we can't fully
* unshadow the page. */
sh_remove_shadows(v, gmfn, 0, 0);
}
int shadow_write_guest_entry(struct vcpu *v, intpte_t *p,
intpte_t new, mfn_t gmfn)
-/* Write a new value into the guest pagetable, and update the shadows
+/* Write a new value into the guest pagetable, and update the shadows
* appropriately. Returns 0 if we page-faulted, 1 for success. */
{
int failed;
int shadow_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
intpte_t *old, intpte_t new, mfn_t gmfn)
-/* Cmpxchg a new value into the guest pagetable, and update the shadows
+/* Cmpxchg a new value into the guest pagetable, and update the shadows
* appropriately. Returns 0 if we page-faulted, 1 if not.
* N.B. caller should check the value of "old" to see if the
* cmpxchg itself was successful. */
/**************************************************************************/
-/* Memory management for shadow pages. */
+/* Memory management for shadow pages. */
/* Allocating shadow pages
* -----------------------
* PAE/64-bit l2 tables (1GB va each). These multi-page shadows are
* not contiguous in memory; functions for handling offsets into them are
* defined in shadow/multi.c (shadow_l1_index() etc.)
- *
+ *
* This table shows the allocation behaviour of the different modes:
*
* Xen paging 64b 64b 64b
* Guest paging 32b pae 64b
- * PV or HVM HVM HVM *
+ * PV or HVM HVM HVM *
* Shadow paging pae pae 64b
*
* sl1 size 8k 4k 4k
* sl3 size - - 4k
* sl4 size - - 4k
*
- * In HVM guests, the p2m table is built out of shadow pages, and we provide
- * a function for the p2m management to steal pages, in max-order chunks, from
+ * In HVM guests, the p2m table is built out of shadow pages, and we provide
+ * a function for the p2m management to steal pages, in max-order chunks, from
* the free pool.
*/
/* Figure out the least acceptable quantity of shadow memory.
* The minimum memory requirement for always being able to free up a
* chunk of memory is very small -- only three max-order chunks per
- * vcpu to hold the top level shadows and pages with Xen mappings in them.
+ * vcpu to hold the top level shadows and pages with Xen mappings in them.
*
* But for a guest to be guaranteed to successfully execute a single
* instruction, we must be able to map a large number (about thirty) VAs
* at the same time, which means that to guarantee progress, we must
* allow for more than ninety allocated pages per vcpu. We round that
- * up to 128 pages, or half a megabyte per vcpu, and add 1 more vcpu's
+ * up to 128 pages, or half a megabyte per vcpu, and add 1 more vcpu's
* worth to make sure we never return zero. */
-static unsigned int shadow_min_acceptable_pages(struct domain *d)
+static unsigned int shadow_min_acceptable_pages(struct domain *d)
{
u32 vcpu_count = 1;
struct vcpu *v;
vcpu_count++;
return (vcpu_count * 128);
-}
+}
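/* Worked example (not part of this patch): vcpu_count starts at 1 and is
 * incremented once per vcpu above, so a 4-vcpu domain gets a floor of
 * (4 + 1) * 128 = 640 pages, i.e. 2.5MB of shadow memory. */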
/* Dispatcher function: call the per-mode function that will unhook the
* non-Xen mappings in this top-level shadow mfn. With user_only == 1,
int i;
if ( d->arch.paging.shadow.free_pages >= pages ) return;
-
+
v = current;
if ( v->domain != d )
v = d->vcpu[0];
* mappings. */
perfc_incr(shadow_prealloc_2);
- for_each_vcpu(d, v2)
+ for_each_vcpu(d, v2)
for ( i = 0 ; i < 4 ; i++ )
{
if ( !pagetable_is_null(v2->arch.shadow_table[i]) )
{
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PREALLOC_UNHOOK);
- shadow_unhook_mappings(v,
+ shadow_unhook_mappings(v,
pagetable_get_mfn(v2->arch.shadow_table[i]), 0);
/* See if that freed up enough space */
}
}
}
-
+
/* Nothing more we can do: all remaining shadows are of pages that
* hold Xen mappings for some vcpu. This can never happen. */
SHADOW_ERROR("Can't pre-allocate %u shadow pages!\n"
/* Deliberately free all the memory we can: this will tear down all of
* this domain's shadows */
-static void shadow_blow_tables(struct domain *d)
+static void shadow_blow_tables(struct domain *d)
{
struct page_info *sp, *t;
struct vcpu *v = d->vcpu[0];
smfn = page_to_mfn(sp);
sh_unpin(v, smfn);
}
-
+
/* Second pass: unhook entries of in-use shadows */
- for_each_vcpu(d, v)
+ for_each_vcpu(d, v)
for ( i = 0 ; i < 4 ; i++ )
if ( !pagetable_is_null(v->arch.shadow_table[i]) )
- shadow_unhook_mappings(v,
+ shadow_unhook_mappings(v,
pagetable_get_mfn(v->arch.shadow_table[i]), 0);
/* Make sure everyone sees the unshadowings */
}
/* Allocate another shadow's worth of (contiguous, aligned) pages,
- * and fill in the type and backpointer fields of their page_infos.
+ * and fill in the type and backpointer fields of their page_infos.
* Never fails to allocate. */
-mfn_t shadow_alloc(struct domain *d,
+mfn_t shadow_alloc(struct domain *d,
u32 shadow_type,
unsigned long backpointer)
{
INIT_PAGE_LIST_HEAD(&tmp_list);
/* Init page info fields and clear the pages */
- for ( i = 0; i < pages ; i++ )
+ for ( i = 0; i < pages ; i++ )
{
sp = page_list_remove_head(&d->arch.paging.shadow.freelist);
- /* Before we overwrite the old contents of this page,
+ /* Before we overwrite the old contents of this page,
* we need to be sure that no TLB holds a pointer to it. */
cpumask_copy(&mask, d->domain_dirty_cpumask);
tlbflush_filter(mask, sp->tlbflush_timestamp);
set_next_shadow(sp, NULL);
perfc_incr(shadow_alloc_count);
}
- if ( shadow_type >= SH_type_min_shadow
+ if ( shadow_type >= SH_type_min_shadow
&& shadow_type <= SH_type_max_shadow )
sp->u.sh.head = 1;
/* Return some shadow pages to the pool. */
void shadow_free(struct domain *d, mfn_t smfn)
{
- struct page_info *next = NULL, *sp = mfn_to_page(smfn);
+ struct page_info *next = NULL, *sp = mfn_to_page(smfn);
struct page_list_head *pin_list;
unsigned int pages;
u32 shadow_type;
pages = shadow_size(shadow_type);
pin_list = &d->arch.paging.shadow.pinned_shadows;
- for ( i = 0; i < pages; i++ )
+ for ( i = 0; i < pages; i++ )
{
#if SHADOW_OPTIMIZATIONS & (SHOPT_WRITABLE_HEURISTIC | SHOPT_FAST_EMULATION)
struct vcpu *v;
- for_each_vcpu(d, v)
+ for_each_vcpu(d, v)
{
#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
/* No longer safe to look for a writeable mapping in this shadow */
- if ( v->arch.paging.shadow.last_writeable_pte_smfn
- == mfn_x(page_to_mfn(sp)) )
+ if ( v->arch.paging.shadow.last_writeable_pte_smfn
+ == mfn_x(page_to_mfn(sp)) )
v->arch.paging.shadow.last_writeable_pte_smfn = 0;
#endif
#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
next = page_list_next(sp, pin_list);
/* Strip out the type: this is now a free shadow page */
sp->u.sh.type = sp->u.sh.head = 0;
- /* Remember the TLB timestamp so we will know whether to flush
+ /* Remember the TLB timestamp so we will know whether to flush
* TLBs when we reuse the page. Because the destructors leave the
* contents of the pages in place, we can delay TLB flushes until
* just before the allocator hands the page out again. */
{
struct page_info *pg;
- /* This is called both from the p2m code (which never holds the
+ /* This is called both from the p2m code (which never holds the
* paging lock) and the log-dirty code (which always does). */
paging_lock_recursive(d);
- if ( d->arch.paging.shadow.total_pages
+ if ( d->arch.paging.shadow.total_pages
< shadow_min_acceptable_pages(d) + 1 )
{
if ( !d->arch.paging.p2m_alloc_failed )
}
pg->count_info &= ~PGC_count_mask;
pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
- page_set_owner(pg, NULL);
+ page_set_owner(pg, NULL);
- /* This is called both from the p2m code (which never holds the
+ /* This is called both from the p2m code (which never holds the
* paging lock) and the log-dirty code (which always does). */
paging_lock_recursive(d);
* Input will be rounded up to at least shadow_min_acceptable_pages(),
* plus space for the p2m table.
* Returns 0 for success, non-zero for failure. */
-static unsigned int sh_set_allocation(struct domain *d,
+static unsigned int sh_set_allocation(struct domain *d,
unsigned int pages,
int *preempted)
{
pages = 0;
else
pages -= d->arch.paging.shadow.p2m_pages;
-
+
/* Don't allocate less than the minimum acceptable, plus one page per
* megabyte of RAM (for the p2m table) */
lower_bound = shadow_min_acceptable_pages(d) + (d->tot_pages / 256);
pages = lower_bound;
}
- SHADOW_PRINTK("current %i target %i\n",
+ SHADOW_PRINTK("current %i target %i\n",
d->arch.paging.shadow.total_pages, pages);
for ( ; ; )
{
- if ( d->arch.paging.shadow.total_pages < pages )
+ if ( d->arch.paging.shadow.total_pages < pages )
{
/* Need to allocate more memory from domheap */
sp = (struct page_info *)
alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
- if ( sp == NULL )
- {
+ if ( sp == NULL )
+ {
SHADOW_PRINTK("failed to allocate shadow pages.\n");
return -ENOMEM;
}
sp->u.sh.count = 0;
sp->tlbflush_timestamp = 0; /* Not in any TLB */
page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
- }
- else if ( d->arch.paging.shadow.total_pages > pages )
+ }
+ else if ( d->arch.paging.shadow.total_pages > pages )
{
/* Need to return memory to domheap */
_shadow_prealloc(d, 1);
/**************************************************************************/
/* Hash table for storing the guest->shadow mappings.
- * The table itself is an array of pointers to shadows; the shadows are then
+ * The table itself is an array of pointers to shadows; the shadows are then
* threaded on a singly-linked list of shadows with the same hash value */
#define SHADOW_HASH_BUCKETS 251
/* Hash function that takes a gfn or mfn, plus another byte of type info */
typedef u32 key_t;
-static inline key_t sh_hash(unsigned long n, unsigned int t)
+static inline key_t sh_hash(unsigned long n, unsigned int t)
{
unsigned char *p = (unsigned char *)&n;
key_t k = t;
SHADOW_ERROR("MFN %#"PRI_mfn" shadowed (by %#"PRI_mfn")"
" and not OOS but has typecount %#lx\n",
__backpointer(sp),
- mfn_x(page_to_mfn(sp)),
+ mfn_x(page_to_mfn(sp)),
gpg->u.inuse.type_info);
BUG();
}
}
else /* Not an l1 */
#endif
- if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page
+ if ( (gpg->u.inuse.type_info & PGT_type_mask) == PGT_writable_page
&& (gpg->u.inuse.type_info & PGT_count_mask) != 0 )
{
SHADOW_ERROR("MFN %#"PRI_mfn" shadowed (by %#"PRI_mfn")"
if ( !(SHADOW_AUDIT_ENABLE) )
return;
- for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ )
+ for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ )
{
sh_hash_audit_bucket(d, i);
}
#define sh_hash_audit(_d) do {} while(0)
#endif /* Hashtable bucket audit */
-/* Allocate and initialise the table itself.
+/* Allocate and initialise the table itself.
* Returns 0 for success, 1 for error. */
static int shadow_hash_alloc(struct domain *d)
{
if ( unlikely(d->arch.paging.shadow.hash_walking != 0) )
/* Can't reorder: someone is walking the hash chains */
return page_to_mfn(sp);
- else
+ else
{
ASSERT(prev);
/* Delete sp from the list */
- prev->next_shadow = sp->next_shadow;
+ prev->next_shadow = sp->next_shadow;
/* Re-insert it at the head of the list */
set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
d->arch.paging.shadow.hash_table[key] = sp;
return _mfn(INVALID_MFN);
}
-void shadow_hash_insert(struct vcpu *v, unsigned long n, unsigned int t,
+void shadow_hash_insert(struct vcpu *v, unsigned long n, unsigned int t,
mfn_t smfn)
/* Put a mapping (n,t)->smfn into the hash table */
{
struct domain *d = v->domain;
struct page_info *sp;
key_t key;
-
+
ASSERT(paging_locked_by_me(d));
ASSERT(d->arch.paging.shadow.hash_table);
ASSERT(t);
perfc_incr(shadow_hash_inserts);
key = sh_hash(n, t);
sh_hash_audit_bucket(d, key);
-
+
/* Insert this shadow at the top of the bucket */
sp = mfn_to_page(smfn);
set_next_shadow(sp, d->arch.paging.shadow.hash_table[key]);
d->arch.paging.shadow.hash_table[key] = sp;
-
+
sh_hash_audit_bucket(d, key);
}
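/* Usage sketch (not part of this patch): shadows are filed and found again
 * under the same (backpointer, type) key, e.g.
 *
 *     shadow_hash_insert(v, mfn_x(gmfn), shadow_type, smfn);
 *     ...
 *     smfn = shadow_hash_lookup(v, mfn_x(gmfn), shadow_type);
 *
 * which is what the set_/get_shadow_status() wrappers in multi.c below do. */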
-void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t,
+void shadow_hash_delete(struct vcpu *v, unsigned long n, unsigned int t,
mfn_t smfn)
/* Excise the mapping (n,t)->smfn from the hash table */
{
perfc_incr(shadow_hash_deletes);
key = sh_hash(n, t);
sh_hash_audit_bucket(d, key);
-
+
sp = mfn_to_page(smfn);
- if ( d->arch.paging.shadow.hash_table[key] == sp )
+ if ( d->arch.paging.shadow.hash_table[key] == sp )
/* Easy case: we're deleting the head item. */
d->arch.paging.shadow.hash_table[key] = next_shadow(sp);
- else
+ else
{
/* Need to search for the one we want */
x = d->arch.paging.shadow.hash_table[key];
typedef int (*hash_callback_t)(struct vcpu *v, mfn_t smfn, mfn_t other_mfn);
-static void hash_foreach(struct vcpu *v,
- unsigned int callback_mask,
+static void hash_foreach(struct vcpu *v,
+ unsigned int callback_mask,
const hash_callback_t callbacks[],
mfn_t callback_mfn)
-/* Walk the hash table looking at the types of the entries and
- * calling the appropriate callback function for each entry.
+/* Walk the hash table looking at the types of the entries and
+ * calling the appropriate callback function for each entry.
* The mask determines which shadow types we call back for, and the array
* of callbacks tells us which function to call.
- * Any callback may return non-zero to let us skip the rest of the scan.
+ * Any callback may return non-zero to let us skip the rest of the scan.
*
- * WARNING: Callbacks MUST NOT add or remove hash entries unless they
+ * WARNING: Callbacks MUST NOT add or remove hash entries unless they
* then return non-zero to terminate the scan. */
{
int i, done = 0;
ASSERT(d->arch.paging.shadow.hash_walking == 0);
d->arch.paging.shadow.hash_walking = 1;
- for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ )
+ for ( i = 0; i < SHADOW_HASH_BUCKETS; i++ )
{
/* WARNING: This is not safe against changes to the hash table.
* The callback *must* return non-zero if it has inserted or
if ( done ) break;
}
}
- if ( done ) break;
+ if ( done ) break;
}
- d->arch.paging.shadow.hash_walking = 0;
+ d->arch.paging.shadow.hash_walking = 0;
}
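/* Usage sketch (not part of this patch), following the callers below such as
 * sh_remove_shadows() and shadow_audit_tables(): build a callback table
 * indexed by shadow type, pick a mask of SHF_* bits, and walk the hash:
 *
 *     static const hash_callback_t callbacks[SH_type_unused] = {
 *         ...
 *         SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4),   (the l4_64 slot)
 *         NULL                                          (all the rest)
 *     };
 *     hash_foreach(v, SHF_L4_64, callbacks, _mfn(INVALID_MFN));
 *
 * Only entries whose SH_type has its bit set in the mask are dispatched. */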
/**************************************************************************/
/* Destroy a shadow page: simple dispatcher to call the per-type destructor
- * which will decrement refcounts appropriately and return memory to the
+ * which will decrement refcounts appropriately and return memory to the
* free pool. */
void sh_destroy_shadow(struct vcpu *v, mfn_t smfn)
/* Double-check, if we can, that the shadowed page belongs to this
* domain, (by following the back-pointer). */
- ASSERT(t == SH_type_fl1_32_shadow ||
- t == SH_type_fl1_pae_shadow ||
- t == SH_type_fl1_64_shadow ||
- t == SH_type_monitor_table ||
+ ASSERT(t == SH_type_fl1_32_shadow ||
+ t == SH_type_fl1_pae_shadow ||
+ t == SH_type_fl1_64_shadow ||
+ t == SH_type_monitor_table ||
(is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
(page_get_owner(mfn_to_page(backpointer(sp)))
- == v->domain));
+ == v->domain));
/* The down-shifts here are so that the switch statement is on nice
* small numbers that the compiler will enjoy */
SHADOW_ERROR("tried to destroy shadow of bad type %08lx\n",
(unsigned long)t);
BUG();
- }
+ }
}
static inline void trace_shadow_wrmap_bf(mfn_t gmfn)
}
/**************************************************************************/
-/* Remove all writeable mappings of a guest frame from the shadow tables
- * Returns non-zero if we need to flush TLBs.
+/* Remove all writeable mappings of a guest frame from the shadow tables
+ * Returns non-zero if we need to flush TLBs.
* level and fault_addr describe how we found this to be a pagetable;
* level==0 means we have some other reason for revoking write access.
* If level==0 we are allowed to fail, returning -1. */
-int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
+int sh_remove_write_access(struct vcpu *v, mfn_t gmfn,
unsigned int level,
unsigned long fault_addr)
{
/* Early exit if it's already a pagetable, or otherwise not writeable */
if ( (sh_mfn_is_a_page_table(gmfn)
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Unless they've been allowed to go out of sync with their shadows */
&& !mfn_oos_may_write(gmfn)
#endif
perfc_incr(shadow_writeable);
- /* If this isn't a "normal" writeable page, the domain is trying to
+ /* If this isn't a "normal" writeable page, the domain is trying to
* put pagetables in special memory of some kind. We can't allow that. */
if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_writable_page )
{
- SHADOW_ERROR("can't remove write access to mfn %lx, type_info is %"
+ SHADOW_ERROR("can't remove write access to mfn %lx, type_info is %"
PRtype_info "\n",
mfn_x(gmfn), mfn_to_page(gmfn)->u.inuse.type_info);
domain_crash(v->domain);
return 1; \
} \
} while (0)
-
+
if ( v->arch.paging.mode->guest_levels == 2 )
{
if ( level == 1 )
GUESS(0xC0000000UL + (fault_addr >> 10), 1);
/* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
- if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
+ if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
/* FreeBSD: Linear map at 0xBFC00000 */
if ( level == 1 )
- GUESS(0xBFC00000UL
+ GUESS(0xBFC00000UL
+ ((fault_addr & VADDR_MASK) >> 10), 6);
}
else if ( v->arch.paging.mode->guest_levels == 3 )
{
/* 32bit PAE w2k3: linear map at 0xC0000000 */
- switch ( level )
+ switch ( level )
{
case 1: GUESS(0xC0000000UL + (fault_addr >> 9), 2); break;
case 2: GUESS(0xC0600000UL + (fault_addr >> 18), 2); break;
}
/* Linux lowmem: first 896MB is mapped 1-to-1 above 0xC0000000 */
- if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
+ if ((gfn = mfn_to_gfn(v->domain, gmfn)) < 0x38000 )
GUESS(0xC0000000UL + (gfn << PAGE_SHIFT), 4);
-
+
/* FreeBSD PAE: Linear map at 0xBF800000 */
switch ( level )
{
else if ( v->arch.paging.mode->guest_levels == 4 )
{
/* 64bit w2k3: linear map at 0xfffff68000000000 */
- switch ( level )
+ switch ( level )
{
- case 1: GUESS(0xfffff68000000000UL
+ case 1: GUESS(0xfffff68000000000UL
+ ((fault_addr & VADDR_MASK) >> 9), 3); break;
case 2: GUESS(0xfffff6fb40000000UL
+ ((fault_addr & VADDR_MASK) >> 18), 3); break;
- case 3: GUESS(0xfffff6fb7da00000UL
+ case 3: GUESS(0xfffff6fb7da00000UL
+ ((fault_addr & VADDR_MASK) >> 27), 3); break;
}
/* 64bit Linux direct map at 0xffff880000000000; older kernels
* had it at 0xffff810000000000, and older kernels yet had it
* at 0x0000010000000000UL */
- gfn = mfn_to_gfn(v->domain, gmfn);
+ gfn = mfn_to_gfn(v->domain, gmfn);
GUESS(0xffff880000000000UL + (gfn << PAGE_SHIFT), 4);
GUESS(0xffff810000000000UL + (gfn << PAGE_SHIFT), 4);
GUESS(0x0000010000000000UL + (gfn << PAGE_SHIFT), 4);
* kpm_vbase; 0xfffffe0000000000UL
*/
GUESS(0xfffffe0000000000UL + (gfn << PAGE_SHIFT), 4);
-
+
/* FreeBSD 64bit: linear map 0xffff800000000000 */
switch ( level )
{
mfn_t last_smfn = _mfn(v->arch.paging.shadow.last_writeable_pte_smfn);
int shtype = mfn_to_page(last_smfn)->u.sh.type;
- if ( callbacks[shtype] )
+ if ( callbacks[shtype] )
callbacks[shtype](v, last_smfn, gmfn);
if ( (pg->u.inuse.type_info & PGT_count_mask) != old_count )
return 1;
#endif /* SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC */
-
+
/* Brute-force search of all the shadows, by walking the hash */
trace_shadow_wrmap_bf(gmfn);
if ( level == 0 )
(mfn_to_page(gmfn)->u.inuse.type_info&PGT_count_mask));
domain_crash(v->domain);
}
-
+
/* We killed at least one writeable mapping, so must flush TLBs. */
return 1;
}
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
mfn_t smfn, unsigned long off)
{
struct page_info *sp = mfn_to_page(smfn);
-
+
ASSERT(mfn_valid(smfn));
ASSERT(mfn_valid(gmfn));
-
+
if ( sp->u.sh.type == SH_type_l1_32_shadow
|| sp->u.sh.type == SH_type_fl1_32_shadow )
{
return 0;
}
-#endif
+#endif
/**************************************************************************/
/* Remove all mappings of a guest frame from the shadow tables.
* can be called via put_page_type when we clear a shadow l1e).*/
paging_lock_recursive(v->domain);
- /* XXX TODO:
+ /* XXX TODO:
* Heuristics for finding the (probably) single mapping of this gmfn */
-
+
/* Brute-force search of all the shadows, by walking the hash */
perfc_incr(shadow_mappings_bf);
hash_foreach(v, callback_mask, callbacks, gmfn);
/* If that didn't catch the mapping, something is very wrong */
if ( !sh_check_page_has_no_refs(page) )
{
- /* Don't complain if we're in HVM and there are some extra mappings:
- * The qemu helper process has an untyped mapping of this dom's RAM
+ /* Don't complain if we're in HVM and there are some extra mappings:
+ * The qemu helper process has an untyped mapping of this dom's RAM
* and the HVM restore program takes another.
* Also allow one typed refcount for xenheap pages, to match
* share_xen_page_with_guest(). */
== !!is_xen_heap_page(page))) )
{
SHADOW_ERROR("can't find all mappings of mfn %lx: "
- "c=%08lx t=%08lx\n", mfn_x(gmfn),
+ "c=%08lx t=%08lx\n", mfn_x(gmfn),
page->count_info, page->u.inuse.type_info);
}
}
ASSERT(sp->u.sh.type > 0);
ASSERT(sp->u.sh.type < SH_type_max_shadow);
ASSERT(sh_type_has_up_pointer(v, sp->u.sh.type));
-
+
if (sp->up == 0) return 0;
pmfn = _mfn(sp->up >> PAGE_SHIFT);
ASSERT(mfn_valid(pmfn));
ASSERT(vaddr);
vaddr += sp->up & (PAGE_SIZE-1);
ASSERT(l1e_get_pfn(*(l1_pgentry_t *)vaddr) == mfn_x(smfn));
-
+
/* Is this the only reference to this shadow? */
rc = (sp->u.sh.count == 1) ? 1 : 0;
break;
default: BUG(); /* Some weird unknown shadow type */
}
-
+
sh_unmap_domain_page(vaddr);
if ( rc )
perfc_incr(shadow_up_pointer);
}
void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all)
-/* Remove the shadows of this guest page.
- * If fast != 0, just try the quick heuristic, which will remove
+/* Remove the shadows of this guest page.
+ * If fast != 0, just try the quick heuristic, which will remove
* at most one reference to each shadow of the page. Otherwise, walk
* all the shadow tables looking for refs to shadows of this gmfn.
* If all != 0, kill the domain if we can't find all the shadows.
struct page_info *pg = mfn_to_page(gmfn);
mfn_t smfn;
unsigned char t;
-
+
/* Dispatch table for getting per-type functions: each level must
* be called with the function to remove a lower-level shadow. */
static const hash_callback_t callbacks[SH_type_unused] = {
domain_crash(v->domain);
}
- /* Need to flush TLBs now, so that linear maps are safe next time we
+ /* Need to flush TLBs now, so that linear maps are safe next time we
* take a fault. */
flush_tlb_mask(v->domain->domain_dirty_cpumask);
{
sh_remove_shadows(v, gmfn, 0, 1);
/* XXX TODO:
- * Rework this hashtable walker to return a linked-list of all
- * the shadows it modified, then do breadth-first recursion
- * to find the way up to higher-level tables and unshadow them too.
+ * Rework this hashtable walker to return a linked-list of all
+ * the shadows it modified, then do breadth-first recursion
+ * to find the way up to higher-level tables and unshadow them too.
*
* The current code (just tearing down each page's shadows as we
- * detect that it is not a pagetable) is correct, but very slow.
+ * detect that it is not a pagetable) is correct, but very slow.
* It means extra emulated writes and slows down removal of mappings. */
}
/**************************************************************************/
-/* Reset the up-pointers of every L3 shadow to 0.
+/* Reset the up-pointers of every L3 shadow to 0.
* This is called when l3 shadows stop being pinnable, to clear out all
* the list-head bits so the up-pointer field is properly initialised. */
static int sh_clear_up_pointer(struct vcpu *v, mfn_t smfn, mfn_t unused)
ASSERT(paging_locked_by_me(d));
-#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
/* Make sure this vcpu has a virtual TLB array allocated */
if ( unlikely(!v->arch.paging.vtlb) )
{
}
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
if ( mfn_x(v->arch.paging.shadow.oos_snapshot[0]) == INVALID_MFN )
{
int i;
ASSERT(shadow_mode_translate(d));
ASSERT(shadow_mode_external(d));
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Need to resync all our pages now, because if a page goes out
* of sync with paging enabled and is resynced with paging
* disabled, the resync will go wrong. */
/* Need to make a new monitor table for the new mode */
mfn_t new_mfn, old_mfn;
- if ( v != current && vcpu_runnable(v) )
+ if ( v != current && vcpu_runnable(v) )
{
SHADOW_ERROR("Some third party (d=%u v=%u) is changing "
"this HVM vcpu's (d=%u v=%u) paging mode "
SHADOW_PRINTK("new monitor table %"PRI_mfn "\n",
mfn_x(new_mfn));
- /* Don't be running on the old monitor table when we
+ /* Don't be running on the old monitor table when we
* pull it down! Switch CR3, and warn the HVM code that
* its host cr3 has changed. */
make_cr3(v, mfn_x(new_mfn));
int shadow_enable(struct domain *d, u32 mode)
/* Turn on "permanent" shadow features: external, translate, refcount.
* Can only be called once on a domain, and these features cannot be
- * disabled.
+ * disabled.
* Returns 0 for success, -errno for failure. */
-{
+{
unsigned int old_pages;
struct page_info *pg = NULL;
uint32_t *e;
if ( old_pages == 0 )
{
unsigned int r;
- paging_lock(d);
+ paging_lock(d);
r = sh_set_allocation(d, 1024, NULL); /* Use at least 4MB */
if ( r != 0 )
{
sh_set_allocation(d, 0, NULL);
rv = -ENOMEM;
goto out_locked;
- }
+ }
paging_unlock(d);
}
d->arch.paging.alloc_page = shadow_alloc_p2m_page;
d->arch.paging.free_page = shadow_free_p2m_page;
- /* Init the P2M table. Must be done before we take the paging lock
+ /* Init the P2M table. Must be done before we take the paging lock
* to avoid possible deadlock. */
if ( mode & PG_translate )
{
* have paging disabled */
if ( is_hvm_domain(d) )
{
- /* Get a single page from the shadow pool. Take it via the
+ /* Get a single page from the shadow pool. Take it via the
* P2M interface to make freeing it simpler afterwards. */
pg = shadow_alloc_p2m_page(d);
if ( pg == NULL )
goto out_unlocked;
}
/* Fill it with 32-bit, non-PAE superpage entries, each mapping 4MB
- * of virtual address space onto the same physical address range */
+ * of virtual address space onto the same physical address range */
e = __map_domain_page(pg);
for ( i = 0; i < PAGE_SIZE / sizeof(*e); i++ )
e[i] = ((0x400000U * i)
- | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER
+ | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER
| _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
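/* e.g. entry 1 becomes ((0x400000U * 1) | _PAGE_PRESENT | _PAGE_RW
 * | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE): a 4MB
 * superpage mapping virtual 0x400000-0x7fffff onto the same physical
 * range. */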
sh_unmap_domain_page(e);
pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
goto out_locked;
}
-#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
- /* We assume we're dealing with an older 64bit linux guest until we
+#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
+ /* We assume we're dealing with an older 64bit linux guest until we
* see the guest use more than one l4 per vcpu. */
d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
#endif
}
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
{
int i;
mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
SHADOW_PRINTK("teardown of domain %u starts."
" Shadow pages total = %u, free = %u, p2m=%u\n",
d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
+ d->arch.paging.shadow.total_pages,
+ d->arch.paging.shadow.free_pages,
d->arch.paging.shadow.p2m_pages);
/* Destroy all the shadows and release memory to domheap */
sh_set_allocation(d, 0, NULL);
/* Release the hash table back to xenheap */
- if (d->arch.paging.shadow.hash_table)
+ if (d->arch.paging.shadow.hash_table)
shadow_hash_teardown(d);
/* Should not have any more memory held */
SHADOW_PRINTK("teardown done."
" Shadow pages total = %u, free = %u, p2m=%u\n",
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
+ d->arch.paging.shadow.total_pages,
+ d->arch.paging.shadow.free_pages,
d->arch.paging.shadow.p2m_pages);
ASSERT(d->arch.paging.shadow.total_pages == 0);
}
- /* Free the non-paged-vcpus pagetable; must happen after we've
+ /* Free the non-paged-vcpus pagetable; must happen after we've
* destroyed any shadows of it or sh_destroy_shadow will get confused. */
if ( !pagetable_is_null(d->arch.paging.shadow.unpaged_pagetable) )
{
if ( !hvm_paging_enabled(v) )
v->arch.guest_table = pagetable_null();
}
- unpaged_pagetable =
+ unpaged_pagetable =
pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable);
d->arch.paging.shadow.unpaged_pagetable = pagetable_null();
}
paging_unlock(d);
/* Must be called outside the lock */
- if ( unpaged_pagetable )
+ if ( unpaged_pagetable )
shadow_free_p2m_page(d, unpaged_pagetable);
}
SHADOW_PRINTK("dom %u final teardown starts."
" Shadow pages total = %u, free = %u, p2m=%u\n",
d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
+ d->arch.paging.shadow.total_pages,
+ d->arch.paging.shadow.free_pages,
d->arch.paging.shadow.p2m_pages);
- /* Double-check that the domain didn't have any shadow memory.
+ /* Double-check that the domain didn't have any shadow memory.
* It is possible for a domain that never got domain_kill()ed
* to get here with its shadow allocation intact. */
if ( d->arch.paging.shadow.total_pages != 0 )
SHADOW_PRINTK("dom %u final teardown done."
" Shadow pages total = %u, free = %u, p2m=%u\n",
d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
+ d->arch.paging.shadow.total_pages,
+ d->arch.paging.shadow.free_pages,
d->arch.paging.shadow.p2m_pages);
paging_unlock(d);
}
return 0;
}
-static int shadow_one_bit_disable(struct domain *d, u32 mode)
+static int shadow_one_bit_disable(struct domain *d, u32 mode)
/* Turn off a single shadow mode feature */
{
struct vcpu *v;
SHADOW_PRINTK("un-shadowing of domain %u starts."
" Shadow pages total = %u, free = %u, p2m=%u\n",
d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
+ d->arch.paging.shadow.total_pages,
+ d->arch.paging.shadow.free_pages,
d->arch.paging.shadow.p2m_pages);
for_each_vcpu(d, v)
{
else
make_cr3(v, pagetable_get_pfn(v->arch.guest_table));
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
{
int i;
mfn_t *oos_snapshot = v->arch.paging.shadow.oos_snapshot;
SHADOW_PRINTK("un-shadowing of domain %u done."
" Shadow pages total = %u, free = %u, p2m=%u\n",
d->domain_id,
- d->arch.paging.shadow.total_pages,
- d->arch.paging.shadow.free_pages,
+ d->arch.paging.shadow.total_pages,
+ d->arch.paging.shadow.free_pages,
d->arch.paging.shadow.p2m_pages);
}
/* P2M map manipulations */
/* shadow specific code which should be called when P2M table entry is updated
- * with new content. It is responsible for update the entry, as well as other
+ * with new content. It is responsible for updating the entry, as well as other
* shadow processing jobs.
*/
{
mfn_t mfn = _mfn(l1e_get_pfn(*p));
p2m_type_t p2mt = p2m_flags_to_type(l1e_get_flags(*p));
- if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
+ if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) )
{
sh_remove_all_shadows_and_parents(v, mfn);
if ( sh_remove_all_mappings(v, mfn) )
}
}
- /* If we're removing a superpage mapping from the p2m, we need to check
- * all the pages covered by it. If they're still there in the new
+ /* If we're removing a superpage mapping from the p2m, we need to check
+ * all the pages covered by it. If they're still there in the new
* scheme, that's OK, but otherwise they must be unshadowed. */
if ( level == 2 && (l1e_get_flags(*p) & _PAGE_PRESENT) &&
(l1e_get_flags(*p) & _PAGE_PSE) )
/* If we're replacing a superpage with a normal L1 page, map it */
if ( (l1e_get_flags(new) & _PAGE_PRESENT)
- && !(l1e_get_flags(new) & _PAGE_PSE)
+ && !(l1e_get_flags(new) & _PAGE_PSE)
&& mfn_valid(nmfn) )
npte = map_domain_page(mfn_x(nmfn));
-
+
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
- if ( !npte
+ if ( !npte
|| !p2m_is_ram(p2m_flags_to_type(l1e_get_flags(npte[i])))
|| l1e_get_pfn(npte[i]) != mfn_x(omfn) )
{
omfn = _mfn(mfn_x(omfn) + 1);
}
flush_tlb_mask(&flushmask);
-
+
if ( npte )
unmap_domain_page(npte);
}
paging_lock(d);
/* If there are any shadows, update them. But if shadow_teardown()
- * has already been called then it's not safe to try. */
+ * has already been called then it's not safe to try. */
if ( likely(d->arch.paging.shadow.total_pages != 0) )
sh_unshadow_for_p2m_change(d, gfn, p, new, level);
paging_lock(d);
if ( shadow_mode_enabled(d) )
{
- /* This domain already has some shadows: need to clear them out
- * of the way to make sure that all references to guest memory are
+ /* This domain already has some shadows: need to clear them out
+ * of the way to make sure that all references to guest memory are
* properly write-protected */
shadow_blow_tables(d);
}
if ( is_pv_32on64_domain(d) )
d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
#endif
-
+
ret = shadow_one_bit_enable(d, PG_log_dirty);
paging_unlock(d);
paging_lock(d);
ret = shadow_one_bit_disable(d, PG_log_dirty);
paging_unlock(d);
-
+
return ret;
}
-/* This function is called when we CLEAN log dirty bitmap. See
- * paging_log_dirty_op() for details.
+/* This function is called when we CLEAN log dirty bitmap. See
+ * paging_log_dirty_op() for details.
*/
static void sh_clean_dirty_bitmap(struct domain *d)
{
* no need to be careful. */
if ( !dirty_vram )
{
- /* Throw away all the shadows rather than walking through them
+ /* Throw away all the shadows rather than walking through them
* up to nr times getting rid of mappings of each pfn */
shadow_blow_tables(d);
/**************************************************************************/
/* Shadow-control XEN_DOMCTL dispatcher */
-int shadow_domctl(struct domain *d,
+int shadow_domctl(struct domain *d,
xen_domctl_shadow_op_t *sc,
XEN_GUEST_HANDLE_PARAM(void) u_domctl)
{
{
case XEN_DOMCTL_SHADOW_OP_OFF:
if ( d->arch.paging.mode == PG_SH_enable )
- if ( (rc = shadow_test_disable(d)) != 0 )
+ if ( (rc = shadow_test_disable(d)) != 0 )
return rc;
return 0;
case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
paging_lock(d);
if ( sc->mb == 0 && shadow_mode_enabled(d) )
- {
+ {
/* Can't set the allocation to zero unless the domain stops using
* shadow pagetables first */
SHADOW_ERROR("Can't set shadow allocation to zero, domain %u"
/* Not finished. Set up to re-run the call. */
rc = hypercall_create_continuation(
__HYPERVISOR_domctl, "h", u_domctl);
- else
+ else
/* Finished. Return the new allocation */
sc->mb = shadow_get_allocation(d);
return rc;
#if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_FULL
-void shadow_audit_tables(struct vcpu *v)
+void shadow_audit_tables(struct vcpu *v)
{
/* Dispatch table for getting per-type functions */
static const hash_callback_t callbacks[SH_type_unused] = {
SHADOW_INTERNAL_NAME(sh_audit_l4_table, 4), /* l4_64 */
NULL /* All the rest */
};
- unsigned int mask;
+ unsigned int mask;
if ( !(SHADOW_AUDIT_ENABLE) )
return;
case 2: mask = (SHF_L1_32|SHF_FL1_32|SHF_L2_32); break;
case 3: mask = (SHF_L1_PAE|SHF_FL1_PAE|SHF_L2_PAE
|SHF_L2H_PAE); break;
- case 4: mask = (SHF_L1_64|SHF_FL1_64|SHF_L2_64
+ case 4: mask = (SHF_L1_64|SHF_FL1_64|SHF_L2_64
|SHF_L3_64|SHF_L4_64); break;
default: BUG();
}
* c-file-style: "BSD"
* c-basic-offset: 4
* indent-tabs-mode: nil
- * End:
+ * End:
*/
/******************************************************************************
* arch/x86/mm/shadow/multi.c
*
- * Simple, mostly-synchronous shadow page tables.
+ * Simple, mostly-synchronous shadow page tables.
* Parts of this code are Copyright (c) 2006 by XenSource Inc.
* Parts of this code are Copyright (c) 2006 by Michael A Fetterman
* Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
#include "types.h"
/* THINGS TO DO LATER:
- *
+ *
* TEARDOWN HEURISTICS
- * Also: have a heuristic for when to destroy a previous paging-mode's
+ * Also: have a heuristic for when to destroy a previous paging-mode's
* shadows. When a guest is done with its start-of-day 32-bit tables
- * and reuses the memory we want to drop those shadows. Start with
- * shadows in a page in two modes as a hint, but beware of clever tricks
+ * and reuses the memory we want to drop those shadows. Start with
+ * shadows in a page in two modes as a hint, but beware of clever tricks
* like reusing a pagetable for both PAE and 64-bit during boot...
*
* PAE LINEAR MAPS
* Rework shadow_get_l*e() to have the option of using map_domain_page()
- * instead of linear maps. Add appropriate unmap_l*e calls in the users.
- * Then we can test the speed difference made by linear maps. If the
- * map_domain_page() version is OK on PAE, we could maybe allow a lightweight
- * l3-and-l2h-only shadow mode for PAE PV guests that would allow them
- * to share l2h pages again.
+ * instead of linear maps. Add appropriate unmap_l*e calls in the users.
+ * Then we can test the speed difference made by linear maps. If the
+ * map_domain_page() version is OK on PAE, we could maybe allow a lightweight
+ * l3-and-l2h-only shadow mode for PAE PV guests that would allow them
+ * to share l2h pages again.
*
* PSE disabled / PSE36
* We don't support any modes other than PSE enabled, PSE36 disabled.
- * Neither of those would be hard to change, but we'd need to be able to
+ * Neither of those would be hard to change, but we'd need to be able to
* deal with shadows made in one mode and used in another.
*/
* shadow L1 which maps its "splinters".
*/
-static inline mfn_t
+static inline mfn_t
get_fl1_shadow_status(struct vcpu *v, gfn_t gfn)
/* Look for FL1 shadows in the hash table */
{
return smfn;
}
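/* e.g. (sketch, not part of this patch): a guest superpage l2e has no guest
 * l1 mfn to use as a backpointer, so its splintered shadow l1 is filed under
 * the guest gfn instead, as sh_audit_gw() below does:
 *
 *     smfn = get_fl1_shadow_status(v, guest_l2e_get_gfn(gw->l2e));
 */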
-static inline mfn_t
+static inline mfn_t
get_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type)
/* Look for shadows in the hash table */
{
return smfn;
}
-static inline void
+static inline void
set_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
/* Put an FL1 shadow into the hash table */
{
shadow_hash_insert(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
}
-static inline void
+static inline void
set_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
/* Put a shadow into the hash table */
{
shadow_hash_insert(v, mfn_x(gmfn), shadow_type, smfn);
}
-static inline void
+static inline void
delete_fl1_shadow_status(struct vcpu *v, gfn_t gfn, mfn_t smfn)
/* Remove a shadow from the hash table */
{
shadow_hash_delete(v, gfn_x(gfn), SH_type_fl1_shadow, smfn);
}
-static inline void
+static inline void
delete_shadow_status(struct vcpu *v, mfn_t gmfn, u32 shadow_type, mfn_t smfn)
/* Remove a shadow from the hash table */
{
/* Functions for walking the guest page tables */
static inline uint32_t
-sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw,
+sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw,
uint32_t pfec)
{
- return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec,
+ return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec,
#if GUEST_PAGING_LEVELS == 3 /* PAE */
_mfn(INVALID_MFN),
v->arch.paging.shadow.gl3e
#if SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES
/* Lightweight audit: pass all the shadows associated with this guest walk
* through the audit mechanisms */
-static void sh_audit_gw(struct vcpu *v, walk_t *gw)
+static void sh_audit_gw(struct vcpu *v, walk_t *gw)
{
mfn_t smfn;
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
if ( mfn_valid(gw->l4mfn)
- && mfn_valid((smfn = get_shadow_status(v, gw->l4mfn,
+ && mfn_valid((smfn = get_shadow_status(v, gw->l4mfn,
SH_type_l4_shadow))) )
(void) sh_audit_l4_table(v, smfn, _mfn(INVALID_MFN));
if ( mfn_valid(gw->l3mfn)
- && mfn_valid((smfn = get_shadow_status(v, gw->l3mfn,
+ && mfn_valid((smfn = get_shadow_status(v, gw->l3mfn,
SH_type_l3_shadow))) )
(void) sh_audit_l3_table(v, smfn, _mfn(INVALID_MFN));
#endif /* PAE or 64... */
if ( mfn_valid(gw->l2mfn) )
{
- if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
+ if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
SH_type_l2_shadow))) )
(void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
#if GUEST_PAGING_LEVELS == 3
- if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
+ if ( mfn_valid((smfn = get_shadow_status(v, gw->l2mfn,
SH_type_l2h_shadow))) )
(void) sh_audit_l2_table(v, smfn, _mfn(INVALID_MFN));
#endif
}
if ( mfn_valid(gw->l1mfn)
- && mfn_valid((smfn = get_shadow_status(v, gw->l1mfn,
+ && mfn_valid((smfn = get_shadow_status(v, gw->l1mfn,
SH_type_l1_shadow))) )
(void) sh_audit_l1_table(v, smfn, _mfn(INVALID_MFN));
else if ( (guest_l2e_get_flags(gw->l2e) & _PAGE_PRESENT)
&& (guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)
- && mfn_valid(
+ && mfn_valid(
(smfn = get_fl1_shadow_status(v, guest_l2e_get_gfn(gw->l2e)))) )
(void) sh_audit_fl1_table(v, smfn, _mfn(INVALID_MFN));
}
walk_t gw;
ASSERT(shadow_mode_translate(v->domain));
-
+
// XXX -- this is expensive, but it's easy to cobble together...
// FIXME!
- if ( sh_walk_guest_tables(v, addr, &gw, PFEC_page_present) == 0
+ if ( sh_walk_guest_tables(v, addr, &gw, PFEC_page_present) == 0
&& mfn_valid(gw.l1mfn) )
{
if ( gl1mfn )
walk_t gw;
ASSERT(shadow_mode_translate(v->domain));
-
+
// XXX -- this is expensive, but it's easy to cobble together...
// FIXME!
*/
static always_inline void
-_sh_propagate(struct vcpu *v,
+_sh_propagate(struct vcpu *v,
guest_intpte_t guest_intpte,
- mfn_t target_mfn,
+ mfn_t target_mfn,
void *shadow_entry_ptr,
int level,
- fetch_type_t ft,
+ fetch_type_t ft,
p2m_type_t p2mt)
{
guest_l1e_t guest_entry = { guest_intpte };
if ( unlikely(!(gflags & _PAGE_PRESENT)) )
{
#if !(SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
- /* If a guest l1 entry is not present, shadow with the magic
+ /* If a guest l1 entry is not present, shadow with the magic
* guest-not-present entry. */
if ( level == 1 )
*sp = sh_l1e_gnp();
- else
+ else
#endif /* !OOS */
*sp = shadow_l1e_empty();
goto done;
// return early.
//
if ( !mfn_valid(target_mfn)
- && !(level == 1 && (!shadow_mode_refcounts(d)
+ && !(level == 1 && (!shadow_mode_refcounts(d)
|| p2mt == p2m_mmio_direct)) )
{
ASSERT((ft == ft_prefetch));
ASSERT(!(sflags & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)));
/* compute the PAT index for shadow page entry when VT-d is enabled
- * and device assigned.
+ * and device assigned.
* 1) direct MMIO: compute the PAT index with gMTRR=UC and gPAT.
* 2) if enables snoop control, compute the PAT index as WB.
* 3) if disables snoop control, compute the PAT index with
gflags,
gfn_to_paddr(target_gfn),
pfn_to_paddr(mfn_x(target_mfn)),
- MTRR_TYPE_UNCACHABLE);
+ MTRR_TYPE_UNCACHABLE);
else if ( iommu_snoop )
sflags |= pat_type_2_pte_flags(PAT_TYPE_WRBACK);
else
// Only allow the guest write access to a page a) on a demand fault,
// or b) if the page is already marked as dirty.
//
- // (We handle log-dirty entirely inside the shadow code, without using the
+ // (We handle log-dirty entirely inside the shadow code, without using the
// p2m_ram_logdirty p2m type: only HAP uses that.)
if ( unlikely((level == 1) && shadow_mode_log_dirty(d)) )
{
if ( mfn_valid(target_mfn) ) {
- if ( ft & FETCH_TYPE_WRITE )
+ if ( ft & FETCH_TYPE_WRITE )
paging_mark_dirty(d, mfn_x(target_mfn));
else if ( !paging_mfn_is_dirty(d, target_mfn) )
sflags &= ~_PAGE_RW;
(p2mt == p2m_mmio_direct &&
rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn))) )
sflags &= ~_PAGE_RW;
-
+
// protect guest page tables
//
- if ( unlikely((level == 1)
+ if ( unlikely((level == 1)
&& sh_mfn_is_a_page_table(target_mfn)
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
/* Unless the page is out of sync and the guest is
// PV guests in 64-bit mode use two different page tables for user vs
// supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
// It is always shadowed as present...
- if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
+ if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
&& is_pv_domain(d) )
{
sflags |= _PAGE_USER;
#if GUEST_PAGING_LEVELS >= 4
static void
-l4e_propagate_from_guest(struct vcpu *v,
+l4e_propagate_from_guest(struct vcpu *v,
guest_l4e_t gl4e,
mfn_t sl3mfn,
shadow_l4e_t *sl4e,
static void
l3e_propagate_from_guest(struct vcpu *v,
guest_l3e_t gl3e,
- mfn_t sl2mfn,
+ mfn_t sl2mfn,
shadow_l3e_t *sl3e,
fetch_type_t ft)
{
#endif // GUEST_PAGING_LEVELS >= 4
static void
-l2e_propagate_from_guest(struct vcpu *v,
+l2e_propagate_from_guest(struct vcpu *v,
guest_l2e_t gl2e,
mfn_t sl1mfn,
shadow_l2e_t *sl2e,
}
static void
-l1e_propagate_from_guest(struct vcpu *v,
+l1e_propagate_from_guest(struct vcpu *v,
guest_l1e_t gl1e,
- mfn_t gmfn,
+ mfn_t gmfn,
shadow_l1e_t *sl1e,
- fetch_type_t ft,
+ fetch_type_t ft,
p2m_type_t p2mt)
{
_sh_propagate(v, gl1e.l1, gmfn, sl1e, 1, ft, p2mt);
* functions which ever write (non-zero) data onto a shadow page.
*/
-static inline void safe_write_entry(void *dst, void *src)
+static inline void safe_write_entry(void *dst, void *src)
/* Copy one PTE safely when processors might be running on the
* destination pagetable. This does *not* give safety against
- * concurrent writes (that's what the paging lock is for), just
+ * concurrent writes (that's what the paging lock is for), just
* stops the hardware picking up partially written entries. */
{
volatile unsigned long *d = dst;
}
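
The body of safe_write_entry is elided above; the interesting case is an entry
wider than a machine word.  A minimal sketch of that case, assuming a PAE-style
entry split into two 32-bit words with the present bit in the low word (the
type and function names are illustrative, not Xen's):

    #include <stdint.h>

    /* An 8-byte entry stored as two 32-bit words; bit 0 of 'lo' is Present. */
    typedef struct { volatile uint32_t lo, hi; } pte2w_t;

    static void tearing_safe_pte_write(pte2w_t *dst, uint64_t newval)
    {
        dst->lo = 0;                          /* 1. make the entry not-present */
        dst->hi = (uint32_t)(newval >> 32);   /* 2. fill the high half safely  */
        dst->lo = (uint32_t)newval;           /* 3. low half (with P bit) last */
    }

The ordering is the whole point: a hardware walker racing with this sequence
can only ever observe the old entry, a not-present entry, or the complete new
entry, never a mix of old and new halves.
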
-static inline void
+static inline void
shadow_write_entries(void *d, void *s, int entries, mfn_t mfn)
/* This function does the actual writes to shadow pages.
* It must not be called directly, since it doesn't do the bookkeeping
/* Because we mirror access rights at all levels in the shadow, an
* l2 (or higher) entry with the RW bit cleared will leave us with
- * no write access through the linear map.
- * We detect that by writing to the shadow with copy_to_user() and
+ * no write access through the linear map.
+ * We detect that by writing to the shadow with copy_to_user() and
* using map_domain_page() to get a writeable mapping if we need to. */
- if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
+ if ( __copy_to_user(d, d, sizeof (unsigned long)) != 0 )
{
perfc_incr(shadow_linear_map_failed);
map = sh_map_domain_page(mfn);
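
The probe-then-fallback idea above can be summarised in a short, self-contained
sketch.  Everything here is illustrative: probe_self_write, map_frame and
unmap_frame are stand-ins for __copy_to_user() and the domain-page mapping
calls, and the plain memcpy stands in for the per-entry tear-safe writes a real
implementation would use.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static unsigned char fallback_frame[4096];          /* stand-in frame */

    static int probe_self_write(void *p)
    {
        volatile unsigned long *w = p;
        *w = *w;                /* would fault if the alias were read-only */
        return 0;
    }
    static void *map_frame(unsigned long mfn) { (void)mfn; return fallback_frame; }
    static void unmap_frame(void *p) { (void)p; }

    static void write_shadow_entries(void *dst, const void *src, int entries,
                                     unsigned long mfn, size_t entry_size)
    {
        void *map = NULL;

        /* If writing through the linear-map alias faults, take a private
         * writable mapping of the frame and redirect the writes there. */
        if ( probe_self_write(dst) != 0 )
        {
            map = map_frame(mfn);
            dst = (unsigned char *)map + ((uintptr_t)dst & 0xfff);
        }

        memcpy(dst, src, (size_t)entries * entry_size);

        if ( map != NULL )
            unmap_frame(map);
    }
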
static void inline
shadow_put_page_from_l1e(shadow_l1e_t sl1e, struct domain *d)
-{
+{
if ( !shadow_mode_refcounts(d) )
return;
}
#if GUEST_PAGING_LEVELS >= 4
-static int shadow_set_l4e(struct vcpu *v,
- shadow_l4e_t *sl4e,
- shadow_l4e_t new_sl4e,
+static int shadow_set_l4e(struct vcpu *v,
+ shadow_l4e_t *sl4e,
+ shadow_l4e_t new_sl4e,
mfn_t sl4mfn)
{
int flags = 0, ok;
old_sl4e = *sl4e;
if ( old_sl4e.l4 == new_sl4e.l4 ) return 0; /* Nothing to do */
-
- paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
+
+ paddr = ((((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
| (((unsigned long)sl4e) & ~PAGE_MASK));
- if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT )
+ if ( shadow_l4e_get_flags(new_sl4e) & _PAGE_PRESENT )
{
- /* About to install a new reference */
+ /* About to install a new reference */
mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
ok = sh_get_ref(v, sl3mfn, paddr);
        /* Are we pinning l3 shadows to handle weird linux behaviour? */
shadow_write_entries(sl4e, &new_sl4e, 1, sl4mfn);
flags |= SHADOW_SET_CHANGED;
- if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT )
+ if ( shadow_l4e_get_flags(old_sl4e) & _PAGE_PRESENT )
{
/* We lost a reference to an old mfn. */
mfn_t osl3mfn = shadow_l4e_get_mfn(old_sl4e);
if ( (mfn_x(osl3mfn) != mfn_x(shadow_l4e_get_mfn(new_sl4e)))
- || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e),
+ || !perms_strictly_increased(shadow_l4e_get_flags(old_sl4e),
shadow_l4e_get_flags(new_sl4e)) )
{
flags |= SHADOW_SET_FLUSH;
return flags;
}
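
The same four-step pattern recurs in every shadow_set_lNe() variant below:
reference the new target, write the entry, drop the reference on the old
target, then decide whether a TLB flush is needed.  A condensed, illustrative
sketch of that pattern with toy types and refcounting (none of these names are
Xen's):

    #include <stdint.h>
    #include <stdbool.h>

    #define FLAG_PRESENT 0x1
    #define SET_CHANGED  0x1
    #define SET_FLUSH    0x2

    static int refcount[1024];                    /* toy per-frame refcounts */

    static unsigned long entry_frame(uint64_t e) { return (e >> 12) & 1023; }
    static uint64_t      entry_flags(uint64_t e) { return e & 0xfff; }

    /* True if no permission present in the old entry has been taken away. */
    static bool perms_only_grew(uint64_t oldf, uint64_t newf)
    {
        return (oldf & ~newf) == 0;
    }

    static int set_entry(volatile uint64_t *slot, uint64_t new_e)
    {
        uint64_t old_e = *slot;
        int flags = 0;

        if ( old_e == new_e )
            return 0;                              /* nothing to do */

        if ( new_e & FLAG_PRESENT )
            refcount[entry_frame(new_e)]++;        /* ref the new target first */

        *slot = new_e;                             /* the actual entry write */
        flags |= SET_CHANGED;

        if ( old_e & FLAG_PRESENT )
        {
            refcount[entry_frame(old_e)]--;        /* now drop the old ref */
            if ( entry_frame(old_e) != entry_frame(new_e) ||
                 !perms_only_grew(entry_flags(old_e), entry_flags(new_e)) )
                flags |= SET_FLUSH;                /* stale TLB entries possible */
        }
        return flags;
    }

Taking the new reference before the write and dropping the old one after it
means the target frame is never left momentarily unreferenced while another
CPU might still be using it.
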
-static int shadow_set_l3e(struct vcpu *v,
- shadow_l3e_t *sl3e,
- shadow_l3e_t new_sl3e,
+static int shadow_set_l3e(struct vcpu *v,
+ shadow_l3e_t *sl3e,
+ shadow_l3e_t new_sl3e,
mfn_t sl3mfn)
{
int flags = 0;
if ( old_sl3e.l3 == new_sl3e.l3 ) return 0; /* Nothing to do */
- paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
+ paddr = ((((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
| (((unsigned long)sl3e) & ~PAGE_MASK));
-
+
if ( shadow_l3e_get_flags(new_sl3e) & _PAGE_PRESENT )
{
- /* About to install a new reference */
+ /* About to install a new reference */
if ( !sh_get_ref(v, shadow_l3e_get_mfn(new_sl3e), paddr) )
{
domain_crash(v->domain);
shadow_write_entries(sl3e, &new_sl3e, 1, sl3mfn);
flags |= SHADOW_SET_CHANGED;
- if ( shadow_l3e_get_flags(old_sl3e) & _PAGE_PRESENT )
+ if ( shadow_l3e_get_flags(old_sl3e) & _PAGE_PRESENT )
{
/* We lost a reference to an old mfn. */
mfn_t osl2mfn = shadow_l3e_get_mfn(old_sl3e);
if ( (mfn_x(osl2mfn) != mfn_x(shadow_l3e_get_mfn(new_sl3e))) ||
- !perms_strictly_increased(shadow_l3e_get_flags(old_sl3e),
- shadow_l3e_get_flags(new_sl3e)) )
+ !perms_strictly_increased(shadow_l3e_get_flags(old_sl3e),
+ shadow_l3e_get_flags(new_sl3e)) )
{
flags |= SHADOW_SET_FLUSH;
}
}
return flags;
}
-#endif /* GUEST_PAGING_LEVELS >= 4 */
+#endif /* GUEST_PAGING_LEVELS >= 4 */
-static int shadow_set_l2e(struct vcpu *v,
- shadow_l2e_t *sl2e,
- shadow_l2e_t new_sl2e,
+static int shadow_set_l2e(struct vcpu *v,
+ shadow_l2e_t *sl2e,
+ shadow_l2e_t new_sl2e,
mfn_t sl2mfn)
{
int flags = 0;
#if GUEST_PAGING_LEVELS == 2
/* In 2-on-3 we work with pairs of l2es pointing at two-page
* shadows. Reference counting and up-pointers track from the first
- * page of the shadow to the first l2e, so make sure that we're
+ * page of the shadow to the first l2e, so make sure that we're
* working with those:
* Start with a pair of identical entries */
shadow_l2e_t pair[2] = { new_sl2e, new_sl2e };
ASSERT(sl2e != NULL);
old_sl2e = *sl2e;
-
+
if ( old_sl2e.l2 == new_sl2e.l2 ) return 0; /* Nothing to do */
-
+
paddr = ((((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
| (((unsigned long)sl2e) & ~PAGE_MASK));
- if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
+ if ( shadow_l2e_get_flags(new_sl2e) & _PAGE_PRESENT )
{
mfn_t sl1mfn = shadow_l2e_get_mfn(new_sl2e);
ASSERT(mfn_to_page(sl1mfn)->u.sh.head);
the GFN instead of the GMFN, and it's definitely not
OOS. */
if ( (sp->u.sh.type != SH_type_fl1_shadow) && mfn_valid(gl1mfn)
- && mfn_is_out_of_sync(gl1mfn) )
+ && mfn_is_out_of_sync(gl1mfn) )
sh_resync(v, gl1mfn);
}
#endif
#endif
flags |= SHADOW_SET_CHANGED;
- if ( shadow_l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
+ if ( shadow_l2e_get_flags(old_sl2e) & _PAGE_PRESENT )
{
/* We lost a reference to an old mfn. */
mfn_t osl1mfn = shadow_l2e_get_mfn(old_sl2e);
if ( (mfn_x(osl1mfn) != mfn_x(shadow_l2e_get_mfn(new_sl2e))) ||
- !perms_strictly_increased(shadow_l2e_get_flags(old_sl2e),
- shadow_l2e_get_flags(new_sl2e)) )
+ !perms_strictly_increased(shadow_l2e_get_flags(old_sl2e),
+ shadow_l2e_get_flags(new_sl2e)) )
{
flags |= SHADOW_SET_FLUSH;
}
shadow_l1e_t *sl1e,
mfn_t sl1mfn,
struct domain *d)
-{
+{
mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
int flags = shadow_l1e_get_flags(new_sl1e);
unsigned long gfn;
{
unsigned long i = gfn - dirty_vram->begin_pfn;
struct page_info *page = mfn_to_page(mfn);
-
+
if ( (page->u.inuse.type_info & PGT_count_mask) == 1 )
/* Initial guest reference, record it */
dirty_vram->sl1ma[i] = pfn_to_paddr(mfn_x(sl1mfn))
}
}
-static int shadow_set_l1e(struct vcpu *v,
- shadow_l1e_t *sl1e,
+static int shadow_set_l1e(struct vcpu *v,
+ shadow_l1e_t *sl1e,
shadow_l1e_t new_sl1e,
p2m_type_t new_type,
mfn_t sl1mfn)
== (_PAGE_RW|_PAGE_PRESENT)) )
oos_fixup_add(v, new_gmfn, sl1mfn, pgentry_ptr_to_slot(sl1e));
#endif
-
+
old_sl1e = *sl1e;
if ( old_sl1e.l1 == new_sl1e.l1 ) return 0; /* Nothing to do */
-
+
if ( (shadow_l1e_get_flags(new_sl1e) & _PAGE_PRESENT)
- && !sh_l1e_is_magic(new_sl1e) )
+ && !sh_l1e_is_magic(new_sl1e) )
{
- /* About to install a new reference */
+ /* About to install a new reference */
if ( shadow_mode_refcounts(d) ) {
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
break;
}
}
- }
+ }
/* Write the new entry */
shadow_write_entries(sl1e, &new_sl1e, 1, sl1mfn);
flags |= SHADOW_SET_CHANGED;
- if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT)
+ if ( (shadow_l1e_get_flags(old_sl1e) & _PAGE_PRESENT)
&& !sh_l1e_is_magic(old_sl1e) )
{
/* We lost a reference to an old mfn. */
- /* N.B. Unlike higher-level sets, never need an extra flush
- * when writing an l1e. Because it points to the same guest frame
+ /* N.B. Unlike higher-level sets, never need an extra flush
+ * when writing an l1e. Because it points to the same guest frame
* as the guest l1e did, it's the guest's responsibility to
* trigger a flush later. */
- if ( shadow_mode_refcounts(d) )
+ if ( shadow_mode_refcounts(d) )
{
shadow_vram_put_l1e(old_sl1e, sl1e, sl1mfn, d);
shadow_put_page_from_l1e(old_sl1e, d);
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_PUT_REF);
- }
+ }
}
return flags;
}
/**************************************************************************/
-/* Macros to walk pagetables. These take the shadow of a pagetable and
- * walk every "interesting" entry. That is, they don't touch Xen mappings,
- * and for 32-bit l2s shadowed onto PAE or 64-bit, they only touch every
+/* Macros to walk pagetables. These take the shadow of a pagetable and
+ * walk every "interesting" entry. That is, they don't touch Xen mappings,
+ * and for 32-bit l2s shadowed onto PAE or 64-bit, they only touch every
* second entry (since pairs of entries are managed together). For multi-page
* shadows they walk all pages.
- *
- * Arguments are an MFN, the variable to point to each entry, a variable
- * to indicate that we are done (we will shortcut to the end of the scan
+ *
+ * Arguments are an MFN, the variable to point to each entry, a variable
+ * to indicate that we are done (we will shortcut to the end of the scan
* when _done != 0), a variable to indicate that we should avoid Xen mappings,
- * and the code.
+ * and the code.
*
- * WARNING: These macros have side-effects. They change the values of both
- * the pointer and the MFN. */
+ * WARNING: These macros have side-effects. They change the values of both
+ * the pointer and the MFN. */
static inline void increment_ptr_to_guest_entry(void *ptr)
{
#define SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code) \
_SHADOW_FOREACH_L1E(_sl1mfn, _sl1e, _gl1p, _done, _code)
#endif
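
For readers unfamiliar with this macro style, here is a deliberately tiny
walker in the same spirit (purely illustrative, far simpler than the real
SHADOW_FOREACH_* macros): the loop body is passed in as a macro argument, an
early-out variable stops the scan, and the entry pointer is updated as a side
effect, exactly as the warning above says.

    /* Visit every entry of one table page; '_code' runs with '_p' pointing
     * at the current entry and may set '_done' to stop the scan early. */
    #define FOREACH_ENTRY(_table, _nr, _p, _done, _code)                    \
        do {                                                                \
            for ( int _i = 0; _i < (_nr) && !(_done); _i++ )                \
            {                                                               \
                (_p) = &(_table)[_i];                                       \
                { _code }                                                   \
            }                                                               \
        } while ( 0 )

    /* Example use: zero every entry, stopping early at a "pinned" one
     * (bit 9 here is an arbitrary illustrative software bit). */
    static void zap_table(unsigned long *table, int nr)
    {
        unsigned long *e;
        int done = 0;
        FOREACH_ENTRY(table, nr, e, done, {
            if ( *e & (1ul << 9) )
                done = 1;
            else
                *e = 0;
        });
    }
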
-
+
#if GUEST_PAGING_LEVELS == 2
sh_unmap_domain_page(_sp); \
} while (0)
-#else
+#else
/* 64-bit l2: touch all entries except for PAE compat guests. */
#define SHADOW_FOREACH_L2E(_sl2mfn, _sl2e, _gl2p, _done, _dom, _code) \
sl4e = sh_map_domain_page(sl4mfn);
BUILD_BUG_ON(sizeof (l4_pgentry_t) != sizeof (shadow_l4e_t));
-
+
/* Copy the common Xen mappings from the idle domain */
slots = (shadow_mode_external(d)
? ROOT_PAGETABLE_XEN_SLOTS
shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
}
- sh_unmap_domain_page(sl4e);
+ sh_unmap_domain_page(sl4e);
}
#endif
mfn_to_page(smfn)->up = 0;
#if GUEST_PAGING_LEVELS == 4
-#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_LINUX_L3_TOPLEVEL)
if ( shadow_type == SH_type_l4_64_shadow &&
unlikely(v->domain->arch.paging.shadow.opt_flags & SHOPT_LINUX_L3_TOPLEVEL) )
{
/* We're shadowing a new l4, but we've been assuming the guest uses
- * only one l4 per vcpu and context switches using an l4 entry.
+ * only one l4 per vcpu and context switches using an l4 entry.
* Count the number of active l4 shadows. If there are enough
* of them, decide that this isn't an old linux guest, and stop
* pinning l3es. This is not very quick but it doesn't happen
if ( sp->u.sh.type == SH_type_l4_64_shadow )
l4count++;
}
- for_each_vcpu ( v->domain, v2 )
+ for_each_vcpu ( v->domain, v2 )
vcpus++;
- if ( l4count > 2 * vcpus )
+ if ( l4count > 2 * vcpus )
{
/* Unpin all the pinned l3 tables, and don't pin any more. */
page_list_for_each_safe(sp, t, &v->domain->arch.paging.shadow.pinned_shadows)
// Create the Xen mappings...
if ( !shadow_mode_external(v->domain) )
{
- switch (shadow_type)
+ switch (shadow_type)
{
#if GUEST_PAGING_LEVELS == 4
case SH_type_l4_shadow:
struct domain *d = v->domain;
ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
-
+
/* Guarantee we can get the memory we need */
shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
mfn_t m3mfn, m2mfn;
l4_pgentry_t *l4e;
l3_pgentry_t *l3e;
- /* Install an l3 table and an l2 table that will hold the shadow
- * linear map entries. This overrides the linear map entry that
+ /* Install an l3 table and an l2 table that will hold the shadow
+ * linear map entries. This overrides the linear map entry that
* was installed by sh_install_xen_entries_in_l4. */
l4e = sh_map_domain_page(m4mfn);
m3mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m3mfn)->shadow_flags = 3;
l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
-
+
m2mfn = shadow_alloc(d, SH_type_monitor_table, 0);
mfn_to_page(m2mfn)->shadow_flags = 2;
l3e = sh_map_domain_page(m3mfn);
* If the necessary tables are not present in the guest, they return NULL. */
/* N.B. The use of GUEST_PAGING_LEVELS here is correct. If the shadow has
- * more levels than the guest, the upper levels are always fixed and do not
- * reflect any information from the guest, so we do not use these functions
+ * more levels than the guest, the upper levels are always fixed and do not
+ * reflect any information from the guest, so we do not use these functions
* to access them. */
#if GUEST_PAGING_LEVELS >= 4
-static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v,
- walk_t *gw,
+static shadow_l4e_t * shadow_get_and_create_l4e(struct vcpu *v,
+ walk_t *gw,
mfn_t *sl4mfn)
{
/* There is always a shadow of the top level table. Get it. */
return sh_linear_l4_table(v) + shadow_l4_linear_offset(gw->va);
}
-static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v,
- walk_t *gw,
+static shadow_l3e_t * shadow_get_and_create_l3e(struct vcpu *v,
+ walk_t *gw,
mfn_t *sl3mfn,
fetch_type_t ft,
int *resync)
/* Get the l4e */
sl4e = shadow_get_and_create_l4e(v, gw, &sl4mfn);
ASSERT(sl4e != NULL);
- if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
+ if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
{
*sl3mfn = shadow_l4e_get_mfn(*sl4e);
ASSERT(mfn_valid(*sl3mfn));
- }
- else
+ }
+ else
{
int r;
shadow_l4e_t new_sl4e;
/* No l3 shadow installed: find and install it. */
*sl3mfn = get_shadow_status(v, gw->l3mfn, SH_type_l3_shadow);
- if ( !mfn_valid(*sl3mfn) )
+ if ( !mfn_valid(*sl3mfn) )
{
/* No l3 shadow of this page exists at all: make one. */
*sl3mfn = sh_make_shadow(v, gw->l3mfn, SH_type_l3_shadow);
#endif /* GUEST_PAGING_LEVELS >= 4 */
-static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
- walk_t *gw,
+static shadow_l2e_t * shadow_get_and_create_l2e(struct vcpu *v,
+ walk_t *gw,
mfn_t *sl2mfn,
fetch_type_t ft,
int *resync)
if ( !mfn_valid(gw->l2mfn) ) return NULL; /* No guest page. */
/* Get the l3e */
sl3e = shadow_get_and_create_l3e(v, gw, &sl3mfn, ft, resync);
- if ( sl3e == NULL ) return NULL;
- if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
+ if ( sl3e == NULL ) return NULL;
+ if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
{
*sl2mfn = shadow_l3e_get_mfn(*sl3e);
ASSERT(mfn_valid(*sl2mfn));
- }
- else
+ }
+ else
{
int r;
shadow_l3e_t new_sl3e;
/* No l2 shadow installed: find and install it. */
*sl2mfn = get_shadow_status(v, gw->l2mfn, t);
- if ( !mfn_valid(*sl2mfn) )
+ if ( !mfn_valid(*sl2mfn) )
{
/* No l2 shadow of this page exists at all: make one. */
*sl2mfn = sh_make_shadow(v, gw->l2mfn, t);
r = shadow_set_l3e(v, sl3e, new_sl3e, sl3mfn);
ASSERT((r & SHADOW_SET_FLUSH) == 0);
if ( r & SHADOW_SET_ERROR )
- return NULL;
+ return NULL;
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC )
*resync |= 1;
#elif GUEST_PAGING_LEVELS == 3 /* PAE... */
/* We never demand-shadow PAE l3es: they are only created in
* sh_update_cr3(). Check if the relevant sl3e is present. */
- shadow_l3e_t *sl3e = ((shadow_l3e_t *)&v->arch.paging.shadow.l3table)
+ shadow_l3e_t *sl3e = ((shadow_l3e_t *)&v->arch.paging.shadow.l3table)
+ shadow_l3_linear_offset(gw->va);
- if ( !(shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT) )
+ if ( !(shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT) )
return NULL;
*sl2mfn = shadow_l3e_get_mfn(*sl3e);
ASSERT(mfn_valid(*sl2mfn));
(void) shadow_l2_index(sl2mfn, guest_l2_table_offset(gw->va));
/* Reading the top level table is always valid. */
return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va);
-#endif
+#endif
}
-static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
- walk_t *gw,
+static shadow_l1e_t * shadow_get_and_create_l1e(struct vcpu *v,
+ walk_t *gw,
mfn_t *sl1mfn,
fetch_type_t ft)
{
/* Install the sl1 in the l2e if it wasn't there or if we need to
* re-do it to fix a PSE dirty bit. */
- if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT
+ if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT
&& likely(ft != ft_demand_write
- || (shadow_l2e_get_flags(*sl2e) & _PAGE_RW)
+ || (shadow_l2e_get_flags(*sl2e) & _PAGE_RW)
|| !(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE)) )
{
*sl1mfn = shadow_l2e_get_mfn(*sl2e);
ASSERT(mfn_valid(*sl1mfn));
- }
- else
+ }
+ else
{
shadow_l2e_t new_sl2e;
int r, flags = guest_l2e_get_flags(gw->l2e);
/* No l1 shadow installed: find and install it. */
if ( !(flags & _PAGE_PRESENT) )
return NULL; /* No guest page. */
- if ( guest_supports_superpages(v) && (flags & _PAGE_PSE) )
+ if ( guest_supports_superpages(v) && (flags & _PAGE_PSE) )
{
/* Splintering a superpage */
gfn_t l2gfn = guest_l2e_get_gfn(gw->l2e);
*sl1mfn = get_fl1_shadow_status(v, l2gfn);
- if ( !mfn_valid(*sl1mfn) )
+ if ( !mfn_valid(*sl1mfn) )
{
/* No fl1 shadow of this superpage exists at all: make one. */
*sl1mfn = make_fl1_shadow(v, l2gfn);
}
- }
- else
+ }
+ else
{
/* Shadowing an actual guest l1 table */
if ( !mfn_valid(gw->l1mfn) ) return NULL; /* No guest page. */
*sl1mfn = get_shadow_status(v, gw->l1mfn, SH_type_l1_shadow);
- if ( !mfn_valid(*sl1mfn) )
+ if ( !mfn_valid(*sl1mfn) )
{
/* No l1 shadow of this page exists at all: make one. */
*sl1mfn = sh_make_shadow(v, gw->l1mfn, SH_type_l1_shadow);
/* Install the new sl1 table in the sl2e */
l2e_propagate_from_guest(v, gw->l2e, *sl1mfn, &new_sl2e, ft);
r = shadow_set_l2e(v, sl2e, new_sl2e, sl2mfn);
- ASSERT((r & SHADOW_SET_FLUSH) == 0);
+ ASSERT((r & SHADOW_SET_FLUSH) == 0);
if ( r & SHADOW_SET_ERROR )
return NULL;
/**************************************************************************/
-/* Destructors for shadow tables:
+/* Destructors for shadow tables:
* Unregister the shadow, decrement refcounts of any entries present in it,
* and release the memory.
*
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
/* Decrement refcounts of all the old entries */
- sl4mfn = smfn;
+ sl4mfn = smfn;
SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, 0, v->domain, {
- if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
+ if ( shadow_l4e_get_flags(*sl4e) & _PAGE_PRESENT )
{
sh_put_ref(v, shadow_l4e_get_mfn(*sl4e),
- (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
+ (((paddr_t)mfn_x(sl4mfn)) << PAGE_SHIFT)
| ((unsigned long)sl4e & ~PAGE_MASK));
}
});
-
+
/* Put the memory back in the pool */
shadow_free(v->domain, smfn);
}
shadow_demote(v, gmfn, t);
/* Decrement refcounts of all the old entries */
- sl3mfn = smfn;
+ sl3mfn = smfn;
SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, 0, {
- if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
+ if ( shadow_l3e_get_flags(*sl3e) & _PAGE_PRESENT )
sh_put_ref(v, shadow_l3e_get_mfn(*sl3e),
- (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
+ (((paddr_t)mfn_x(sl3mfn)) << PAGE_SHIFT)
| ((unsigned long)sl3e & ~PAGE_MASK));
});
/* Decrement refcounts of all the old entries */
sl2mfn = smfn;
SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
- if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
+ if ( shadow_l2e_get_flags(*sl2e) & _PAGE_PRESENT )
sh_put_ref(v, shadow_l2e_get_mfn(*sl2e),
- (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
+ (((paddr_t)mfn_x(sl2mfn)) << PAGE_SHIFT)
| ((unsigned long)sl2e & ~PAGE_MASK));
});
gfn_t gfn = _gfn(sp->v.sh.back);
delete_fl1_shadow_status(v, gfn, smfn);
}
- else
+ else
{
mfn_t gmfn = backpointer(sp);
delete_shadow_status(v, gmfn, t, smfn);
shadow_demote(v, gmfn, t);
}
-
+
if ( shadow_mode_refcounts(d) )
{
/* Decrement refcounts of all the old entries */
- mfn_t sl1mfn = smfn;
+ mfn_t sl1mfn = smfn;
SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, 0, {
if ( (shadow_l1e_get_flags(*sl1e) & _PAGE_PRESENT)
&& !sh_l1e_is_magic(*sl1e) ) {
}
});
}
-
+
/* Put the memory back in the pool */
shadow_free(v->domain, smfn);
}
l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
l3_pgentry_t *l3e;
int linear_slot = shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
-
- /* Need to destroy the l3 and l2 monitor pages used
+
+ /* Need to destroy the l3 and l2 monitor pages used
* for the linear map */
ASSERT(l4e_get_flags(l4e[linear_slot]) & _PAGE_PRESENT);
m3mfn = _mfn(l4e_get_pfn(l4e[linear_slot]));
/**************************************************************************/
/* Functions to destroy non-Xen mappings in a pagetable hierarchy.
* These are called from common code when we are running out of shadow
- * memory, and unpinning all the top-level shadows hasn't worked.
+ * memory, and unpinning all the top-level shadows hasn't worked.
*
* With user_only == 1, we leave guest kernel-mode mappings in place too,
* unhooking only the user-mode mappings
*
- * This implementation is pretty crude and slow, but we hope that it won't
+ * This implementation is pretty crude and slow, but we hope that it won't
* be called very often. */
#if GUEST_PAGING_LEVELS == 2
void sh_unhook_32b_mappings(struct vcpu *v, mfn_t sl2mfn, int user_only)
-{
+{
shadow_l2e_t *sl2e;
SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, 0, v->domain, {
if ( !user_only || (sl2e->l2 & _PAGE_USER) )
* These functions require a pointer to the shadow entry that will be updated.
*/
-/* These functions take a new guest entry, translate it to shadow and write
+/* These functions take a new guest entry, translate it to shadow and write
* the shadow entry.
*
* They return the same bitmaps as the shadow_set_lXe() functions.
mfn_t gl1mfn = get_gfn_query_unlocked(v->domain, gfn_x(gl1gfn),
&p2mt);
if ( p2m_is_ram(p2mt) )
- sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
+ sl1mfn = get_shadow_status(v, gl1mfn, SH_type_l1_shadow);
else if ( p2mt != p2m_populate_on_demand )
result |= SHADOW_SET_ERROR;
}
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
gl1mfn = backpointer(mfn_to_page(sl1mfn));
- if ( mfn_valid(gl1mfn)
+ if ( mfn_valid(gl1mfn)
&& mfn_is_out_of_sync(gl1mfn) )
{
/* Update the OOS snapshot. */
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/**************************************************************************/
-/* Special validation function for re-syncing out-of-sync shadows.
+/* Special validation function for re-syncing out-of-sync shadows.
* Walks the *shadow* page, and for every entry that it finds,
* revalidates the guest entry that corresponds to it.
* N.B. This function is called with the vcpu that unsynced the page,
ASSERT(!(rc & SHADOW_SET_FLUSH));
}
-/* Figure out whether it's definitely safe not to sync this l1 table.
- * That is: if we can tell that it's only used once, and that the
- * toplevel shadow responsible is not one of ours.
- * N.B. This function is called with the vcpu that required the resync,
+/* Figure out whether it's definitely safe not to sync this l1 table.
+ * That is: if we can tell that it's only used once, and that the
+ * toplevel shadow responsible is not one of ours.
+ * N.B. This function is called with the vcpu that required the resync,
* *not* the one that originally unsynced the page, but it is
* called in the *mode* of the vcpu that unsynced it. Clear? Good. */
int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
smfn = _mfn(sp->up >> PAGE_SHIFT);
ASSERT(mfn_valid(smfn));
-#if (SHADOW_PAGING_LEVELS == 4)
+#if (SHADOW_PAGING_LEVELS == 4)
/* up to l3 */
sp = mfn_to_page(smfn);
ASSERT(sh_type_has_up_pointer(v, SH_type_l2_shadow));
#endif
if ( pagetable_get_pfn(v->arch.shadow_table[0]) == mfn_x(smfn)
-#if (SHADOW_PAGING_LEVELS == 3)
+#if (SHADOW_PAGING_LEVELS == 3)
|| pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn)
|| pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn)
- || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn)
+ || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn)
#endif
)
return 0;
-
- /* Only in use in one toplevel shadow, and it's not the one we're
+
+ /* Only in use in one toplevel shadow, and it's not the one we're
* running on */
return 1;
}
/**************************************************************************/
-/* Functions which translate and install the shadows of arbitrary guest
+/* Functions which translate and install the shadows of arbitrary guest
* entries that we have just seen the guest write. */
-static inline int
+static inline int
sh_map_and_validate(struct vcpu *v, mfn_t gmfn,
- void *new_gp, u32 size, u32 sh_type,
+ void *new_gp, u32 size, u32 sh_type,
u32 (*shadow_index)(mfn_t *smfn, u32 idx),
- int (*validate_ge)(struct vcpu *v, void *ge,
+ int (*validate_ge)(struct vcpu *v, void *ge,
mfn_t smfn, void *se))
/* Generic function for mapping and validating. */
{
void *new_gl4p, u32 size)
{
#if GUEST_PAGING_LEVELS >= 4
- return sh_map_and_validate(v, gl4mfn, new_gl4p, size,
- SH_type_l4_shadow,
- shadow_l4_index,
+ return sh_map_and_validate(v, gl4mfn, new_gl4p, size,
+ SH_type_l4_shadow,
+ shadow_l4_index,
validate_gl4e);
#else // ! GUEST_PAGING_LEVELS >= 4
SHADOW_ERROR("called in wrong paging mode!\n");
BUG();
return 0;
-#endif
+#endif
}
-
+
int
sh_map_and_validate_gl3e(struct vcpu *v, mfn_t gl3mfn,
void *new_gl3p, u32 size)
{
#if GUEST_PAGING_LEVELS >= 4
- return sh_map_and_validate(v, gl3mfn, new_gl3p, size,
- SH_type_l3_shadow,
- shadow_l3_index,
+ return sh_map_and_validate(v, gl3mfn, new_gl3p, size,
+ SH_type_l3_shadow,
+ shadow_l3_index,
validate_gl3e);
#else // ! GUEST_PAGING_LEVELS >= 4
SHADOW_ERROR("called in wrong paging mode!\n");
sh_map_and_validate_gl2e(struct vcpu *v, mfn_t gl2mfn,
void *new_gl2p, u32 size)
{
- return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
- SH_type_l2_shadow,
- shadow_l2_index,
+ return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
+ SH_type_l2_shadow,
+ shadow_l2_index,
validate_gl2e);
}
void *new_gl2p, u32 size)
{
#if GUEST_PAGING_LEVELS >= 3
- return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
- SH_type_l2h_shadow,
- shadow_l2_index,
+ return sh_map_and_validate(v, gl2mfn, new_gl2p, size,
+ SH_type_l2h_shadow,
+ shadow_l2_index,
validate_gl2e);
#else /* Non-PAE guests don't have different kinds of l2 table */
SHADOW_ERROR("called in wrong paging mode!\n");
sh_map_and_validate_gl1e(struct vcpu *v, mfn_t gl1mfn,
void *new_gl1p, u32 size)
{
- return sh_map_and_validate(v, gl1mfn, new_gl1p, size,
- SH_type_l1_shadow,
- shadow_l1_index,
+ return sh_map_and_validate(v, gl1mfn, new_gl1p, size,
+ SH_type_l1_shadow,
+ shadow_l1_index,
validate_gl1e);
}
/**************************************************************************/
-/* Optimization: Prefetch multiple L1 entries. This is called after we have
+/* Optimization: Prefetch multiple L1 entries. This is called after we have
* demand-faulted a shadow l1e in the fault handler, to see if it's
* worth fetching some more.
*/
/* XXX magic number */
#define PREFETCH_DISTANCE 32
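
Before the real sh_prefetch() below, a compact illustration of the idea (all
names illustrative; the translate callback stands in for the guest-to-shadow
propagation step): after the demand fault has been handled for slot 'base',
neighbouring slots on the same table page are filled in speculatively, bounded
by the prefetch distance and the end of the page.

    #include <stdint.h>

    #define ENTRIES_PER_TABLE 512
    #define PREFETCH_DIST     32

    static void prefetch_neighbours(const uint64_t *guest_tab,
                                    uint64_t *shadow_tab, int base,
                                    uint64_t (*translate)(uint64_t guest_e))
    {
        int dist = ENTRIES_PER_TABLE - base;     /* don't run off the page */
        if ( dist > PREFETCH_DIST )
            dist = PREFETCH_DIST;

        for ( int i = 1; i < dist; i++ )
        {
            if ( shadow_tab[base + i] != 0 )     /* already shadowed */
                continue;
            if ( !(guest_tab[base + i] & 1) )    /* guest entry not present */
                continue;
            shadow_tab[base + i] = translate(guest_tab[base + i]);
        }
    }
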
-static void sh_prefetch(struct vcpu *v, walk_t *gw,
+static void sh_prefetch(struct vcpu *v, walk_t *gw,
shadow_l1e_t *ptr_sl1e, mfn_t sl1mfn)
{
int i, dist;
#endif /* OOS */
}
- for ( i = 1; i < dist ; i++ )
+ for ( i = 1; i < dist ; i++ )
{
/* No point in prefetching if there's already a shadow */
if ( ptr_sl1e[i].l1 != 0 )
/* Not worth continuing if we hit an entry that will need another
* fault for A/D-bit propagation anyway */
gflags = guest_l1e_get_flags(gl1e);
- if ( (gflags & _PAGE_PRESENT)
+ if ( (gflags & _PAGE_PRESENT)
&& (!(gflags & _PAGE_ACCESSED)
|| ((gflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY))) )
break;
- }
- else
+ }
+ else
{
/* Fragmented superpage, unless we've been called wrongly */
ASSERT(guest_l2e_get_flags(gw->l2e) & _PAGE_PSE);
/* Increment the l1e's GFN by the right number of guest pages */
gl1e = guest_l1e_from_gfn(
- _gfn(gfn_x(guest_l1e_get_gfn(gw->l1e)) + i),
+ _gfn(gfn_x(guest_l1e_get_gfn(gw->l1e)) + i),
guest_l1e_get_flags(gw->l1e));
}
__trace_var(event, 0/*!tsc*/, sizeof(d), &d);
}
}
-
+
static inline void trace_not_shadow_fault(guest_l1e_t gl1e,
guest_va_t va)
{
__trace_var(event, 0/*!tsc*/, sizeof(d), &d);
}
}
-
+
static inline void trace_shadow_emulate_other(u32 event,
guest_va_t va,
gfn_t gfn)
* shadow code (and the guest should retry) or 0 if it is not (and the
* fault should be handled elsewhere or passed to the guest). */
-static int sh_page_fault(struct vcpu *v,
- unsigned long va,
+static int sh_page_fault(struct vcpu *v,
+ unsigned long va,
struct cpu_user_regs *regs)
{
struct domain *d = v->domain;
     * Then try to emulate early to avoid lock acquisition.
*/
if ( v->arch.paging.last_write_emul_ok
- && v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) )
+ && v->arch.paging.shadow.last_emulated_frame == (va >> PAGE_SHIFT) )
{
        /* Check whether the error code is 3; otherwise fall back to the normal
         * path in case some validation is required
fast_emul = 1;
gmfn = _mfn(v->arch.paging.shadow.last_emulated_mfn);
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Fall back to the slow path if we're trying to emulate
writes to an out of sync page. */
if ( mfn_valid(gmfn) && mfn_is_out_of_sync(gmfn) )
#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
if ( (regs->error_code & PFEC_reserved_bit) )
{
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* First, need to check that this isn't an out-of-sync
* shadow l1e. If it is, we fall back to the slow path, which
* will sync it up again. */
shadow_l2e_get_mfn(sl2e))))
|| unlikely(mfn_is_out_of_sync(gl1mfn)) )
{
- /* Hit the slow path as if there had been no
+ /* Hit the slow path as if there had been no
* shadow entry at all, and let it tidy up */
ASSERT(regs->error_code & PFEC_page_present);
regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present);
}
}
#endif /* SHOPT_OUT_OF_SYNC */
- /* The only reasons for reserved bits to be set in shadow entries
+ /* The only reasons for reserved bits to be set in shadow entries
* are the two "magic" shadow_l1e entries. */
- if ( likely((__copy_from_user(&sl1e,
- (sh_linear_l1_table(v)
+ if ( likely((__copy_from_user(&sl1e,
+ (sh_linear_l1_table(v)
+ shadow_l1_linear_offset(va)),
sizeof(sl1e)) == 0)
&& sh_l1e_is_magic(sl1e)) )
{
/* Magic MMIO marker: extract gfn for MMIO address */
ASSERT(sh_l1e_is_mmio(sl1e));
- gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
- << PAGE_SHIFT)
+ gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
+ << PAGE_SHIFT)
| (va & ~PAGE_MASK);
}
perfc_incr(shadow_fault_fast_mmio);
else
{
/* This should be exceptionally rare: another vcpu has fixed
- * the tables between the fault and our reading the l1e.
+ * the tables between the fault and our reading the l1e.
* Retry and let the hardware give us the right fault next time. */
perfc_incr(shadow_fault_fast_fail);
- SHADOW_PRINTK("fast path false alarm!\n");
+ SHADOW_PRINTK("fast path false alarm!\n");
trace_shadow_gen(TRC_SHADOW_FALSE_FAST_PATH, va);
return EXCRET_fault_fixed;
}
}
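
A sketch of how such a "magic" MMIO l1e could be encoded.  The bit layout here
is invented for illustration and is not Xen's sh_l1e_mmio format: the entry
keeps the present bit set but also sets bits assumed to be reserved on the
target CPU (bits 48-51, reserved when the physical address width is below 48),
so the hardware reports a reserved-bit page fault that the fast path can
recognise and decode without walking the guest tables.

    #include <stdint.h>
    #include <stdbool.h>

    #define MAGIC_BITS 0x000f000000000001ull   /* assumed-reserved bits + P */
    #define GFN_SHIFT  12

    static uint64_t magic_mmio_l1e(uint64_t gfn)  /* gfn must fit in bits 12..47 */
    {
        return MAGIC_BITS | (gfn << GFN_SHIFT);
    }

    static bool is_magic_l1e(uint64_t l1e)
    {
        return (l1e & MAGIC_BITS) == MAGIC_BITS;
    }

    static uint64_t magic_mmio_gfn(uint64_t l1e)
    {
        return (l1e & ~MAGIC_BITS) >> GFN_SHIFT;
    }
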
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
page_fault_slow_path:
#endif
#endif /* SHOPT_FAST_FAULT_PATH */
/* Detect if this page fault happened while we were already in Xen
* doing a shadow operation. If that happens, the only thing we can
- * do is let Xen's normal fault handlers try to fix it. In any case,
- * a diagnostic trace of the fault will be more useful than
+ * do is let Xen's normal fault handlers try to fix it. In any case,
+ * a diagnostic trace of the fault will be more useful than
* a BUG() when we try to take the lock again. */
if ( unlikely(paging_locked_by_me(d)) )
{
    /* The walk is done in a lock-free style, with some sanity checks
     * postponed until after the paging lock is grabbed later. Those delayed
     * checks make sure that no inconsistent mapping is translated into the
- * shadow page table. */
+ * shadow page table. */
version = atomic_read(&d->arch.paging.shadow.gtable_dirty_version);
rmb();
rc = sh_walk_guest_tables(v, va, &gw, regs->error_code);
goto propagate;
}
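
The version check that pairs with the snapshot above happens later, once the
paging lock is held; the overall shape is the classic optimistic-read pattern.
A self-contained sketch (all names illustrative; the stubs exist only so the
sketch stands alone):

    #include <stdatomic.h>

    static atomic_int tables_dirty_version;   /* bumped whenever tables change */
    static int  walk_tables_unlocked(void) { return 0; }   /* expensive part */
    static void lock_tables(void)   { }
    static void unlock_tables(void) { }

    static int walk_with_revalidation(void)
    {
        int rc, version;

        for ( ; ; )
        {
            version = atomic_load_explicit(&tables_dirty_version,
                                           memory_order_acquire);
            rc = walk_tables_unlocked();        /* done without the lock */

            lock_tables();
            if ( version == atomic_load_explicit(&tables_dirty_version,
                                                 memory_order_relaxed) )
                break;                          /* nothing changed: walk is good */
            unlock_tables();                    /* tables changed: redo the walk */
        }

        return rc;                              /* caller releases the lock */
    }
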
- /* It's possible that the guest has put pagetables in memory that it has
+ /* It's possible that the guest has put pagetables in memory that it has
* already used for some special purpose (ioreq pages, or granted pages).
- * If that happens we'll have killed the guest already but it's still not
+ * If that happens we'll have killed the guest already but it's still not
* safe to propagate entries out of the guest PT so get out now. */
if ( unlikely(d->is_shutting_down && d->shutdown_code == SHUTDOWN_crash) )
{
gfn = guest_l1e_get_gfn(gw.l1e);
gmfn = get_gfn(d, gfn, &p2mt);
- if ( shadow_mode_refcounts(d) &&
+ if ( shadow_mode_refcounts(d) &&
((!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt)) ||
(!p2m_is_mmio(p2mt) && !mfn_valid(gmfn))) )
{
perfc_incr(shadow_fault_bail_bad_gfn);
- SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",
+ SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",
gfn_x(gfn), mfn_x(gmfn));
reset_early_unshadow(v);
put_gfn(d, gfn_x(gfn));
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
/* Remember this successful VA->GFN translation for later. */
- vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn),
+ vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn),
regs->error_code | PFEC_page_present);
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
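
The virtual TLB being filled here is a small software cache of recent
VA-to-GFN translations, consulted before paying for a full guest walk.  A toy
version (illustrative only, not Xen's vtlb structure or eviction policy):

    #include <stdint.h>

    #define VTLB_ENTRIES  64
    #define VTLB_MISS     (~0ull)

    struct vtlb_slot {
        uint64_t vpn;      /* virtual page number              */
        uint64_t gfn;      /* cached translation               */
        uint32_t pfec;     /* access rights the walk validated */
        int      valid;
    };

    static struct vtlb_slot vtlb[VTLB_ENTRIES];

    static void vtlb_cache_insert(uint64_t va, uint64_t gfn, uint32_t pfec)
    {
        struct vtlb_slot *s = &vtlb[(va >> 12) % VTLB_ENTRIES];
        s->vpn = va >> 12; s->gfn = gfn; s->pfec = pfec; s->valid = 1;
    }

    static uint64_t vtlb_cache_lookup(uint64_t va, uint32_t pfec)
    {
        struct vtlb_slot *s = &vtlb[(va >> 12) % VTLB_ENTRIES];
        /* Hit only if the cached walk already validated these access rights. */
        if ( s->valid && s->vpn == (va >> 12) && (s->pfec & pfec) == pfec )
            return s->gfn;
        return VTLB_MISS;
    }

A real implementation also has to flush this cache whenever the guest flushes
its TLB (CR3 writes, INVLPG), since the cached translations become stale at
exactly those points.
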
shadow_prealloc(d,
SH_type_l1_shadow,
GUEST_PAGING_LEVELS < 4 ? 1 : GUEST_PAGING_LEVELS - 1);
-
+
rc = gw_remove_write_accesses(v, va, &gw);
/* First bit set: Removed write access to a page. */
shadow_audit_tables(v);
sh_audit_gw(v, &gw);
- /* Acquire the shadow. This must happen before we figure out the rights
+ /* Acquire the shadow. This must happen before we figure out the rights
* for the shadow entry, since we might promote a page here. */
ptr_sl1e = shadow_get_and_create_l1e(v, &gw, &sl1mfn, ft);
- if ( unlikely(ptr_sl1e == NULL) )
+ if ( unlikely(ptr_sl1e == NULL) )
{
/* Couldn't get the sl1e! Since we know the guest entries
* are OK, this can only have been caused by a failed
r = shadow_set_l1e(v, ptr_sl1e, sl1e, p2mt, sl1mfn);
#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
- if ( mfn_valid(gw.l1mfn)
+ if ( mfn_valid(gw.l1mfn)
&& mfn_is_out_of_sync(gw.l1mfn) )
{
/* Update the OOS snapshot. */
mfn_t snpmfn = oos_snapshot_lookup(v, gw.l1mfn);
guest_l1e_t *snp;
-
+
ASSERT(mfn_valid(snpmfn));
-
+
snp = sh_map_domain_page(snpmfn);
snp[guest_l1_table_offset(va)] = gw.l1e;
sh_unmap_domain_page(snp);
/* Need to emulate accesses to page tables */
if ( sh_mfn_is_a_page_table(gmfn)
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Unless they've been allowed to go out of sync with their
shadows and we don't need to unshadow it. */
&& !(mfn_is_out_of_sync(gmfn)
/* In HVM guests, we force CR0.WP always to be set, so that the
* pagetables are always write-protected. If the guest thinks
* CR0.WP is clear, we must emulate faulting supervisor writes to
- * allow the guest to write through read-only PTEs. Emulate if the
+ * allow the guest to write through read-only PTEs. Emulate if the
* fault was a non-user write to a present page. */
- if ( is_hvm_domain(d)
- && unlikely(!hvm_wp_enabled(v))
+ if ( is_hvm_domain(d)
+ && unlikely(!hvm_wp_enabled(v))
&& regs->error_code == (PFEC_write_access|PFEC_page_present)
&& mfn_valid(gmfn) )
{
*/
if ( (regs->error_code & PFEC_user_mode) )
{
- SHADOW_PRINTK("user-mode fault to PT, unshadowing mfn %#lx\n",
+ SHADOW_PRINTK("user-mode fault to PT, unshadowing mfn %#lx\n",
mfn_x(gmfn));
perfc_incr(shadow_fault_emulate_failed);
sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
}
#endif
gdprintk(XENLOG_DEBUG, "write to pagetable during event "
- "injection: cr2=%#lx, mfn=%#lx\n",
+ "injection: cr2=%#lx, mfn=%#lx\n",
va, mfn_x(gmfn));
sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
trace_shadow_emulate_other(TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ,
}
}
- SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
+ SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
(unsigned long)regs->eip, (unsigned long)regs->esp);
emul_ops = shadow_init_emulation(&emul_ctxt, regs);
v->arch.paging.last_write_emul_ok = 0;
}
#endif
- SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n",
+ SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n",
mfn_x(gmfn));
- /* If this is actually a page table, then we have a bug, and need
- * to support more operations in the emulator. More likely,
+ /* If this is actually a page table, then we have a bug, and need
+ * to support more operations in the emulator. More likely,
* though, this is a hint that this page should not be shadowed. */
shadow_remove_all_shadows(v, gmfn);
if ( r == X86EMUL_OKAY ) {
int i, emulation_count=0;
this_cpu(trace_emulate_initial_va) = va;
- /* Emulate up to four extra instructions in the hope of catching
+ /* Emulate up to four extra instructions in the hope of catching
* the "second half" of a 64-bit pagetable write. */
for ( i = 0 ; i < 4 ; i++ )
{
v->arch.paging.last_write_was_pt = 0;
r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
if ( r == X86EMUL_OKAY )
- {
+ {
emulation_count++;
if ( v->arch.paging.last_write_was_pt )
{
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_EMULATION_2ND_PT_WRITTEN);
break; /* Don't emulate past the other half of the write */
}
- else
+ else
perfc_incr(shadow_em_ex_non_pt);
}
else
{
mfn_t sl1mfn;
shadow_l2e_t sl2e;
-
+
perfc_incr(shadow_invlpg);
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
#endif
    /* First check that we can safely read the shadow l2e.  On SMP/PAE Linux,
-     * up to 6% of invlpg calls can hit an l2 that we haven't shadowed
+     * up to 6% of invlpg calls can hit an l2 that we haven't shadowed
     * yet. */
#if SHADOW_PAGING_LEVELS == 4
{
/* This must still be a copy-from-user because we don't have the
* paging lock, and the higher-level shadows might disappear
* under our feet. */
- if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
+ if ( __copy_from_user(&sl3e, (sh_linear_l3_table(v)
+ shadow_l3_linear_offset(va)),
sizeof (sl3e)) != 0 )
{
/* This must still be a copy-from-user because we don't have the shadow
* lock, and the higher-level shadows might disappear under our feet. */
- if ( __copy_from_user(&sl2e,
+ if ( __copy_from_user(&sl2e,
sh_linear_l2_table(v) + shadow_l2_linear_offset(va),
sizeof (sl2e)) != 0 )
{
return 0;
}
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Check to see if the SL1 is out of sync. */
{
mfn_t gl1mfn = backpointer(mfn_to_page(sl1mfn));
struct page_info *pg = mfn_to_page(gl1mfn);
- if ( mfn_valid(gl1mfn)
+ if ( mfn_valid(gl1mfn)
&& page_is_out_of_sync(pg) )
{
            /* The test above may give false positives, since we didn't
             * hold the paging lock last time we checked, and the
* higher-level shadows might have disappeared under our
* feet. */
- if ( __copy_from_user(&sl2e,
+ if ( __copy_from_user(&sl2e,
sh_linear_l2_table(v)
+ shadow_l2_linear_offset(va),
sizeof (sl2e)) != 0 )
sl1mfn = shadow_l2e_get_mfn(sl2e);
gl1mfn = backpointer(mfn_to_page(sl1mfn));
pg = mfn_to_page(gl1mfn);
-
+
if ( likely(sh_mfn_is_a_page_table(gl1mfn)
&& page_is_out_of_sync(pg) ) )
{
#if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
/* Check the vTLB cache first */
unsigned long vtlb_gfn = vtlb_lookup(v, va, pfec[0]);
- if ( VALID_GFN(vtlb_gfn) )
+ if ( VALID_GFN(vtlb_gfn) )
return vtlb_gfn;
#endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
* is subtler. Normal linear mappings are made by having an entry
* in the top-level table that points to itself (shadow linear) or
 * to the guest top-level table (guest linear).  For PAE, setting up
- * a linear map requires us to copy the four top-level entries into
+ * a linear map requires us to copy the four top-level entries into
* level-2 entries. That means that every time we change a PAE l3e,
* we need to reflect the change into the copy.
*
* For HVM guests, the linear pagetables are installed in the monitor
* tables (since we can't put them in the shadow). Shadow linear
* pagetables, which map the shadows, are at SH_LINEAR_PT_VIRT_START,
- * and we use the linear pagetable slot at LINEAR_PT_VIRT_START for
- * a linear pagetable of the monitor tables themselves. We have
+ * and we use the linear pagetable slot at LINEAR_PT_VIRT_START for
+ * a linear pagetable of the monitor tables themselves. We have
 * the same issue of having to re-copy PAE l3 entries whenever we use
- * PAE shadows.
+ * PAE shadows.
*
- * Because HVM guests run on the same monitor tables regardless of the
- * shadow tables in use, the linear mapping of the shadow tables has to
- * be updated every time v->arch.shadow_table changes.
+ * Because HVM guests run on the same monitor tables regardless of the
+ * shadow tables in use, the linear mapping of the shadow tables has to
+ * be updated every time v->arch.shadow_table changes.
*/
/* Don't try to update the monitor table if it doesn't exist */
- if ( shadow_mode_external(d)
- && pagetable_get_pfn(v->arch.monitor_table) == 0 )
+ if ( shadow_mode_external(d)
+ && pagetable_get_pfn(v->arch.monitor_table) == 0 )
return;
#if SHADOW_PAGING_LEVELS == 4
-
+
/* For PV, one l4e points at the guest l4, one points at the shadow
- * l4. No maintenance required.
+ * l4. No maintenance required.
* For HVM, just need to update the l4e that points to the shadow l4. */
if ( shadow_mode_external(d) )
{
/* Use the linear map if we can; otherwise make a new mapping */
- if ( v == current )
+ if ( v == current )
{
- __linear_l4_table[l4_linear_offset(SH_LINEAR_PT_VIRT_START)] =
+ __linear_l4_table[l4_linear_offset(SH_LINEAR_PT_VIRT_START)] =
l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
__PAGE_HYPERVISOR);
- }
+ }
else
- {
+ {
l4_pgentry_t *ml4e;
ml4e = sh_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
- ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
+ ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
__PAGE_HYPERVISOR);
sh_unmap_domain_page(ml4e);
if ( v == current )
ml2e = __linear_l2_table
+ l2_linear_offset(SH_LINEAR_PT_VIRT_START);
- else
- {
+ else
+ {
mfn_t l3mfn, l2mfn;
l4_pgentry_t *ml4e;
l3_pgentry_t *ml3e;
for ( i = 0; i < SHADOW_L3_PAGETABLE_ENTRIES; i++ )
{
- ml2e[i] =
- (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT)
+ ml2e[i] =
+ (shadow_l3e_get_flags(sl3e[i]) & _PAGE_PRESENT)
? l2e_from_pfn(mfn_x(shadow_l3e_get_mfn(sl3e[i])),
- __PAGE_HYPERVISOR)
+ __PAGE_HYPERVISOR)
: l2e_empty();
}
- if ( v != current )
+ if ( v != current )
sh_unmap_domain_page(ml2e);
}
else
/*
* Having modified the linear pagetable mapping, flush local host TLBs.
* This was not needed when vmenter/vmexit always had the side effect
- * of flushing host TLBs but, with ASIDs, it is possible to finish
- * this CR3 update, vmenter the guest, vmexit due to a page fault,
- * without an intervening host TLB flush. Then the page fault code
- * could use the linear pagetable to read a top-level shadow page
- * table entry. But, without this change, it would fetch the wrong
+ * of flushing host TLBs but, with ASIDs, it is possible to finish
+ * this CR3 update, vmenter the guest, vmexit due to a page fault,
+ * without an intervening host TLB flush. Then the page fault code
+ * could use the linear pagetable to read a top-level shadow page
+ * table entry. But, without this change, it would fetch the wrong
* value due to a stale TLB.
*/
flush_tlb_local();
/* Set up the top-level shadow and install it in slot 'slot' of shadow_table */
static void
-sh_set_toplevel_shadow(struct vcpu *v,
+sh_set_toplevel_shadow(struct vcpu *v,
int slot,
- mfn_t gmfn,
- unsigned int root_type)
+ mfn_t gmfn,
+ unsigned int root_type)
{
mfn_t smfn;
pagetable_t old_entry, new_entry;
struct domain *d = v->domain;
-
+
/* Remember the old contents of this slot */
old_entry = v->arch.shadow_table[slot];
smfn = sh_make_shadow(v, gmfn, root_type);
}
ASSERT(mfn_valid(smfn));
-
+
/* Pin the shadow and put it (back) on the list of pinned shadows */
if ( sh_pin(v, smfn) == 0 )
{
* Paravirtual guests should set v->arch.guest_table (and guest_table_user,
* if appropriate).
* HVM guests should also make sure hvm_get_guest_cntl_reg(v, 3) works;
- * this function will call hvm_update_guest_cr(v, 3) to tell them where the
+ * this function will call hvm_update_guest_cr(v, 3) to tell them where the
* shadow tables are.
- * If do_locking != 0, assume we are being called from outside the
- * shadow code, and must take and release the paging lock; otherwise
+ * If do_locking != 0, assume we are being called from outside the
+ * shadow code, and must take and release the paging lock; otherwise
* that is the caller's responsibility.
*/
{
////
//// vcpu->arch.guest_table is already set
////
-
-#ifndef NDEBUG
+
+#ifndef NDEBUG
/* Double-check that the HVM code has sent us a sane guest_table */
if ( is_hvm_domain(d) )
{
ASSERT(shadow_mode_external(d));
if ( hvm_paging_enabled(v) )
ASSERT(pagetable_get_pfn(v->arch.guest_table));
- else
+ else
ASSERT(v->arch.guest_table.pfn
== d->arch.paging.shadow.unpaged_pagetable.pfn);
}
#endif
SHADOW_PRINTK("d=%u v=%u guest_table=%05lx\n",
- d->domain_id, v->vcpu_id,
+ d->domain_id, v->vcpu_id,
(unsigned long)pagetable_get_pfn(v->arch.guest_table));
#if GUEST_PAGING_LEVELS == 4
* table. We cache the current state of that table and shadow that,
* until the next CR3 write makes us refresh our cache. */
ASSERT(v->arch.paging.shadow.guest_vtable == NULL);
-
- if ( shadow_mode_external(d) )
+
+ if ( shadow_mode_external(d) )
/* Find where in the page the l3 table is */
guest_idx = guest_index((void *)v->arch.hvm_vcpu.guest_cr[3]);
else
- /* PV guest: l3 is at the start of a page */
- guest_idx = 0;
+ /* PV guest: l3 is at the start of a page */
+ guest_idx = 0;
// Ignore the low 2 bits of guest_idx -- they are really just
// cache control.
guest_idx &= ~3;
-
+
gl3e = ((guest_l3e_t *)sh_map_domain_page(gmfn)) + guest_idx;
for ( i = 0; i < 4 ; i++ )
v->arch.paging.shadow.gl3e[i] = gl3e[i];
////
/* We revoke write access to the new guest toplevel page(s) before we
- * replace the old shadow pagetable(s), so that we can safely use the
+ * replace the old shadow pagetable(s), so that we can safely use the
* (old) shadow linear maps in the writeable mapping heuristics. */
#if GUEST_PAGING_LEVELS == 2
if ( sh_remove_write_access(v, gmfn, 2, 0) != 0 )
flush_tlb_mask(d->domain_dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l2_shadow);
#elif GUEST_PAGING_LEVELS == 3
- /* PAE guests have four shadow_table entries, based on the
+ /* PAE guests have four shadow_table entries, based on the
* current values of the guest's four l3es. */
{
int flush = 0;
flush |= sh_remove_write_access(v, gl2mfn, 2, 0);
}
}
- if ( flush )
+ if ( flush )
flush_tlb_mask(d->domain_dirty_cpumask);
/* Now install the new shadows. */
- for ( i = 0; i < 4; i++ )
+ for ( i = 0; i < 4; i++ )
{
if ( guest_l3e_get_flags(gl3e[i]) & _PAGE_PRESENT )
{
gl2gfn = guest_l3e_get_gfn(gl3e[i]);
gl2mfn = get_gfn_query_unlocked(d, gfn_x(gl2gfn), &p2mt);
if ( p2m_is_ram(p2mt) )
- sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
- ? SH_type_l2h_shadow
+ sh_set_toplevel_shadow(v, i, gl2mfn, (i == 3)
+ ? SH_type_l2h_shadow
: SH_type_l2_shadow);
else
- sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
+ sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
}
else
- sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
+ sh_set_toplevel_shadow(v, i, _mfn(INVALID_MFN), 0);
}
}
#elif GUEST_PAGING_LEVELS == 4
flush_tlb_mask(d->domain_dirty_cpumask);
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
#else
-#error This should never happen
+#error This should never happen
#endif
- ///
+ ///
/// v->arch.paging.shadow.l3table
///
#if SHADOW_PAGING_LEVELS == 3
/* 3-on-3: make a PAE l3 that points at the four l2 pages */
smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
#endif
- v->arch.paging.shadow.l3table[i] =
- (mfn_x(smfn) == 0)
+ v->arch.paging.shadow.l3table[i] =
+ (mfn_x(smfn) == 0)
? shadow_l3e_empty()
: shadow_l3e_from_mfn(smfn, _PAGE_PRESENT);
}
/* Functions to revoke guest rights */
#if SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC
-int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
+int sh_rm_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
mfn_t smfn, unsigned long off)
{
int r;
if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
return 0;
#else /* SHADOW_PAGING_LEVELS == 3 */
- sl3p = ((shadow_l3e_t *) v->arch.paging.shadow.l3table)
+ sl3p = ((shadow_l3e_t *) v->arch.paging.shadow.l3table)
+ shadow_l3_linear_offset(vaddr);
if ( !(shadow_l3e_get_flags(*sl3p) & _PAGE_PRESENT) )
return 0;
shadow_l1e_t *sl1e;
int done = 0;
int flags;
-#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */
#endif
-
- SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
+
+ SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
{
flags = shadow_l1e_get_flags(*sl1e);
- if ( (flags & _PAGE_PRESENT)
- && (flags & _PAGE_RW)
+ if ( (flags & _PAGE_PRESENT)
+ && (flags & _PAGE_RW)
&& (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(readonly_mfn)) )
{
shadow_l1e_t ro_sl1e = shadow_l1e_remove_flags(*sl1e, _PAGE_RW);
(void) shadow_set_l1e(v, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn);
-#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
/* Remember the last shadow that we shot a writeable mapping in */
v->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
#endif
shadow_l1e_t *sl1e;
int done = 0;
int flags;
-
- SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
+
+ SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done,
{
flags = shadow_l1e_get_flags(*sl1e);
- if ( (flags & _PAGE_PRESENT)
+ if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l1e_get_mfn(*sl1e)) == mfn_x(target_mfn)) )
{
(void) shadow_set_l1e(v, sl1e, shadow_l1e_empty(),
shadow_l2e_t *sl2e;
int done = 0;
int flags;
-
- SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, v->domain,
+
+ SHADOW_FOREACH_L2E(sl2mfn, sl2e, 0, done, v->domain,
{
flags = shadow_l2e_get_flags(*sl2e);
- if ( (flags & _PAGE_PRESENT)
+ if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l2e_get_mfn(*sl2e)) == mfn_x(sl1mfn)) )
{
(void) shadow_set_l2e(v, sl2e, shadow_l2e_empty(), sl2mfn);
shadow_l3e_t *sl3e;
int done = 0;
int flags;
-
- SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, done,
+
+ SHADOW_FOREACH_L3E(sl3mfn, sl3e, 0, done,
{
flags = shadow_l3e_get_flags(*sl3e);
- if ( (flags & _PAGE_PRESENT)
+ if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l3e_get_mfn(*sl3e)) == mfn_x(sl2mfn)) )
{
(void) shadow_set_l3e(v, sl3e, shadow_l3e_empty(), sl3mfn);
shadow_l4e_t *sl4e;
int done = 0;
int flags;
-
+
SHADOW_FOREACH_L4E(sl4mfn, sl4e, 0, done, v->domain,
{
flags = shadow_l4e_get_flags(*sl4e);
- if ( (flags & _PAGE_PRESENT)
+ if ( (flags & _PAGE_PRESENT)
&& (mfn_x(shadow_l4e_get_mfn(*sl4e)) == mfn_x(sl3mfn)) )
{
(void) shadow_set_l4e(v, sl4e, shadow_l4e_empty(), sl4mfn);
});
return done;
}
-#endif /* 64bit guest */
+#endif /* 64bit guest */
/**************************************************************************/
/* Function for the guest to inform us that a process is being torn
#else
smfn = shadow_hash_lookup(v, mfn_x(gmfn), SH_type_l4_64_shadow);
#endif
-
+
if ( mfn_valid(smfn) )
{
mfn_to_page(gmfn)->shadow_flags |= SHF_pagetable_dying;
/* Translate the VA to a GFN */
gfn = sh_gva_to_gfn(v, NULL, vaddr, &pfec);
- if ( gfn == INVALID_GFN )
+ if ( gfn == INVALID_GFN )
{
if ( is_hvm_vcpu(v) )
hvm_inject_page_fault(pfec, vaddr);
return mfn;
}
-/* Check that the user is allowed to perform this write.
+/* Check that the user is allowed to perform this write.
* Returns a mapped pointer to write to, or NULL for error. */
#define MAPPING_UNHANDLEABLE ((void *)(unsigned long)X86EMUL_UNHANDLEABLE)
#define MAPPING_EXCEPTION ((void *)(unsigned long)X86EMUL_EXCEPTION)
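
The MAPPING_* values above use a common C convention: a small integer error
code is smuggled through a pointer return value, on the assumption that such
low addresses can never be real mappings.  A generic sketch of the convention
(helper names are illustrative, in the style of Linux's ERR_PTR, and are not
part of this code):

    #include <stdint.h>

    #define MAX_ERR_CODE 4095   /* assumed never to be a valid mapping address */

    static inline void *err_to_ptr(long err)       /* err: small positive code */
    {
        return (void *)(uintptr_t)err;
    }

    static inline int ptr_is_err(const void *p)
    {
        return (uintptr_t)p <= MAX_ERR_CODE;
    }

    static inline long ptr_to_err(const void *p)
    {
        return (long)(uintptr_t)p;
    }

Callers then test the returned pointer once and either use it as a mapping or
convert it back to the error code, which keeps the map path to a single return
value.
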
void *map = NULL;
sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
- if ( !mfn_valid(sh_ctxt->mfn1) )
+ if ( !mfn_valid(sh_ctxt->mfn1) )
return ((mfn_x(sh_ctxt->mfn1) == BAD_GVA_TO_GFN) ?
MAPPING_EXCEPTION :
(mfn_x(sh_ctxt->mfn1) == READONLY_GFN) ?
return MAPPING_UNHANDLEABLE;
}
#endif
-
+
    /* Unaligned writes probably mean this isn't a pagetable */
if ( vaddr & (bytes - 1) )
sh_remove_shadows(v, sh_ctxt->mfn1, 0, 0 /* Slow, can fail */ );
sh_ctxt->mfn2 = _mfn(INVALID_MFN);
map = sh_map_domain_page(sh_ctxt->mfn1) + (vaddr & ~PAGE_MASK);
}
- else
+ else
{
unsigned long mfns[2];
- /* Cross-page emulated writes are only supported for HVM guests;
+ /* Cross-page emulated writes are only supported for HVM guests;
* PV guests ought to know better */
if ( !is_hvm_vcpu(v) )
return MAPPING_UNHANDLEABLE;
/* This write crosses a page boundary. Translate the second page */
sh_ctxt->mfn2 = emulate_gva_to_mfn(v, (vaddr + bytes - 1) & PAGE_MASK,
sh_ctxt);
- if ( !mfn_valid(sh_ctxt->mfn2) )
+ if ( !mfn_valid(sh_ctxt->mfn2) )
return ((mfn_x(sh_ctxt->mfn2) == BAD_GVA_TO_GFN) ?
MAPPING_EXCEPTION :
(mfn_x(sh_ctxt->mfn2) == READONLY_GFN) ?
        /* Cross-page writes probably mean this isn't a pagetable */
sh_remove_shadows(v, sh_ctxt->mfn2, 0, 0 /* Slow, can fail */ );
-
+
mfns[0] = mfn_x(sh_ctxt->mfn1);
mfns[1] = mfn_x(sh_ctxt->mfn2);
map = vmap(mfns, 2);
{
if ( ((unsigned long) addr & ((sizeof (guest_intpte_t)) - 1)) == 0 )
check_for_early_unshadow(v, sh_ctxt->mfn1);
- /* Don't reset the heuristic if we're writing zeros at non-aligned
+ /* Don't reset the heuristic if we're writing zeros at non-aligned
* addresses, otherwise it doesn't catch REP MOVSD on PAE guests */
}
- else
+ else
reset_early_unshadow(v);
/* We can avoid re-verifying the page contents after the write if:
&& bytes <= 4)) )
{
/* Writes with this alignment constraint can't possibly cross pages */
- ASSERT(!mfn_valid(sh_ctxt->mfn2));
+ ASSERT(!mfn_valid(sh_ctxt->mfn2));
}
- else
+ else
#endif /* SHADOW_OPTIMIZATIONS & SHOPT_SKIP_VERIFY */
- {
+ {
if ( unlikely(mfn_valid(sh_ctxt->mfn2)) )
{
/* Validate as two writes, one to each page */
paging_mark_dirty(v->domain, mfn_x(sh_ctxt->mfn2));
vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
- else
+ else
sh_unmap_domain_page(addr);
atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
}
static int
-sh_x86_emulate_cmpxchg(struct vcpu *v, unsigned long vaddr,
+sh_x86_emulate_cmpxchg(struct vcpu *v, unsigned long vaddr,
unsigned long old, unsigned long new,
unsigned int bytes, struct sh_emulate_ctxt *sh_ctxt)
{
prev = ~old;
}
- if ( prev != old )
+ if ( prev != old )
rv = X86EMUL_CMPXCHG_FAILED;
SHADOW_DEBUG(EMULATE, "va %#lx was %#lx expected %#lx"
} while (0)
static char * sh_audit_flags(struct vcpu *v, int level,
- int gflags, int sflags)
+ int gflags, int sflags)
/* Common code for auditing flag bits */
{
if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_PRESENT) )
return "shadow is present but guest is not present";
- if ( (sflags & _PAGE_GLOBAL) && !is_hvm_vcpu(v) )
+ if ( (sflags & _PAGE_GLOBAL) && !is_hvm_vcpu(v) )
return "global bit set in PV shadow";
if ( level == 2 && (sflags & _PAGE_PSE) )
return "PS bit set in shadow";
#if SHADOW_PAGING_LEVELS == 3
if ( level == 3 ) return NULL; /* All the other bits are blank in PAEl3 */
#endif
- if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_ACCESSED) )
+ if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_ACCESSED) )
return "accessed bit not propagated";
if ( (level == 1 || (level == 2 && (gflags & _PAGE_PSE)))
- && ((sflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY)) )
+ && ((sflags & _PAGE_RW) && !(gflags & _PAGE_DIRTY)) )
return "dirty bit not propagated";
- if ( (sflags & _PAGE_USER) != (gflags & _PAGE_USER) )
+ if ( (sflags & _PAGE_USER) != (gflags & _PAGE_USER) )
return "user/supervisor bit does not match";
- if ( (sflags & _PAGE_NX_BIT) != (gflags & _PAGE_NX_BIT) )
+ if ( (sflags & _PAGE_NX_BIT) != (gflags & _PAGE_NX_BIT) )
return "NX bit does not match";
- if ( (sflags & _PAGE_RW) && !(gflags & _PAGE_RW) )
+ if ( (sflags & _PAGE_RW) && !(gflags & _PAGE_RW) )
return "shadow grants write access but guest does not";
return NULL;
}
p2m_type_t p2mt;
char *s;
int done = 0;
-
+
/* Follow the backpointer */
ASSERT(mfn_to_page(sl1mfn)->u.sh.head);
gl1mfn = backpointer(mfn_to_page(sl1mfn));
gl1e = gp = sh_map_domain_page(gl1mfn);
SHADOW_FOREACH_L1E(sl1mfn, sl1e, &gl1e, done, {
- if ( sh_l1e_is_magic(*sl1e) )
+ if ( sh_l1e_is_magic(*sl1e) )
{
#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
if ( sh_l1e_is_gnp(*sl1e) )
{
if ( guest_l1e_get_flags(*gl1e) & _PAGE_PRESENT )
AUDIT_FAIL(1, "shadow is GNP magic but guest is present");
- }
- else
+ }
+ else
{
ASSERT(sh_l1e_is_mmio(*sl1e));
gfn = sh_l1e_mmio_get_gfn(*sl1e);
if ( gfn_x(gfn) != gfn_x(guest_l1e_get_gfn(*gl1e)) )
- AUDIT_FAIL(1, "shadow MMIO gfn is %" SH_PRI_gfn
+ AUDIT_FAIL(1, "shadow MMIO gfn is %" SH_PRI_gfn
" but guest gfn is %" SH_PRI_gfn,
gfn_x(gfn),
gfn_x(guest_l1e_get_gfn(*gl1e)));
}
#endif
}
- else
+ else
{
s = sh_audit_flags(v, 1, guest_l1e_get_flags(*gl1e),
shadow_l1e_get_flags(*sl1e));
if ( s ) AUDIT_FAIL(1, "%s", s);
-
+
if ( SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_MFNS )
{
gfn = guest_l1e_get_gfn(*gl1e);
SHADOW_FOREACH_L1E(sl1mfn, sl1e, 0, done, {
f = shadow_l1e_get_flags(*sl1e);
f &= ~(_PAGE_AVAIL0|_PAGE_AVAIL1|_PAGE_AVAIL2);
- if ( !(f == 0
+ if ( !(f == 0
|| f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
- _PAGE_ACCESSED)
+ _PAGE_ACCESSED)
|| f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_ACCESSED)
|| f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_RW|
- _PAGE_ACCESSED|_PAGE_DIRTY)
+ _PAGE_ACCESSED|_PAGE_DIRTY)
|| f == (_PAGE_PRESENT|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY)
|| sh_l1e_is_magic(*sl1e)) )
AUDIT_FAIL(1, "fl1e has bad flags");
{
gfn = guest_l2e_get_gfn(*gl2e);
mfn = shadow_l2e_get_mfn(*sl2e);
- gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
+ gmfn = (guest_l2e_get_flags(*gl2e) & _PAGE_PSE)
? get_fl1_shadow_status(v, gfn)
- : get_shadow_status(v,
- get_gfn_query_unlocked(v->domain, gfn_x(gfn),
+ : get_shadow_status(v,
+ get_gfn_query_unlocked(v->domain, gfn_x(gfn),
&p2mt), SH_type_l1_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
AUDIT_FAIL(2, "bad translation: gfn %" SH_PRI_gfn
" (--> %" PRI_mfn ")"
" --> %" PRI_mfn " != mfn %" PRI_mfn,
- gfn_x(gfn),
+ gfn_x(gfn),
(guest_l2e_get_flags(*gl2e) & _PAGE_PSE) ? 0
: mfn_x(get_gfn_query_unlocked(v->domain,
gfn_x(gfn), &p2mt)), mfn_x(gmfn), mfn_x(mfn));
ASSERT(mfn_to_page(sl3mfn)->u.sh.head);
gl3mfn = backpointer(mfn_to_page(sl3mfn));
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
if ( page_is_out_of_sync(mfn_to_page(gl3mfn)) )
AUDIT_FAIL_MIN(3, "gmfn %lx is out of sync", mfn_x(gl3mfn));
ASSERT(mfn_to_page(sl4mfn)->u.sh.head);
gl4mfn = backpointer(mfn_to_page(sl4mfn));
-#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
/* Only L1's may be out of sync. */
if ( page_is_out_of_sync(mfn_to_page(gl4mfn)) )
AUDIT_FAIL_MIN(4, "gmfn %lx is out of sync", mfn_x(gl4mfn));
gfn = guest_l4e_get_gfn(*gl4e);
mfn = shadow_l4e_get_mfn(*sl4e);
gmfn = get_shadow_status(v, get_gfn_query_unlocked(
- v->domain, gfn_x(gfn), &p2mt),
+ v->domain, gfn_x(gfn), &p2mt),
SH_type_l3_shadow);
if ( mfn_x(gmfn) != mfn_x(mfn) )
AUDIT_FAIL(4, "bad translation: gfn %" SH_PRI_gfn
/* Entry points into this mode of the shadow code.
* This will all be mangled by the preprocessor to uniquify everything. */
const struct paging_mode sh_paging_mode = {
- .page_fault = sh_page_fault,
+ .page_fault = sh_page_fault,
.invlpg = sh_invlpg,
.gva_to_gfn = sh_gva_to_gfn,
.update_cr3 = sh_update_cr3,
* c-file-style: "BSD"
* c-basic-offset: 4
* indent-tabs-mode: nil
- * End:
+ * End:
*/