perfc_incr(shadow_prealloc_2);
for_each_vcpu(d, v)
- for ( i = 0 ; i < 4 ; i++ )
+ for ( i = 0; i < ARRAY_SIZE(v->arch.paging.shadow.shadow_table); i++ )
{
- if ( !pagetable_is_null(v->arch.shadow_table[i]) )
+ if ( !pagetable_is_null(v->arch.paging.shadow.shadow_table[i]) )
{
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_PREALLOC_UNHOOK);
- shadow_unhook_mappings(d,
- pagetable_get_mfn(v->arch.shadow_table[i]), 0);
+ shadow_unhook_mappings(
+ d,
+ pagetable_get_mfn(v->arch.paging.shadow.shadow_table[i]),
+ 0);
/* See if that freed up enough space */
if ( d->arch.paging.shadow.free_pages >= pages )
/* Second pass: unhook entries of in-use shadows */
for_each_vcpu(d, v)
- for ( i = 0 ; i < 4 ; i++ )
- if ( !pagetable_is_null(v->arch.shadow_table[i]) )
- shadow_unhook_mappings(d,
- pagetable_get_mfn(v->arch.shadow_table[i]), 0);
+ for ( i = 0; i < ARRAY_SIZE(v->arch.paging.shadow.shadow_table); i++ )
+ if ( !pagetable_is_null(v->arch.paging.shadow.shadow_table[i]) )
+ shadow_unhook_mappings(
+ d,
+ pagetable_get_mfn(v->arch.paging.shadow.shadow_table[i]),
+ 0);
/* Make sure everyone sees the unshadowings */
guest_flush_tlb_mask(d, d->dirty_cpumask);
};
#endif
+#if SHADOW_PAGING_LEVELS == 3
+/*
+ * Iterate over every shadow_table[] slot a vCPU can have in use: all
+ * ARRAY_SIZE(shadow_table) slots for 3-level (PAE) shadows, which keep one
+ * top-level shadow per PAE l3 entry, but only slot 0 for 2- and 4-level
+ * shadows, where the remaining slots are never populated.
+ * NOTE(review): (v) may be evaluated more than once in the PAE variant —
+ * callers pass a plain vcpu pointer, so this looks safe; confirm no caller
+ * passes an expression with side effects.
+ */
+# define for_each_shadow_table(v, i) \
+    for ( (i) = 0; \
+          (i) < ARRAY_SIZE((v)->arch.paging.shadow.shadow_table); \
+          ++(i) )
+#else
+# define for_each_shadow_table(v, i) for ( (i) = 0; (i) < 1; ++(i) )
+#endif
+
/* Helper to perform a local TLB flush. */
static void sh_flush_local(const struct domain *d)
{
mfn_t *sl4mfn)
{
/* There is always a shadow of the top level table. Get it. */
- *sl4mfn = pagetable_get_mfn(v->arch.shadow_table[0]);
+ *sl4mfn = pagetable_get_mfn(v->arch.paging.shadow.shadow_table[0]);
/* Reading the top level table is always valid. */
return sh_linear_l4_table(v) + shadow_l4_linear_offset(gw->va);
}
return sh_linear_l2_table(v) + shadow_l2_linear_offset(gw->va);
#else /* 32bit... */
/* There is always a shadow of the top level table. Get it. */
- *sl2mfn = pagetable_get_mfn(v->arch.shadow_table[0]);
+ *sl2mfn = pagetable_get_mfn(v->arch.paging.shadow.shadow_table[0]);
/* This next line is important: the guest l2 has a 16k
* shadow, we need to return the right mfn of the four. This
* call will set it for us as a side-effect. */
struct domain *d = v->domain;
struct page_info *sp;
mfn_t smfn;
+ unsigned int i;
if ( !sh_type_has_up_pointer(d, SH_type_l1_shadow) )
return 0;
ASSERT(mfn_valid(smfn));
#endif
- if ( pagetable_get_pfn(v->arch.shadow_table[0]) == mfn_x(smfn)
-#if (SHADOW_PAGING_LEVELS == 3)
- || pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn)
- || pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn)
- || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn)
-#endif
- )
- return 0;
+ for_each_shadow_table(v, i)
+ if ( pagetable_get_pfn(v->arch.paging.shadow.shadow_table[i]) ==
+ mfn_x(smfn) )
+ return 0;
/* Only in use in one toplevel shadow, and it's not the one we're
* running on */
for_each_vcpu(d, tmp)
{
#if GUEST_PAGING_LEVELS == 3
- int i;
- for ( i = 0; i < 4; i++ )
+ unsigned int i;
+
+ for_each_shadow_table(v, i)
{
- mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
+ mfn_t smfn = pagetable_get_mfn(
+ v->arch.paging.shadow.shadow_table[i]);
if ( mfn_valid(smfn) && (mfn_x(smfn) != 0) )
{
*
* Because HVM guests run on the same monitor tables regardless of the
* shadow tables in use, the linear mapping of the shadow tables has to
- * be updated every time v->arch.shadow_table changes.
+ * be updated every time v->arch.paging.shadow.shadow_table changes.
*/
/* Don't try to update the monitor table if it doesn't exist */
if ( v == current )
{
__linear_l4_table[l4_linear_offset(SH_LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
- __PAGE_HYPERVISOR_RW);
+ l4e_from_pfn(
+ pagetable_get_pfn(v->arch.paging.shadow.shadow_table[0]),
+ __PAGE_HYPERVISOR_RW);
}
else
{
ml4e = map_domain_page(pagetable_get_mfn(v->arch.hvm.monitor_table));
ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
- l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
- __PAGE_HYPERVISOR_RW);
+ l4e_from_pfn(
+ pagetable_get_pfn(v->arch.paging.shadow.shadow_table[0]),
+ __PAGE_HYPERVISOR_RW);
unmap_domain_page(ml4e);
}
/*
- * Removes vcpu->arch.shadow_table[].
+ * Removes v->arch.paging.shadow.shadow_table[].
* Does all appropriate management/bookkeeping/refcounting/etc...
*/
static void
{
struct domain *d = v->domain;
mfn_t smfn;
- int i = 0;
+ unsigned int i;
////
- //// vcpu->arch.shadow_table[]
+ //// vcpu->arch.paging.shadow.shadow_table[]
////
-#if GUEST_PAGING_LEVELS == 3
- /* PAE guests have four shadow_table entries */
- for ( i = 0 ; i < 4 ; i++ )
-#endif
+ for_each_shadow_table(v, i)
{
- smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
+ smfn = pagetable_get_mfn(v->arch.paging.shadow.shadow_table[i]);
if ( mfn_x(smfn) )
sh_put_ref(d, smfn, 0);
- v->arch.shadow_table[i] = pagetable_null();
+ v->arch.paging.shadow.shadow_table[i] = pagetable_null();
}
}
/* Set up the top-level shadow and install it in slot 'slot' of shadow_table */
static void
sh_set_toplevel_shadow(struct vcpu *v,
- int slot,
+ unsigned int slot,
mfn_t gmfn,
unsigned int root_type)
{
mfn_t smfn;
pagetable_t old_entry, new_entry;
-
struct domain *d = v->domain;
/* Remember the old contents of this slot */
- old_entry = v->arch.shadow_table[slot];
+ old_entry = v->arch.paging.shadow.shadow_table[slot];
/* Now figure out the new contents: is this a valid guest MFN? */
if ( !mfn_valid(gmfn) )
SHADOW_PRINTK("%u/%u [%u] gmfn %#"PRI_mfn" smfn %#"PRI_mfn"\n",
GUEST_PAGING_LEVELS, SHADOW_PAGING_LEVELS, slot,
mfn_x(gmfn), mfn_x(pagetable_get_mfn(new_entry)));
- v->arch.shadow_table[slot] = new_entry;
+ v->arch.paging.shadow.shadow_table[slot] = new_entry;
/* Decrement the refcount of the old contents of this slot */
if ( !pagetable_is_null(old_entry) ) {
////
- //// vcpu->arch.shadow_table[]
+ //// vcpu->arch.paging.shadow.shadow_table[]
////
/* We revoke write access to the new guest toplevel page(s) before we
sh_set_toplevel_shadow(v, 0, gmfn, SH_type_l4_shadow);
if ( !shadow_mode_external(d) && !is_pv_32bit_domain(d) )
{
- mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);
+ mfn_t smfn = pagetable_get_mfn(v->arch.paging.shadow.shadow_table[0]);
if ( !(v->arch.flags & TF_kernel_mode) && VM_ASSIST(d, m2p_strict) )
zap_ro_mpt(smfn);
///
#if SHADOW_PAGING_LEVELS == 3
{
- mfn_t smfn = pagetable_get_mfn(v->arch.shadow_table[0]);
- int i;
- for ( i = 0; i < 4; i++ )
+ mfn_t smfn = pagetable_get_mfn(v->arch.paging.shadow.shadow_table[0]);
+ unsigned int i;
+
+ for_each_shadow_table(v, i)
{
#if GUEST_PAGING_LEVELS == 2
/* 2-on-3: make a PAE l3 that points at the four-page l2 */
smfn = sh_next_page(smfn);
#else
/* 3-on-3: make a PAE l3 that points at the four l2 pages */
- smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
+ smfn = pagetable_get_mfn(v->arch.paging.shadow.shadow_table[i]);
#endif
v->arch.paging.shadow.l3table[i] =
(mfn_x(smfn) == 0)
/* We don't support PV except guest == shadow == config levels */
BUILD_BUG_ON(GUEST_PAGING_LEVELS != SHADOW_PAGING_LEVELS);
/* Just use the shadow top-level directly */
- make_cr3(v, pagetable_get_mfn(v->arch.shadow_table[0]));
+ make_cr3(v, pagetable_get_mfn(v->arch.paging.shadow.shadow_table[0]));
}
#endif
v->arch.hvm.hw_cr[3] = virt_to_maddr(&v->arch.paging.shadow.l3table);
#else
/* 4-on-4: Just use the shadow top-level directly */
- v->arch.hvm.hw_cr[3] = pagetable_get_paddr(v->arch.shadow_table[0]);
+ v->arch.hvm.hw_cr[3] =
+ pagetable_get_paddr(v->arch.paging.shadow.shadow_table[0]);
#endif
hvm_update_guest_cr3(v, noflush);
}
{
struct vcpu *v = current;
struct domain *d = v->domain;
- int i = 0;
+ unsigned int i;
int flush = 0;
int fast_path = 0;
paddr_t gcr3 = 0;
gl3pa = map_domain_page(l3mfn);
gl3e = (guest_l3e_t *)(gl3pa + ((unsigned long)gpa & ~PAGE_MASK));
}
- for ( i = 0; i < 4; i++ )
+ for_each_shadow_table(v, i)
{
mfn_t smfn, gmfn;
- if ( fast_path ) {
- if ( pagetable_is_null(v->arch.shadow_table[i]) )
+ if ( fast_path )
+ {
+ if ( pagetable_is_null(v->arch.paging.shadow.shadow_table[i]) )
smfn = INVALID_MFN;
else
- smfn = pagetable_get_mfn(v->arch.shadow_table[i]);
+ smfn = pagetable_get_mfn(v->arch.paging.shadow.shadow_table[i]);
}
else
{