}
#endif
- v->arch.paging.mode = is_pv_vcpu(v) ?
- &SHADOW_INTERNAL_NAME(sh_paging_mode, 4) :
- &SHADOW_INTERNAL_NAME(sh_paging_mode, 3);
+ v->arch.paging.mode = is_hvm_vcpu(v) ?
+ &SHADOW_INTERNAL_NAME(sh_paging_mode, 3) :
+ &SHADOW_INTERNAL_NAME(sh_paging_mode, 4);
}
#if SHADOW_AUDIT
switch ( mfn_to_page(smfn)->u.sh.type )
{
+#ifdef CONFIG_HVM
case SH_type_l1_32_shadow:
case SH_type_fl1_32_shadow:
return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p, 2)
case SH_type_fl1_pae_shadow:
return SHADOW_INTERNAL_NAME(sh_rm_write_access_from_sl1p, 3)
(d, gmfn, smfn, off);
+#endif
case SH_type_l1_64_shadow:
case SH_type_fl1_64_shadow:
* the free pool.
*/
+#ifdef CONFIG_HVM
const u8 sh_type_to_size[] = {
1, /* SH_type_none */
2, /* SH_type_l1_32_shadow */
1, /* SH_type_monitor_table */
1 /* SH_type_oos_snapshot */
};
+#endif
/*
* Figure out the least acceptable quantity of shadow memory.
struct page_info *sp = mfn_to_page(smfn);
switch ( sp->u.sh.type )
{
+#ifdef CONFIG_HVM
case SH_type_l2_32_shadow:
SHADOW_INTERNAL_NAME(sh_unhook_32b_mappings, 2)(d, smfn, user_only);
break;
case SH_type_l2_pae_shadow:
SHADOW_INTERNAL_NAME(sh_unhook_pae_mappings, 3)(d, smfn, user_only);
break;
+#endif
case SH_type_l4_64_shadow:
SHADOW_INTERNAL_NAME(sh_unhook_64b_mappings, 4)(d, smfn, user_only);
break;
/* Backpointers that are MFNs need to be packed into PDXs (PFNs don't) */
switch (shadow_type)
{
+#ifdef CONFIG_HVM
case SH_type_fl1_32_shadow:
case SH_type_fl1_pae_shadow:
+#endif
case SH_type_fl1_64_shadow:
break;
default:
* small numbers that the compiler will enjoy */
switch ( t )
{
+#ifdef CONFIG_HVM
case SH_type_l1_32_shadow:
case SH_type_fl1_32_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 2)(d, smfn);
case SH_type_l2_pae_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 3)(d, smfn);
break;
+#endif
case SH_type_l1_64_shadow:
case SH_type_fl1_64_shadow:
/* Dispatch table for getting per-type functions */
static const hash_domain_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
+#ifdef CONFIG_HVM
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 2), /* fl1_32 */
NULL, /* l2_32 */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* l1_pae */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 3), /* fl1_pae */
NULL, /* l2_pae */
+#endif
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* l1_64 */
SHADOW_INTERNAL_NAME(sh_rm_write_access_from_l1, 4), /* fl1_64 */
NULL, /* l2_64 */
/* Dispatch table for getting per-type functions */
static const hash_domain_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
+#ifdef CONFIG_HVM
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 2), /* fl1_32 */
NULL, /* l2_32 */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* l1_pae */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 3), /* fl1_pae */
NULL, /* l2_pae */
+#endif
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* l1_64 */
SHADOW_INTERNAL_NAME(sh_rm_mappings_from_l1, 4), /* fl1_64 */
NULL, /* l2_64 */
/* Blank the offending entry */
switch (sp->u.sh.type)
{
+#ifdef CONFIG_HVM
case SH_type_l1_32_shadow:
case SH_type_l2_32_shadow:
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 2)(d, vaddr, pmfn);
case SH_type_l2_pae_shadow:
SHADOW_INTERNAL_NAME(sh_clear_shadow_entry, 3)(d, vaddr, pmfn);
break;
+#endif
case SH_type_l1_64_shadow:
case SH_type_l2_64_shadow:
case SH_type_l2h_64_shadow:
* be called with the function to remove a lower-level shadow. */
static const hash_domain_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
+#ifdef CONFIG_HVM
NULL, /* l1_32 */
NULL, /* fl1_32 */
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 2), /* l2_32 */
NULL, /* l1_pae */
NULL, /* fl1_pae */
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 3), /* l2_pae */
+#endif
NULL, /* l1_64 */
NULL, /* fl1_64 */
SHADOW_INTERNAL_NAME(sh_remove_l1_shadow, 4), /* l2_64 */
/* Another lookup table, for choosing which mask to use */
static const unsigned int masks[SH_type_unused] = {
0, /* none */
+#ifdef CONFIG_HVM
SHF_L2_32, /* l1_32 */
0, /* fl1_32 */
0, /* l2_32 */
SHF_L2_PAE, /* l1_pae */
0, /* fl1_pae */
0, /* l2_pae */
+#endif
SHF_L2H_64 | SHF_L2_64, /* l1_64 */
0, /* fl1_64 */
SHF_L3_64, /* l2_64 */
{
static const hash_vcpu_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
+#ifdef CONFIG_HVM
NULL, /* l1_32 */
NULL, /* fl1_32 */
NULL, /* l2_32 */
NULL, /* l1_pae */
NULL, /* fl1_pae */
NULL, /* l2_pae */
+#endif
NULL, /* l1_64 */
NULL, /* fl1_64 */
NULL, /* l2_64 */
static const hash_vcpu_callback_t callbacks[SH_type_unused] = {
NULL, /* none */
#if SHADOW_AUDIT & (SHADOW_AUDIT_ENTRIES | SHADOW_AUDIT_ENTRIES_FULL)
+# ifdef CONFIG_HVM
SHADOW_INTERNAL_NAME(sh_audit_l1_table, 2), /* l1_32 */
SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 2), /* fl1_32 */
SHADOW_INTERNAL_NAME(sh_audit_l2_table, 2), /* l2_32 */
SHADOW_INTERNAL_NAME(sh_audit_l1_table, 3), /* l1_pae */
SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 3), /* fl1_pae */
SHADOW_INTERNAL_NAME(sh_audit_l2_table, 3), /* l2_pae */
+# endif
SHADOW_INTERNAL_NAME(sh_audit_l1_table, 4), /* l1_64 */
SHADOW_INTERNAL_NAME(sh_audit_fl1_table, 4), /* fl1_64 */
SHADOW_INTERNAL_NAME(sh_audit_l2_table, 4), /* l2_64 */
/* Shadow type codes */
#define SH_type_none (0U) /* on the shadow free list */
#define SH_type_min_shadow (1U)
+#ifdef CONFIG_HVM
#define SH_type_l1_32_shadow (1U) /* shadowing a 32-bit L1 guest page */
#define SH_type_fl1_32_shadow (2U) /* L1 shadow for a 32b 4M superpage */
#define SH_type_l2_32_shadow (3U) /* shadowing a 32-bit L2 guest page */
#define SH_type_monitor_table (14U) /* in use as a monitor table */
#define SH_type_oos_snapshot (15U) /* in use as OOS snapshot */
#define SH_type_unused (16U)
+#else
+#define SH_type_l1_32_shadow SH_type_unused
+#define SH_type_fl1_32_shadow SH_type_unused
+#define SH_type_l2_32_shadow SH_type_unused
+#define SH_type_l1_pae_shadow SH_type_unused
+#define SH_type_fl1_pae_shadow SH_type_unused
+#define SH_type_l2_pae_shadow SH_type_unused
+#define SH_type_l1_64_shadow 1U /* shadowing a 64-bit L1 page */
+#define SH_type_fl1_64_shadow 2U /* L1 shadow for 64-bit 2M superpg */
+#define SH_type_l2_64_shadow 3U /* shadowing a 64-bit L2 page */
+#define SH_type_l2h_64_shadow 4U /* shadowing a compat PAE L2 high page */
+#define SH_type_l3_64_shadow 5U /* shadowing a 64-bit L3 page */
+#define SH_type_l4_64_shadow 6U /* shadowing a 64-bit L4 page */
+#define SH_type_max_shadow 6U
+#define SH_type_p2m_table 7U /* in use as the p2m table */
+#define SH_type_monitor_table 8U /* in use as a monitor table */
+#define SH_type_oos_snapshot 9U /* in use as OOS snapshot */
+#define SH_type_unused 10U
+#endif
/*
* What counts as a pinnable shadow?
(((1u << (SH_type_max_shadow + 1u)) - 1u) - \
((1u << SH_type_min_shadow) - 1u))
+#ifdef CONFIG_HVM
#define SHF_L1_32 (1u << SH_type_l1_32_shadow)
#define SHF_FL1_32 (1u << SH_type_fl1_32_shadow)
#define SHF_L2_32 (1u << SH_type_l2_32_shadow)
#define SHF_L1_PAE (1u << SH_type_l1_pae_shadow)
#define SHF_FL1_PAE (1u << SH_type_fl1_pae_shadow)
#define SHF_L2_PAE (1u << SH_type_l2_pae_shadow)
+#else
+#define SHF_L1_32 0
+#define SHF_FL1_32 0
+#define SHF_L2_32 0
+#define SHF_L1_PAE 0
+#define SHF_FL1_PAE 0
+#define SHF_L2_PAE 0
+#endif
#define SHF_L1_64 (1u << SH_type_l1_64_shadow)
#define SHF_FL1_64 (1u << SH_type_fl1_64_shadow)
#define SHF_L2_64 (1u << SH_type_l2_64_shadow)
+/*
+ * Return the size, in pages, of a shadow of the given SH_type.
+ * HVM builds look the type up in the per-type size table; in !HVM
+ * (PV-only) builds every remaining shadow type is a single page.
+ */
static inline unsigned int
shadow_size(unsigned int shadow_type)
{
+#ifdef CONFIG_HVM
    ASSERT(shadow_type < ARRAY_SIZE(sh_type_to_size));
    return sh_type_to_size[shadow_type];
+#else
+    /* Only the 64-bit shadow types survive; all occupy one page each. */
+    ASSERT(shadow_type < SH_type_unused);
+    return 1;
+#endif
}
/******************************************************************************
{
switch (sp->u.sh.type)
{
+#ifdef CONFIG_HVM
case SH_type_fl1_32_shadow:
case SH_type_fl1_pae_shadow:
+#endif
case SH_type_fl1_64_shadow:
return sp->v.sh.back;
}