l4tab = __map_domain_page(pg);
clear_page(l4tab);
- init_guest_l4_table(l4tab, v->domain, 1);
+ init_guest_l4_table(l4tab, v->domain);
unmap_domain_page(l4tab);
v->arch.guest_table = pagetable_from_page(pg);
case -EINTR:
rc = -ERESTART;
case -ERESTART:
- break;
case 0:
- if ( !compat && !VM_ASSIST(d, m2p_strict) &&
- !paging_mode_refcounts(d) )
- fill_ro_mpt(cr3_gfn);
break;
default:
if ( cr3_page == current->arch.old_guest_table )
default:
if ( cr3_page == current->arch.old_guest_table )
cr3_page = NULL;
- break;
case 0:
- if ( VM_ASSIST(d, m2p_strict) )
- zap_ro_mpt(cr3_gfn);
break;
}
}
l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
}
clear_page(l4tab);
- init_guest_l4_table(l4tab, d, 0);
+ init_guest_l4_table(l4tab, d);
v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
if ( is_pv_32on64_domain(d) )
v->arch.guest_table_user = v->arch.guest_table;
return rc > 0 ? 0 : rc;
}
-void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
- bool_t zap_ro_mpt)
+void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d)
{
/* Xen private mappings. */
 memcpy(&l4tab[ROOT_PAGETABLE_FIRST_XEN_SLOT],
        &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
        ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
 l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
     l4e_from_pfn(domain_page_map_to_mfn(l4tab), __PAGE_HYPERVISOR);
l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
l4e_from_page(d->arch.perdomain_l3_pg, __PAGE_HYPERVISOR);
- if ( zap_ro_mpt || is_pv_32on64_domain(d) || paging_mode_refcounts(d) )
- l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
-}
-
-void fill_ro_mpt(unsigned long mfn)
-{
- l4_pgentry_t *l4tab = map_domain_page(mfn);
-
- l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
- idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
- unmap_domain_page(l4tab);
-}
-
-void zap_ro_mpt(unsigned long mfn)
-{
- l4_pgentry_t *l4tab = map_domain_page(mfn);
-
- l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
- unmap_domain_page(l4tab);
}
static int alloc_l4_table(struct page_info *page)
adjust_guest_l4e(pl4e[i], d);
}
- init_guest_l4_table(pl4e, d, !VM_ASSIST(d, m2p_strict));
+ init_guest_l4_table(pl4e, d);
unmap_domain_page(pl4e);
return rc > 0 ? 0 : rc;
invalidate_shadow_ldt(curr, 0);
- if ( !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
- fill_ro_mpt(mfn);
curr->arch.guest_table = pagetable_from_pfn(mfn);
update_cr3(curr);
op.arg1.mfn);
break;
}
- if ( VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) )
- zap_ro_mpt(op.arg1.mfn);
}
curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn);
 sl4e[shadow_l4_table_offset(PERDOMAIN_VIRT_START)] =
     shadow_l4e_from_mfn(page_to_mfn(d->arch.perdomain_l3_pg),
                         __PAGE_HYPERVISOR);
- if ( !VM_ASSIST(d, m2p_strict) )
- sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
-
/* Shadow linear mapping for 4-level shadows. N.B. for 3-level
* shadows on 64-bit xen, this linear mapping is later replaced by the
* monitor pagetable structure, which is built in make_monitor_table
/* PAGING_LEVELS==4 implies 64-bit, which means that
* map_domain_page_global can't fail */
BUG_ON(v->arch.paging.shadow.guest_vtable == NULL);
- if ( !shadow_mode_external(d) && !is_pv_32on64_domain(d) )
- {
- shadow_l4e_t *sl4e = v->arch.paging.shadow.guest_vtable;
-
- if ( (v->arch.flags & TF_kernel_mode) &&
- !VM_ASSIST(d, m2p_strict) )
- sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
- idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
- else if ( !(v->arch.flags & TF_kernel_mode) &&
- VM_ASSIST(d, m2p_strict) )
- sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] =
- shadow_l4e_empty();
- }
}
else
v->arch.paging.shadow.guest_vtable = __linear_l4_table;
l2_ro_mpt += l2_table_offset(va);
}
- /* NB. Cannot be GLOBAL: guest user mode should not see it. */
+ /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
l2e_write(l2_ro_mpt, l2e_from_pfn(mfn,
/*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
}
0x77, 1UL << L3_PAGETABLE_SHIFT);
ASSERT(!l2_table_offset(va));
- /* NB. Cannot be GLOBAL: guest user mode should not see it. */
+ /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
l3e_write(&l3_ro_mpt[l3_table_offset(va)],
l3e_from_page(l1_pg,
/*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
l3e_from_page(l2_pg, __PAGE_HYPERVISOR | _PAGE_USER));
ASSERT(!l2_table_offset(va));
}
- /* NB. Cannot be GLOBAL: guest user mode should not see it. */
+ /* NB. Cannot be GLOBAL as shadow_mode_translate reuses this area. */
if ( l1_pg )
l2e_write(l2_ro_mpt, l2e_from_page(
l1_pg, /*_PAGE_GLOBAL|*/_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT));
#define NATIVE_VM_ASSIST_VALID ((1UL << VMASST_TYPE_4gb_segments) | \
(1UL << VMASST_TYPE_4gb_segments_notify) | \
(1UL << VMASST_TYPE_writable_pagetables) | \
- (1UL << VMASST_TYPE_pae_extended_cr3) | \
- (1UL << VMASST_TYPE_m2p_strict))
+ (1UL << VMASST_TYPE_pae_extended_cr3))
#define VM_ASSIST_VALID NATIVE_VM_ASSIST_VALID
#define COMPAT_VM_ASSIST_VALID (NATIVE_VM_ASSIST_VALID & \
((1UL << COMPAT_BITS_PER_LONG) - 1))
int free_page_type(struct page_info *page, unsigned long type,
int preemptible);
-void init_guest_l4_table(l4_pgentry_t[], const struct domain *,
- bool_t zap_ro_mpt);
-void fill_ro_mpt(unsigned long mfn);
-void zap_ro_mpt(unsigned long mfn);
+void init_guest_l4_table(l4_pgentry_t[], const struct domain *);
int is_iomem_page(unsigned long mfn);
/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3
-/*
- * x86/64 guests: strictly hide M2P from user mode.
- * This allows the guest to control respective hypervisor behavior:
- * - when not set, L4 tables get created with the respective slot blank,
- * and whenever the L4 table gets used as a kernel one the missing
- * mapping gets inserted,
- * - when set, L4 tables get created with the respective slot initialized
- * as before, and whenever the L4 table gets used as a user one the
- * mapping gets zapped.
- */
-#define VMASST_TYPE_m2p_strict 32
-
#if __XEN_INTERFACE_VERSION__ < 0x00040600
#define MAX_VMASST_TYPE 3
#endif
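
For reference, a minimal guest-side sketch (not part of this patch) of how a 64-bit PV kernel would have requested the assist removed above, via the generic vm_assist hypercall. The Linux-style HYPERVISOR_vm_assist() wrapper, include paths, and the local macro name are assumptions for illustration; with this revert applied the type is no longer in VM_ASSIST_VALID, so Xen rejects the request with -EINVAL.

#include <xen/interface/xen.h>   /* VMASST_CMD_enable */
#include <asm/xen/hypercall.h>   /* HYPERVISOR_vm_assist() (Linux PV guest) */

/* 32 was the value of VMASST_TYPE_m2p_strict in the pre-revert public headers. */
#define GUEST_VMASST_TYPE_m2p_strict 32

int xen_request_m2p_strict(void)
{
    /* Ask Xen to hide the read-only M2P from user mode; once this revert
     * is applied the hypercall fails and the M2P stays visible. */
    return HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                GUEST_VMASST_TYPE_m2p_strict);
}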