     mflags = mandatory_flags(v, pfec);
     iflags = (_PAGE_NX_BIT | _PAGE_INVALID_BITS);
-    if ( is_hvm_vcpu(v) && !(pfec & PFEC_user_mode) )
+    if ( is_hvm_domain(d) && !(pfec & PFEC_user_mode) )
     {
         struct segment_register seg;
         const struct cpu_user_regs *regs = guest_cpu_user_regs();
      * destroyed any shadows of it or sh_destroy_shadow will get confused. */
     if ( !pagetable_is_null(d->arch.paging.shadow.unpaged_pagetable) )
     {
+        ASSERT(is_hvm_domain(d));
         for_each_vcpu(d, v)
-        {
-            ASSERT(is_hvm_vcpu(v));
             if ( !hvm_paging_enabled(v) )
                 v->arch.guest_table = pagetable_null();
-        }
         unpaged_pagetable =
             pagetable_get_page(d->arch.paging.shadow.unpaged_pagetable);
         d->arch.paging.shadow.unpaged_pagetable = pagetable_null();
         sh_unmap_domain_page(l3e);
         shadow_free(d, m3mfn);
-        if ( is_pv_32on64_vcpu(v) )
+        if ( is_pv_32on64_domain(d) )
         {
             /* Need to destroy the l3 and l2 monitor pages that map the
              * Xen VAs at 3GB-4GB */
                   (unsigned long)pagetable_get_pfn(v->arch.guest_table));
 #if GUEST_PAGING_LEVELS == 4
-    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_vcpu(v) )
+    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_domain(d) )
         gmfn = pagetable_get_mfn(v->arch.guest_table_user);
     else
 #endif
         /* Cross-page emulated writes are only supported for HVM guests;
          * PV guests ought to know better */
-        if ( !is_hvm_vcpu(v) )
+        if ( !is_hvm_domain(d) )
             return MAPPING_UNHANDLEABLE;
         /* This write crosses a page boundary. Translate the second page */
         gmfn = get_shadow_status(d, get_gfn_query_unlocked(
                                         d, gfn_x(gfn), &p2mt),
                                  ((GUEST_PAGING_LEVELS == 3 ||
-                                   is_pv_32on64_vcpu(v))
+                                   is_pv_32on64_domain(d))
                                   && !shadow_mode_external(d)
                                   && (guest_index(gl3e) % 4) == 3)
                                  ? SH_type_l2h_shadow