BUG();
/* Maybe load the debug registers. */
- BUG_ON(is_hvm_vcpu(curr));
+ BUG_ON(!is_pv_vcpu(curr));
if ( !is_idle_vcpu(curr) && curr->arch.debugreg[7] )
{
write_debugreg(0, curr->arch.debugreg[0]);
{
dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities"
" %#" PRIx64 " for d%d:v%u (supported: %#Lx)\n",
- is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps,
+ has_hvm_container_vcpu(v) ? "HVM" : "PV", ctxt->caps,
v->domain->domain_id, v->vcpu_id,
guest_mcg_cap & ~MCG_CAP_COUNT);
return -EPERM;
if ( vcpu != VMCE_INJECT_BROADCAST && vcpu != v->vcpu_id )
continue;
- if ( (is_hvm_domain(d) ||
+ if ( (has_hvm_container_domain(d) ||
guest_has_trap_callback(d, v->vcpu_id, TRAP_machine_check)) &&
!test_and_set_bool(v->mce_pending) )
{
if (!mfn_valid(mfn_x(mfn)))
return -EINVAL;
- if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
+ if ( !has_hvm_container_domain(d) || !paging_mode_hap(d) )
return -ENOSYS;
rc = -1;
pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);
- mfn = (dp->is_hvm
+ mfn = (has_hvm_container_domain(dp)
? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
: dbg_pv_va2mfn(addr, dp, pgd3));
if ( mfn == INVALID_MFN )
spin_unlock(&d->page_alloc_lock);
}
- if ( is_hvm_domain(d) )
+ if ( has_hvm_container_domain(d) )
p2m_pod_dump_data(d);
spin_lock(&d->page_alloc_lock);
vmce_init_vcpu(v);
- if ( is_hvm_domain(d) )
+ if ( has_hvm_container_domain(d) )
{
rc = hvm_vcpu_initialise(v);
goto done;
{
vcpu_destroy_fpu(v);
- if ( !is_hvm_domain(d) )
+ if ( is_pv_domain(d) )
xfree(v->arch.pv_vcpu.trap_ctxt);
}
vcpu_destroy_fpu(v);
- if ( is_hvm_vcpu(v) )
+ if ( has_hvm_container_vcpu(v) )
hvm_vcpu_destroy(v);
else
xfree(v->arch.pv_vcpu.trap_ctxt);
int rc = -ENOMEM;
d->arch.hvm_domain.hap_enabled =
- is_hvm_domain(d) &&
+ has_hvm_container_domain(d) &&
hvm_funcs.hap_supported &&
(domcr_flags & DOMCRF_hap);
d->arch.hvm_domain.mem_sharing_enabled = 0;
d->domain_id);
}
- if ( is_hvm_domain(d) )
+ if ( has_hvm_container_domain(d) )
rc = create_perdomain_mapping(d, PERDOMAIN_VIRT_START, 0, NULL, NULL);
else if ( is_idle_domain(d) )
rc = 0;
mapcache_domain_init(d);
HYPERVISOR_COMPAT_VIRT_START(d) =
- is_hvm_domain(d) ? ~0u : __HYPERVISOR_COMPAT_VIRT_START;
+ is_pv_domain(d) ? __HYPERVISOR_COMPAT_VIRT_START : ~0u;
if ( (rc = paging_domain_init(d, domcr_flags)) != 0 )
goto fail;
goto fail;
}
- if ( is_hvm_domain(d) )
+ if ( has_hvm_container_domain(d) )
{
if ( (rc = hvm_domain_initialise(d)) != 0 )
{
if ( paging_initialised )
paging_final_teardown(d);
free_perdomain_mappings(d);
- if ( !is_hvm_domain(d) )
+ if ( is_pv_domain(d) )
free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
return rc;
}
void arch_domain_destroy(struct domain *d)
{
- if ( is_hvm_domain(d) )
+ if ( has_hvm_container_domain(d) )
hvm_domain_destroy(d);
else
xfree(d->arch.pv_domain.e820);
paging_final_teardown(d);
free_perdomain_mappings(d);
- if ( !is_hvm_domain(d) )
+ if ( is_pv_domain(d) )
free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
free_xenheap_page(d->shared_info);
#define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
flags = c(flags);
- if ( !is_hvm_vcpu(v) )
+ if ( is_pv_vcpu(v) )
{
if ( !compat )
{
v->fpu_initialised = !!(flags & VGCF_I387_VALID);
v->arch.flags &= ~TF_kernel_mode;
- if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
+ if ( (flags & VGCF_in_kernel) || has_hvm_container_vcpu(v)/*???*/ )
v->arch.flags |= TF_kernel_mode;
v->arch.vgc_flags = flags;
if ( !compat )
{
memcpy(&v->arch.user_regs, &c.nat->user_regs, sizeof(c.nat->user_regs));
- if ( !is_hvm_vcpu(v) )
+ if ( is_pv_vcpu(v) )
memcpy(v->arch.pv_vcpu.trap_ctxt, c.nat->trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
v->arch.user_regs.eflags |= 2;
- if ( is_hvm_vcpu(v) )
+ if ( has_hvm_container_vcpu(v) )
{
hvm_set_info_guest(v);
goto out;
int arch_vcpu_reset(struct vcpu *v)
{
- if ( !is_hvm_vcpu(v) )
+ if ( is_pv_vcpu(v) )
{
destroy_gdt(v);
return vcpu_destroy_pagetables(v);
static inline int need_full_gdt(struct vcpu *v)
{
- return (!is_hvm_vcpu(v) && !is_idle_vcpu(v));
+ return (is_pv_vcpu(v) && !is_idle_vcpu(v));
}
static void __context_switch(void)
{
__context_switch();
- if ( !is_hvm_vcpu(next) &&
+ if ( is_pv_vcpu(next) &&
(is_idle_vcpu(prev) ||
- is_hvm_vcpu(prev) ||
+ has_hvm_container_vcpu(prev) ||
is_pv_32on64_vcpu(prev) != is_pv_32on64_vcpu(next)) )
{
uint64_t efer = read_efer();
/* Re-enable interrupts before restoring state which may fault. */
local_irq_enable();
- if ( !is_hvm_vcpu(next) )
+ if ( is_pv_vcpu(next) )
{
load_LDT(next);
load_segments(next);
}
- set_cpuid_faulting(!is_hvm_vcpu(next) &&
+ set_cpuid_faulting(is_pv_vcpu(next) &&
(next->domain->domain_id != 0));
}
}
else
{
- if ( !is_hvm_vcpu(current) )
+ if ( is_pv_vcpu(current) )
regs->eip += 2; /* skip re-execute 'syscall' / 'int $xx' */
else
current->arch.hvm_vcpu.hcall_preempted = 0;
regs->eax = op;
/* Ensure the hypercall trap instruction is re-executed. */
- if ( !is_hvm_vcpu(current) )
+ if ( is_pv_vcpu(current) )
regs->eip -= 2; /* re-execute 'syscall' / 'int $xx' */
else
current->arch.hvm_vcpu.hcall_preempted = 1;
- if ( !is_hvm_vcpu(current) ?
+ if ( is_pv_vcpu(current) ?
!is_pv_32on64_vcpu(current) :
(hvm_guest_x86_mode(current) == 8) )
{
return ret;
}
- if ( !is_hvm_domain(d) )
+ if ( is_pv_domain(d) )
{
for_each_vcpu ( d, v )
{
BUG();
}
- if ( is_hvm_domain(d) )
+ if ( has_hvm_container_domain(d) )
hvm_domain_relinquish_resources(d);
return 0;
if ( already_pending )
return;
- if ( is_hvm_vcpu(v) )
+ if ( has_hvm_container_vcpu(v) )
hvm_assert_evtchn_irq(v);
else
vcpu_kick(v);
* then it means we are running on the idle domain's page table and must
* therefore use its mapcache.
*/
- if ( unlikely(pagetable_is_null(v->arch.guest_table)) && !is_hvm_vcpu(v) )
+ if ( unlikely(pagetable_is_null(v->arch.guest_table)) && is_pv_vcpu(v) )
{
/* If we really are idling, perform lazy context switch now. */
if ( (v = idle_vcpu[smp_processor_id()]) == current )
#endif
v = mapcache_current_vcpu();
- if ( !v || is_hvm_vcpu(v) )
+ if ( !v || !is_pv_vcpu(v) )
return mfn_to_virt(mfn);
dcache = &v->domain->arch.pv_domain.mapcache;
ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
v = mapcache_current_vcpu();
- ASSERT(v && !is_hvm_vcpu(v));
+ ASSERT(v && is_pv_vcpu(v));
dcache = &v->domain->arch.pv_domain.mapcache;
ASSERT(dcache->inuse);
struct mapcache_domain *dcache = &d->arch.pv_domain.mapcache;
unsigned int bitmap_pages;
- if ( is_hvm_domain(d) || is_idle_domain(d) )
+ if ( !is_pv_domain(d) || is_idle_domain(d) )
return 0;
#ifdef NDEBUG
unsigned int ents = d->max_vcpus * MAPCACHE_VCPU_ENTRIES;
unsigned int nr = PFN_UP(BITS_TO_LONGS(ents) * sizeof(long));
- if ( is_hvm_vcpu(v) || !dcache->inuse )
+ if ( !is_pv_vcpu(v) || !dcache->inuse )
return 0;
if ( ents > dcache->entries )
if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
{
evc->size = sizeof(*evc);
- if ( !is_hvm_domain(d) )
+ if ( is_pv_domain(d) )
{
evc->sysenter_callback_cs =
v->arch.pv_vcpu.sysenter_callback_cs;
ret = -EINVAL;
if ( evc->size < offsetof(typeof(*evc), vmce) )
goto ext_vcpucontext_out;
- if ( !is_hvm_domain(d) )
+ if ( is_pv_domain(d) )
{
if ( !is_canonical_address(evc->sysenter_callback_eip) ||
!is_canonical_address(evc->syscall32_callback_eip) )
bool_t compat = is_pv_32on64_domain(v->domain);
#define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
- if ( is_hvm_vcpu(v) )
+ if ( !is_pv_vcpu(v) )
memset(c.nat, 0, sizeof(*c.nat));
memcpy(&c.nat->fpu_ctxt, v->arch.fpu_ctxt, sizeof(c.nat->fpu_ctxt));
c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel));
if ( !compat )
{
memcpy(&c.nat->user_regs, &v->arch.user_regs, sizeof(c.nat->user_regs));
- if ( !is_hvm_vcpu(v) )
+ if ( is_pv_vcpu(v) )
memcpy(c.nat->trap_ctxt, v->arch.pv_vcpu.trap_ctxt,
sizeof(c.nat->trap_ctxt));
}
for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
c(debugreg[i] = v->arch.debugreg[i]);
- if ( is_hvm_vcpu(v) )
+ if ( has_hvm_container_vcpu(v) )
{
struct segment_register sreg;
/* prevent fixup_page_fault() from doing anything */
irq_enter();
- if ( !is_hvm_vcpu(current) && !is_idle_vcpu(current) )
+ if ( is_pv_vcpu(current) && !is_idle_vcpu(current) )
{
struct desc_ptr gdt_desc = {
.limit = LAST_RESERVED_GDT_BYTE,
void efi_rs_leave(unsigned long cr3)
{
write_cr3(cr3);
- if ( !is_hvm_vcpu(current) && !is_idle_vcpu(current) )
+ if ( is_pv_vcpu(current) && !is_idle_vcpu(current) )
{
struct desc_ptr gdt_desc = {
.limit = LAST_RESERVED_GDT_BYTE,
{
/* Don't confuse vmx_do_resume (for @v or @current!) */
vmx_clear_vmcs(v);
- if ( is_hvm_vcpu(current) )
+ if ( has_hvm_container_vcpu(current) )
vmx_load_vmcs(current);
spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
for_each_domain ( d )
{
- if ( !is_hvm_domain(d) )
+ if ( !has_hvm_container_domain(d) )
continue;
printk("\n>>> Domain %d <<<\n", d->domain_id);
for_each_vcpu ( d, v )
(rangeset_is_empty((d)->iomem_caps) && \
rangeset_is_empty((d)->arch.ioport_caps) && \
!has_arch_pdevs(d) && \
- !is_hvm_domain(d)) ? \
+ is_pv_domain(d)) ? \
L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
static void __init init_frametable_chunk(void *start, void *end)
unsigned long domain_get_maximum_gpfn(struct domain *d)
{
- if ( is_hvm_domain(d) )
+ if ( has_hvm_container_domain(d) )
return p2m_get_hostp2m(d)->max_mapped_pfn;
/* NB. PV guests specify nr_pfns rather than max_pfn so we adjust here. */
return (arch_get_max_pfn(d) ?: 1) - 1;
{
/* Special pages should not be accessible from devices. */
struct domain *d = page_get_owner(page);
- if ( d && !is_hvm_domain(d) && unlikely(need_iommu(d)) )
+ if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
{
if ( (x & PGT_type_mask) == PGT_writable_page )
iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
sh_ctxt->ctxt.regs = regs;
sh_ctxt->ctxt.force_writeback = 0;
- if ( !is_hvm_vcpu(v) )
+ if ( is_pv_vcpu(v) )
{
sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = BITS_PER_LONG;
return &pv_shadow_emulator_ops;
if ( pg->shadow_flags &
((SHF_page_type_mask & ~SHF_L1_ANY) | SHF_out_of_sync)
|| sh_page_has_multiple_shadows(pg)
- || !is_hvm_domain(v->domain)
+ || is_pv_domain(v->domain)
|| !v->domain->arch.paging.shadow.oos_active )
return 0;
if ( v->arch.paging.mode )
v->arch.paging.mode->shadow.detach_old_tables(v);
- if ( !is_hvm_domain(d) )
+ if ( is_pv_domain(d) )
{
///
/// PV guest
// supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
// It is always shadowed as present...
if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
- && !is_hvm_domain(d) )
+ && is_pv_domain(d) )
{
sflags |= _PAGE_USER;
}
#endif
/* Don't do anything on an uninitialised vcpu */
- if ( !is_hvm_domain(d) && !v->is_initialised )
+ if ( is_pv_domain(d) && !v->is_initialised )
{
ASSERT(v->arch.cr3 == 0);
return;
spin_unlock(&v->domain->event_lock);
break;
}
- if ( !is_hvm_domain(v->domain) &&
+ if ( is_pv_domain(v->domain) &&
v->domain->arch.pv_domain.auto_unmask )
evtchn_unmask(pirq->evtchn);
- if ( !is_hvm_domain(v->domain) ||
+ if ( is_pv_domain(v->domain) ||
domain_pirq_to_irq(v->domain, eoi.irq) > 0 )
pirq_guest_eoi(pirq);
if ( is_hvm_domain(v->domain) &&
unsigned long *stack, addr;
unsigned long mask = STACK_SIZE;
+ /* Avoid HVM as we don't know what the stack looks like. */
if ( is_hvm_vcpu(v) )
return;
}
if ( ((trapnr == TRAP_copro_error) || (trapnr == TRAP_simd_error)) &&
- is_hvm_vcpu(curr) && curr->arch.hvm_vcpu.fpu_exception_callback )
+ has_hvm_container_vcpu(curr) &&
+ curr->arch.hvm_vcpu.fpu_exception_callback )
{
curr->arch.hvm_vcpu.fpu_exception_callback(
curr->arch.hvm_vcpu.fpu_exception_callback_arg, regs);
*ebx = 0x40000200;
*ecx = 0; /* Features 1 */
*edx = 0; /* Features 2 */
- if ( !is_hvm_vcpu(current) )
+ if ( is_pv_vcpu(current) )
*ecx |= XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD;
break;
l2_pgentry_t l2e, *l2t;
l1_pgentry_t l1e, *l1t;
- if ( is_hvm_vcpu(v) || !is_canonical_address(addr) )
+ if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
return NULL;
l4t = map_domain_page(mfn);
enum context context;
struct vcpu *v = current;
- if ( is_hvm_vcpu(v) && guest_mode(regs) )
+ if ( has_hvm_container_vcpu(v) && guest_mode(regs) )
{
struct segment_register sreg;
context = CTXT_hvm_guest;
const struct cpu_user_regs *regs = &v->arch.user_regs;
unsigned long crs[8];
- /* No need to handle HVM for now. */
- if ( is_hvm_vcpu(v) )
+ /* Only handle PV guests for now. */
+ if ( !is_pv_vcpu(v) )
return;
crs[0] = v->arch.pv_vcpu.ctrlreg[0];
void hypercall_page_initialise(struct domain *d, void *hypercall_page)
{
memset(hypercall_page, 0xCC, PAGE_SIZE);
- if ( is_hvm_domain(d) )
+ if ( has_hvm_container_domain(d) )
hvm_hypercall_page_initialise(d, hypercall_page);
else if ( !is_pv_32bit_domain(d) )
hypercall_page_initialise_ring3_kernel(hypercall_page);
goto fail;
if ( domcr_flags & DOMCRF_hvm )
- d->is_hvm = 1;
+ d->guest_type = guest_type_hvm;
if ( domid == 0 )
{
double_gt_lock(lgt, rgt);
- if ( !is_hvm_domain(ld) && need_iommu(ld) )
+ if ( is_pv_domain(ld) && need_iommu(ld) )
{
unsigned int wrc, rdc;
int err = 0;
act->pin -= GNTPIN_hstw_inc;
}
- if ( !is_hvm_domain(ld) && need_iommu(ld) )
+ if ( is_pv_domain(ld) && need_iommu(ld) )
{
unsigned int wrc, rdc;
int err = 0;
if ( current->domain == dom0 )
fi.submap |= 1U << XENFEAT_dom0;
#ifdef CONFIG_X86
- if ( !is_hvm_vcpu(current) )
+ if ( is_pv_vcpu(current) )
fi.submap |= (1U << XENFEAT_mmu_pt_update_preserve_ad) |
(1U << XENFEAT_highmem_assist) |
(1U << XENFEAT_gnttab_map_avail_bits);
#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
#define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
-#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
+#define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
static inline int local_events_need_delivery(void)
{
struct vcpu *v = current;
- return (is_hvm_vcpu(v) ? hvm_local_events_need_delivery(v) :
+ return (has_hvm_container_vcpu(v) ? hvm_local_events_need_delivery(v) :
(vcpu_info(v, evtchn_upcall_pending) &&
!vcpu_info(v, evtchn_upcall_mask)));
}
/* Raw access functions: no type checking. */
#define raw_copy_to_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
+ (has_hvm_container_vcpu(current) ? \
copy_to_user_hvm((dst), (src), (len)) : \
copy_to_user((dst), (src), (len)))
#define raw_copy_from_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
+ (has_hvm_container_vcpu(current) ? \
copy_from_user_hvm((dst), (src), (len)) : \
copy_from_user((dst), (src), (len)))
#define raw_clear_guest(dst, len) \
- (is_hvm_vcpu(current) ? \
+ (has_hvm_container_vcpu(current) ? \
clear_user_hvm((dst), (len)) : \
clear_user((dst), (len)))
#define __raw_copy_to_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
+ (has_hvm_container_vcpu(current) ? \
copy_to_user_hvm((dst), (src), (len)) : \
__copy_to_user((dst), (src), (len)))
#define __raw_copy_from_guest(dst, src, len) \
- (is_hvm_vcpu(current) ? \
+ (has_hvm_container_vcpu(current) ? \
copy_from_user_hvm((dst), (src), (len)) : \
__copy_from_user((dst), (src), (len)))
#define __raw_clear_guest(dst, len) \
- (is_hvm_vcpu(current) ? \
+ (has_hvm_container_vcpu(current) ? \
clear_user_hvm((dst), (len)) : \
clear_user((dst), (len)))
/* The _PAGE_PSE bit must be honoured in HVM guests, whenever
* CR4.PSE is set or the guest is in PAE or long mode.
* It's also used in the dummy PT for vcpus with CR4.PG cleared. */
- return (!is_hvm_vcpu(v)
+ return (is_pv_vcpu(v)
? opt_allow_superpage
: (GUEST_PAGING_LEVELS != 2
|| !hvm_paging_enabled(v)
{
if ( GUEST_PAGING_LEVELS == 2 || !cpu_has_nx )
return 0;
- if ( !is_hvm_vcpu(v) )
+ if ( is_pv_vcpu(v) )
return cpu_has_nx;
return hvm_nx_enabled(v);
}
struct evtchn_port_ops;
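+/* The kind of guest a domain runs: PV, or one with an HVM container. */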
+enum guest_type {
+ guest_type_pv, guest_type_hvm
+};
+
struct domain
{
domid_t domain_id;
struct rangeset *iomem_caps;
struct rangeset *irq_caps;
- /* Is this an HVM guest? */
- bool_t is_hvm;
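+ /* Guest type: classic PV, or a guest with an HVM container. */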
+ enum guest_type guest_type;
+
#ifdef HAS_PASSTHROUGH
/* Does this guest need iommu mappings? */
bool_t need_iommu;
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
-#define is_hvm_domain(d) ((d)->is_hvm)
+#define is_pv_domain(d) ((d)->guest_type == guest_type_pv)
+#define is_pv_vcpu(v) (is_pv_domain((v)->domain))
+#define is_hvm_domain(d) ((d)->guest_type == guest_type_hvm)
#define is_hvm_vcpu(v) (is_hvm_domain(v->domain))
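+/* An "HVM container" is any guest that is not classic PV. */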
+#define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
+#define has_hvm_container_vcpu(v) (has_hvm_container_domain((v)->domain))
#define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
cpumask_weight((v)->cpu_affinity) == 1)
#ifdef HAS_PASSTHROUGH
static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
#ifdef CONFIG_COMPAT
- if ( is_hvm_vcpu(current) ?
+ if ( has_hvm_container_vcpu(current) ?
hvm_guest_x86_mode(current) != 8 :
is_pv_32on64_vcpu(current) )
{