/*
* Virtual UART is only used by linux early printk and decompress code.
- * Only use it for dom0 because the linux kernel may not support
- * multi-platform.
+ * Only use it for the hardware domain because the linux kernel may not
+ * support multi-platform.
*/
- if ( (d->domain_id == 0) && (rc = domain_vuart_init(d)) )
+ if ( is_hardware_domain(d) && (rc = domain_vuart_init(d)) )
goto fail;
return 0;
int ret;
/*
- * Domain 0 gets the hardware address.
+ * The hardware domain gets the hardware address.
* Guests get the virtual platform layout.
*/
- if ( d->domain_id == 0 )
+ if ( is_hardware_domain(d) )
{
d->arch.vgic.dbase = gic.dbase;
d->arch.vgic.cbase = gic.cbase;
/* Currently nr_lines in vgic and gic doesn't have the same meanings
* Here nr_lines = number of SPIs
*/
- if ( d->domain_id == 0 )
+ if ( is_hardware_domain(d) )
d->arch.vgic.nr_lines = gic_number_lines() - 32;
else
d->arch.vgic.nr_lines = 0; /* We don't need SPIs for the guest */
int vcpu_vtimer_init(struct vcpu *v)
{
struct vtimer *t = &v->arch.phys_timer;
- bool_t d0 = (v->domain == dom0);
+ bool_t d0 = is_hardware_domain(v->domain);
/*
- * Domain 0 uses the hardware interrupts, guests get the virtual platform.
+ * Hardware domain uses the hardware interrupts, guests get the virtual
+ * platform.
*/
init_timer(&t->timer, phys_timer_expired, t, v->processor);
int domain_vuart_init(struct domain *d)
{
- ASSERT( !d->domain_id );
+ ASSERT( is_hardware_domain(d) );
d->arch.vuart.info = serial_vuart_info(SERHND_DTUART);
if ( !d->arch.vuart.info )
int rc;
- /* Always trust dom0's MCE handler will prevent future access */
+ /* Always trust the hardware domain's MCE handler will prevent future access */
- if ( d == dom0 )
+ if ( is_hardware_domain(d) )
return 0;
if (!mfn_valid(mfn_x(mfn)))
}
set_cpuid_faulting(is_pv_vcpu(next) &&
- (next->domain->domain_id != 0));
+ !is_control_domain(next->domain) &&
+ !is_hardware_domain(next->domain));
}
if (is_hvm_vcpu(next) && (prev != next) )
.data = data
};
- if ( (current->domain->domain_id == 0) && dom0_pit_access(&ioreq) )
+ if ( is_hardware_domain(current->domain) && dom0_pit_access(&ioreq) )
{
/* nothing to do */;
}
.max_mfn = MACH2PHYS_NR_ENTRIES - 1
};
- if ( !mem_hotplug && current->domain == dom0 )
+ if ( !mem_hotplug && is_hardware_domain(current->domain) )
mapping.max_mfn = max_page - 1;
if ( copy_to_guest(arg, &mapping, 1) )
return -EFAULT;
uint32_t tsc_mode, uint64_t elapsed_nsec,
uint32_t gtsc_khz, uint32_t incarnation)
{
- if ( is_idle_domain(d) || (d->domain_id == 0) )
+ if ( is_idle_domain(d) || is_hardware_domain(d) )
{
d->arch.vtsc = 0;
return;
"warp=%lu (count=%lu)\n", tsc_max_warp, tsc_check_count);
for_each_domain ( d )
{
- if ( d->domain_id == 0 && d->arch.tsc_mode == TSC_MODE_DEFAULT )
+ if ( is_hardware_domain(d) && d->arch.tsc_mode == TSC_MODE_DEFAULT )
continue;
printk("dom%u%s: mode=%d",d->domain_id,
is_hvm_domain(d) ? "(hvm)" : "", d->arch.tsc_mode);
void pv_cpuid(struct cpu_user_regs *regs)
{
uint32_t a, b, c, d;
+ struct vcpu *curr = current;
a = regs->eax;
b = regs->ebx;
c = regs->ecx;
d = regs->edx;
- if ( current->domain->domain_id != 0 )
+ if ( !is_control_domain(curr->domain) && !is_hardware_domain(curr->domain) )
{
unsigned int cpuid_leaf = a, sub_leaf = c;
if ( !cpuid_hypervisor_leaves(a, c, &a, &b, &c, &d) )
- domain_cpuid(current->domain, a, c, &a, &b, &c, &d);
+ domain_cpuid(curr->domain, a, c, &a, &b, &c, &d);
switch ( cpuid_leaf )
{
{
unsigned int _eax, _ebx, _ecx, _edx;
/* EBX value of main leaf 0 depends on enabled xsave features */
- if ( sub_leaf == 0 && current->arch.xcr0 )
+ if ( sub_leaf == 0 && curr->arch.xcr0 )
{
/* reset EBX to default value first */
b = XSTATE_AREA_MIN_SIZE;
for ( sub_leaf = 2; sub_leaf < 63; sub_leaf++ )
{
- if ( !(current->arch.xcr0 & (1ULL << sub_leaf)) )
+ if ( !(curr->arch.xcr0 & (1ULL << sub_leaf)) )
continue;
- domain_cpuid(current->domain, cpuid_leaf, sub_leaf,
+ domain_cpuid(curr->domain, cpuid_leaf, sub_leaf,
&_eax, &_ebx, &_ecx, &_edx);
if ( (_eax + _ebx) > b )
b = _eax + _ebx;
__clear_bit(X86_FEATURE_DS, &d);
__clear_bit(X86_FEATURE_ACC, &d);
__clear_bit(X86_FEATURE_PBE, &d);
- if ( is_pvh_vcpu(current) )
+ if ( is_pvh_vcpu(curr) )
__clear_bit(X86_FEATURE_MTRR, &d);
__clear_bit(X86_FEATURE_DTES64 % 32, &c);
__clear_bit(X86_FEATURE_VMXE % 32, &c);
__clear_bit(X86_FEATURE_SMXE % 32, &c);
__clear_bit(X86_FEATURE_TM2 % 32, &c);
- if ( is_pv_32bit_vcpu(current) )
+ if ( is_pv_32bit_vcpu(curr) )
__clear_bit(X86_FEATURE_CX16 % 32, &c);
__clear_bit(X86_FEATURE_XTPR % 32, &c);
__clear_bit(X86_FEATURE_PDCM % 32, &c);
case 0x80000001:
/* Modify Feature Information. */
- if ( is_pv_32bit_vcpu(current) )
+ if ( is_pv_32bit_vcpu(curr) )
{
__clear_bit(X86_FEATURE_LM % 32, &d);
__clear_bit(X86_FEATURE_LAHF_LM % 32, &c);
}
- if ( is_pv_32on64_vcpu(current) &&
+ if ( is_pv_32on64_vcpu(curr) &&
boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
__clear_bit(X86_FEATURE_SYSCALL % 32, &d);
__clear_bit(X86_FEATURE_PAGE1GB % 32, &d);
static int is_cpufreq_controller(struct domain *d)
{
return ((cpufreq_controller == FREQCTL_dom0_kernel) &&
- (d->domain_id == 0));
+ is_hardware_domain(d));
}
#include "x86_64/mmconfig.h"
else if ( domcr_flags & DOMCRF_pvh )
d->guest_type = guest_type_pvh;
- if ( domid == 0 )
+ if ( is_hardware_domain(d) )
{
d->is_pinned = opt_dom0_vcpus_pin;
d->disable_migrate = 1;
d->is_paused_by_controller = 1;
atomic_inc(&d->pause_count);
- if ( domid )
+ if ( !is_hardware_domain(d) )
d->nr_pirqs = nr_static_irqs + extra_domU_irqs;
else
d->nr_pirqs = nr_static_irqs + extra_dom0_irqs;
d->shutdown_code = reason;
reason = d->shutdown_code;
- if ( d->domain_id == 0 )
+ if ( is_hardware_domain(d) )
dom0_shutdown(reason);
if ( d->is_shutting_down )
(1U << XENFEAT_auto_translated_physmap);
if ( supervisor_mode_kernel )
fi.submap |= 1U << XENFEAT_supervisor_mode_kernel;
- if ( current->domain == dom0 )
+ if ( is_hardware_domain(current->domain) )
fi.submap |= 1U << XENFEAT_dom0;
#ifdef CONFIG_X86
switch ( d->guest_type )
xenoprof_init.cpu_type)) )
return ret;
+ /*
+  * Only the hardware domain may become the primary profiler here because
+  * there is currently no cleanup of xenoprof_primary_profiler or associated
+  * profiling state when the primary profiling domain is shut down or
+  * crashes.  Once a better cleanup method is present, it will be possible
+  * to allow another domain to be the primary profiler.
+  */
xenoprof_init.is_primary =
((xenoprof_primary_profiler == d) ||
- ((xenoprof_primary_profiler == NULL) && (d->domain_id == 0)));
+ ((xenoprof_primary_profiler == NULL) && is_hardware_domain(d)));
if ( xenoprof_init.is_primary )
xenoprof_primary_profiler = current->domain;
BUG_ON( !hd->root_table || !hd->paging_mode || !iommu->dev_table.buffer );
- if ( iommu_passthrough && (domain->domain_id == 0) )
+ if ( iommu_passthrough && is_hardware_domain(domain) )
valid = 0;
if ( ats_enabled )
ops = iommu_get_ops();
for_each_domain(d)
{
- if ( !d->domain_id )
+ if ( is_hardware_domain(d) )
continue;
if ( iommu_use_hap_pt(d) )
return res;
}
- if ( iommu_passthrough && (domain->domain_id == 0) )
+ if ( iommu_passthrough && is_hardware_domain(domain) )
{
context_set_translation_type(*context, CONTEXT_TT_PASS_THRU);
agaw = level_to_agaw(iommu->nr_pt_levels);
return 0;
- /* do nothing if dom0 and iommu supports pass thru */
+ /* Do nothing if the hardware domain and iommu supports pass thru. */
- if ( iommu_passthrough && (d->domain_id == 0) )
+ if ( iommu_passthrough && is_hardware_domain(d) )
return 0;
spin_lock(&hd->mapping_lock);
static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn)
{
- /* Do nothing if dom0 and iommu supports pass thru. */
+ /* Do nothing if the hardware domain and iommu supports pass thru. */
- if ( iommu_passthrough && (d->domain_id == 0) )
+ if ( iommu_passthrough && is_hardware_domain(d) )
return 0;
dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
continue;
/*
- * If the device belongs to dom0, and it has RMRR, don't remove
- * it from dom0, because BIOS may use RMRR at booting time.
+ * If the device belongs to the hardware domain, and it has RMRR, don't
+ * remove it from the hardware domain, because BIOS may use RMRR at
+ * booting time.
*/
if ( is_hardware_domain(pdev->domain) )
return 0;
{
unsigned long i, j, tmp, top;
- BUG_ON(d->domain_id != 0);
+ BUG_ON(!is_hardware_domain(d));
top = max(max_pdx, pfn_to_pdx(0xffffffffUL >> PAGE_SHIFT) + 1);
/*
* Use this check when the following are both true:
* - Using this feature or interface requires full access to the hardware
- * (that is, this is would not be suitable for a driver domain)
+ * (that is, this would not be suitable for a driver domain)
* - There is never a reason to deny dom0 access to this
*/
-#define is_hardware_domain(_d) ((_d)->is_privileged)
+#define is_hardware_domain(_d) ((_d)->domain_id == 0)
/* This check is for functionality specific to a control domain */
#define is_control_domain(_d) ((_d)->is_privileged)