*/
#define DOM0_FDT_EXTRA_SIZE (128 + sizeof(struct fdt_reserve_entry))
-struct vcpu *__init alloc_dom0_vcpu0(void)
+struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0)
{
if ( opt_dom0_max_vcpus == 0 )
opt_dom0_max_vcpus = num_online_cpus();
size_t fdt_size;
int cpus, i;
const char *cmdline;
+ struct domain *dom0;
setup_cache();
do_initcalls();
/* Create initial domain 0. */
- dom0 = domain_create(0, 0, 0);
- if ( IS_ERR(dom0) || (alloc_dom0_vcpu0() == NULL) )
+ hardware_domain = dom0 = domain_create(0, 0, 0);
+ if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) )
panic("Error creating domain 0");
dom0->is_privileged = 1;
cx->entry_method = ACPI_CSTATE_EM_HALT;
break;
case ACPI_ADR_SPACE_SYSTEM_IO:
- if ( ioports_deny_access(dom0, cx->address, cx->address) )
+ if ( ioports_deny_access(hardware_domain, cx->address, cx->address) )
printk(XENLOG_WARNING "Could not deny access to port %04x\n",
cx->address);
cx->entry_method = ACPI_CSTATE_EM_SYSIO;
int vmce_init(struct cpuinfo_x86 *c);
-#define dom0_vmce_enabled() (dom0 && dom0->max_vcpus && dom0->vcpu[0] \
- && guest_enabled_event(dom0->vcpu[0], VIRQ_MCA))
+#define dom0_vmce_enabled() (hardware_domain && hardware_domain->max_vcpus \
+ && hardware_domain->vcpu[0] \
+ && guest_enabled_event(hardware_domain->vcpu[0], VIRQ_MCA))
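For context, this macro guards forwarding of machine-check events to the hardware domain; a minimal sketch of a call site, assuming the usual send_global_virq() helper:

    /* Forward the machine check only if the hardware domain has bound
     * a handler for VIRQ_MCA; otherwise the event would be lost. */
    if ( dom0_vmce_enabled() )
        send_global_virq(VIRQ_MCA);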
int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn);
info = kexec_crash_save_info();
info->xen_phys_start = xen_phys_start;
info->dom0_pfn_to_mfn_frame_list_list =
- arch_get_pfn_to_mfn_frame_list_list(dom0);
+ arch_get_pfn_to_mfn_frame_list_list(hardware_domain);
}
/*
}
custom_param("dom0_max_vcpus", parse_dom0_max_vcpus);
-struct vcpu *__init alloc_dom0_vcpu0(void)
+struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0)
{
unsigned max_vcpus;
printk("Disabling dom0 access to ioport range %04lx-%04lx\n",
io_from, io_to);
- if ( ioports_deny_access(dom0, io_from, io_to) != 0 )
+ if ( ioports_deny_access(hardware_domain, io_from, io_to) != 0 )
BUG();
}
}
rc = 0;
- /* DOM0 is permitted full I/O capabilities. */
- rc |= ioports_permit_access(dom0, 0, 0xFFFF);
- rc |= iomem_permit_access(dom0, 0UL, ~0UL);
- rc |= irqs_permit_access(dom0, 1, nr_irqs_gsi - 1);
+ /* The hardware domain is initially permitted full I/O capabilities. */
+ rc |= ioports_permit_access(hardware_domain, 0, 0xFFFF);
+ rc |= iomem_permit_access(hardware_domain, 0UL, ~0UL);
+ rc |= irqs_permit_access(hardware_domain, 1, nr_irqs_gsi - 1);
/*
* Modify I/O port access permissions.
*/
/* Master Interrupt Controller (PIC). */
- rc |= ioports_deny_access(dom0, 0x20, 0x21);
+ rc |= ioports_deny_access(hardware_domain, 0x20, 0x21);
/* Slave Interrupt Controller (PIC). */
- rc |= ioports_deny_access(dom0, 0xA0, 0xA1);
+ rc |= ioports_deny_access(hardware_domain, 0xA0, 0xA1);
/* Interval Timer (PIT). */
- rc |= ioports_deny_access(dom0, 0x40, 0x43);
+ rc |= ioports_deny_access(hardware_domain, 0x40, 0x43);
/* PIT Channel 2 / PC Speaker Control. */
- rc |= ioports_deny_access(dom0, 0x61, 0x61);
+ rc |= ioports_deny_access(hardware_domain, 0x61, 0x61);
/* ACPI PM Timer. */
if ( pmtmr_ioport )
- rc |= ioports_deny_access(dom0, pmtmr_ioport, pmtmr_ioport + 3);
+ rc |= ioports_deny_access(hardware_domain, pmtmr_ioport,
+ pmtmr_ioport + 3);
/* PCI configuration space (NB. 0xcf8 has special treatment). */
- rc |= ioports_deny_access(dom0, 0xcfc, 0xcff);
+ rc |= ioports_deny_access(hardware_domain, 0xcfc, 0xcff);
/* Command-line I/O ranges. */
process_dom0_ioports_disable();
if ( mp_lapic_addr != 0 )
{
mfn = paddr_to_pfn(mp_lapic_addr);
- rc |= iomem_deny_access(dom0, mfn, mfn);
+ rc |= iomem_deny_access(hardware_domain, mfn, mfn);
}
/* I/O APICs. */
for ( i = 0; i < nr_ioapics; i++ )
{
mfn = paddr_to_pfn(mp_ioapics[i].mpc_apicaddr);
if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
- rc |= iomem_deny_access(dom0, mfn, mfn);
+ rc |= iomem_deny_access(hardware_domain, mfn, mfn);
}
/* MSI range. */
- rc |= iomem_deny_access(dom0, paddr_to_pfn(MSI_ADDR_BASE_LO),
+ rc |= iomem_deny_access(hardware_domain, paddr_to_pfn(MSI_ADDR_BASE_LO),
paddr_to_pfn(MSI_ADDR_BASE_LO +
MSI_ADDR_DEST_ID_MASK));
/* HyperTransport range. */
if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- rc |= iomem_deny_access(dom0, paddr_to_pfn(0xfdULL << 32),
+ rc |= iomem_deny_access(hardware_domain, paddr_to_pfn(0xfdULL << 32),
paddr_to_pfn((1ULL << 40) - 1));
/* Remove access to E820_UNUSABLE I/O regions above 1MB. */
if ( (e820.map[i].type == E820_UNUSABLE) &&
(e820.map[i].size != 0) &&
(sfn <= efn) )
- rc |= iomem_deny_access(dom0, sfn, efn);
+ rc |= iomem_deny_access(hardware_domain, sfn, efn);
}
BUG_ON(rc != 0);
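The hunk above follows a permit-everything-then-carve-out pattern. A minimal self-contained sketch, assuming a hypothetical helper name and showing only a few of the ranges involved:

    /* Hypothetical condensation of the pattern above: grant the hardware
     * domain the whole I/O port space, then revoke the ranges that Xen
     * itself drives (PICs, PIT); any failure is fatal at boot. */
    static int __init hwdom_default_ioport_access(struct domain *hwdom)
    {
        int rc = ioports_permit_access(hwdom, 0, 0xFFFF);

        rc |= ioports_deny_access(hwdom, 0x20, 0x21); /* master PIC */
        rc |= ioports_deny_access(hwdom, 0xA0, 0xA1); /* slave PIC */
        rc |= ioports_deny_access(hwdom, 0x40, 0x43); /* PIT */
        return rc;
    }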
printk(" Xen warning: dom0 kernel broken ELF: %s\n",
elf_check_broken(&elf));
- iommu_dom0_init(dom0);
+ iommu_dom0_init(hardware_domain);
return 0;
out:
* that dom0 pirq == irq.
*/
pirq = (irq >= 256) ? irq : rte.vector;
- if ( (pirq < 0) || (pirq >= dom0->nr_pirqs) )
+ if ( (pirq < 0) || (pirq >= hardware_domain->nr_pirqs) )
return -EINVAL;
if ( desc->action )
printk(XENLOG_INFO "allocated vector %02x for irq %d\n", ret, irq);
}
- spin_lock(&dom0->event_lock);
- ret = map_domain_pirq(dom0, pirq, irq,
+ spin_lock(&hardware_domain->event_lock);
+ ret = map_domain_pirq(hardware_domain, pirq, irq,
MAP_PIRQ_TYPE_GSI, NULL);
- spin_unlock(&dom0->event_lock);
+ spin_unlock(&hardware_domain->event_lock);
if ( ret < 0 )
return ret;
desc->arch.used = IRQ_UNUSED;
irq = ret;
}
- else if ( dom0 )
+ else if ( hardware_domain )
{
- ret = irq_permit_access(dom0, irq);
+ ret = irq_permit_access(hardware_domain, irq);
if ( ret )
printk(XENLOG_G_ERR
"Could not grant Dom0 access to IRQ%d (error %d)\n",
BUG_ON(!MSI_IRQ(irq));
- if ( dom0 )
+ if ( hardware_domain )
{
- int err = irq_deny_access(dom0, irq);
+ int err = irq_deny_access(hardware_domain, irq);
if ( err )
printk(XENLOG_G_ERR
{
void *ptr = alloc_xenheap_page();
- BUG_ON(!dom0 && !ptr);
+ BUG_ON(!hardware_domain && !ptr);
return ptr;
}
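The BUG_ON above is terse; a long-hand equivalent (pure expansion, no behavioural change):

    /* Before the hardware domain exists (i.e. during early boot), a
     * failed allocation is fatal; afterwards callers must cope with a
     * NULL return. */
    if ( hardware_domain == NULL && ptr == NULL )
        BUG();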
for_each_online_cpu ( i )
printk("%3d\t%3d\n", i, nmi_count(i));
- if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
+ if ( ((d = hardware_domain) == NULL) || (d->vcpu == NULL) ||
((v = d->vcpu[0]) == NULL) )
return;
unsigned long nr_pages, raw_max_page, modules_headroom, *module_map;
int i, j, e820_warn = 0, bytes = 0;
bool_t acpi_boot_table_init_done = 0;
+ struct domain *dom0;
struct ns16550_defaults ns16550 = {
.data_bits = 8,
.parity = 'n',
panic("Could not protect TXT memory regions");
/* Create initial domain 0. */
- dom0 = domain_create(0, DOMCRF_s3_integrity, 0);
- if ( IS_ERR(dom0) || (alloc_dom0_vcpu0() == NULL) )
+ hardware_domain = dom0 = domain_create(0, DOMCRF_s3_integrity, 0);
+ if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) )
panic("Error creating domain 0");
dom0->is_privileged = 1;
static void nmi_dom0_report(unsigned int reason_idx)
{
- struct domain *d = dom0;
+ struct domain *d = hardware_domain;
if ( (d == NULL) || (d->vcpu == NULL) || (d->vcpu[0] == NULL) )
return;
if ( ret )
goto destroy_m2p;
- if ( !need_iommu(dom0) )
+ if ( !need_iommu(hardware_domain) )
{
for ( i = spfn; i < epfn; i++ )
- if ( iommu_map_page(dom0, i, i, IOMMUF_readable|IOMMUF_writable) )
+ if ( iommu_map_page(hardware_domain, i, i,
+ IOMMUF_readable|IOMMUF_writable) )
break;
if ( i != epfn )
{
while (i-- > old_max)
- iommu_unmap_page(dom0, i);
+ iommu_unmap_page(hardware_domain, i);
goto destroy_m2p;
}
}
static struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;
-struct domain *dom0;
+struct domain *hardware_domain __read_mostly;
struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
ASSERT(virq < NR_VIRQS);
ASSERT(virq_is_global(virq));
- send_guest_global_virq(global_virq_handlers[virq] ?: dom0, virq);
+ send_guest_global_virq(global_virq_handlers[virq] ?: hardware_domain, virq);
}
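The GNU ?: shorthand used above expands as follows (no behavioural change):

    /* An explicitly registered handler domain wins; otherwise the
     * event is delivered to the hardware domain. */
    struct domain *d = global_virq_handlers[virq];

    if ( d == NULL )
        d = hardware_domain;
    send_guest_global_virq(d, virq);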
int set_global_virq_handler(struct domain *d, uint32_t virq)
static uint16_t kexec_load_v1_arch(void)
{
#ifdef CONFIG_X86
- return is_pv_32on64_domain(dom0) ? EM_386 : EM_X86_64;
+ return is_pv_32on64_domain(hardware_domain) ? EM_386 : EM_X86_64;
#else
return EM_NONE;
#endif
{
struct vcpu *v;
- if ( dom0 == NULL )
+ if ( hardware_domain == NULL )
return;
printk("'%c' pressed -> dumping Dom0's registers\n", key);
- for_each_vcpu ( dom0, v )
+ for_each_vcpu ( hardware_domain, v )
{
if ( alt_key_handling && softirq_pending(smp_processor_id()) )
{
bufsize = sizeof(struct xenoprof_buf);
i = sizeof(struct event_log);
#ifdef CONFIG_COMPAT
- d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? dom0 : d);
+ d->xenoprof->is_compat = is_pv_32on64_domain(is_passive ? hardware_domain
+ : d);
if ( XENOPROF_COMPAT(d->xenoprof) )
{
bufsize = sizeof(struct compat_oprof_buf);
{
#ifdef HAS_IOPORTS
struct ns16550 *uart = port->uart;
+ int rv;
if ( uart->remapped_io_base )
return;
- if ( ioports_deny_access(dom0, uart->io_base, uart->io_base + 7) != 0 )
+ rv = ioports_deny_access(hardware_domain, uart->io_base, uart->io_base + 7);
+ if ( rv != 0 )
BUG();
#endif
}
ivrs_mappings[req_id].read_permission);
}
- return reassign_device(dom0, d, devfn, pdev);
+ return reassign_device(hardware_domain, d, devfn, pdev);
}
static void deallocate_next_page_table(struct page_info *pg, int level)
}
/*
- * If the device isn't owned by dom0, it means it already
- * has been assigned to other domain, or it doesn't exist.
+ * If the device isn't owned by the hardware domain, it has already
+ * been assigned to another domain, or it doesn't exist.
*/
static int device_assigned(u16 seg, u8 bus, u8 devfn)
struct pci_dev *pdev;
spin_lock(&pcidevs_lock);
- pdev = pci_get_pdev_by_domain(dom0, seg, bus, devfn);
+ pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
spin_unlock(&pcidevs_lock);
return pdev ? 0 : -EBUSY;
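A hedged sketch of how a caller consumes this predicate (caller shape assumed; the real call sites live in the IOMMU domctl plumbing):

    /* Only a device still owned by the hardware domain, i.e. one for
     * which device_assigned() returns 0, may be passed through. */
    if ( device_assigned(seg, bus, devfn) )
        return -EBUSY;
    ret = assign_device(d, seg, bus, devfn);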
d->need_iommu = 1;
}
- pdev = pci_get_pdev_by_domain(dom0, seg, bus, devfn);
+ pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
if ( !pdev )
{
rc = pci_get_pdev(seg, bus, devfn) ? -EBUSY : -ENODEV;
devfn += pdev->phantom_stride;
if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
break;
- ret = hd->platform_ops->reassign_device(d, dom0, devfn, pdev);
+ ret = hd->platform_ops->reassign_device(d, hardware_domain, devfn,
+ pdev);
if ( !ret )
continue;
}
devfn = pdev->devfn;
- ret = hd->platform_ops->reassign_device(d, dom0, devfn, pdev);
+ ret = hd->platform_ops->reassign_device(d, hardware_domain, devfn,
+ pdev);
if ( ret )
{
dprintk(XENLOG_G_ERR,
ret = 0;
if ( !pdev->domain )
{
- pdev->domain = dom0;
+ pdev->domain = hardware_domain;
ret = iommu_add_device(pdev);
if ( ret )
{
goto out;
}
- list_add(&pdev->domain_list, &dom0->arch.pdev_list);
+ list_add(&pdev->domain_list, &hardware_domain->arch.pdev_list);
}
else
iommu_enable_device(pdev);
* can attempt to send arbitrary LAPIC/MSI messages. We are unprotected
* by the root complex unless interrupt remapping is enabled.
*/
- if ( (target != dom0) && !iommu_intremap )
+ if ( (target != hardware_domain) && !iommu_intremap )
untrusted_msi = 1;
ret = domain_context_unmap(source, devfn, pdev);
if ( list_empty(&acpi_drhd_units) )
return -ENODEV;
- ret = reassign_device_ownership(dom0, d, devfn, pdev);
+ ret = reassign_device_ownership(hardware_domain, d, devfn, pdev);
if ( ret )
goto done;
#endif
extern int dom0_11_mapping;
-#define is_domain_direct_mapped(d) ((d) == dom0 && dom0_11_mapping)
+#define is_domain_direct_mapped(d) ((d) == hardware_domain && dom0_11_mapping)
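A usage sketch for the predicate above (hypothetical fragment): a direct-mapped domain sees guest frame numbers identical to machine frame numbers, so translation degenerates to the identity:

    /* Hypothetical fragment: 1:1 (direct-mapped) domains need no
     * gfn-to-mfn translation. */
    if ( is_domain_direct_mapped(d) )
        mfn = gfn;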
struct vtimer {
struct vcpu *v;
struct vcpu *alloc_vcpu(
struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
-struct vcpu *alloc_dom0_vcpu0(void);
+struct vcpu *alloc_dom0_vcpu0(struct domain *dom0);
int vcpu_reset(struct vcpu *);
struct xen_domctl_getdomaininfo;
#define SCHED_STAT_CRANK(_X) (perfc_incr(_X))
-/* A global pointer to the initial domain (DOM0). */
-extern struct domain *dom0;
+/* A global pointer to the hardware domain (usually DOM0). */
+extern struct domain *hardware_domain;
#ifndef CONFIG_COMPAT
#define BITS_PER_EVTCHN_WORD(d) BITS_PER_XEN_ULONG