char features[] = "";
struct mmu_update *m2p_updates;
unsigned long nr_m2p_updates;
+ uint64_t virt_base;
DEBUG("booting with cmdline %s\n", cmdline);
xc_handle = xc_interface_open(0,0,0);
goto out;
}
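+ /* Cache the guest's virtual base address; it is used in the address calculations below. */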
+ virt_base = xc_dom_virt_base(dom);
/* copy hypercall page */
/* TODO: domctl instead, but requires privileges */
- if (dom->parms.virt_hypercall != -1) {
- pfn = PHYS_PFN(dom->parms.virt_hypercall - dom->parms.virt_base);
+ if (xc_dom_virt_hypercall(dom) != -1) {
+ pfn = PHYS_PFN(xc_dom_virt_hypercall(dom) - virt_base);
memcpy((void *) pages[pfn], hypercall_page, PAGE_SIZE);
}
/* Move current console, xenstore and boot MFNs to the allocated place */
do_exchange(dom, dom->console_pfn, start_info.console.domU.mfn);
do_exchange(dom, dom->xenstore_pfn, start_info.store_mfn);
- DEBUG("virt base at %llx\n", dom->parms.virt_base);
+ DEBUG("virt base at %llx\n", virt_base);
DEBUG("bootstack_pfn %lx\n", dom->bootstack_pfn);
- _boot_target = dom->parms.virt_base + PFN_PHYS(dom->bootstack_pfn);
+ _boot_target = virt_base + PFN_PHYS(dom->bootstack_pfn);
DEBUG("_boot_target %lx\n", _boot_target);
- do_exchange(dom, PHYS_PFN(_boot_target - dom->parms.virt_base),
+ do_exchange(dom, PHYS_PFN(_boot_target - virt_base),
virt_to_mfn(&_boot_page));
if ( dom->arch_hooks->setup_pgtables )
_boot_oldpdmfn = virt_to_mfn(start_info.pt_base);
DEBUG("boot old pd mfn %lx\n", _boot_oldpdmfn);
DEBUG("boot pd virt %lx\n", dom->pgtables_seg.vstart);
- _boot_pdmfn = dom->pv_p2m[PHYS_PFN(dom->pgtables_seg.vstart - dom->parms.virt_base)];
+ _boot_pdmfn = dom->pv_p2m[PHYS_PFN(dom->pgtables_seg.vstart - virt_base)];
DEBUG("boot pd mfn %lx\n", _boot_pdmfn);
_boot_stack = _boot_target + PAGE_SIZE;
DEBUG("boot stack %lx\n", _boot_stack);
- _boot_start_info = dom->parms.virt_base + PFN_PHYS(dom->start_info_pfn);
+ _boot_start_info = virt_base + PFN_PHYS(dom->start_info_pfn);
DEBUG("boot start info %lx\n", _boot_start_info);
- _boot_start = dom->parms.virt_entry;
+ _boot_start = xc_dom_virt_entry(dom);
DEBUG("boot start %lx\n", _boot_start);
/* Keep only useful entries */
#ifndef XENGUEST_H
#define XENGUEST_H
-#include <xen/libelf/libelf.h>
-
#define XC_NUMA_NO_NODE (~0U)
#define XCFLAGS_LIVE (1 << 0)
uint32_t f_requested[XENFEAT_NR_SUBMAPS];
/* info from (elf) kernel image */
- struct elf_dom_parms parms;
+ struct elf_dom_parms *parms;
char *guest_type;
/* memory layout */
xen_pfn_t count, xen_pfn_t *count_out);
void xc_dom_unmap_one(struct xc_dom_image *dom, xen_pfn_t pfn);
void xc_dom_unmap_all(struct xc_dom_image *dom);
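+
+/*
+ * Helpers for querying the kernel image info (struct elf_dom_parms)
+ * held by a struct xc_dom_image.
+ */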
+void *xc_dom_vaddr_to_ptr(struct xc_dom_image *dom,
+ xen_vaddr_t vaddr, size_t *safe_region_out);
+uint64_t xc_dom_virt_base(struct xc_dom_image *dom);
+uint64_t xc_dom_virt_entry(struct xc_dom_image *dom);
+uint64_t xc_dom_virt_hypercall(struct xc_dom_image *dom);
+const char *xc_dom_guest_os(struct xc_dom_image *dom);
+bool xc_dom_feature_get(struct xc_dom_image *dom, unsigned int nr);
static inline void *xc_dom_seg_to_ptr_pages(struct xc_dom_image *dom,
struct xc_dom_seg *seg,
return xc_dom_seg_to_ptr_pages(dom, seg, &dummy);
}
-static inline void *xc_dom_vaddr_to_ptr(struct xc_dom_image *dom,
- xen_vaddr_t vaddr,
- size_t *safe_region_out)
-{
- unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
- xen_pfn_t page = (vaddr - dom->parms.virt_base) / page_size;
- unsigned int offset = (vaddr - dom->parms.virt_base) % page_size;
- xen_pfn_t safe_region_count;
- void *ptr;
-
- *safe_region_out = 0;
- ptr = xc_dom_pfn_to_ptr_retcount(dom, page, 0, &safe_region_count);
- if ( ptr == NULL )
- return ptr;
- *safe_region_out = (safe_region_count << XC_DOM_PAGE_SHIFT(dom)) - offset;
- return ptr + offset;
-}
-
static inline xen_pfn_t xc_dom_p2m(struct xc_dom_image *dom, xen_pfn_t pfn)
{
if ( xc_dom_translated(dom) )
/* clear everything */
memset(ctxt, 0, sizeof(*ctxt));
- ctxt->user_regs.pc32 = dom->parms.virt_entry;
+ ctxt->user_regs.pc32 = dom->parms->virt_entry;
/* Linux boot protocol. See linux.Documentation/arm/Booting. */
ctxt->user_regs.r0_usr = 0; /* SBZ */
/* clear everything */
memset(ctxt, 0, sizeof(*ctxt));
- ctxt->user_regs.pc64 = dom->parms.virt_entry;
+ ctxt->user_regs.pc64 = dom->parms->virt_entry;
/* Linux boot protocol. See linux.Documentation/arm64/booting.txt. */
ctxt->user_regs.x0 = dom->devicetree_blob ?
dom->kernel_seg.vstart = v_start;
dom->kernel_seg.vend = v_end;
- dom->parms.virt_entry = entry_addr;
- dom->parms.virt_base = rambase;
+ dom->parms->virt_entry = entry_addr;
+ dom->parms->virt_base = rambase;
dom->guest_type = "xen-3.0-armv7l";
DOMPRINTF("%s: %s: 0x%" PRIx64 " -> 0x%" PRIx64 "",
dom->kernel_seg.vend = v_end;
/* Call the kernel at offset 0 */
- dom->parms.virt_entry = v_start;
- dom->parms.virt_base = rambase;
+ dom->parms->virt_entry = v_start;
+ dom->parms->virt_base = rambase;
dom->guest_type = "xen-3.0-aarch64";
DOMPRINTF("%s: %s: 0x%" PRIx64 " -> 0x%" PRIx64 "",
dom->kernel_seg.vstart = image_info->load_addr;
dom->kernel_seg.vend = bss_end_addr;
- dom->parms.virt_base = start_addr;
- dom->parms.virt_entry = image_info->entry_addr;
+ dom->parms->virt_base = start_addr;
+ dom->parms->virt_entry = image_info->entry_addr;
pae_flags = image_info->flags & XEN_MULTIBOOT_FLAG_PAE_MASK;
switch (pae_flags >> XEN_MULTIBOOT_FLAG_PAE_SHIFT) {
{
DOMPRINTF("%s: PAE fixup", __FUNCTION__);
dom->guest_type = "xen-3.0-x86_32p";
- dom->parms.pae = XEN_PAE_EXTCR3;
+ dom->parms->pae = XEN_PAE_EXTCR3;
}
break;
}
xen_pfn_t pfn;
int rc;
- if ( dom->parms.virt_hypercall == -1 )
+ if ( dom->parms->virt_hypercall == -1 )
return 0;
- pfn = (dom->parms.virt_hypercall - dom->parms.virt_base)
+ pfn = (dom->parms->virt_hypercall - dom->parms->virt_base)
>> XC_DOM_PAGE_SHIFT(dom);
DOMPRINTF("%s: vaddr=0x%" PRIx64 " pfn=0x%" PRIpfn "", __FUNCTION__,
- dom->parms.virt_hypercall, pfn);
+ dom->parms->virt_hypercall, pfn);
domctl.cmd = XEN_DOMCTL_hypercall_init;
domctl.domain = dom->guest_domid;
domctl.u.hypercall_init.gmfn = xc_dom_p2m(dom, pfn);
xc_dom_unmap_all(dom);
xc_dom_free_all(dom);
free(dom->arch_private);
+ free(dom->parms);
free(dom);
}
memset(dom, 0, sizeof(*dom));
dom->xch = xch;
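+ /*
+ * The kernel image info (struct elf_dom_parms) lives in a separate
+ * allocation, released again in xc_dom_release().
+ */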
+ dom->parms = malloc(sizeof(*dom->parms));
+ if ( !dom->parms )
+ goto err;
+ memset(dom->parms, 0, sizeof(*dom->parms));
+ dom->alloc_malloc += sizeof(*dom->parms);
+
dom->max_kernel_size = XC_DOM_DECOMPRESS_MAX;
dom->max_module_size = XC_DOM_DECOMPRESS_MAX;
dom->max_devicetree_size = XC_DOM_DECOMPRESS_MAX;
if ( features )
elf_xen_parse_features(features, dom->f_requested, NULL);
- dom->parms.virt_base = UNSET_ADDR;
- dom->parms.virt_entry = UNSET_ADDR;
- dom->parms.virt_hypercall = UNSET_ADDR;
- dom->parms.virt_hv_start_low = UNSET_ADDR;
- dom->parms.elf_paddr_offset = UNSET_ADDR;
- dom->parms.p2m_base = UNSET_ADDR;
+ dom->parms->virt_base = UNSET_ADDR;
+ dom->parms->virt_entry = UNSET_ADDR;
+ dom->parms->virt_hypercall = UNSET_ADDR;
+ dom->parms->virt_hv_start_low = UNSET_ADDR;
+ dom->parms->elf_paddr_offset = UNSET_ADDR;
+ dom->parms->p2m_base = UNSET_ADDR;
dom->flags = SIF_VIRT_P2M_4TOOLS;
for ( i = 0; i < XENFEAT_NR_SUBMAPS; i++ )
{
dom->f_active[i] |= dom->f_requested[i]; /* cmd line */
- dom->f_active[i] |= dom->parms.f_required[i]; /* kernel */
- if ( (dom->f_active[i] & dom->parms.f_supported[i]) !=
+ dom->f_active[i] |= dom->parms->f_required[i]; /* kernel */
+ if ( (dom->f_active[i] & dom->parms->f_supported[i]) !=
dom->f_active[i] )
{
xc_dom_panic(dom->xch, XC_INVALID_PARAM,
goto err;
}
page_size = XC_DOM_PAGE_SIZE(dom);
- if ( dom->parms.virt_base != UNSET_ADDR )
- dom->virt_alloc_end = dom->parms.virt_base;
+ if ( dom->parms->virt_base != UNSET_ADDR )
+ dom->virt_alloc_end = dom->parms->virt_base;
/* load kernel */
if ( xc_dom_alloc_segment(dom, &dom->kernel_seg, "kernel",
/* Don't load ramdisk / other modules now if no initial mapping required. */
for ( mod = 0; mod < dom->num_modules; mod++ )
{
- unmapped_initrd = (dom->parms.unmapped_initrd &&
+ unmapped_initrd = (dom->parms->unmapped_initrd &&
!dom->modules[mod].seg.vstart);
if ( dom->modules[mod].blob && !unmapped_initrd )
/* allocate other pages */
if ( !dom->arch_hooks->p2m_base_supported ||
- dom->parms.p2m_base >= dom->parms.virt_base ||
- (dom->parms.p2m_base & (XC_DOM_PAGE_SIZE(dom) - 1)) )
- dom->parms.p2m_base = UNSET_ADDR;
- if ( dom->arch_hooks->alloc_p2m_list && dom->parms.p2m_base == UNSET_ADDR &&
+ dom->parms->p2m_base >= dom->parms->virt_base ||
+ (dom->parms->p2m_base & (XC_DOM_PAGE_SIZE(dom) - 1)) )
+ dom->parms->p2m_base = UNSET_ADDR;
+ if ( dom->arch_hooks->alloc_p2m_list && dom->parms->p2m_base == UNSET_ADDR &&
dom->arch_hooks->alloc_p2m_list(dom) != 0 )
goto err;
if ( dom->arch_hooks->alloc_magic_pages(dom) != 0 )
for ( mod = 0; mod < dom->num_modules; mod++ )
{
- unmapped_initrd = (dom->parms.unmapped_initrd &&
+ unmapped_initrd = (dom->parms->unmapped_initrd &&
!dom->modules[mod].seg.vstart);
/* Load ramdisk / other modules if no initial mapping required. */
}
/* Allocate p2m list if outside of initial kernel mapping. */
- if ( dom->arch_hooks->alloc_p2m_list && dom->parms.p2m_base != UNSET_ADDR )
+ if ( dom->arch_hooks->alloc_p2m_list && dom->parms->p2m_base != UNSET_ADDR )
{
if ( dom->arch_hooks->alloc_p2m_list(dom) != 0 )
goto err;
- dom->p2m_seg.vstart = dom->parms.p2m_base;
+ dom->p2m_seg.vstart = dom->parms->p2m_base;
}
return 0;
return -1;
}
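+
+/*
+ * Translate a guest virtual address into a pointer into the domain
+ * builder's mapping of guest memory.  *safe_region_out is set to the
+ * number of contiguous bytes accessible through the returned pointer.
+ */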
+void *xc_dom_vaddr_to_ptr(struct xc_dom_image *dom,
+ xen_vaddr_t vaddr, size_t *safe_region_out)
+{
+ unsigned int page_size = XC_DOM_PAGE_SIZE(dom);
+ xen_pfn_t page = (vaddr - dom->parms->virt_base) / page_size;
+ unsigned int offset = (vaddr - dom->parms->virt_base) % page_size;
+ xen_pfn_t safe_region_count;
+ void *ptr;
+
+ *safe_region_out = 0;
+ ptr = xc_dom_pfn_to_ptr_retcount(dom, page, 0, &safe_region_count);
+ if ( ptr == NULL )
+ return ptr;
+ *safe_region_out = (safe_region_count << XC_DOM_PAGE_SHIFT(dom)) - offset;
+ return ptr + offset;
+}
+
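+/* Accessors for the kernel image info parsed from the (ELF) kernel image. */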
+uint64_t xc_dom_virt_base(struct xc_dom_image *dom)
+{
+ return dom->parms->virt_base;
+}
+
+uint64_t xc_dom_virt_entry(struct xc_dom_image *dom)
+{
+ return dom->parms->virt_entry;
+}
+
+uint64_t xc_dom_virt_hypercall(struct xc_dom_image *dom)
+{
+ return dom->parms->virt_hypercall;
+}
+
+const char *xc_dom_guest_os(struct xc_dom_image *dom)
+{
+ return dom->parms->guest_os;
+}
+
+bool xc_dom_feature_get(struct xc_dom_image *dom, unsigned int nr)
+{
+ return elf_xen_feature_get(nr, dom->parms->f_supported);
+}
+
/*
* Local variables:
* mode: C
uint64_t machine = elf_uval(elf, elf->ehdr, e_machine);
if ( dom->container_type == XC_DOM_HVM_CONTAINER &&
- dom->parms.phys_entry != UNSET_ADDR32 )
+ dom->parms->phys_entry != UNSET_ADDR32 )
return "hvm-3.0-x86_32";
if ( dom->container_type == XC_DOM_HVM_CONTAINER )
{
switch ( machine )
{
case EM_386:
- switch ( dom->parms.pae )
+ switch ( dom->parms->pae )
{
case XEN_PAE_BIMODAL:
if ( strstr(dom->xen_caps, "xen-3.0-x86_32p") )
* or else we might be trying to load a plain ELF.
*/
elf_parse_binary(&elf);
- rc = elf_xen_parse(&elf, &dom->parms);
+ rc = elf_xen_parse(&elf, dom->parms);
if ( rc != 0 )
return rc;
/* parse binary and get xen meta info */
elf_parse_binary(elf);
- if ( elf_xen_parse(elf, &dom->parms) != 0 )
+ if ( elf_xen_parse(elf, dom->parms) != 0 )
{
rc = -EINVAL;
goto out;
}
- if ( elf_xen_feature_get(XENFEAT_dom0, dom->parms.f_required) )
+ if ( elf_xen_feature_get(XENFEAT_dom0, dom->parms->f_required) )
{
xc_dom_panic(dom->xch, XC_INVALID_KERNEL, "%s: Kernel does not"
" support unprivileged (DomU) operation", __FUNCTION__);
}
/* find kernel segment */
- dom->kernel_seg.vstart = dom->parms.virt_kstart;
- dom->kernel_seg.vend = dom->parms.virt_kend;
+ dom->kernel_seg.vstart = dom->parms->virt_kstart;
+ dom->kernel_seg.vend = dom->parms->virt_kend;
dom->guest_type = xc_dom_guest_type(dom, elf);
if ( dom->guest_type == NULL )
* else we might be trying to load a PV kernel.
*/
elf_parse_binary(&elf);
- rc = elf_xen_parse(&elf, &dom->parms);
+ rc = elf_xen_parse(&elf, dom->parms);
if ( rc == 0 )
return -EINVAL;
goto error;
}
- dom->parms.phys_entry = elf_uval(elf, elf->ehdr, e_entry);
+ dom->parms->phys_entry = elf_uval(elf, elf->ehdr, e_entry);
free(entries);
return 0;
try_virt_end = round_up(dom->virt_alloc_end + pages * PAGE_SIZE_X86,
bits_to_mask(22)); /* 4MB alignment */
- if ( count_pgtables(dom, dom->parms.virt_base, try_virt_end, 0) )
+ if ( count_pgtables(dom, dom->parms->virt_base, try_virt_end, 0) )
return -1;
pages = map->area.pgtables + extra_pages;
l3pfn = domx86->maps[0].lvls[2].pfn;
l3mfn = xc_dom_p2m(dom, l3pfn);
- if ( dom->parms.pae == XEN_PAE_YES )
+ if ( dom->parms->pae == XEN_PAE_YES )
{
if ( l3mfn >= 0x100000 )
l3mfn = move_l3_below_4G(dom, l3pfn, l3mfn);
unsigned lvl;
p2m_alloc_size = round_pg_up(p2m_alloc_size);
- if ( dom->parms.p2m_base != UNSET_ADDR )
+ if ( dom->parms->p2m_base != UNSET_ADDR )
{
- from = dom->parms.p2m_base;
+ from = dom->parms->p2m_base;
to = from + p2m_alloc_size - 1;
if ( count_pgtables(dom, from, to, dom->pfn_alloc_end) )
return -1;
dom->xenstore_pfn = special_pfn(SPECIALPAGE_XENSTORE);
xc_clear_domain_page(dom->xch, dom->guest_domid, dom->xenstore_pfn);
- dom->parms.virt_hypercall = -1;
+ dom->parms->virt_hypercall = -1;
rc = 0;
goto out;
start_info->pt_base = dom->pgtables_seg.vstart;
start_info->nr_pt_frames = domx86->maps[0].area.pgtables;
start_info->mfn_list = dom->p2m_seg.vstart;
- if ( dom->parms.p2m_base != UNSET_ADDR )
+ if ( dom->parms->p2m_base != UNSET_ADDR )
{
start_info->first_p2m_pfn = dom->p2m_seg.pfn;
start_info->nr_p2m_frames = dom->p2m_seg.pages;
/* clear everything */
memset(ctxt, 0, sizeof(*ctxt));
- ctxt->user_regs.eip = dom->parms.virt_entry;
+ ctxt->user_regs.eip = dom->parms->virt_entry;
ctxt->user_regs.esp =
- dom->parms.virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
+ dom->parms->virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
ctxt->user_regs.esi =
- dom->parms.virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
+ dom->parms->virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */
ctxt->debugreg[6] = X86_DR6_DEFAULT;
ctxt->debugreg[7] = X86_DR7_DEFAULT;
ctxt->flags = VGCF_in_kernel_X86_32 | VGCF_online_X86_32;
- if ( dom->parms.pae == XEN_PAE_EXTCR3 ||
- dom->parms.pae == XEN_PAE_BIMODAL )
+ if ( dom->parms->pae == XEN_PAE_EXTCR3 ||
+ dom->parms->pae == XEN_PAE_BIMODAL )
ctxt->vm_assist |= (1UL << VMASST_TYPE_pae_extended_cr3);
cr3_pfn = xc_dom_p2m(dom, dom->pgtables_seg.pfn);
/* clear everything */
memset(ctxt, 0, sizeof(*ctxt));
- ctxt->user_regs.rip = dom->parms.virt_entry;
+ ctxt->user_regs.rip = dom->parms->virt_entry;
ctxt->user_regs.rsp =
- dom->parms.virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
+ dom->parms->virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
ctxt->user_regs.rsi =
- dom->parms.virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
+ dom->parms->virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
ctxt->user_regs.rflags = 1 << 9; /* Interrupt Enable */
ctxt->debugreg[6] = X86_DR6_DEFAULT;
bsp_ctx.cpu.cr0 = X86_CR0_PE | X86_CR0_ET;
/* Set the IP. */
- bsp_ctx.cpu.rip = dom->parms.phys_entry;
+ bsp_ctx.cpu.rip = dom->parms->phys_entry;
bsp_ctx.cpu.dr6 = X86_DR6_DEFAULT;
bsp_ctx.cpu.dr7 = X86_DR7_DEFAULT;
for ( i = 0; i < dom->num_modules; i++ )
{
struct xc_hvm_firmware_module mod;
- uint64_t base = dom->parms.virt_base != UNSET_ADDR ?
- dom->parms.virt_base : 0;
+ uint64_t base = dom->parms->virt_base != UNSET_ADDR ?
+ dom->parms->virt_base : 0;
mod.guest_addr_out =
dom->modules[i].seg.vstart - base;
#include <xen/memory.h>
#include <xen/elfnote.h>
+#include <xen/libelf/libelf.h>
#ifndef ELFSIZE
#include <limits.h>
* and so we need to put RSDP in location that can be discovered by ACPI's
* standard search method, in R-O BIOS memory (we chose last 64 bytes)
*/
- if (strcmp(dom->parms.guest_os, "linux") ||
- elf_xen_feature_get(XENFEAT_linux_rsdp_unrestricted,
- dom->parms.f_supported))
+ if (strcmp(xc_dom_guest_os(dom), "linux") ||
+ xc_dom_feature_get(dom, XENFEAT_linux_rsdp_unrestricted))
dom->acpi_modules[0].guest_addr_out = ACPI_INFO_PHYSICAL_ADDRESS +
(1 + acpi_pages_num) * libxl_ctxt.page_size;
else