xen_pfn_t target_mfn;
for (source_pfn = 0; source_pfn < start_info.nr_pages; source_pfn++)
- if (dom->p2m_host[source_pfn] == source_mfn)
+ if (dom->pv_p2m[source_pfn] == source_mfn)
break;
ASSERT(source_pfn < start_info.nr_pages);
- target_mfn = dom->p2m_host[target_pfn];
+ target_mfn = dom->pv_p2m[target_pfn];
/* Put target MFN at source PFN */
- dom->p2m_host[source_pfn] = target_mfn;
+ dom->pv_p2m[source_pfn] = target_mfn;
/* Put source MFN at target PFN */
- dom->p2m_host[target_pfn] = source_mfn;
+ dom->pv_p2m[target_pfn] = source_mfn;
}
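/*
 * Swapping the two pv_p2m entries above only updates the guest's own view
 * of its P2M; the hypervisor's machine-to-phys table is brought back in
 * sync later in one batch of MMU_MACHPHYS_UPDATE requests (the
 * m2p_updates loop further down).
 */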
int kexec_allocate(struct xc_dom_image *dom)
pages_moved2pfns = realloc(pages_moved2pfns, new_allocated * sizeof(*pages_moved2pfns));
for (i = allocated; i < new_allocated; i++) {
/* Exchange old page of PFN i with a newly allocated page. */
- xen_pfn_t old_mfn = dom->p2m_host[i];
+ xen_pfn_t old_mfn = dom->pv_p2m[i];
xen_pfn_t new_pfn;
xen_pfn_t new_mfn;
/*
* If the PFN of the newly allocated page (new_pfn) is less than the
* currently requested PFN (i), then look for the relevant PFN/MFN pair. In this
- * situation dom->p2m_host[new_pfn] no longer contains proper MFN
+ * situation dom->pv_p2m[new_pfn] no longer contains proper MFN
* because the original page with new_pfn was moved earlier
* to a different location.
*/
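/*
 * A minimal sketch of the lookup described above, assuming the elided
 * code simply walks the pages_moved2pfns chain until it reaches a PFN
 * that has not been moved yet:
 */
while (new_pfn < i)
    new_pfn = pages_moved2pfns[new_pfn];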
pages_moved2pfns[i] = new_pfn;
/* Put old page at new PFN */
- dom->p2m_host[new_pfn] = old_mfn;
+ dom->pv_p2m[new_pfn] = old_mfn;
/* Put new page at PFN i */
- dom->p2m_host[i] = new_mfn;
+ dom->pv_p2m[i] = new_mfn;
}
allocated = new_allocated;
dom->p2m_size = dom->total_pages;
/* setup initial p2m */
- dom->p2m_host = malloc(sizeof(*dom->p2m_host) * dom->p2m_size);
+ dom->pv_p2m = malloc(sizeof(*dom->pv_p2m) * dom->p2m_size);
/* Start with our current P2M */
for (i = 0; i < dom->p2m_size; i++)
- dom->p2m_host[i] = pfn_to_mfn(i);
+ dom->pv_p2m[i] = pfn_to_mfn(i);
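/*
 * pfn_to_mfn() above is Mini-OS's view of the live P2M; under the usual
 * PV assumption it is just an array lookup, roughly:
 *
 *   #define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
 *
 * (phys_to_machine_mapping is assumed here, not visible in this excerpt.)
 */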
if ( (rc = xc_dom_build_image(dom)) != 0 ) {
printk("xc_dom_build_image returned %d\n", rc);
_boot_oldpdmfn = virt_to_mfn(start_info.pt_base);
DEBUG("boot old pd mfn %lx\n", _boot_oldpdmfn);
DEBUG("boot pd virt %lx\n", dom->pgtables_seg.vstart);
- _boot_pdmfn = dom->p2m_host[PHYS_PFN(dom->pgtables_seg.vstart - dom->parms.virt_base)];
+ _boot_pdmfn = dom->pv_p2m[PHYS_PFN(dom->pgtables_seg.vstart - dom->parms.virt_base)];
DEBUG("boot pd mfn %lx\n", _boot_pdmfn);
_boot_stack = _boot_target + PAGE_SIZE;
DEBUG("boot stack %lx\n", _boot_stack);
/* Keep only useful entries */
for (nr_m2p_updates = pfn = 0; pfn < start_info.nr_pages; pfn++)
- if (dom->p2m_host[pfn] != pfn_to_mfn(pfn))
+ if (dom->pv_p2m[pfn] != pfn_to_mfn(pfn))
nr_m2p_updates++;
m2p_updates = malloc(sizeof(*m2p_updates) * nr_m2p_updates);
for (i = pfn = 0; pfn < start_info.nr_pages; pfn++)
- if (dom->p2m_host[pfn] != pfn_to_mfn(pfn)) {
- m2p_updates[i].ptr = PFN_PHYS(dom->p2m_host[pfn]) | MMU_MACHPHYS_UPDATE;
+ if (dom->pv_p2m[pfn] != pfn_to_mfn(pfn)) {
+ m2p_updates[i].ptr = PFN_PHYS(dom->pv_p2m[pfn]) | MMU_MACHPHYS_UPDATE;
m2p_updates[i].val = pfn;
i++;
}
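/*
 * A minimal sketch of how the collected updates would be handed to Xen,
 * assuming the elided code issues them as a single mmu_update batch for
 * the calling domain:
 */
if (HYPERVISOR_mmu_update(m2p_updates, nr_m2p_updates, NULL, DOMID_SELF) < 0)
    printk("M2P update batch failed\n");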
/* other state info */
uint32_t f_active[XENFEAT_NR_SUBMAPS];
+
/*
- * p2m_host maps guest physical addresses an offset from
- * rambase_pfn (see below) into gfns.
- *
- * For a pure PV guest this means that it maps GPFNs into MFNs for
- * a hybrid guest this means that it maps GPFNs to GPFNS.
- *
- * Note that the input is offset by rambase.
+ * pv_p2m is specific to x86 PV guests, and maps GFNs to MFNs. It is
+ * eventually copied into guest context.
*/
- xen_pfn_t *p2m_host;
+ xen_pfn_t *pv_p2m;
/* physical memory
*
* An x86 PV guest has one or more blocks of physical RAM,
- * consisting of total_pages starting at rambase_pfn. The start
- * address and size of each block is controlled by vNUMA
- * structures.
+ * consisting of total_pages starting at 0. The start address and
+ * size of each block are controlled by vNUMA structures.
*
* An ARM guest has GUEST_RAM_BANKS regions of RAM, with
* rambank_size[i] pages in each. The lowest RAM address
{
if ( xc_dom_translated(dom) )
return pfn;
- if (pfn < dom->rambase_pfn || pfn >= dom->rambase_pfn + dom->total_pages)
+
+ /* x86 PV only now. */
+ if ( pfn >= dom->total_pages )
return INVALID_MFN;
- return dom->p2m_host[pfn - dom->rambase_pfn];
+
+ return dom->pv_p2m[pfn];
}
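/*
 * Hypothetical usage sketch (the helper's name is not visible in this
 * hunk; xc_dom_p2m() is assumed): callers should treat INVALID_MFN as
 * "this pfn is not backed by guest RAM".
 *
 *   xen_pfn_t mfn = xc_dom_p2m(dom, pfn);
 *   if ( mfn == INVALID_MFN )
 *       return -EINVAL;
 */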
#endif /* _XC_DOM_H */
if ( !new_l3mfn )
goto out;
- p2m_guest[l3pfn] = dom->p2m_host[l3pfn] = new_l3mfn;
+ p2m_guest[l3pfn] = dom->pv_p2m[l3pfn] = new_l3mfn;
if ( xc_add_mmu_update(dom->xch, mmu,
(((unsigned long long)new_l3mfn)
uint32_t *p2m_guest = domx86->p2m_guest;
xen_pfn_t l3mfn, l3pfn, i;
- /* Copy dom->p2m_host[] into the guest. */
+ /* Copy dom->pv_p2m[] into the guest. */
for ( i = 0; i < dom->p2m_size; ++i )
{
- if ( dom->p2m_host[i] != INVALID_PFN )
- p2m_guest[i] = dom->p2m_host[i];
+ if ( dom->pv_p2m[i] != INVALID_PFN )
+ p2m_guest[i] = dom->pv_p2m[i];
else
p2m_guest[i] = -1;
}
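/*
 * The explicit else above keeps the invalid marker at the guest's entry
 * width: p2m_guest entries are uint32_t for this 32-bit guest, whereas
 * INVALID_PFN is the 64-bit all-ones value, so -1 is written directly
 * (presumably for clarity) rather than relying on implicit truncation.
 */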
uint64_t *p2m_guest = domx86->p2m_guest;
xen_pfn_t i;
- /* Copy dom->p2m_host[] into the guest. */
+ /* Copy dom->pv_p2m[] into the guest. */
for ( i = 0; i < dom->p2m_size; ++i )
{
- if ( dom->p2m_host[i] != INVALID_PFN )
- p2m_guest[i] = dom->p2m_host[i];
+ if ( dom->pv_p2m[i] != INVALID_PFN )
+ p2m_guest[i] = dom->pv_p2m[i];
else
p2m_guest[i] = -1;
}
return -EINVAL;
}
- dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * dom->p2m_size);
- if ( dom->p2m_host == NULL )
+ dom->pv_p2m = xc_dom_malloc(dom, sizeof(*dom->pv_p2m) * dom->p2m_size);
+ if ( dom->pv_p2m == NULL )
return -EINVAL;
for ( pfn = 0; pfn < dom->p2m_size; pfn++ )
- dom->p2m_host[pfn] = INVALID_PFN;
+ dom->pv_p2m[pfn] = INVALID_PFN;
/* allocate guest memory */
for ( i = 0; i < nr_vmemranges; i++ )
pfn_base = vmemranges[i].start >> PAGE_SHIFT;
for ( pfn = pfn_base; pfn < pfn_base+pages; pfn++ )
- dom->p2m_host[pfn] = pfn;
+ dom->pv_p2m[pfn] = pfn;
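/*
 * Every populated pv_p2m slot now holds its own PFN; these identity
 * values act as the requested GFNs for the populate calls below, which
 * overwrite them with the machine frames Xen actually allocates.
 */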
pfn_base_idx = pfn_base;
while ( super_pages ) {
for ( pfn = pfn_base_idx, j = 0;
pfn < pfn_base_idx + (count << SUPERPAGE_2MB_SHIFT);
pfn += SUPERPAGE_2MB_NR_PFNS, j++ )
- extents[j] = dom->p2m_host[pfn];
+ extents[j] = dom->pv_p2m[pfn];
rc = xc_domain_populate_physmap(dom->xch, dom->guest_domid, count,
SUPERPAGE_2MB_SHIFT, memflags,
extents);
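/*
 * On success the hypercall writes back into extents[]: each entry now
 * holds the machine frame of the first page of its 2MB extent, which the
 * loop below fans out into the individual pv_p2m slots.
 */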
{
mfn = extents[j];
for ( k = 0; k < SUPERPAGE_2MB_NR_PFNS; k++, pfn++ )
- dom->p2m_host[pfn] = mfn + k;
+ dom->pv_p2m[pfn] = mfn + k;
}
pfn_base_idx = pfn;
}
{
allocsz = min_t(uint64_t, 1024 * 1024, pages - j);
rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
- allocsz, 0, memflags, &dom->p2m_host[pfn_base + j]);
+ allocsz, 0, memflags, &dom->pv_p2m[pfn_base + j]);
if ( rc )
{
}
dom->p2m_size = p2m_size;
- dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) *
- dom->p2m_size);
- if ( dom->p2m_host == NULL )
- {
- DOMPRINTF("Could not allocate p2m");
- goto error_out;
- }
-
- for ( i = 0; i < p2m_size; i++ )
- dom->p2m_host[i] = ((xen_pfn_t)-1);
- for ( vmemid = 0; vmemid < nr_vmemranges; vmemid++ )
- {
- uint64_t pfn;
-
- for ( pfn = vmemranges[vmemid].start >> PAGE_SHIFT;
- pfn < vmemranges[vmemid].end >> PAGE_SHIFT;
- pfn++ )
- dom->p2m_host[pfn] = pfn;
- }
/*
* Try to claim pages for early warning of insufficient memory available.
* We attempt to allocate 1GB pages if possible. It falls back on 2MB
* pages if 1GB allocation fails. 4KB pages will be used eventually if
* both fail.
- *
- * Under 2MB mode, we allocate pages in batches of no more than 8MB to
- * ensure that we can be preempted and hence dom0 remains responsive.
*/
if ( dom->device_model )
{
+ xen_pfn_t extents[0xa0];
+
+ for ( i = 0; i < ARRAY_SIZE(extents); ++i )
+ extents[i] = i;
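/*
 * For this translated guest the dropped dom->p2m_host table was a pure
 * identity map (see the removed HVM setup above), so a small local array
 * of identity GFNs below 0xA0 is sufficient for the populate call below.
 */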
+
rc = xc_domain_populate_physmap_exact(
- xch, domid, 0xa0, 0, memflags, &dom->p2m_host[0x00]);
+ xch, domid, 0xa0, 0, memflags, extents);
if ( rc != 0 )
{
DOMPRINTF("Could not populate low memory (< 0xA0).\n");
if ( count > max_pages )
count = max_pages;
- cur_pfn = dom->p2m_host[cur_pages];
+ cur_pfn = cur_pages;
/* Take care of the corner cases of superpage tails */
if ( ((cur_pfn & (SUPERPAGE_1GB_NR_PFNS-1)) != 0) &&
xen_pfn_t sp_extents[nr_extents];
for ( i = 0; i < nr_extents; i++ )
- sp_extents[i] =
- dom->p2m_host[cur_pages+(i<<SUPERPAGE_1GB_SHIFT)];
+ sp_extents[i] = cur_pages + (i << SUPERPAGE_1GB_SHIFT);
done = xc_domain_populate_physmap(xch, domid, nr_extents,
SUPERPAGE_1GB_SHIFT,
xen_pfn_t sp_extents[nr_extents];
for ( i = 0; i < nr_extents; i++ )
- sp_extents[i] =
- dom->p2m_host[cur_pages+(i<<SUPERPAGE_2MB_SHIFT)];
+ sp_extents[i] = cur_pages + (i << SUPERPAGE_2MB_SHIFT);
done = xc_domain_populate_physmap(xch, domid, nr_extents,
SUPERPAGE_2MB_SHIFT,
/* Fall back to 4kB extents. */
if ( count != 0 )
{
+ xen_pfn_t extents[count];
+
+ for ( i = 0; i < count; ++i )
+ extents[i] = cur_pages + i;
+
rc = xc_domain_populate_physmap_exact(
- xch, domid, count, 0, new_memflags, &dom->p2m_host[cur_pages]);
+ xch, domid, count, 0, new_memflags, extents);
cur_pages += count;
stat_normal_pages += count;
}