static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
xen_pfn_t* region_mfn, unsigned long* pfn_type, int pae_extended_cr3,
struct xc_mmu* mmu,
- pagebuf_t* pagebuf, int curbatch)
+ pagebuf_t* pagebuf, int curbatch, int *invalid_pages)
{
int i, j, curpage, nr_mfns;
int k, scount;
struct domain_info_context *dinfo = &ctx->dinfo;
int* pfn_err = NULL;
int rc = -1;
+ int local_invalid_pages = 0;
+ /* We have handled curbatch pages before this batch, and there are
+ * *invalid_pages pages that are not in pagebuf->pages. So the first
+ * page for this batch is page (curbatch - *invalid_pages).
+ */
+ int first_page = curbatch - *invalid_pages;
unsigned long mfn, pfn, pagetype;
pfn = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
pagetype = pagebuf->pfn_types[i + curbatch] & XEN_DOMCTL_PFINFO_LTAB_MASK;
- if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
+ if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
|| pagetype == XEN_DOMCTL_PFINFO_XALLOC)
+ {
+ local_invalid_pages++;
/* a bogus/unmapped/allocate-only page: skip it */
continue;
+ }
if ( pagetype == XEN_DOMCTL_PFINFO_BROKEN )
{
"dom=%d, pfn=%lx\n", dom, pfn);
goto err_mapped;
}
+
+ local_invalid_pages++;
continue;
}
}
}
else
- memcpy(page, pagebuf->pages + (curpage + curbatch) * PAGE_SIZE,
+ memcpy(page, pagebuf->pages + (first_page + curpage) * PAGE_SIZE,
PAGE_SIZE);
pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
} /* end of 'batch' for loop */
rc = nraces;
+ *invalid_pages += local_invalid_pages;
err_mapped:
munmap(region_base, j*PAGE_SIZE);
loadpages:
for ( ; ; )
{
- int j, curbatch;
+ int j, curbatch, invalid_pages;
xc_report_progress_step(xch, n, dinfo->p2m_size);
/* break pagebuf into batches */
curbatch = 0;
+ invalid_pages = 0;
while ( curbatch < j ) {
int brc;
brc = apply_batch(xch, dom, ctx, region_mfn, pfn_type,
- pae_extended_cr3, mmu, &pagebuf, curbatch);
+ pae_extended_cr3, mmu, &pagebuf, curbatch,
+ &invalid_pages);
if ( brc < 0 )
goto out;