static const struct address_space_operations xen_balloon_aops;
#ifdef CONFIG_BALLOON_COMPACTION
+/*
+ * xen_balloon_migratepage - perform the balloon page migration on behalf of
+ * a compaction thread. (called with the page lock held)
+ * @mapping: the page->mapping which will be assigned to the new migrated page
+ * @newpage: page that will replace the isolated page after migration finishes
+ * @page: the isolated (old) page that is about to be migrated to newpage.
+ * @mode: compaction mode -- not used for balloon page migration.
+ *
+ * After a ballooned page gets isolated by compaction procedures, this
+ * is the function that performs the page migration on behalf of a
+ * compaction thread. The page migration for Xen balloon is done in
+ * these two macro steps:
+ *
+ * A. back @page with a machine page
+ * B. release the machine page that backs @newpage
+ *
+ * Logically the above steps should be done in reverse order (first B,
+ * then A). But if we did B first and then failed in A due to memory
+ * pressure in Xen, we might not be able to get @newpage back easily.
+ * With the current order we can safely return -EAGAIN if step A fails
+ * (either because of the memory cap for the guest or because the
+ * hypervisor is out of memory).
+ *
+ * This function performs the balloon page migration task.
+ * Called through balloon_mapping->a_ops->migratepage
+ */
static int xen_balloon_migratepage(struct address_space *mapping,
struct page *newpage, struct page *page,
enum migrate_mode mode)
{
- return -EAGAIN;
-}
+ struct xen_balloon *xb;
+ struct balloon_dev_info *info = balloon_page_device(page);
+ unsigned long flags;
+ unsigned long pfn;
+ xen_pfn_t frame;
+ int rc;
+
+ BUG_ON(!info);
+ BUG_ON(info->balloon_device != &xen_balloon);
+
+ xb = info->balloon_device;
+
+ /*
+ * Avoid contention if we are increasing / decreasing the
+ * reservation; just tell compaction to try again later.
+ */
+ if (!mutex_trylock(&xb->balloon_mutex))
+ return -EAGAIN;
+
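+ /* Ensure ballooned highmem pages don't have stale kmaps. */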
+ kmap_flush_unused();
+
+ /*
+ * Step A:
+ * Back page with machine page
+ */
+ frame = page_to_pfn(page);
+
+ rc = __memory_op_hypercall(XENMEM_populate_physmap, &frame, 1);
+ if (rc != 1) {
+ rc = -EAGAIN;
+ goto out;
+ }
+ /*
+ * It's safe to delete page->lru here because this page is on
+ * the compaction code's list of isolated pages, not on the
+ * balloon's page list.
+ */
+ balloon_page_delete(page);
+
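+ /*
+ * Auto-translated guests get the new mapping set up by Xen; a PV
+ * guest has to wire the freshly populated frame into its own
+ * pagetables.
+ */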
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ __link_back_to_pagetable(page, frame,
+ mfn_pte(frame, PAGE_KERNEL));
+
+ /*
+ * Step B:
+ * Give up newpage's backing machine page and add it to list
+ */
+ pfn = page_to_pfn(newpage);
+ frame = pfn_to_mfn(pfn);
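+ /* Scrub @newpage so no guest data leaks back to the hypervisor. */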
+ scrub_page(newpage);
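+ /*
+ * A PV guest must not keep a live kernel mapping of a frame it is
+ * about to give back to Xen, so point the mapping at the balloon
+ * scratch page instead.
+ */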
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ __replace_mapping_with_scratch_page(newpage);
+
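+ /* Handing a populated frame back to Xen must not fail. */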
+ rc = __memory_op_hypercall(XENMEM_decrease_reservation, &frame, 1);
+ BUG_ON(rc != 1);
+
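+ /* @newpage takes the old page's place on the ballooned pages list. */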
+ spin_lock_irqsave(&info->pages_lock, flags);
+ balloon_page_insert(newpage, mapping, &info->pages);
+ info->isolated_pages--;
+ spin_unlock_irqrestore(&info->pages_lock, flags);
+
+ rc = MIGRATEPAGE_BALLOON_SUCCESS;
+
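+ /* Flush stale TLB entries for the remapped frames on all CPUs. */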
+ flush_tlb_all();
+out:
+ mutex_unlock(&xb->balloon_mutex);
+
+ return rc;
+}
static const struct address_space_operations xen_balloon_aops = {
.migratepage = xen_balloon_migratepage,
};