spin_unlock_irqrestore(&xen_balloon.xb_dev_info->pages_lock, flags);
}
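+/*
+ * Issue a XENMEM_* memory_op hypercall covering 'nr' order-0 frames taken
+ * from 'list' on behalf of this domain.
+ */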
+static int __memory_op_hypercall(int cmd, xen_pfn_t *list, xen_ulong_t nr)
+{
+ struct xen_memory_reservation reservation = {
+ .address_bits = 0,
+ .extent_order = 0,
+ .domid = DOMID_SELF
+ };
+
+ set_xen_guest_handle(reservation.extent_start, list);
+ reservation.nr_extents = nr;
+
+ return HYPERVISOR_memory_op(cmd, &reservation);
+}
+
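+/*
+ * Point the p2m entry for 'page' at 'mfn' and, for lowmem pages, restore
+ * the kernel linear mapping using 'pte'. Only needed for PV guests; this
+ * is a no-op for auto-translated guests.
+ */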
+static void __link_back_to_pagetable(struct page *page, xen_ulong_t mfn,
+ pte_t pte)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ unsigned long pfn = page_to_pfn(page);
+
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ set_phys_to_machine(pfn, mfn);
+
+ /* Link back into the page tables if not highmem. */
+ if (!PageHighMem(page)) {
+ int ret;
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ pte, 0);
+ BUG_ON(ret);
+ }
+ }
+#endif
+}
+
+static void __replace_mapping_with_scratch_page(struct page *page)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ /*
+ * Ballooned out frames are effectively replaced with
+ * a scratch frame. Ensure direct mappings and the
+ * p2m are consistent.
+ */
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ unsigned long p, smfn;
+ struct page *scratch_page = get_balloon_scratch_page();
+
+ p = page_to_pfn(scratch_page);
+ smfn = pfn_to_mfn(p);
+
+ __link_back_to_pagetable(page, smfn,
+ mfn_pte(smfn, PAGE_KERNEL_RO));
+
+ put_balloon_scratch_page();
+ }
+#endif
+}
+
/* This function will always try to fill in pages managed by Xen
* balloon driver, then pages managed by generic balloon driver.
*/
struct page *page;
LIST_HEAD(queue);
bool xen_pages;
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
if (!xen_balloon.balloon_stats.balloon_low &&
}
/* Second step: issue hypercall */
- set_xen_guest_handle(reservation.extent_start, frame_list);
- reservation.nr_extents = nr_pages;
- rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+ rc = __memory_op_hypercall(XENMEM_populate_physmap, frame_list,
+ nr_pages);
if (rc <= 0) {
rc = BP_EAGAIN;
goto move_pages_back;
list_del(&page->lru);
pfn = page_to_pfn(page);
-#ifdef CONFIG_XEN_HAVE_PVMMU
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- set_phys_to_machine(pfn, frame_list[i]);
-
- /* Link back into the page tables if not highmem. */
- if (!PageHighMem(page)) {
- int ret;
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- mfn_pte(frame_list[i], PAGE_KERNEL),
- 0);
- BUG_ON(ret);
- }
- }
-#endif
+ __link_back_to_pagetable(page, frame_list[i],
+ mfn_pte(frame_list[i], PAGE_KERNEL));
/* Relinquish the page back to the allocator. */
if (xen_pages)
unsigned long pfn, i;
struct page *page;
int ret;
- struct xen_memory_reservation reservation = {
- .address_bits = 0,
- .extent_order = 0,
- .domid = DOMID_SELF
- };
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
if (xen_balloon.balloon_stats.hotplug_pages) {
frame_list[i] = pfn_to_mfn(pfn);
page = pfn_to_page(pfn);
-#ifdef CONFIG_XEN_HAVE_PVMMU
- /*
- * Ballooned out frames are effectively replaced with
- * a scratch frame. Ensure direct mappings and the
- * p2m are consistent.
- */
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- if (!PageHighMem(page)) {
- struct page *scratch_page = get_balloon_scratch_page();
-
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte(page_to_pfn(scratch_page),
- PAGE_KERNEL_RO), 0);
- BUG_ON(ret);
-
- put_balloon_scratch_page();
- }
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
- }
-#endif
-
+ __replace_mapping_with_scratch_page(page);
balloon_append(page, core_driver);
}
flush_tlb_all();
- set_xen_guest_handle(reservation.extent_start, frame_list);
- reservation.nr_extents = nr_pages;
- ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+ ret = __memory_op_hypercall(XENMEM_decrease_reservation, frame_list,
+ nr_pages);
BUG_ON(ret != nr_pages);
xen_balloon.balloon_stats.current_pages -= nr_pages;