adjust_managed_page_count(page, -1);
}
-/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
+/* balloon_retrieve: rescue a page from the Xen balloon driver, if it is
+ * not empty. This function does not look at pages held by the generic
+ * balloon driver. The accounting is also left untouched, as the page
+ * might be put back on the list.
+ */
static struct page *balloon_retrieve(bool prefer_highmem)
{
struct page *page;
struct page, lru);
list_del(&page->lru);
- update_balloon_stats(page, -1);
-
- adjust_managed_page_count(page, 1);
-
return page;
}
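
With the accounting hoisted out of balloon_retrieve(), every caller now owns the bookkeeping for the pages it takes. A minimal sketch of the resulting caller contract, using only helpers that appear in this patch (keep_page is a hypothetical flag standing in for the caller's decision):

        /* Illustrative only, not part of the patch. */
        static void balloon_caller_sketch(bool keep_page)
        {
                struct page *page = balloon_retrieve(false);

                if (!page)
                        return;         /* the balloon list was empty */

                if (keep_page) {
                        /* Keeping it: account the page as handed back to the kernel. */
                        update_balloon_stats(page, -1);
                        adjust_managed_page_count(page, 1);
                } else {
                        /* Not keeping it: re-queue it, accounting untouched. */
                        list_add(&page->lru, &xen_balloon.ballooned_pages);
                }
        }
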
-static struct page *balloon_next_page(struct page *page)
-{
- struct list_head *next = page->lru.next;
- if (next == &xen_balloon.ballooned_pages)
- return NULL;
- return list_entry(next, struct page, lru);
-}
-
static enum bp_state update_schedule(enum bp_state state)
{
if (state == BP_DONE) {
int rc;
unsigned long pfn, i;
struct page *page;
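+ /* Pages pulled out of the balloon while the hypercall is in flight. */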
+ LIST_HEAD(queue);
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
- page = list_first_entry_or_null(&xen_balloon.ballooned_pages,
- struct page, lru);
+ /* First step: grab all pages we need to balloon in */
for (i = 0; i < nr_pages; i++) {
+ page = balloon_retrieve(false);
if (!page) {
nr_pages = i;
break;
}
frame_list[i] = page_to_pfn(page);
- page = balloon_next_page(page);
+ /* The order in the queue must match frame_list: the i-th
+ * page queued is the one whose pfn is in frame_list[i]. */
+ list_add_tail(&page->lru, &queue);
}
+ /* Second step: issue hypercall */
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
- if (rc <= 0)
- return BP_EAGAIN;
+ if (rc <= 0) {
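+ /* Retry later; the splice at move_pages_back returns
+ * every queued page to the balloon. */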
+ rc = BP_EAGAIN;
+ goto move_pages_back;
+ }
+ /* Third step: free populated pages back to the kernel allocator */
for (i = 0; i < rc; i++) {
- page = balloon_retrieve(false);
+ page = list_first_entry_or_null(&queue, struct page, lru);
BUG_ON(page == NULL);
+ list_del(&page->lru);
pfn = page_to_pfn(page);
#ifdef CONFIG_XEN_HAVE_PVMMU
/* Relinquish the page back to the allocator. */
__free_reserved_page(page);
+
+ /* We only account for those pages that have been populated. */
+ update_balloon_stats(page, -1);
+ adjust_managed_page_count(page, 1);
}
xen_balloon.balloon_stats.current_pages += rc;
- return BP_DONE;
+ rc = BP_DONE;
+
+move_pages_back:
+ /* Final step: move back any unpopulated pages to the balloon driver */
+ list_splice_init(&queue, &xen_balloon.ballooned_pages);
+ return rc;
}
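
Taken together, the new increase_reservation() is a grab/populate/splice-back loop: pages leave the balloon list only tentatively, and the common exit path returns whatever was not populated. A condensed sketch of the same shape against <linux/list.h>, where backend_populate() and account_and_free() are hypothetical stand-ins for the hypercall and the free-plus-accounting step:

        static void balloon_in_sketch(unsigned long want)
        {
                LIST_HEAD(queue);
                struct page *page;
                unsigned long i;
                long done;

                /* 1. Tentatively detach up to 'want' pages, preserving order. */
                for (i = 0; i < want; i++) {
                        page = balloon_retrieve(false);
                        if (!page)
                                break;
                        frame_list[i] = page_to_pfn(page);
                        list_add_tail(&page->lru, &queue);
                }

                /* 2. The backend may succeed partially: done <= i. */
                done = backend_populate(frame_list, i);    /* hypothetical */

                /* 3. Commit only the populated head of the queue... */
                while (done-- > 0) {
                        page = list_first_entry(&queue, struct page, lru);
                        list_del(&page->lru);
                        account_and_free(page);            /* hypothetical */
                }

                /* ...and return the unpopulated tail, stats untouched. */
                list_splice_init(&queue, &xen_balloon.ballooned_pages);
        }
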
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
page = balloon_retrieve(highmem);
if (page && (highmem || !PageHighMem(page))) {
pages[pgno++] = page;
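+ /* balloon_retrieve() no longer does the accounting; do it
+ * here, where the page is definitely being kept. */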
+ update_balloon_stats(page, -1);
+ adjust_managed_page_count(page, 1);
} else {
enum bp_state st;
if (page)