 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
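+/*
+ * PageForeign destructor: reverse a previous xen_limit_pages_to_max_mfn()
+ * exchange (address_bits == 0 selects the undo path) and release the pages.
+ */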
+static void undo_limit_pages(struct page *pages, unsigned int order)
+{
+	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
+	BUG_ON(order > MAX_CONTIG_ORDER);
+	xen_limit_pages_to_max_mfn(pages, order, 0);
+	ClearPageForeign(pages);
+	__free_pages(pages, order);
+}
+
 int xen_limit_pages_to_max_mfn(
 	struct page *pages, unsigned int order, unsigned int address_bits)
 {
 	if (unlikely(order > MAX_CONTIG_ORDER))
 		return -ENOMEM;
-	bitmap_zero(limit_map, 1U << order);
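+	/*
+	 * With address_bits set, build limit_map from scratch.  With
+	 * address_bits == 0 (the undo path), recover the bitmap stashed
+	 * in the index fields of the trailing pages, or just bit 0 for
+	 * a single page.
+	 */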
+	if (address_bits) {
+		if (address_bits < PAGE_SHIFT)
+			return -EINVAL;
+		bitmap_zero(limit_map, 1U << order);
+	} else if (order) {
+		BUILD_BUG_ON(sizeof(pages->index) != sizeof(*limit_map));
+		for (i = 0; i < BITS_TO_LONGS(1U << order); ++i)
+			limit_map[i] = pages[i + 1].index;
+	} else
+		__set_bit(0, limit_map);
+
 	set_xen_guest_handle(exchange.in.extent_start, in_frames);
 	set_xen_guest_handle(exchange.out.extent_start, out_frames);
 	/* 0. Scrub the pages. */
 	for (i = 0, n = 0; i < 1U<<order ; i++) {
 		page = &pages[i];
-		if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
-			continue;
-		__set_bit(i, limit_map);
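+		/* When limiting, mark only pages whose MFN exceeds the limit. */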
+		if (address_bits) {
+			if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
+				continue;
+			__set_bit(i, limit_map);
+		}
 		if (!PageHighMem(page))
 			scrub_pages(page_address(page), 1);
 	balloon_unlock(flags);
-	return success ? 0 : -ENOMEM;
+	if (!success)
+		return -ENOMEM;
+
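+	/*
+	 * Stash limit_map in the index fields of the trailing pages and
+	 * register undo_limit_pages() so that freeing the pages reverses
+	 * the exchange.
+	 */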
+	if (address_bits) {
+		if (order) {
+			BUILD_BUG_ON(sizeof(*limit_map) != sizeof(pages->index));
+			for (i = 0; i < BITS_TO_LONGS(1U << order); ++i)
+				pages[i + 1].index = limit_map[i];
+		}
+		SetPageForeign(pages, undo_limit_pages);
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
 	return pte;
 }
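+/* Wrapper giving pte_free() the PageForeign destructor signature. */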
+static void _pte_free(struct page *page, unsigned int order)
+{
+	BUG_ON(order);
+	pte_free(page);
+}
+
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;
 	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 #endif
 	if (pte) {
-		SetPageForeign(pte, pte_free);
+		SetPageForeign(pte, _pte_free);
 		init_page_count(pte);
 	}
 	return pte;
 	mm_unpin(mm);
 }
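+/* Same wrapper for this pte_alloc_one(); PTE pages are always order 0. */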
+static void _pte_free(struct page *page, unsigned int order)
+{
+	BUG_ON(order);
+	pte_free(page);
+}
+
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *pte;
 	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 	if (pte) {
-		SetPageForeign(pte, pte_free);
+		SetPageForeign(pte, _pte_free);
 		init_page_count(pte);
 	}
 	return pte;
 	return 0;
 }
-static void gnttab_page_free(struct page *page)
+static void gnttab_page_free(struct page *page, unsigned int order)
 {
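+	/* Grant-table foreign pages are always single (order-0) pages. */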
+	BUG_ON(order);
 	ClearPageForeign(page);
 	gnttab_reset_grant_page(page);
 	put_page(page);
 }
 static void netif_idx_release(u16 pending_idx);
-static void netif_page_release(struct page *page);
 static void make_tx_response(netif_t *netif,
 			     netif_tx_request_t *txp,
 			     s8 st);
 	tasklet_schedule(&net_tx_tasklet);
 }
-static void netif_page_release(struct page *page)
+static void netif_page_release(struct page *page, unsigned int order)
 {
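+	/* netback pages are order 0; just release the pending-ring index. */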
+	BUG_ON(order);
 	netif_idx_release(netif_page_index(page));
 }
 #define PageForeign(page)	test_bit(PG_foreign, &(page)->flags)
 #define SetPageForeign(_page, dtor) do {		\
 	set_bit(PG_foreign, &(_page)->flags);		\
-	BUG_ON((dtor) == (void (*)(struct page *))0);	\
+	BUG_ON((dtor) == (void (*)(struct page *, unsigned int))0); \
 	(_page)->index = (long)(dtor);			\
 } while (0)
 #define ClearPageForeign(page) do {			\
 	clear_bit(PG_foreign, &(page)->flags);		\
 	(page)->index = 0;				\
 } while (0)
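+/* The destructor is invoked with the order the page group is freed at. */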
-#define PageForeignDestructor(_page) \
-	((void (*)(struct page *))(_page)->index)(_page)
+#define PageForeignDestructor(_page, order) \
+	((void (*)(struct page *, unsigned int))(_page)->index)(_page, order)
 struct page; /* forward declaration */
 #ifdef CONFIG_XEN
 	if (PageForeign(page)) {
-		PageForeignDestructor(page);
+		PageForeignDestructor(page, order);
 		return;
 	}
 #endif
 #ifdef CONFIG_XEN
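+	/* This free path only handles single pages, hence order 0. */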
 	if (PageForeign(page)) {
-		PageForeignDestructor(page);
+		PageForeignDestructor(page, 0);
 		return;
 	}
 #endif