mmap_pages, GFP_KERNEL);
pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
if (!pending_reqs || !pending_grant_handles || !pending_pages)
goto out_of_memory;
+
+ for (i = 0; i < mmap_pages; i++)
+ SetPageBlkback(pending_pages[i]);
DPRINTK("%s: reqs=%d, pages=%d\n",
__FUNCTION__, blkif_reqs, mmap_pages);
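
The teardown path should drop the marking again before the pages go back to the balloon allocator, mirroring the clears in the gnttab hunk below. A minimal sketch, assuming the free_empty_pages_and_pagevec() counterpart to the allocator used above (the helper name itself is hypothetical):

	static void blkif_free_pending_pages(void)
	{
		int i;

		/* Clear ownership before handing the pages back. */
		for (i = 0; i < mmap_pages; i++)
			ClearPageBlkback(pending_pages[i]);
		free_empty_pages_and_pagevec(pending_pages, mmap_pages);
	}
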
+ for (i = 0; i < mmap_pages; i++)
+ SetPageBlkback(foreign_pages[mmap_alloc][i]);
+
for (i = 0; i < MAX_PENDING_REQS; i++) {
list_add_tail(&pending_reqs[mmap_alloc][i].free_list,
&pending_free);
*pagep = new_page;
SetPageForeign(page, gnttab_page_free);
+ ClearPageNetback(page);
+ ClearPageBlkback(page);
page->mapping = NULL;
out:
static inline int copy_pending_req(PEND_RING_IDX pending_idx)
{
- return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
- &mmap_pages[pending_idx]);
+ int err = gnttab_copy_grant_page(grant_tx_handle[pending_idx],
+ &mmap_pages[pending_idx]);
+
+ if (!err)
+ SetPageNetback(mmap_pages[pending_idx]);
+
+ return err;
}
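
Only on a successful grant copy does the replacement page at mmap_pages[pending_idx] get tagged PG_netback; on failure the slot is left untouched and the error is propagated. A hedged sketch of a caller (demo name; the real retry and bookkeeping logic is elided):

	static void demo_flush_pending(PEND_RING_IDX pending_idx)
	{
		if (copy_pending_req(pending_idx))
			printk(KERN_WARNING
			       "netback: grant copy failed for slot %d\n",
			       (int)pending_idx);
		/* On success the slot now holds a local copy owned by
		 * netback and marked PG_netback. */
	}
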
inline static void net_tx_action_dealloc(void)
for (i = 0; i < MAX_PENDING_REQS; i++) {
page = mmap_pages[i];
SetPageForeign(page, netif_page_release);
+ SetPageNetback(page);
netif_set_page_index(page, i);
INIT_LIST_HEAD(&pending_inuse[i].list);
}
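
Because every mapped page is tagged at init time, the foreign-page destructor can sanity-check ownership before recycling the slot. A sketch of what netif_page_release() might look like with such a check, assuming the two-argument destructor signature this tree uses and a netif_page_index() accessor matching netif_set_page_index() above:

	static void netif_page_release(struct page *page, unsigned int order)
	{
		int idx = netif_page_index(page);

		BUG_ON(order);
		BUG_ON(!PageNetback(page));
		BUG_ON(idx < 0 || idx >= MAX_PENDING_REQS ||
		       mmap_pages[idx] != page);

		netif_idx_release(idx);
	}
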
#define PG_foreign 20 /* Page is owned by foreign allocator. */
+#define PG_netback 21 /* Page is owned by netback. */
+#define PageNetback(page) test_bit(PG_netback, &(page)->flags)
+#define SetPageNetback(page) set_bit(PG_netback, &(page)->flags)
+#define ClearPageNetback(page) clear_bit(PG_netback, &(page)->flags)
+
+#define PG_blkback 22 /* Page is owned by blkback. */
+#define PageBlkback(page) test_bit(PG_blkback, &(page)->flags)
+#define SetPageBlkback(page) set_bit(PG_blkback, &(page)->flags)
+#define ClearPageBlkback(page) clear_bit(PG_blkback, &(page)->flags)
+
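
Both sets of macros are plain atomic bit operations on page->flags, so ownership can be tested or cleared from any context. For illustration only (not part of the patch), a helper that mirrors the clears in the gnttab_copy_grant_page hunk above:

	/* Drop any backend-ownership marking before a page changes hands. */
	static inline void clear_backend_ownership(struct page *page)
	{
		if (PageNetback(page))
			ClearPageNetback(page);
		if (PageBlkback(page))
			ClearPageBlkback(page);
	}
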
/*
* Manipulation of page state flags
*/