xenbits.xensource.com Git - people/ssmith/netchannel2-pvops.git/commitdiff
Pull in the live maps stuff.
author Steven Smith <ssmith@weybridge.uk.xensource.com>
Thu, 16 Apr 2009 13:03:57 +0000 (14:03 +0100)
committer Steven Smith <ssmith@weybridge.uk.xensource.com>
Tue, 19 May 2009 14:04:11 +0000 (15:04 +0100)
drivers/xen/Makefile
drivers/xen/live_maps.c [new file with mode: 0644]
drivers/xen/netback/netback.c
include/xen/live_maps.h [new file with mode: 0644]

index 007aa99b1be1177fe8f94ad254b7044b0b8d2aae..16781e798028c9ab2bfe03ba345e2ed254e4e0d9 100644 (file)
@@ -1,4 +1,4 @@
-obj-y  += grant-table.o features.o events.o manage.o biomerge.o
+obj-y  += grant-table.o features.o events.o manage.o biomerge.o live_maps.o
 obj-y  += xenbus/
 
 obj-$(CONFIG_HOTPLUG_CPU)              += cpu_hotplug.o
diff --git a/drivers/xen/live_maps.c b/drivers/xen/live_maps.c
new file mode 100644 (file)
index 0000000..010682a
--- /dev/null
@@ -0,0 +1,61 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <xen/grant_table.h>
+#include <xen/live_maps.h>
+
+/* This lock protects allocation and release of trackers, but is not
+   held when we're actually looking stuff up.  The caller is
+   responsible for making sure that suitable locks are held around
+   data path operations. */
+static DEFINE_SPINLOCK(tracker_lock);
+
+struct page_foreign_tracker *foreign_trackers[LIVE_MAP_NR_TRACKERS];
+EXPORT_SYMBOL(foreign_trackers);
+
+/* Allocate a foreign page tracker.  @size is the number of entries in the
+   tracker.  Returns NULL on error. */
+struct page_foreign_tracker *alloc_page_foreign_tracker(unsigned size)
+{
+        struct page_foreign_tracker *work;
+        unsigned x;
+
+        BUG_ON(size & ~LIVE_MAP_TRACKER_IDX_MASK);
+
+        work = kzalloc(sizeof(*work) +
+                       size * sizeof(struct page_foreign_tracked),
+                       GFP_KERNEL);
+        if (!work)
+                return work;
+        work->size = size;
+
+        spin_lock(&tracker_lock);
+        for (x = 0; x < LIVE_MAP_NR_TRACKERS; x++) {
+                if (foreign_trackers[x] == NULL) {
+                        work->id = x;
+                        foreign_trackers[x] = work;
+                        break;
+                }
+        }
+        spin_unlock(&tracker_lock);
+        if (x == LIVE_MAP_NR_TRACKERS) {
+                printk(KERN_WARNING "Out of foreign page trackers!\n");
+                kfree(work);
+                return NULL;
+        }
+        return work;
+}
+
+/* Release a tracker allocated with alloc_page_foreign_tracker.  There
+   should be no tracked pages when this is called. */
+void free_page_foreign_tracker(struct page_foreign_tracker *pft)
+{
+        spin_lock(&tracker_lock);
+        BUG_ON(foreign_trackers[pft->id] != pft);
+        foreign_trackers[pft->id] = NULL;
+        spin_unlock(&tracker_lock);
+        kfree(pft);
+}
+
+EXPORT_SYMBOL(alloc_page_foreign_tracker);
+EXPORT_SYMBOL(free_page_foreign_tracker);
index 80b424fb72c197c59aa07e7900e4dc080368a6f6..f8ad1bac399f0e1405793f23b785a5090a7ad254 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/udp.h>
 
 #include <xen/balloon.h>
+#include <xen/live_maps.h>
 #include <xen/events.h>
 #include <xen/interface/memory.h>
 
@@ -142,6 +143,7 @@ static inline pending_ring_idx_t nr_pending_reqs(void)
 {
        return MAX_PENDING_REQS - pending_prod + pending_cons;
 }
+static struct page_foreign_tracker *foreign_page_tracker;
 
 /* Freed TX SKBs get batched on this ring before return to pending_ring. */
 static u16 dealloc_ring[MAX_PENDING_REQS];
@@ -294,7 +296,6 @@ static u16 netbk_gop_frag(struct xen_netif *netif, struct netbk_rx_meta *meta,
        struct gnttab_copy *copy_gop;
        struct xen_netif_rx_request *req;
        unsigned long old_mfn;
-       int idx = netif_page_index(page);
 
        old_mfn = virt_to_mfn(page_address(page));
 
@@ -302,10 +303,9 @@ static u16 netbk_gop_frag(struct xen_netif *netif, struct netbk_rx_meta *meta,
 
        copy_gop = npo->copy + npo->copy_prod++;
        copy_gop->flags = GNTCOPY_dest_gref;
-       if (idx > -1) {
-               struct pending_tx_info *src_pend = &pending_tx_info[idx];
-               copy_gop->source.domid = src_pend->netif->domid;
-               copy_gop->source.u.ref = src_pend->req.gref;
+       if (PageForeign(page)) {
+               lookup_tracker_page(page, &copy_gop->source.domid,
+                                   &copy_gop->source.u.ref);
                copy_gop->flags |= GNTCOPY_source_gref;
        } else {
                copy_gop->source.domid = DOMID_SELF;
@@ -708,6 +708,8 @@ inline static void net_tx_action_dealloc(void)
                        if (!phys_to_machine_mapping_valid(pfn))
                                continue;
 
+                        stop_tracking_page(mmap_pages[pending_idx]);
+
                        gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
                                            GNTMAP_host_map,
                                            grant_tx_handle[pending_idx]);
@@ -847,6 +849,12 @@ static struct gnttab_map_grant_ref *netbk_get_requests(struct xen_netif *netif,
                netif_get(netif);
                pending_tx_info[pending_idx].netif = netif;
                frags[i].page = (void *)pending_idx;
+
+                start_tracking_page(foreign_page_tracker,
+                                    mmap_pages[pending_idx],
+                                    netif->domid,
+                                    pending_tx_info[pending_idx].req.gref,
+                                    pending_idx);
        }
 
        return mop;
@@ -1185,6 +1193,12 @@ static unsigned net_tx_build_mops(void)
                                  txreq.gref, netif->domid);
                mop++;
 
+                start_tracking_page(foreign_page_tracker,
+                                    mmap_pages[pending_idx],
+                                    netif->domid,
+                                    txreq.gref,
+                                    pending_idx);
+
                memcpy(&pending_tx_info[pending_idx].req,
                       &txreq, sizeof(txreq));
                pending_tx_info[pending_idx].netif = netif;
@@ -1465,9 +1479,13 @@ static int __init netback_init(void)
        netbk_tx_pending_timer.data = 0;
        netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
 
+        foreign_page_tracker = alloc_page_foreign_tracker(MAX_PENDING_REQS);
+        if (!foreign_page_tracker)
+                return -ENOMEM;
        mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
        if (mmap_pages == NULL) {
                printk("%s: out of memory\n", __FUNCTION__);
+                free_page_foreign_tracker(foreign_page_tracker);
                return -ENOMEM;
        }
 
diff --git a/include/xen/live_maps.h b/include/xen/live_maps.h
new file mode 100644 (file)
index 0000000..54c3dd4
--- /dev/null
@@ -0,0 +1,130 @@
+#ifndef XEN_LIVE_MAPS_H__
+#define XEN_LIVE_MAPS_H__
+
+/* A mechanism for tracking where pages have been grant mapped from.
+   Anything which can map pages through a grant reference is supposed
+   to allocate a page_tracker and then, whenever they map a grant:
+
+   a) Flag the page as foreign with SetPageForeign(), and
+   b) Register the struct page with a tracker through start_tracking_page().
+
+   If you later need to grant access to the page (either with a normal
+   grant or implicitly in a copy grant operation), you should use
+   lookup_tracker_page() to find out what domain and grant reference
+   it was mapped from.
+
+   Obviously, if a backend knows that the page will never need to be
+   re-granted once it's been mapped, it can avoid doing all this
+   stuff.
+
+   The number of trackers is quite limited, so they shouldn't be
+   allocated unnecessarily.  One per backend class is reasonable
+   (i.e. netback, blkback, etc.), but one per backend device probably
+   isn't.
+*/
+
+#include <linux/mm.h>
+#include <xen/grant_table.h>
+
+/* We use page->private to store some index information so that we can
+   find the tracking information later.  The top few bits are used to
+   identify the tracker, and the rest are used as an index into that
+   tracker. */
+
+/* How many bits to use for tracker IDs. */
+#define LIVE_MAP_TRACKER_BITS 2
+
+/* How many bits to use for tracker indexes. */
+#define LIVE_MAP_TRACKER_IDX_BITS (32 - LIVE_MAP_TRACKER_BITS)
+
+/* Maximum number of trackers */
+#define LIVE_MAP_NR_TRACKERS (1 << LIVE_MAP_TRACKER_BITS)
+
+/* Bitmask of index inside tracker */
+#define LIVE_MAP_TRACKER_IDX_MASK (~0u >> LIVE_MAP_TRACKER_BITS)
+
+/* Turn off some moderately expensive debug checks. */
+#undef LIVE_MAPS_DEBUG
+
+struct page_foreign_tracked {
+        domid_t dom;
+        grant_ref_t gref;
+#ifdef LIVE_MAPS_DEBUG
+        unsigned in_use;
+#endif
+};
+
+struct page_foreign_tracker {
+        unsigned size;
+        unsigned id;
+        struct page_foreign_tracked contents[];
+};
+
+extern struct page_foreign_tracker *foreign_trackers[LIVE_MAP_NR_TRACKERS];
+
+/* Allocate a foreign page tracker.  @size is the number of entries in the
+   tracker.  Returns NULL on error. */
+struct page_foreign_tracker *alloc_page_foreign_tracker(unsigned size);
+
+/* Release a tracker allocated with alloc_page_foreign_tracker.  There
+   should be no tracked pages when this is called. */
+void free_page_foreign_tracker(struct page_foreign_tracker *pft);
+
+/* Start tracking a page.  @idx is an index in the tracker which is
+   not currently in use, and must be less than the size of the
+   tracker.  The page must be marked as foreign before this is called.
+   The caller is expected to make sure that the page is not a
+   simultaneous target of lookup_tracker_page().  The page should be
+   passed to stop_tracking_page() when the grant is unmapped. */
+static inline void start_tracking_page(struct page_foreign_tracker *pft,
+                                       struct page *p,
+                                       domid_t dom,
+                                       grant_ref_t gref,
+                                       unsigned idx)
+{
+        BUG_ON(!PageForeign(p));
+#ifdef LIVE_MAPS_DEBUG
+        BUG_ON(idx >= pft->size);
+        BUG_ON(pft->contents[idx].in_use);
+        pft->contents[idx].in_use = 1;
+#endif
+        pft->contents[idx].dom = dom;
+        pft->contents[idx].gref = gref;
+        set_page_private(p, idx | (pft->id << LIVE_MAP_TRACKER_IDX_BITS));
+}
+
+static inline void stop_tracking_page(struct page *p)
+{
+#ifdef LIVE_MAPS_DEBUG
+        struct page_foreign_tracker *pft;
+        unsigned idx = page_private(p);
+        BUG_ON(!PageForeign(p));
+        pft = foreign_trackers[idx >> LIVE_MAP_TRACKER_IDX_BITS];
+        BUG_ON((idx & LIVE_MAP_TRACKER_IDX_MASK) >= pft->size);
+        BUG_ON(!pft->contents[idx & LIVE_MAP_TRACKER_IDX_MASK].in_use);
+        pft->contents[idx & LIVE_MAP_TRACKER_IDX_MASK].in_use = 0;
+        set_page_private(p, 0);
+#endif
+}
+
+/* Lookup a page which is tracked in some tracker.
+   start_tracking_page() must have been called previously.  *@dom and
+   *@gref will be set to the values which were specified when
+   start_tracking_page() was called. */
+static inline void lookup_tracker_page(struct page *p, domid_t *dom,
+                                       grant_ref_t *gref)
+{
+        struct page_foreign_tracker *pft;
+        unsigned idx = page_private(p);
+        BUG_ON(!PageForeign(p));
+        pft = foreign_trackers[idx >> LIVE_MAP_TRACKER_IDX_BITS];
+#ifdef LIVE_MAPS_DEBUG
+        BUG_ON(!pft);
+        BUG_ON((idx & LIVE_MAP_TRACKER_IDX_MASK) >= pft->size);
+        BUG_ON(!pft->contents[idx & LIVE_MAP_TRACKER_IDX_MASK].in_use);
+#endif
+        *dom = pft->contents[idx & LIVE_MAP_TRACKER_IDX_MASK].dom;
+        *gref = pft->contents[idx & LIVE_MAP_TRACKER_IDX_MASK].gref;
+}
+
+#endif /* !XEN_LIVE_MAPS_H__ */