xenbits.xensource.com Git - people/aperard/linux.git/commitdiff
xen/grant-table: add a mechanism to safely unmap pages that are in use
author: Jennifer Herbert <jennifer.herbert@citrix.com>
Tue, 9 Dec 2014 18:28:37 +0000 (18:28 +0000)
committer: Anthony PERARD <anthony.perard@citrix.com>
Thu, 7 May 2015 17:05:59 +0000 (18:05 +0100)
Introduce gnttab_unmap_refs_async() that can be used to safely unmap
pages that may be in use (ref count > 1).  If the pages are in use the
unmap is deferred and retried later.  This polling is not very clever
but it should be good enough if the cases where the delay is necessary
are rare.

The initial delay is 5 ms and is increased linearly on each subsequent
retry (to reduce load if the page is in use for a long time).

This is needed to allow block backends using grant mapping to safely
use network storage (block or filesystem based such as iSCSI or NFS).

The network storage driver may complete a block request whilst there
is a queued network packet retry (because the ack from the remote end
races with deciding to queue the retry).  The pages for the retried
packet would be grant unmapped and the network driver (or hardware)
would access the unmapped page.

Signed-off-by: Jennifer Herbert <jennifer.herbert@citrix.com>
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
(cherry picked from commit 3f9f1c67572f5e5e6dc84216d48d1480f3c4fcf6)

 Conflicts:
drivers/xen/grant-table.c

drivers/xen/grant-table.c
include/xen/grant_table.h

index 680dd4ab8521d756bf5e9210a6a1c66847e73512..9fd742839fa3a3a336ee502e0cb67fd7dcd372a6 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/hardirq.h>
+#include <linux/workqueue.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -1030,6 +1031,49 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
+/* Initial retry delay in ms; effective delay grows linearly with each retry. */
+#define GNTTAB_UNMAP_REFS_DELAY 5
+
+static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+
+/*
+ * Delayed-work callback for a deferred unmap: bump the retry age
+ * (saturating at UINT_MAX so the computed delay cannot overflow)
+ * and attempt the asynchronous unmap again.
+ */
+static void gnttab_unmap_work(struct work_struct *work)
+{
+       struct gntab_unmap_queue_data
+               *unmap_data = container_of(work, 
+                                          struct gntab_unmap_queue_data,
+                                          gnttab_work.work);
+       if (unmap_data->age != UINT_MAX)
+               unmap_data->age++;
+       __gnttab_unmap_refs_async(unmap_data);
+}
+
+/*
+ * Try to unmap all pages in @item.  If any page is still in use
+ * (page refcount > 1, i.e. someone besides us holds a reference),
+ * defer the whole batch and retry later; the delay increases
+ * linearly with item->age to reduce polling load for long-held
+ * pages.  When all pages are free, perform the unmap and invoke the
+ * caller's completion callback with the result.
+ */
+static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
+{
+       int ret;
+       int pc;
+
+       for (pc = 0; pc < item->count; pc++) {
+               if (page_count(item->pages[pc]) > 1) {
+                       /* Page still referenced elsewhere: back off and retry. */
+                       unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
+                       schedule_delayed_work(&item->gnttab_work,
+                                             msecs_to_jiffies(delay));
+                       return;
+               }
+       }
+
+       ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
+                               item->pages, item->count);
+       item->done(ret, item);
+}
+
+/*
+ * Safely unmap grant references whose pages may still be in use.
+ * The unmap is deferred and retried (via delayed work) until every
+ * page's refcount drops to 1; item->done() is called with the final
+ * gnttab_unmap_refs() result.  Note: may complete asynchronously, so
+ * @item must stay valid until the callback runs.
+ */
+void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
+{
+       INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
+       item->age = 0;
+
+       __gnttab_unmap_refs_async(item);
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
+
 static unsigned nr_status_frames(unsigned nr_grant_frames)
 {
        BUG_ON(grefs_per_grant_frame == 0);
index 85e9fdc6d00c5c37d497f1c5e27c9c1b14d2e107..295e9aea626c42f34057146d90153d07da23b6a7 100644 (file)
@@ -60,6 +60,22 @@ struct gnttab_free_callback {
        u16 count;
 };
 
+struct gntab_unmap_queue_data;
+
+/* Completion callback: @result is the gnttab_unmap_refs() return value. */
+typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);
+
+/* State for an asynchronous (deferrable) batch grant unmap. */
+struct gntab_unmap_queue_data
+{
+       struct delayed_work     gnttab_work;    /* retry work item */
+       void *data;                             /* caller-private context */
+       gnttab_unmap_refs_done  done;           /* called when unmap completes */
+       struct gnttab_unmap_grant_ref *unmap_ops;
+       struct gnttab_unmap_grant_ref *kunmap_ops;
+       struct page **pages;
+       unsigned int count;                     /* number of entries in ops/pages */
+       unsigned int age;                       /* retry count, scales the delay */
+};
+
 int gnttab_init(void);
 int gnttab_suspend(void);
 int gnttab_resume(void);
@@ -202,6 +218,8 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct gnttab_unmap_grant_ref *kunmap_ops,
                      struct page **pages, unsigned int count);
+void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+
 
 /* Perform a batch of grant map/copy operations. Retry every batch slot
  * for which the hypervisor returns GNTST_eagain. This is typically due