x86/HVM: correct page dirty marking in hvm_map_guest_frame_rw()
author     Jan Beulich <jbeulich@suse.com>
Tue, 27 Oct 2015 10:42:04 +0000 (11:42 +0100)
committer  Jan Beulich <jbeulich@suse.com>
Tue, 27 Oct 2015 10:42:04 +0000 (11:42 +0100)
Rather than dirtying a page when establishing a (permanent) mapping,
dirty it when the page gets unmapped, or - if still mapped - on the
final iteration of a save operation (or in other cases where the guest
is paused or already shut down). (Transient mappings continue to get
dirtied upon getting mapped, to avoid the overhead of tracking.)

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
tools/libxc/xc_sr_save.c
xen/arch/x86/hvm/hvm.c
xen/arch/x86/mm/paging.c
xen/include/asm-x86/hvm/domain.h
xen/include/asm-x86/hvm/hvm.h
xen/include/public/domctl.h
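
For orientation, the toolstack-side use of the new flag (see the xc_sr_save.c hunk below) amounts to roughly the following. This is a minimal sketch only: the hypercall-buffer setup for dirty_bitmap and the rest of the save loop are elided, and the helper name send_final_dirty_pages is made up for illustration.

/* Sketch: the final XEN_DOMCTL_SHADOW_OP_CLEAN of a save operation. */
#include <xenctrl.h>

static int send_final_dirty_pages(xc_interface *xch, uint32_t domid,
                                  xc_hypercall_buffer_t *dirty_bitmap,
                                  unsigned long p2m_size)
{
    xc_shadow_op_stats_t stats;

    /*
     * XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL tells Xen that this is the last
     * iteration, so pages the hypervisor itself still holds writably
     * mapped are reported as dirty too.
     */
    if ( xc_shadow_control(xch, domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
                           dirty_bitmap, p2m_size, NULL,
                           XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL, &stats) !=
         p2m_size )
        return -1;

    /* ... transmit the pages flagged in the (now cleaned) bitmap ... */
    return 0;
}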

diff --git a/tools/libxc/xc_sr_save.c b/tools/libxc/xc_sr_save.c
index 7dc3a48ccb170b34f291ccc4e7cc78542be05e2d..d1bdf7242ef3bb9db841efbf2fe80c881aa143f3 100644
--- a/tools/libxc/xc_sr_save.c
+++ b/tools/libxc/xc_sr_save.c
@@ -537,7 +537,8 @@ static int suspend_and_send_dirty(struct xc_sr_context *ctx)
     if ( xc_shadow_control(
              xch, ctx->domid, XEN_DOMCTL_SHADOW_OP_CLEAN,
              HYPERCALL_BUFFER(dirty_bitmap), ctx->save.p2m_size,
-             NULL, 0, &stats) != ctx->save.p2m_size )
+             NULL, XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL, &stats) !=
+         ctx->save.p2m_size )
     {
         PERROR("Failed to retrieve logdirty bitmap");
         rc = -1;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 965bfbfbaf24528f9e47c7e9c7f20423fa17cf02..21f42a7f0e5dad62ab895ffa3e1b83ff99fc5734 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1565,6 +1565,8 @@ int hvm_domain_initialise(struct domain *d)
     INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
     spin_lock_init(&d->arch.hvm_domain.irq_lock);
     spin_lock_init(&d->arch.hvm_domain.uc_lock);
+    spin_lock_init(&d->arch.hvm_domain.write_map.lock);
+    INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
 
     hvm_init_cacheattr_region_list(d);
 
@@ -3677,6 +3679,11 @@ int hvm_virtual_to_linear_addr(
     return 1;
 }
 
+struct hvm_write_map {
+    struct list_head list;
+    struct page_info *page;
+};
+
 /* On non-NULL return, we leave this function holding an additional 
  * ref on the underlying mfn, if any */
 static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
@@ -3704,15 +3711,30 @@ static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
 
     if ( writable )
     {
-        if ( !p2m_is_discard_write(p2mt) )
-            paging_mark_dirty(d, page_to_mfn(page));
-        else
+        if ( unlikely(p2m_is_discard_write(p2mt)) )
             *writable = 0;
+        else if ( !permanent )
+            paging_mark_dirty(d, page_to_mfn(page));
     }
 
     if ( !permanent )
         return __map_domain_page(page);
 
+    if ( writable && *writable )
+    {
+        struct hvm_write_map *track = xmalloc(struct hvm_write_map);
+
+        if ( !track )
+        {
+            put_page(page);
+            return NULL;
+        }
+        track->page = page;
+        spin_lock(&d->arch.hvm_domain.write_map.lock);
+        list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);
+        spin_unlock(&d->arch.hvm_domain.write_map.lock);
+    }
+
     map = __map_domain_page_global(page);
     if ( !map )
         put_page(page);
@@ -3735,18 +3757,45 @@ void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
 void hvm_unmap_guest_frame(void *p, bool_t permanent)
 {
     unsigned long mfn;
+    struct page_info *page;
 
     if ( !p )
         return;
 
     mfn = domain_page_map_to_mfn(p);
+    page = mfn_to_page(mfn);
 
     if ( !permanent )
         unmap_domain_page(p);
     else
+    {
+        struct domain *d = page_get_owner(page);
+        struct hvm_write_map *track;
+
         unmap_domain_page_global(p);
+        spin_lock(&d->arch.hvm_domain.write_map.lock);
+        list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+            if ( track->page == page )
+            {
+                paging_mark_dirty(d, mfn);
+                list_del(&track->list);
+                xfree(track);
+                break;
+            }
+        spin_unlock(&d->arch.hvm_domain.write_map.lock);
+    }
+
+    put_page(page);
+}
+
+void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
+{
+    struct hvm_write_map *track;
 
-    put_page(mfn_to_page(mfn));
+    spin_lock(&d->arch.hvm_domain.write_map.lock);
+    list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+        paging_mark_dirty(d, page_to_mfn(track->page));
+    spin_unlock(&d->arch.hvm_domain.write_map.lock);
 }
 
 static void *hvm_map_entry(unsigned long va, bool_t *writable)
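
With the change above, an in-hypervisor user of a permanent writable mapping behaves roughly as sketched below. This is illustrative only, built from the declarations touched by this patch; the function name write_guest_frame_example and the gfn/buf/len arguments are hypothetical, and the surrounding includes are elided.

/* Illustrative only: lifecycle of a permanent writable guest mapping. */
static void write_guest_frame_example(unsigned long gfn,
                                      const void *buf, size_t len)
{
    bool_t writable = 1;
    void *va = hvm_map_guest_frame_rw(gfn, 1 /* permanent */, &writable);

    if ( va == NULL || !writable )
        return;                   /* mapping failure, or discard-write page */

    memcpy(va, buf, len);         /* page may be written repeatedly */

    /*
     * Dirty marking now happens here, at unmap time, or - while the mapping
     * is still in place - via hvm_mapped_guest_frames_mark_dirty() on the
     * final XEN_DOMCTL_SHADOW_OP_CLEAN of a save operation.
     */
    hvm_unmap_guest_frame(va, 1 /* permanent */);
}
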
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index ba1d52841409dd438f9cab41668a5cc2528e0672..42648df83c42ab5290eb0787534c652c1737e1fc 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -29,6 +29,7 @@
 #include <asm/hvm/nestedhvm.h>
 #include <xen/numa.h>
 #include <xsm/xsm.h>
+#include <public/sched.h> /* SHUTDOWN_suspend */
 
 #include "mm-locks.h"
 
@@ -422,6 +423,13 @@ static int paging_log_dirty_op(struct domain *d,
 
     if ( !resuming )
     {
+        /*
+         * Mark dirty all currently write-mapped pages on e.g. the
+         * final iteration of a save operation.
+         */
+        if ( sc->mode & XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL )
+            hvm_mapped_guest_frames_mark_dirty(d);
+
         domain_pause(d);
 
         /*
@@ -744,6 +752,8 @@ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
 
     case XEN_DOMCTL_SHADOW_OP_CLEAN:
     case XEN_DOMCTL_SHADOW_OP_PEEK:
+        if ( sc->mode & ~XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL )
+            return -EINVAL;
         return paging_log_dirty_op(d, sc, resuming);
     }
 
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 992d5d14091935992e979229d87d4c92c1f9998b..a8cc2ad4d3f2977f76f68f5eb5f25b793e563422 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -145,6 +145,12 @@ struct hvm_domain {
 
     unsigned long *io_bitmap;
 
+    /* List of permanently write-mapped pages. */
+    struct {
+        spinlock_t lock;
+        struct list_head list;
+    } write_map;
+
     union {
         struct vmx_domain vmx;
         struct svm_domain svm;
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 069370636452ed658d5d164f05b3ed82ba96a860..0cf7da1035ecebcaa377b6c704df2d4ef3aff41d 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -445,6 +445,7 @@ void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
                              bool_t *writable);
 void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
 void hvm_unmap_guest_frame(void *p, bool_t permanent);
+void hvm_mapped_guest_frames_mark_dirty(struct domain *);
 
 static inline void hvm_set_info_guest(struct vcpu *v)
 {
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 86cd0abbf9c9727ad000aa627564b39a6b6b8b0b..7a56b3f7dc95189d1ee29f548cf6697011d3a157 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -208,6 +208,13 @@ struct xen_domctl_getpageframeinfo3 {
   */
 #define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL  (1 << 4)
 
+/* Mode flags for XEN_DOMCTL_SHADOW_OP_{CLEAN,PEEK}. */
+ /*
+  * This is the final iteration: Requesting to include pages mapped
+  * writably by the hypervisor in the dirty bitmap.
+  */
+#define XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL   (1 << 0)
+
 struct xen_domctl_shadow_op_stats {
     uint32_t fault_count;
     uint32_t dirty_count;
@@ -219,8 +226,9 @@ struct xen_domctl_shadow_op {
     /* IN variables. */
     uint32_t       op;       /* XEN_DOMCTL_SHADOW_OP_* */
 
-    /* OP_ENABLE */
-    uint32_t       mode;     /* XEN_DOMCTL_SHADOW_ENABLE_* */
+    /* OP_ENABLE: XEN_DOMCTL_SHADOW_ENABLE_* */
+    /* OP_PEEK / OP_CLEAN: XEN_DOMCTL_SHADOW_LOGDIRTY_* */
+    uint32_t       mode;
 
     /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
     uint32_t       mb;       /* Shadow memory allocation in MB */