INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
spin_lock_init(&d->arch.hvm_domain.irq_lock);
spin_lock_init(&d->arch.hvm_domain.uc_lock);
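+ /* Tracking of permanently write-mapped guest frames (struct hvm_write_map). */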
+ spin_lock_init(&d->arch.hvm_domain.write_map.lock);
+ INIT_LIST_HEAD(&d->arch.hvm_domain.write_map.list);
hvm_init_cacheattr_region_list(d);
return 1;
}
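+/*
+ * Tracking entry for a permanent, writable mapping of a guest frame; kept
+ * on the per-domain write_map list guarded by write_map.lock.
+ */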
+struct hvm_write_map {
+ struct list_head list;
+ struct page_info *page;
+};
+
/* On non-NULL return, we leave this function holding an additional
* ref on the underlying mfn, if any */
static void *_hvm_map_guest_frame(unsigned long gfn, bool_t permanent,
if ( writable )
{
- if ( !p2m_is_discard_write(p2mt) )
- paging_mark_dirty(d, page_to_mfn(page));
- else
+ if ( unlikely(p2m_is_discard_write(p2mt)) )
*writable = 0;
+ else if ( !permanent )
+ paging_mark_dirty(d, page_to_mfn(page));
}
if ( !permanent )
return __map_domain_page(page);
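+ /*
+ * Track permanent writable mappings so their pages can be marked
+ * dirty again on the final log-dirty iteration (see
+ * hvm_mapped_guest_frames_mark_dirty()).
+ */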
+ if ( writable && *writable )
+ {
+ struct hvm_write_map *track = xmalloc(struct hvm_write_map);
+
+ if ( !track )
+ {
+ put_page(page);
+ return NULL;
+ }
+ track->page = page;
+ spin_lock(&d->arch.hvm_domain.write_map.lock);
+ list_add_tail(&track->list, &d->arch.hvm_domain.write_map.list);
+ spin_unlock(&d->arch.hvm_domain.write_map.lock);
+ }
+
map = __map_domain_page_global(page);
if ( !map )
put_page(page);
void hvm_unmap_guest_frame(void *p, bool_t permanent)
{
unsigned long mfn;
+ struct page_info *page;
if ( !p )
return;
mfn = domain_page_map_to_mfn(p);
+ page = mfn_to_page(mfn);
if ( !permanent )
unmap_domain_page(p);
else
+ {
+ struct domain *d = page_get_owner(page);
+ struct hvm_write_map *track;
+
unmap_domain_page_global(p);
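+ /*
+ * Drop the tracking entry for this permanent mapping, marking the
+ * page dirty one final time in the process.
+ */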
+ spin_lock(&d->arch.hvm_domain.write_map.lock);
+ list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+ if ( track->page == page )
+ {
+ paging_mark_dirty(d, mfn);
+ list_del(&track->list);
+ xfree(track);
+ break;
+ }
+ spin_unlock(&d->arch.hvm_domain.write_map.lock);
+ }
+
+ put_page(page);
+}
+
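+/* Mark dirty every page with a tracked permanent writable mapping. */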
+void hvm_mapped_guest_frames_mark_dirty(struct domain *d)
+{
+ struct hvm_write_map *track;
- put_page(mfn_to_page(mfn));
+ spin_lock(&d->arch.hvm_domain.write_map.lock);
+ list_for_each_entry(track, &d->arch.hvm_domain.write_map.list, list)
+ paging_mark_dirty(d, page_to_mfn(track->page));
+ spin_unlock(&d->arch.hvm_domain.write_map.lock);
}
static void *hvm_map_entry(unsigned long va, bool_t *writable)
#include <asm/hvm/nestedhvm.h>
#include <xen/numa.h>
#include <xsm/xsm.h>
+#include <public/sched.h> /* SHUTDOWN_suspend */
#include "mm-locks.h"
if ( !resuming )
{
+ /*
+ * Mark all currently write-mapped pages as dirty, e.g. on the
+ * final iteration of a save operation.
+ */
+ if ( sc->mode & XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL )
+ hvm_mapped_guest_frames_mark_dirty(d);
+
domain_pause(d);
/*
case XEN_DOMCTL_SHADOW_OP_CLEAN:
case XEN_DOMCTL_SHADOW_OP_PEEK:
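+ /* Only XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL may be passed in sc->mode here. */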
+ if ( sc->mode & ~XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL )
+ return -EINVAL;
return paging_log_dirty_op(d, sc, resuming);
}
*/
#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4)
+/* Mode flags for XEN_DOMCTL_SHADOW_OP_{CLEAN,PEEK}. */
+ /*
+ * This is the final iteration: request that pages mapped writably
+ * by the hypervisor be included in the dirty bitmap.
+ */
+#define XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL (1 << 0)
+
struct xen_domctl_shadow_op_stats {
uint32_t fault_count;
uint32_t dirty_count;
/* IN variables. */
uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */
- /* OP_ENABLE */
- uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */
+ /* OP_ENABLE: XEN_DOMCTL_SHADOW_ENABLE_* */
+ /* OP_PEEK / OP_CLEAN: XEN_DOMCTL_SHADOW_LOGDIRTY_* */
+ uint32_t mode;
/* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
uint32_t mb; /* Shadow memory allocation in MB */