#include <asm/shadow.h>
#include <asm/p2m.h>
#include <asm/hap.h>
+#include <asm/event.h>
#include <asm/hvm/nestedhvm.h>
#include <xen/numa.h>
#include <xsm/xsm.h>
d->arch.paging.free_page(d, mfn_to_page(mfn));
}
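+/*
+ * Free the log-dirty bitmap in preemptible chunks.  @rc carries the caller's
+ * error code (e.g. from disable_log_dirty()) so it can be reported once
+ * freeing completes; -EAGAIN is returned when preempted, with progress
+ * recorded in d->arch.paging.preempt for the continuation to pick up.
+ */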
-void paging_free_log_dirty_bitmap(struct domain *d)
+static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
{
mfn_t *l4, *l3, *l2;
int i4, i3, i2;
+ paging_lock(d);
+
if ( !mfn_valid(d->arch.paging.log_dirty.top) )
- return;
+ {
+ paging_unlock(d);
+ return 0;
+ }
- paging_lock(d);
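+ /* Only the domain that started the pending OP_OFF may resume it. */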
+ if ( !d->arch.paging.preempt.dom )
+ {
+ memset(&d->arch.paging.preempt.log_dirty, 0,
+ sizeof(d->arch.paging.preempt.log_dirty));
+ ASSERT(rc <= 0);
+ d->arch.paging.preempt.log_dirty.done = -rc;
+ }
+ else if ( d->arch.paging.preempt.dom != current->domain ||
+ d->arch.paging.preempt.op != XEN_DOMCTL_SHADOW_OP_OFF )
+ {
+ paging_unlock(d);
+ return -EBUSY;
+ }
l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
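+ /* Resume the walk from the point recorded at the last preemption. */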
+ i4 = d->arch.paging.preempt.log_dirty.i4;
+ i3 = d->arch.paging.preempt.log_dirty.i3;
+ rc = 0;
- for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
+ for ( ; i4 < LOGDIRTY_NODE_ENTRIES; i4++, i3 = 0 )
{
if ( !mfn_valid(l4[i4]) )
continue;
l3 = map_domain_page(mfn_x(l4[i4]));
- for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
+ for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
{
if ( !mfn_valid(l3[i3]) )
continue;
unmap_domain_page(l2);
paging_free_log_dirty_page(d, l3[i3]);
+ l3[i3] = _mfn(INVALID_MFN);
+
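+ /* No point preempting on the final entry; the inner loop is about to finish anyway. */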
+ if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
+ {
+ d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
+ d->arch.paging.preempt.log_dirty.i4 = i4;
+ rc = -EAGAIN;
+ break;
+ }
}
unmap_domain_page(l3);
+ if ( rc )
+ break;
paging_free_log_dirty_page(d, l4[i4]);
+ l4[i4] = _mfn(INVALID_MFN);
+
+ if ( i4 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
+ {
+ d->arch.paging.preempt.log_dirty.i3 = 0;
+ d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
+ rc = -EAGAIN;
+ break;
+ }
}
unmap_domain_page(l4);
- paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
- d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
- ASSERT(d->arch.paging.log_dirty.allocs == 0);
- d->arch.paging.log_dirty.failed_allocs = 0;
+ if ( !rc )
+ {
+ paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
+ d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+
+ ASSERT(d->arch.paging.log_dirty.allocs == 0);
+ d->arch.paging.log_dirty.failed_allocs = 0;
+
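+ /* Report the error (if any) stashed when freeing began. */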
+ rc = -d->arch.paging.preempt.log_dirty.done;
+ d->arch.paging.preempt.dom = NULL;
+ }
+ else
+ {
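+ /* Preempted: record the initiator and op so only its continuation may resume. */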
+ d->arch.paging.preempt.dom = current->domain;
+ d->arch.paging.preempt.op = XEN_DOMCTL_SHADOW_OP_OFF;
+ }
paging_unlock(d);
+
+ return rc;
}
int paging_log_dirty_enable(struct domain *d)
return ret;
}
-int paging_log_dirty_disable(struct domain *d)
+static int paging_log_dirty_disable(struct domain *d, bool_t resuming)
{
- int ret;
+ int ret = 1;
+
+ if ( !resuming )
+ {
+ domain_pause(d);
+ /* Safe because the domain is paused. */
+ if ( paging_mode_log_dirty(d) )
+ {
+ ret = d->arch.paging.log_dirty.disable_log_dirty(d);
+ ASSERT(ret <= 0);
+ }
+ }
+
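+ /* Free (part of) the bitmap; on -EAGAIN the domain stays paused and we are re-entered later with resuming set. */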
+ ret = paging_free_log_dirty_bitmap(d, ret);
+ if ( ret == -EAGAIN )
+ return ret;
- domain_pause(d);
- /* Safe because the domain is paused. */
- ret = d->arch.paging.log_dirty.disable_log_dirty(d);
- if ( !paging_mode_log_dirty(d) )
- paging_free_log_dirty_bitmap(d);
domain_unpause(d);
return ret;
/* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN,
* clear the bitmap and stats as well. */
-int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
+static int paging_log_dirty_op(struct domain *d,
+ struct xen_domctl_shadow_op *sc,
+ bool_t resuming)
{
int rv = 0, clean = 0, peek = 1;
unsigned long pages = 0;
unsigned long *l1 = NULL;
int i4, i3, i2;
- domain_pause(d);
+ if ( !resuming )
+ domain_pause(d);
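+ /* On preemption the domain is left paused; the continuation re-enters with resuming set. */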
paging_lock(d);
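+ /* Only the caller that started a pending CLEAN/PEEK may continue it. */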
+ if ( !d->arch.paging.preempt.dom )
+ memset(&d->arch.paging.preempt.log_dirty, 0,
+ sizeof(d->arch.paging.preempt.log_dirty));
+ else if ( d->arch.paging.preempt.dom != current->domain ||
+ d->arch.paging.preempt.op != sc->op )
+ {
+ paging_unlock(d);
+ ASSERT(!resuming);
+ domain_unpause(d);
+ return -EBUSY;
+ }
+
clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
sc->stats.fault_count = d->arch.paging.log_dirty.fault_count;
sc->stats.dirty_count = d->arch.paging.log_dirty.dirty_count;
- if ( clean )
- {
- d->arch.paging.log_dirty.fault_count = 0;
- d->arch.paging.log_dirty.dirty_count = 0;
- }
-
if ( guest_handle_is_null(sc->dirty_bitmap) )
/* caller may have wanted just to clean the state or access stats. */
peek = 0;
goto out;
}
- pages = 0;
l4 = paging_map_log_dirty_bitmap(d);
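+ /* Pick up the bitmap walk (and page count) where the last preemption left off. */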
+ i4 = d->arch.paging.preempt.log_dirty.i4;
+ i3 = d->arch.paging.preempt.log_dirty.i3;
+ pages = d->arch.paging.preempt.log_dirty.done;
- for ( i4 = 0;
- (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
- i4++ )
+ for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
{
l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
- for ( i3 = 0;
- (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
- i3++ )
+ for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
{
l2 = ((l3 && mfn_valid(l3[i3])) ?
map_domain_page(mfn_x(l3[i3])) : NULL);
}
if ( l2 )
unmap_domain_page(l2);
+
+ if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
+ {
+ d->arch.paging.preempt.log_dirty.i4 = i4;
+ d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
+ rv = -EAGAIN;
+ break;
+ }
}
if ( l3 )
unmap_domain_page(l3);
+
+ if ( !rv && i4 < LOGDIRTY_NODE_ENTRIES - 1 &&
+ hypercall_preempt_check() )
+ {
+ d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
+ d->arch.paging.preempt.log_dirty.i3 = 0;
+ rv = -EAGAIN;
+ }
+ if ( rv )
+ break;
}
if ( l4 )
unmap_domain_page(l4);
- if ( pages < sc->pages )
- sc->pages = pages;
+ if ( !rv )
+ {
+ d->arch.paging.preempt.dom = NULL;
+ if ( clean )
+ {
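+ /* Resetting the counters is deferred until the whole bitmap has been read. */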
+ d->arch.paging.log_dirty.fault_count = 0;
+ d->arch.paging.log_dirty.dirty_count = 0;
+ }
+ }
+ else
+ {
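+ /* Stash ownership and progress for the continuation. */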
+ d->arch.paging.preempt.dom = current->domain;
+ d->arch.paging.preempt.op = sc->op;
+ d->arch.paging.preempt.log_dirty.done = pages;
+ }
paging_unlock(d);
+ if ( rv )
+ {
+ /* Never leave the domain paused on real errors. */
+ ASSERT(rv == -EAGAIN);
+ return rv;
+ }
+
+ if ( pages < sc->pages )
+ sc->pages = pages;
if ( clean )
{
/* We need to further call clean_dirty_bitmap() functions of specific
return rv;
out:
+ d->arch.paging.preempt.dom = NULL;
paging_unlock(d);
domain_unpause(d);
d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
}
-/* This function fress log dirty bitmap resources. */
-static void paging_log_dirty_teardown(struct domain*d)
-{
- paging_free_log_dirty_bitmap(d);
-}
-
/************************************************/
/* CODE FOR PAGING SUPPORT */
/************************************************/
int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
- XEN_GUEST_HANDLE(void) u_domctl)
+ XEN_GUEST_HANDLE(void) u_domctl, bool_t resuming)
{
int rc;
return -EINVAL;
}
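+ /*
+ * A continuation must match the pending op and its initiator; a fresh op
+ * (other than a plain GET_ALLOCATION query) is refused while another
+ * operation is still outstanding.
+ */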
+ if ( resuming
+ ? (d->arch.paging.preempt.dom != current->domain ||
+ d->arch.paging.preempt.op != sc->op)
+ : (d->arch.paging.preempt.dom &&
+ sc->op != XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION) )
+ {
+ printk(XENLOG_G_DEBUG
+ "d%d:v%d: Paging op %#x on Dom%u with unfinished prior op %#x by Dom%u\n",
+ current->domain->domain_id, current->vcpu_id,
+ sc->op, d->domain_id, d->arch.paging.preempt.op,
+ d->arch.paging.preempt.dom
+ ? d->arch.paging.preempt.dom->domain_id : DOMID_INVALID);
+ return -EBUSY;
+ }
+
rc = xsm_shadow_control(d, sc->op);
if ( rc )
return rc;
return paging_log_dirty_enable(d);
case XEN_DOMCTL_SHADOW_OP_OFF:
- if ( paging_mode_log_dirty(d) )
- if ( (rc = paging_log_dirty_disable(d)) != 0 )
- return rc;
+ if ( (rc = paging_log_dirty_disable(d, resuming)) != 0 )
+ return rc;
break;
case XEN_DOMCTL_SHADOW_OP_CLEAN:
case XEN_DOMCTL_SHADOW_OP_PEEK:
- return paging_log_dirty_op(d, sc);
+ return paging_log_dirty_op(d, sc, resuming);
}
/* Here, dispatch domctl to the appropriate paging code */
return shadow_domctl(d, sc, u_domctl);
}
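+/*
+ * Continuation handler for a preempted shadow op: re-validate the incoming
+ * domctl and re-enter paging_domctl() with resuming set.
+ */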
+long paging_domctl_continuation(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
+{
+ struct xen_domctl op;
+ struct domain *d;
+ int ret;
+
+ if ( copy_from_guest(&op, u_domctl, 1) )
+ return -EFAULT;
+
+ if ( op.interface_version != XEN_DOMCTL_INTERFACE_VERSION ||
+ op.cmd != XEN_DOMCTL_shadow_op )
+ return -EBADRQC;
+
+ d = rcu_lock_domain_by_id(op.domain);
+ if ( d == NULL )
+ return -ESRCH;
+
+ ret = xsm_domctl(d, op.cmd);
+ if ( !ret )
+ {
+ if ( domctl_lock_acquire() )
+ {
+ ret = paging_domctl(d, &op.u.shadow_op,
+ guest_handle_cast(u_domctl, void), 1);
+
+ domctl_lock_release();
+ }
+ else
+ ret = -EAGAIN;
+ }
+
+ rcu_unlock_domain(d);
+
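+ /* Still not done: queue another continuation carrying the same domctl handle. */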
+ if ( ret == -EAGAIN )
+ ret = hypercall_create_continuation(__HYPERVISOR_arch_1,
+ "h", u_domctl);
+ else if ( __copy_field_to_guest(u_domctl, &op, u.shadow_op) )
+ ret = -EFAULT;
+
+ return ret;
+}
+
/* Call when destroying a domain */
-void paging_teardown(struct domain *d)
+int paging_teardown(struct domain *d)
{
+ int rc;
+
if ( hap_enabled(d) )
hap_teardown(d);
else
shadow_teardown(d);
/* clean up log dirty resources. */
- paging_log_dirty_teardown(d);
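+ /* The bitmap is now freed piecemeal; -EAGAIN asks the caller to retry the teardown later. */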
+ rc = paging_free_log_dirty_bitmap(d, 0);
+ if ( rc == -EAGAIN )
+ return rc;
/* Move populate-on-demand cache back to domain_list for destruction */
p2m_pod_empty_cache(d);
+
+ return rc;
}
/* Call once all of the references to the domain have gone away */