/* functions which are paging mode specific */
const struct log_dirty_ops {
- int (*enable )(struct domain *d, bool log_global);
+ int (*enable )(struct domain *d);
int (*disable )(struct domain *d);
void (*clean )(struct domain *d);
} *ops;
/*
* HAP code to call when log_dirty is enabled. Returns 0 if no problem is found.
*
- * NB: Domain that having device assigned should not set log_global. Because
- * there is no way to track the memory updating from device.
+ * NB: Domains with a device assigned should not come here, because
+ * there is no way to track memory updates performed by the device.
*/
-static int cf_check hap_enable_log_dirty(struct domain *d, bool log_global)
+static int cf_check hap_enable_log_dirty(struct domain *d)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
* Refuse to turn on global log-dirty mode if
* there are outstanding p2m_ioreq_server pages.
*/
- if ( log_global && read_atomic(&p2m->ioreq.entry_count) )
+ if ( read_atomic(&p2m->ioreq.entry_count) )
return -EBUSY;
/* turn on PG_log_dirty bit in paging mode */
/* Enable hardware-assisted log-dirty if it is supported. */
p2m_enable_hardware_log_dirty(d);
- if ( log_global )
- {
- /*
- * Switch to log dirty mode, either by setting l1e entries of P2M table
- * to be read-only, or via hardware-assisted log-dirty.
- */
- p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
- guest_flush_tlb_mask(d, d->dirty_cpumask);
- }
+ /*
+ * Switch to log dirty mode, either by setting l1e entries of P2M table
+ * to be read-only, or via hardware-assisted log-dirty.
+ */
+ p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
+ guest_flush_tlb_mask(d, d->dirty_cpumask);
+
return 0;
}
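/*
 * Illustrative sketch, not part of this hunk: with the log_global
 * parameter gone, HAP's ops table (hap.c in current Xen) simply points
 * the enable hook at the single-parameter function above, alongside the
 * unchanged disable/clean hooks.
 */
static const struct log_dirty_ops hap_ops = {
    .enable  = hap_enable_log_dirty,
    .disable = hap_disable_log_dirty,
    .clean   = hap_clean_dirty_bitmap,
};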
return rc;
}
-static int paging_log_dirty_enable(struct domain *d, bool log_global)
+static int paging_log_dirty_enable(struct domain *d)
{
int ret;
- if ( has_arch_pdevs(d) && log_global )
+ if ( has_arch_pdevs(d) )
{
/*
* Refuse to turn on global log-dirty mode
return -EINVAL;
domain_pause(d);
- ret = d->arch.paging.log_dirty.ops->enable(d, log_global);
+ ret = d->arch.paging.log_dirty.ops->enable(d);
domain_unpause(d);
return ret;
break;
/* Else fall through... */
case XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
- return paging_log_dirty_enable(d, true);
+ return paging_log_dirty_enable(d);
case XEN_DOMCTL_SHADOW_OP_OFF:
if ( (rc = paging_log_dirty_disable(d, resuming)) != 0 )
DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
-static int cf_check sh_enable_log_dirty(struct domain *, bool log_global);
+static int cf_check sh_enable_log_dirty(struct domain *);
static int cf_check sh_disable_log_dirty(struct domain *);
static void cf_check sh_clean_dirty_bitmap(struct domain *);
/* Shadow-specific code which is called in paging_log_dirty_enable().
* Returns 0 if no problem is found.
*/
-static int cf_check sh_enable_log_dirty(struct domain *d, bool log_global)
+static int cf_check sh_enable_log_dirty(struct domain *d)
{
int ret;
#include <xen/mm.h>
#include <asm/shadow.h>
-static int cf_check _enable_log_dirty(struct domain *d, bool log_global)
-{
- ASSERT(is_pv_domain(d));
- return -EOPNOTSUPP;
-}
-
-static int cf_check _disable_log_dirty(struct domain *d)
+static int cf_check _toggle_log_dirty(struct domain *d)
{
ASSERT(is_pv_domain(d));
return -EOPNOTSUPP;
{
/* For HVM set up pointers for safety, then fail. */
static const struct log_dirty_ops sh_none_ops = {
- .enable = _enable_log_dirty,
- .disable = _disable_log_dirty,
+ .enable = _toggle_log_dirty,
+ .disable = _toggle_log_dirty,
.clean = _clean_dirty_bitmap,
};
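/*
 * Sketch of the surrounding registration, not shown in this excerpt:
 * shadow_domain_init() hands the table to the paging core via
 * paging_log_dirty_init(), so for PV domains both the enable and the
 * disable hook now resolve to the same -EOPNOTSUPP stub.
 */
paging_log_dirty_init(d, &sh_none_ops);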