While there's little point in enabling both, the combination ought to at
least build correctly. Drop the direct PV_SHIM_EXCLUSIVE conditionals
and instead zap PG_log_dirty to zero under the right conditions, and key
other #ifdef-s off of that.
While at it, also expand on ded576ce07e9 ("x86/shadow: dirty VRAM
tracking is needed for HVM only"): there was yet another is_hvm_domain()
missing, and code touching the struct fields needs to be guarded by
suitable #ifdef-s as well. While at it, also guard shadow-mode-only
fields accordingly.
Fixes: 8b5b49ceb3d9 ("x86: don't include domctl and alike in shim-exclusive builds")
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
/* Per-CPU variable for enforcing the lock ordering */
DEFINE_PER_CPU(int, mm_lock_level);
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
/************************************************/
/* LOG DIRTY SUPPORT */
d->arch.paging.log_dirty.ops = ops;
}
-#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+#endif /* PG_log_dirty */
/************************************************/
/* CODE FOR PAGING SUPPORT */
shadow_vcpu_init(v);
}
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl,
bool_t resuming)
return ret;
}
-#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+#endif /* PG_log_dirty */
/* Call when destroying a domain */
int paging_teardown(struct domain *d)
if ( preempted )
return -ERESTART;
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
/* clean up log dirty resources. */
rc = paging_free_log_dirty_bitmap(d, 0);
if ( rc == -ERESTART )
* calls now that we've torn down the bitmap */
d->arch.paging.mode &= ~PG_log_dirty;
- if ( d->arch.hvm.dirty_vram )
+#ifdef CONFIG_HVM
+ if ( is_hvm_domain(d) && d->arch.hvm.dirty_vram )
{
xfree(d->arch.hvm.dirty_vram->sl1ma);
xfree(d->arch.hvm.dirty_vram->dirty_bitmap);
XFREE(d->arch.hvm.dirty_vram);
}
+#endif
out:
paging_unlock(d);
}
}
+#ifdef CONFIG_HVM
if ( unlikely(level == 1) && is_hvm_domain(d) )
{
struct sh_dirty_vram *dirty_vram = d->arch.hvm.dirty_vram;
sflags &= ~_PAGE_RW;
}
}
+#endif
/* Read-only memory */
if ( p2m_is_readonly(p2mt) )
mfn_t sl1mfn,
struct domain *d)
{
+#ifdef CONFIG_HVM
mfn_t mfn = shadow_l1e_get_mfn(new_sl1e);
int flags = shadow_l1e_get_flags(new_sl1e);
unsigned long gfn;
dirty_vram->sl1ma[i] = mfn_to_maddr(sl1mfn)
| ((unsigned long)sl1e & ~PAGE_MASK);
}
+#endif
}
static inline void shadow_vram_put_l1e(shadow_l1e_t old_sl1e,
mfn_t sl1mfn,
struct domain *d)
{
+#ifdef CONFIG_HVM
mfn_t mfn = shadow_l1e_get_mfn(old_sl1e);
int flags = shadow_l1e_get_flags(old_sl1e);
unsigned long gfn;
dirty_vram->last_dirty = NOW();
}
}
+#endif
}
static int shadow_set_l1e(struct domain *d,
#define PG_translate 0
#define PG_external 0
#endif
+#if defined(CONFIG_HVM) || !defined(CONFIG_PV_SHIM_EXCLUSIVE)
/* Enable log dirty mode */
#define PG_log_dirty (XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY << PG_mode_shift)
+#else
+#define PG_log_dirty 0
+#endif
/* All paging modes. */
#define PG_MASK (PG_refcounts | PG_log_dirty | PG_translate | PG_external)
/*****************************************************************************
* Log dirty code */
-#ifndef CONFIG_PV_SHIM_EXCLUSIVE
+#if PG_log_dirty
/* get the dirty bitmap for a specific range of pfns */
void paging_log_dirty_range(struct domain *d,
#define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \
(LOGDIRTY_NODE_ENTRIES-1))
+#ifdef CONFIG_HVM
/* VRAM dirty tracking support */
struct sh_dirty_vram {
unsigned long begin_pfn;
unsigned long end_pfn;
+#ifdef CONFIG_SHADOW_PAGING
paddr_t *sl1ma;
uint8_t *dirty_bitmap;
s_time_t last_dirty;
+#endif
};
+#endif
-#else /* !CONFIG_PV_SHIM_EXCLUSIVE */
+#else /* !PG_log_dirty */
static inline void paging_log_dirty_init(struct domain *d,
const struct log_dirty_ops *ops) {}
static inline void paging_mark_dirty(struct domain *d, mfn_t gmfn) {}
static inline void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn) {}
+static inline bool paging_mfn_is_dirty(struct domain *d, mfn_t gmfn) { return false; }
-#endif /* CONFIG_PV_SHIM_EXCLUSIVE */
+#endif /* PG_log_dirty */
/*****************************************************************************
* Entry points into the paging-assistance code */