Use the Kconfig-generated CONFIG_HAS_MEM_PAGING define in the code base.
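
For context, a minimal sketch of how the Kconfig symbol is expected to reach
C code once an architecture selects HAS_MEM_PAGING. This assumes the
kconfig-generated autoconf.h is force-included by the build (as Xen's build
does); the header path and comment wording below are illustrative, not part
of this patch:

    /* generated/autoconf.h, emitted by kconfig when the symbol is enabled: */
    #define CONFIG_HAS_MEM_PAGING 1

    /* C code then tests the generated define directly, replacing the old
     * Makefile-provided -DHAS_MEM_PAGING flag: */
    #ifdef CONFIG_HAS_MEM_PAGING
        /* mem paging specific handling is compiled in */
    #endif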
Signed-off-by: Doug Goldstein <cardoe@cardoe.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
CONFIG_MIGRATE := y
CONFIG_XCUTILS := y
-HAS_MEM_PAGING := y
HAS_MEM_SHARING := y
CFLAGS += -m32 -march=i686
CONFIG_MIGRATE := y
CONFIG_XCUTILS := y
-HAS_MEM_PAGING := y
HAS_MEM_SHARING := y
CONFIG_XEN_INSTALL_SUFFIX := .gz
CFLAGS-$(perfc) += -DPERF_COUNTERS
CFLAGS-$(perfc_arrays) += -DPERF_ARRAYS
CFLAGS-$(lock_profile) += -DLOCK_PROFILE
-CFLAGS-$(HAS_MEM_PAGING) += -DHAS_MEM_PAGING
CFLAGS-$(HAS_MEM_SHARING) += -DHAS_MEM_SHARING
CFLAGS-$(frame_pointer) += -fno-omit-frame-pointer -DCONFIG_FRAME_POINTER
select HAS_IOPORTS
select HAS_KEXEC
select HAS_MEM_ACCESS
+ select HAS_MEM_PAGING
select HAS_NS16550
select HAS_PASSTHROUGH
select HAS_PCI
config HAS_MEM_ACCESS
bool
+# Select HAS_MEM_PAGING if mem paging is supported
+config HAS_MEM_PAGING
+ bool
+
# Select HAS_PDX if PDX is supported
config HAS_PDX
bool
page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE);
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
if ( p2m_is_paging(p2mt) )
{
if ( page )
break;
#endif
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
case VM_EVENT_REASON_MEM_PAGING:
p2m_mem_paging_resume(d, &rsp);
break;
return vm_event_grab_slot(ved, (current->domain != d));
}
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
/* Registered with Xen-bound event channel for incoming notifications. */
static void mem_paging_notification(struct vcpu *v, unsigned int port)
{
/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d)
{
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
if ( d->vm_event->paging.ring_page )
{
/* Destroying the wait queue head means waking up all
switch ( vec->mode )
{
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
case XEN_DOMCTL_VM_EVENT_OP_PAGING:
{
struct vm_event_domain *ved = &d->vm_event->paging;
}
#endif
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
static XSM_INLINE int xsm_mem_paging(XSM_DEFAULT_ARG struct domain *d)
{
XSM_ASSERT_ACTION(XSM_DM_PRIV);
int (*mem_access) (struct domain *d);
#endif
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
int (*mem_paging) (struct domain *d);
#endif
}
#endif
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
static inline int xsm_mem_paging (xsm_default_t def, struct domain *d)
{
return xsm_ops->mem_paging(d);
set_to_dummy_if_null(ops, mem_access);
#endif
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
set_to_dummy_if_null(ops, mem_paging);
#endif
}
#endif
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
static int flask_mem_paging(struct domain *d)
{
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_PAGING);
.mem_access = flask_mem_access,
#endif
-#ifdef HAS_MEM_PAGING
+#ifdef CONFIG_HAS_MEM_PAGING
.mem_paging = flask_mem_paging,
#endif