Disable the mem_sharing subsystem by default, as it is only experimental.
Signed-off-by: Tamas K Lengyel <tamas@tklengyel.com>
Acked-by: Daniel De Graaf <dgdegra@tycho.nsa.gov>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
select HAS_KEXEC
select MEM_ACCESS_ALWAYS_ON
select HAS_MEM_PAGING
- select HAS_MEM_SHARING
select HAS_NS16550
select HAS_PASSTHROUGH
select HAS_PCI
firmware, and will not function correctly in other scenarios.
If unsure, say N.
+
+config MEM_SHARING
+ bool "Xen memory sharing support" if EXPERT = "y"
+ depends on HVM
+
endmenu
source "common/Kconfig"
d->arch.auto_unmask = 0;
}
+#ifdef CONFIG_MEM_SHARING
PROGRESS(shared):
    if ( is_hvm_domain(d) )
    {
        /* If the domain has shared pages, relinquish them allowing
         * for preemption. */
        ret = relinquish_shared_pages(d);
        if ( ret )
            return ret;
    }
+#endif
spin_lock(&d->page_alloc_lock);
page_list_splice(&d->arch.relmem_list, &d->page_list);
break;
}
+#ifdef CONFIG_MEM_SHARING
case XEN_DOMCTL_mem_sharing_op:
ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op);
break;
+#endif
#if P2M_AUDIT && defined(CONFIG_HVM)
case XEN_DOMCTL_audit_p2m:
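For context, the XEN_DOMCTL_mem_sharing_op case above is what the toolstack reaches when switching sharing on for a domain. A hedged toolstack-side sketch using libxc's existing xc_memshr_control() wrapper; with MEM_SHARING compiled out, the case is absent and the op falls through to the default handling:

    /* Sketch, assuming libxc from the same tree; issues
     * XEN_DOMCTL_mem_sharing_op to enable sharing for a domain. */
    #include <xenctrl.h>

    static int enable_mem_sharing(xc_interface *xch, uint32_t domid)
    {
        return xc_memshr_control(xch, domid, 1); /* 1 = enable */
    }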
obj-$(CONFIG_SHADOW_PAGING) += guest_walk_2.o guest_walk_3.o guest_walk_4.o
obj-$(CONFIG_MEM_ACCESS) += mem_access.o
obj-y += mem_paging.o
-obj-y += mem_sharing.o
+obj-$(CONFIG_MEM_SHARING) += mem_sharing.o
obj-y += p2m.o p2m-pt.o
obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
obj-y += paging.o
case XENMEM_paging_op:
return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
+#ifdef CONFIG_MEM_SHARING
case XENMEM_sharing_op:
return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
+#endif
default:
rc = -ENOSYS;
case XENMEM_paging_op:
return mem_paging_memop(guest_handle_cast(arg, xen_mem_paging_op_t));
+#ifdef CONFIG_MEM_SHARING
case XENMEM_sharing_op:
return mem_sharing_memop(guest_handle_cast(arg, xen_mem_sharing_op_t));
+#endif
default:
rc = -ENOSYS;
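The hunk above and the identical one before it guard the same XENMEM_sharing_op dispatch on the native and compat memop paths respectively. A hedged sketch of what a toolstack sends down this path: nominating one gfn in each of two domains, then deduplicating them (libxc wrappers from the same tree; domids and gfns are placeholders):

    /* Sketch: make gfn1 of d1 and gfn2 of d2 map one shared, read-only
     * physical page; each call lands in mem_sharing_memop(). */
    #include <xenctrl.h>

    static int dedup_pages(xc_interface *xch,
                           uint32_t d1, unsigned long gfn1,
                           uint32_t d2, unsigned long gfn2)
    {
        uint64_t h1, h2;
        int rc = xc_memshr_nominate_gfn(xch, d1, gfn1, &h1);

        if ( !rc )
            rc = xc_memshr_nominate_gfn(xch, d2, gfn2, &h2);
        if ( !rc )
            rc = xc_memshr_share_gfns(xch, d1, gfn1, h1, d2, gfn2, h2);

        return rc;
    }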
config HAS_MEM_PAGING
bool
-config HAS_MEM_SHARING
- bool
-
config HAS_PDX
bool
/* Private domain structs for DOMID_XEN, DOMID_IO, etc. */
struct domain *__read_mostly dom_xen;
struct domain *__read_mostly dom_io;
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
struct domain *__read_mostly dom_cow;
#endif
if ( IS_ERR(dom_io) )
panic("Failed to create d[IO]: %ld\n", PTR_ERR(dom_io));
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
/*
* Initialise our COW domain.
* This domain owns sharable pages.
xfree(d->vm_event_paging);
#endif
xfree(d->vm_event_monitor);
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
xfree(d->vm_event_share);
#endif
v->maptrack_tail = MAPTRACK_TAIL;
}
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
gfn_t *gfn, uint16_t *status)
{
return -EAGAIN;
}
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
if ( (q & P2M_UNSHARE) && p2m_is_shared(p2mt) )
{
if ( page )
vm_event_resume(v->domain, v->domain->vm_event_monitor);
}
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
/* Registered with Xen-bound event channel for incoming notifications. */
static void mem_sharing_notification(struct vcpu *v, unsigned int port)
{
destroy_waitqueue_head(&d->vm_event_monitor->wq);
(void)vm_event_disable(d, &d->vm_event_monitor);
}
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
if ( vm_event_check_ring(d->vm_event_share) )
{
destroy_waitqueue_head(&d->vm_event_share->wq);
}
break;
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
case XEN_DOMCTL_VM_EVENT_OP_SHARING:
{
rc = -EINVAL;
#include <public/domctl.h>
#include <public/memory.h>
+#ifdef CONFIG_MEM_SHARING
+
/* Auditing of memory sharing code? */
#ifndef NDEBUG
#define MEM_SHARING_AUDIT 1
*/
int relinquish_shared_pages(struct domain *d);
+#else
+
+static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
+{
+    return 0;
+}
+
+static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
+{
+    return 0;
+}
+
+static inline int mem_sharing_unshare_page(struct domain *d,
+                                           unsigned long gfn,
+                                           uint16_t flags)
+{
+    ASSERT_UNREACHABLE();
+    return -EOPNOTSUPP;
+}
+
+static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
+                                            bool allow_sleep)
+{
+    ASSERT_UNREACHABLE();
+    return -EOPNOTSUPP;
+}
+
+#endif
+
#endif /* __MEM_SHARING_H__ */
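The stubs in the #else branch above exist so common code can call into the sharing interface without repeating #ifdef CONFIG_MEM_SHARING at every call site. A hedged sketch of the resulting pattern (handle_unshare() is hypothetical, modelled on the p2m unshare/notify sequence elsewhere in this patch):

    /* Hypothetical caller: compiles both ways. With MEM_SHARING=n the
     * stub returns -EOPNOTSUPP (and asserts unreachable in debug
     * builds), so no conditional compilation is needed here. */
    static int handle_unshare(struct domain *d, unsigned long gfn)
    {
        int rc = mem_sharing_unshare_page(d, gfn, 0);

        /* A real failure may mean ENOMEM: tell the sharing ring. */
        if ( rc < 0 && rc != -EOPNOTSUPP )
            mem_sharing_notify_enomem(d, gfn, true);

        return rc;
    }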
/* For non-pinnable single-page shadows, a higher entry that points
* at us. */
paddr_t up;
+
+#ifdef CONFIG_MEM_SHARING
/* For shared/sharable pages, we use a doubly-linked list
* of all the {pfn,domain} pairs that map this page. We also include
* an opaque handle, which is effectively a version, so that clients
* of sharing share the version they expect to.
* This list is allocated and freed when a page is shared/unshared.
*/
struct page_sharing_info *sharing;
+#endif
};
/* Reference count and various PGC_xxx flags and fields. */
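For reference, the sharing pointer above refers to the per-page tracking structure declared in mem_sharing.h. A rough sketch of its shape; the exact field list is an assumption of this note, not part of the patch:

    /* Sketch of struct page_sharing_info; fields hedged. */
    struct page_sharing_info
    {
        struct page_info *pg;     /* Back pointer to the shared page. */
        shr_handle_t handle;      /* Globally unique version / handle. */
    #if MEM_SHARING_AUDIT
        struct list_head entry;   /* List of all shared pages (entry). */
        struct rcu_head rcu_head; /* For deferred freeing. */
    #endif
        struct list_head gfns;    /* {domain,gfn} pairs mapping it (head). */
    };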
/* Private domain structs for DOMID_XEN, DOMID_IO, etc. */
extern struct domain *dom_xen, *dom_io;
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
extern struct domain *dom_cow;
#else
# define dom_cow NULL
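The NULL fallback keeps dom_cow usable in common code when sharing is compiled out; a hedged sketch of the pattern this enables (page_is_shared() is hypothetical):

    /* With MEM_SHARING=n, dom_cow is constant NULL, so this ownership
     * test folds to false at compile time and the branch is elided. */
    static inline bool page_is_shared(struct page_info *pg)
    {
        return page_get_owner(pg) == dom_cow;
    }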
/* Various vm_events */
/* Memory sharing support */
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
struct vm_event_domain *vm_event_share;
#endif
/* Memory paging support */
}
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
static XSM_INLINE int xsm_mem_sharing(XSM_DEFAULT_ARG struct domain *d)
{
XSM_ASSERT_ACTION(XSM_DM_PRIV);
int (*mem_paging) (struct domain *d);
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
int (*mem_sharing) (struct domain *d);
#endif
}
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
static inline int xsm_mem_sharing (xsm_default_t def, struct domain *d)
{
return xsm_ops->mem_sharing(d);
set_to_dummy_if_null(ops, mem_paging);
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
set_to_dummy_if_null(ops, mem_sharing);
#endif
}
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
static int flask_mem_sharing(struct domain *d)
{
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__MEM_SHARING);
.mem_paging = flask_mem_paging,
#endif
-#ifdef CONFIG_HAS_MEM_SHARING
+#ifdef CONFIG_MEM_SHARING
.mem_sharing = flask_mem_sharing,
#endif