Going through the code, HAP, EPT, PoD and ALTP2M all depend on HVM
code. Put these components under CONFIG_HVM. This further requires
putting one of the vm event hooks under CONFIG_HVM.
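The host p2m side of that hook stays unconditional; only the altp2m
walk is compiled out. A minimal standalone sketch of that shape (all
names here are illustrative stand-ins, not the real Xen symbols):

    #include <stdbool.h>

    #define MAX_VIEWS 10

    struct view { bool access_required; };

    static struct view host_view;

    #ifdef CONFIG_HVM
    static struct view *alt_views[MAX_VIEWS];   /* NULL = unused slot */
    static bool alt_views_active;
    #endif

    void set_access_required(bool access_required)
    {
        /* The host side is wanted for PV and HVM guests alike. */
        host_view.access_required = access_required;

    #ifdef CONFIG_HVM
        /* The walk over alternate views vanishes from !HVM builds. */
        if ( alt_views_active )
        {
            unsigned int i;

            for ( i = 0; i < MAX_VIEWS; i++ )
                if ( alt_views[i] )
                    alt_views[i]->access_required = access_required;
        }
    #endif
    }

    int main(void)
    {
        set_access_required(true);
        return 0;
    }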
Altp2m requires a bit more attention because its code is embedded in
the generic x86 p2m code.
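The trick used to carve it out can be seen in the set_mem_access()
hunk below: the HVM build keeps the altp2m branch and its trailing
"else", while the !HVM build replaces both with an assertion, leaving
the shared block to follow either way. A minimal standalone sketch of
that pattern, with stand-in names rather than the real Xen types:

    #include <assert.h>
    #include <stdio.h>

    struct p2m { int dummy; };

    static void set_access(struct p2m *hostp2m, struct p2m *ap2m)
    {
    #ifdef CONFIG_HVM
        if ( ap2m )
        {
            printf("altp2m path\n");    /* HVM-only view handling */
        }
        else
    #else
        assert(!ap2m);                  /* !HVM must never see an altp2m */
    #endif
        {
            /* Shared block: the "else" body with HVM, the fall-through
             * statement block without it. */
            printf("hostp2m path %p\n", (void *)hostp2m);
        }
    }

    int main(void)
    {
        struct p2m host = { 0 };

        set_access(&host, NULL);        /* takes the hostp2m path */
        return 0;
    }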
Also make hap_enabled evaluate to false when !CONFIG_HVM. Make sure it
still evaluates its argument, to avoid unused variable warnings in its
users.
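The !CONFIG_HVM fallback below uses a GCC/Clang statement expression
so that hap_enabled(d) is constant false yet still consumes d. A
minimal standalone sketch of why the (void) cast matters (the struct
is an illustrative stand-in); build with -Wunused-variable, with and
without -DCONFIG_HVM:

    #include <stdbool.h>
    #include <stdio.h>

    struct domain { bool hap; };        /* stand-in, not Xen's struct */

    #ifdef CONFIG_HVM
    #define hap_enabled(d) ((d)->hap)
    #else
    /* Constant false, but (void)(d) still evaluates the argument, so a
     * caller whose only use of d is this check does not warn. */
    #define hap_enabled(d) ({ (void)(d); false; })
    #endif

    int main(void)
    {
        struct domain dom = { .hap = true };
        struct domain *d = &dom;        /* only used via hap_enabled() */

        printf("hap enabled: %d\n", hap_enabled(d));
        return 0;
    }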
Also sort the items in the Makefile while at it.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tamas K Lengyel <tamas@tklengyel.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
subdir-y += shadow
-subdir-y += hap
+subdir-$(CONFIG_HVM) += hap
-obj-y += paging.o
-obj-y += p2m.o p2m-pt.o p2m-ept.o p2m-pod.o
-obj-y += altp2m.o
+obj-$(CONFIG_HVM) += altp2m.o
obj-y += guest_walk_2.o
obj-y += guest_walk_3.o
obj-y += guest_walk_4.o
+obj-$(CONFIG_MEM_ACCESS) += mem_access.o
obj-y += mem_paging.o
obj-y += mem_sharing.o
-obj-y += mem_access.o
+obj-y += p2m.o p2m-pt.o
+obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
+obj-y += paging.o
guest_walk_%.o: guest_walk.c Makefile
$(CC) $(CFLAGS) -DGUEST_PAGING_LEVELS=$* -c $< -o $@
/* Return whether vCPU pause is required (aka. sync event) */
return (p2ma != p2m_access_n2rwx);
}
-#endif
int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
                              struct p2m_domain *ap2m, p2m_access_t a,
                              gfn_t gfn)
*/
return ap2m->set_entry(ap2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);
}
+#endif
static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
                          struct p2m_domain *ap2m, p2m_access_t a,
                          gfn_t gfn)
{
int rc = 0;
+#ifdef CONFIG_HVM
if ( ap2m )
{
        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, gfn);
        /* If the corresponding mfn is invalid we will want to skip it */
        if ( rc == -ESRCH )
            rc = 0;
}
else
+#else
+ ASSERT(!ap2m);
+#endif
{
mfn_t mfn;
p2m_access_t _a;
long rc = 0;
/* altp2m view 0 is treated as the hostp2m */
+#ifdef CONFIG_HVM
if ( altp2m_idx )
{
        if ( altp2m_idx >= MAX_ALTP2M ||
             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = d->arch.altp2m_p2m[altp2m_idx];
}
+#else
+ ASSERT(!altp2m_idx);
+#endif
if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
return -EINVAL;
long rc = 0;
/* altp2m view 0 is treated as the hostp2m */
+#ifdef CONFIG_HVM
if ( altp2m_idx )
{
        if ( altp2m_idx >= MAX_ALTP2M ||
             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = d->arch.altp2m_p2m[altp2m_idx];
}
+#else
+ ASSERT(!altp2m_idx);
+#endif
p2m_lock(p2m);
if ( ap2m )
void arch_p2m_set_access_required(struct domain *d, bool access_required)
{
- unsigned int i;
-
ASSERT(atomic_read(&d->pause_count));
p2m_get_hostp2m(d)->access_required = access_required;
- if ( !altp2m_active(d) )
- return;
-
- for ( i = 0; i < MAX_ALTP2M; i++ )
+#ifdef CONFIG_HVM
+ if ( altp2m_active(d) )
{
- struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
+        unsigned int i;
+
+        for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
- if ( p2m )
- p2m->access_required = access_required;
+ if ( p2m )
+ p2m->access_required = access_required;
+ }
}
+#endif
}
#ifdef CONFIG_HVM
if ( !p2m_is_sharable(p2mt) )
goto out;
+#ifdef CONFIG_HVM
/* Check if there are mem_access/remapped altp2m entries for this page */
if ( altp2m_active(d) )
{
altp2m_list_unlock(d);
}
+#endif
/* Try to convert the mfn to the sharable type */
page = mfn_to_page(mfn);
return 0;
}
-#endif
static void p2m_teardown_altp2m(struct domain *d)
{
return 0;
}
+#endif
int p2m_init(struct domain *d)
{
p2m_teardown_hostp2m(d);
return rc;
}
-#endif
rc = p2m_init_altp2m(d);
if ( rc )
{
p2m_teardown_hostp2m(d);
-#ifdef CONFIG_HVM
p2m_teardown_nestedp2m(d);
-#endif
}
+#endif
return rc;
}
void p2m_final_teardown(struct domain *d)
{
+#ifdef CONFIG_HVM
/*
 * We must tear down both of them unconditionally because
* we initialise them unconditionally.
*/
p2m_teardown_altp2m(d);
-#ifdef CONFIG_HVM
p2m_teardown_nestedp2m(d);
#endif
}
}
-void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
-{
- if ( altp2m_active(v->domain) )
- p2m_switch_vcpu_altp2m_by_id(v, idx);
-}
-
#ifdef CONFIG_HVM
static struct p2m_domain *
p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
return i == nr ? 0 : i ?: ret;
}
+#ifdef CONFIG_HVM
+
+void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
+{
+ if ( altp2m_active(v->domain) )
+ p2m_switch_vcpu_altp2m_by_id(v, idx);
+}
+
bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
{
struct domain *d = v->domain;
return ret;
}
+#endif /* CONFIG_HVM */
/*** Audit ***/
#ifndef __ASM_X86_ALTP2M_H
#define __ASM_X86_ALTP2M_H
+#ifdef CONFIG_HVM
+
#include <xen/types.h>
#include <xen/sched.h> /* for struct vcpu, struct domain */
#include <asm/hvm/vcpu.h> /* for vcpu_altp2m */
/* Alternate p2m HVM on/off per domain */
-static inline bool_t altp2m_active(const struct domain *d)
+static inline bool altp2m_active(const struct domain *d)
{
return d->arch.altp2m_active;
}
{
return vcpu_altp2m(v).p2midx;
}
+#else
+
+static inline bool altp2m_active(const struct domain *d)
+{
+ return false;
+}
+
+/* Only declaration is needed. DCE will optimise it out when linking. */
+uint16_t altp2m_vcpu_idx(const struct vcpu *v);
+
+#endif
#endif /* __ASM_X86_ALTP2M_H */
/* nestedhvm: translate l2 guest physical to host physical */
struct p2m_domain *nested_p2m[MAX_NESTEDP2M];
mm_lock_t nested_p2m_lock;
-#endif
/* altp2m: allow multiple copies of host p2m */
bool_t altp2m_active;
struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
mm_lock_t altp2m_list_lock;
uint64_t *altp2m_eptp;
+#endif
/* NB. protected by d->event_lock and by irq_desc[irq].lock */
struct radix_tree_root irq_pirq;
};
};
+#ifdef CONFIG_HVM
#define hap_enabled(d) ((d)->arch.hvm.hap_enabled)
+#else
+#define hap_enabled(d) ({(void)(d); false;})
+#endif
#endif /* __ASM_X86_HVM_DOMAIN_H__ */
* host p2m's lock. */
int defer_nested_flush;
+#ifdef CONFIG_HVM
/* Alternate p2m: count of vcpu's currently using this p2m. */
atomic_t active_vcpus;
+#endif
/* Pages used to construct the p2m */
struct page_list_head pages;
/*
* Alternate p2m: shadow p2m tables used for alternate memory views
*/
-
+#ifdef CONFIG_HVM
/* get current alternate p2m table */
static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
{
int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
mfn_t mfn, unsigned int page_order,
p2m_type_t p2mt, p2m_access_t p2ma);
+#else
+struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
+static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx) {}
+#endif
/*
* p2m type to IOMMU flags