if ( cpu_has_vmx_virt_exceptions )
{
- p2m_type_t t;
- mfn_t mfn;
+ const struct page_info *pg = vcpu_altp2m(v).veinfo_pg;
- mfn = get_gfn_query_unlocked(d, gfn_x(vcpu_altp2m(v).veinfo_gfn), &t);
-
- if ( !mfn_eq(mfn, INVALID_MFN) )
+ if ( pg )
{
- __vmwrite(VIRT_EXCEPTION_INFO, mfn_x(mfn) << PAGE_SHIFT);
+ __vmwrite(VIRT_EXCEPTION_INFO, page_to_maddr(pg));
/*
* Make sure we have an up-to-date EPTP_INDEX when
* setting SECONDARY_EXEC_ENABLE_VIRT_EXCEPTIONS.
static bool_t vmx_vcpu_emulate_ve(struct vcpu *v)
{
- bool_t rc = 0, writable;
- gfn_t gfn = vcpu_altp2m(v).veinfo_gfn;
+ const struct page_info *pg = vcpu_altp2m(v).veinfo_pg;
ve_info_t *veinfo;
+ bool rc = false;
- if ( gfn_eq(gfn, INVALID_GFN) )
- return 0;
+ if ( !pg )
+ return rc;
- veinfo = hvm_map_guest_frame_rw(gfn_x(gfn), 0, &writable);
- if ( !veinfo )
- return 0;
- if ( !writable || veinfo->semaphore != 0 )
- goto out;
+ veinfo = __map_domain_page(pg);
- rc = 1;
+ if ( veinfo->semaphore != 0 )
+ goto out;
+ rc = true;
veinfo->exit_reason = EXIT_REASON_EPT_VIOLATION;
veinfo->semaphore = ~0;
veinfo->eptp_index = vcpu_altp2m(v).p2midx;
X86_EVENT_NO_EC);
out:
- hvm_unmap_guest_frame(veinfo, 0);
+ unmap_domain_page(veinfo);
+
+ if ( rc )
+ paging_mark_dirty(v->domain, page_to_mfn(pg));
+
return rc;
}
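
(For reference: vmx_vcpu_emulate_ve() above fills in the per-vcpu #VE information area, whose layout is fixed by the Intel SDM; the diff only touches exit_reason, semaphore and eptp_index, so the remaining fields below are taken from the SDM and are illustrative rather than copied from Xen's ve_info_t definition.)

#include <stdint.h>

/* Sketch of the #VE information area layout per the Intel SDM (one per vcpu). */
struct ve_info_area {
    uint32_t exit_reason;        /* EXIT_REASON_EPT_VIOLATION when emulated above */
    uint32_t semaphore;          /* non-zero suppresses further #VE delivery */
    uint64_t exit_qualification; /* as for an equivalent EPT-violation VM exit */
    uint64_t gla;                /* faulting guest linear address */
    uint64_t gpa;                /* faulting guest physical address */
    uint16_t eptp_index;         /* altp2m view active at the time of the fault */
};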
vcpu_pause(v);
vcpu_altp2m(v).p2midx = 0;
- vcpu_altp2m(v).veinfo_gfn = INVALID_GFN;
atomic_inc(&p2m_get_altp2m(v)->active_vcpus);
altp2m_vcpu_update_p2m(v);
int altp2m_vcpu_enable_ve(struct vcpu *v, gfn_t gfn)
{
+ struct domain *d = v->domain;
+ struct altp2mvcpu *a = &vcpu_altp2m(v);
p2m_type_t p2mt;
+ struct page_info *pg;
+ int rc;
+
+ /* Early exit path if #VE is already configured. */
+ if ( a->veinfo_pg )
+ return -EEXIST;
+
+ rc = check_get_page_from_gfn(d, gfn, false, &p2mt, &pg);
+ if ( rc )
+ return rc;
+
+ /*
+ * Looking for a plain piece of guest writeable RAM which isn't a magic
+ * frame such as a grant/ioreq/shared_info/etc mapping. We (ab)use the
+ * pageable() predicate for this, due to it having the same properties
+ * that we want.
+ */
+ if ( !p2m_is_pageable(p2mt) || is_xen_heap_page(pg) )
+ {
+ rc = -EINVAL;
+ goto err;
+ }
- if ( !gfn_eq(vcpu_altp2m(v).veinfo_gfn, INVALID_GFN) ||
- mfn_eq(get_gfn_query_unlocked(v->domain, gfn_x(gfn), &p2mt),
- INVALID_MFN) )
- return -EINVAL;
+ /*
+ * Update veinfo_pg, making sure to be safe with concurrent hypercalls.
+ * The first caller to make veinfo_pg become non-NULL will program its MFN
+ * into the VMCS, so it must not be clobbered. Callers which lose the race
+ * back off with -EEXIST.
+ */
+ if ( cmpxchg(&a->veinfo_pg, NULL, pg) != NULL )
+ {
+ rc = -EEXIST;
+ goto err;
+ }
- vcpu_altp2m(v).veinfo_gfn = gfn;
altp2m_vcpu_update_vmfunc_ve(v);
return 0;
+
+ err:
+ put_page(pg);
+
+ return rc;
}
void altp2m_vcpu_disable_ve(struct vcpu *v)
{
- if ( !gfn_eq(vcpu_altp2m(v).veinfo_gfn, INVALID_GFN) )
+ struct altp2mvcpu *a = &vcpu_altp2m(v);
+ struct page_info *pg;
+
+ /*
+ * Update veinfo_pg, making sure to be safe with concurrent hypercalls.
+ * The winner of this race is responsible for updating the VMCS so it no
+ * longer points at the page, and for dropping the associated reference.
+ */
+ if ( (pg = xchg(&a->veinfo_pg, NULL)) )
{
- vcpu_altp2m(v).veinfo_gfn = INVALID_GFN;
altp2m_vcpu_update_vmfunc_ve(v);
+
+ put_page(pg);
}
}
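
(The enable/disable pair above uses a lock-free publish/teardown pattern rather than a lock: cmpxchg() lets exactly one hypercall make veinfo_pg non-NULL, with losers backing off via -EEXIST and dropping the reference they took, while xchg() on disable guarantees exactly one caller observes the page and drops the reference. A stand-alone sketch of the same idea, using C11 atomics and hypothetical names (struct resource, resource_put(), publish(), unpublish()) rather than Xen's primitives:)

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct resource;                          /* hypothetical refcounted object */
extern void resource_put(struct resource *r);

static _Atomic(struct resource *) owner;  /* at most one published object */

/* Publish r: exactly one concurrent caller wins; losers drop their reference. */
static bool publish(struct resource *r)
{
    struct resource *expected = NULL;

    if ( !atomic_compare_exchange_strong(&owner, &expected, r) )
    {
        resource_put(r);                  /* lost the race */
        return false;
    }

    return true;                          /* reference now owned by 'owner' */
}

/* Tear down: exactly one concurrent caller sees the pointer and drops it. */
static void unpublish(void)
{
    struct resource *r = atomic_exchange(&owner, NULL);

    if ( r )
        resource_put(r);
}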
#define vcpu_nestedhvm(v) ((v)->arch.hvm.nvcpu)
struct altp2mvcpu {
+ /*
+ * #VE information page. This pointer being non-NULL indicates that a
+ * VMCS's VIRT_EXCEPTION_INFO field is pointing to the page, and an extra
+ * page reference is held.
+ */
+ struct page_info *veinfo_pg;
uint16_t p2midx; /* alternate p2m index */
- gfn_t veinfo_gfn; /* #VE information page gfn */
};
#define vcpu_altp2m(v) ((v)->arch.hvm.avcpu)