/* On non-NULL return, we leave this function holding an additional
* ref on the underlying mfn, if any */
-static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
+static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable,
+ bool_t permanent)
{
void *map;
p2m_type_t p2mt;
if ( writable )
paging_mark_dirty(d, page_to_mfn(page));
- map = __map_domain_page(page);
+ if ( !permanent )
+ return __map_domain_page(page);
+
+ map = __map_domain_page_global(page);
+ if ( !map )
+ put_page(page);
+
return map;
}
-void *hvm_map_guest_frame_rw(unsigned long gfn)
+void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent)
{
- return __hvm_map_guest_frame(gfn, 1);
+ return __hvm_map_guest_frame(gfn, 1, permanent);
}
-void *hvm_map_guest_frame_ro(unsigned long gfn)
+void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent)
{
- return __hvm_map_guest_frame(gfn, 0);
+ return __hvm_map_guest_frame(gfn, 0, permanent);
}
-void hvm_unmap_guest_frame(void *p)
+void hvm_unmap_guest_frame(void *p, bool_t permanent)
{
- if ( p )
- {
- unsigned long mfn = domain_page_map_to_mfn(p);
+ unsigned long mfn;
+
+ if ( !p )
+ return;
+
+ mfn = domain_page_map_to_mfn(p);
+
+ if ( !permanent )
unmap_domain_page(p);
- put_page(mfn_to_page(mfn));
- }
+ else
+ unmap_domain_page_global(p);
+
+ put_page(mfn_to_page(mfn));
}
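
A minimal usage sketch (not part of the patch, assuming only the hvm.h declarations changed below): it shows the intended calling convention for the new flag, i.e. a transient mapping is taken and dropped within one function with permanent=0, while a permanent (global) mapping may be cached and released later on a different path with permanent=1, matching the flag on unmap. The variable and function names here are hypothetical.

/* Hypothetical cached pointer, e.g. analogous to nv_vvmcx or msrbitmap. */
static void *cached_map;

static int example_cache_frame(unsigned long gfn)
{
    /* Permanent (global) mapping: may be stashed and used on other paths. */
    cached_map = hvm_map_guest_frame_rw(gfn, 1);
    return cached_map != NULL;
}

static void example_transient_use(unsigned long gfn)
{
    /* Transient mapping: mapped and unmapped within a single function. */
    void *p = hvm_map_guest_frame_ro(gfn, 0);

    if ( p == NULL )
        return;
    /* ... inspect the frame ... */
    hvm_unmap_guest_frame(p, 0);
}

static void example_release_cached(void)
{
    /* Release the permanent mapping; the flag must match the map call. */
    hvm_unmap_guest_frame(cached_map, 1);
    cached_map = NULL;
}
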
static void *hvm_map_entry(unsigned long va)
if ( (pfec == PFEC_page_paged) || (pfec == PFEC_page_shared) )
goto fail;
- v = hvm_map_guest_frame_rw(gfn);
+ v = hvm_map_guest_frame_rw(gfn, 0);
if ( v == NULL )
goto fail;
static void hvm_unmap_entry(void *p)
{
- hvm_unmap_guest_frame(p);
+ hvm_unmap_guest_frame(p, 0);
}
static int hvm_load_segment_selector(
nv->nv_ioport80 = 0;
nv->nv_ioportED = 0;
- if (nv->nv_vvmcx)
- hvm_unmap_guest_frame(nv->nv_vvmcx);
+ hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
nv->nv_vvmcx = NULL;
nv->nv_vvmcxaddr = VMCX_EADDR;
nv->nv_flushp2m = 0;
struct nestedvcpu *nv = &vcpu_nestedhvm(v);
if (nv->nv_vvmcx != NULL && nv->nv_vvmcxaddr != vmcbaddr) {
- ASSERT(nv->nv_vvmcx != NULL);
ASSERT(nv->nv_vvmcxaddr != VMCX_EADDR);
- hvm_unmap_guest_frame(nv->nv_vvmcx);
+ hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
nv->nv_vvmcx = NULL;
nv->nv_vvmcxaddr = VMCX_EADDR;
}
if (nv->nv_vvmcx == NULL) {
- nv->nv_vvmcx = hvm_map_guest_frame_rw(vmcbaddr >> PAGE_SHIFT);
+ nv->nv_vvmcx = hvm_map_guest_frame_rw(vmcbaddr >> PAGE_SHIFT, 1);
if (nv->nv_vvmcx == NULL)
return 0;
nv->nv_vvmcxaddr = vmcbaddr;
get_order_from_bytes(MSRPM_SIZE));
svm->ns_merged_msrpm = NULL;
}
+ hvm_unmap_guest_frame(nv->nv_vvmcx, 1);
+ nv->nv_vvmcx = NULL;
if (nv->nv_n2vmcx) {
free_vmcb(nv->nv_n2vmcx);
nv->nv_n2vmcx = NULL;
svm->ns_oiomap_pa = svm->ns_iomap_pa;
svm->ns_iomap_pa = ns_vmcb->_iopm_base_pa;
- ns_viomap = hvm_map_guest_frame_ro(svm->ns_iomap_pa >> PAGE_SHIFT);
+ ns_viomap = hvm_map_guest_frame_ro(svm->ns_iomap_pa >> PAGE_SHIFT, 0);
ASSERT(ns_viomap != NULL);
ioport_80 = test_bit(0x80, ns_viomap);
ioport_ed = test_bit(0xed, ns_viomap);
- hvm_unmap_guest_frame(ns_viomap);
+ hvm_unmap_guest_frame(ns_viomap, 0);
svm->ns_iomap = nestedhvm_vcpu_iomap_get(ioport_80, ioport_ed);
break;
}
- io_bitmap = hvm_map_guest_frame_ro(gfn);
+ io_bitmap = hvm_map_guest_frame_ro(gfn, 0);
if (io_bitmap == NULL) {
gdprintk(XENLOG_ERR,
"IOIO intercept: mapping of permission map failed\n");
}
enabled = test_bit(port, io_bitmap);
- hvm_unmap_guest_frame(io_bitmap);
+ hvm_unmap_guest_frame(io_bitmap, 0);
if (!enabled)
return NESTEDHVM_VMEXIT_HOST;
static void nvmx_update_apic_access_address(struct vcpu *v)
{
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
- u64 apic_gpfn, apic_mfn;
u32 ctrl;
- void *apic_va;
ctrl = __n2_secondary_exec_control(v);
if ( ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES )
{
+ p2m_type_t p2mt;
+ unsigned long apic_gpfn;
+ struct page_info *apic_pg;
+
apic_gpfn = __get_vvmcs(nvcpu->nv_vvmcx, APIC_ACCESS_ADDR) >> PAGE_SHIFT;
- apic_va = hvm_map_guest_frame_ro(apic_gpfn);
- apic_mfn = virt_to_mfn(apic_va);
- __vmwrite(APIC_ACCESS_ADDR, (apic_mfn << PAGE_SHIFT));
- hvm_unmap_guest_frame(apic_va);
+ apic_pg = get_page_from_gfn(v->domain, apic_gpfn, &p2mt, P2M_ALLOC);
+ ASSERT(apic_pg && !p2m_is_paging(p2mt));
+ __vmwrite(APIC_ACCESS_ADDR, page_to_maddr(apic_pg));
+ put_page(apic_pg);
}
else
__vmwrite(APIC_ACCESS_ADDR, 0);
static void nvmx_update_virtual_apic_address(struct vcpu *v)
{
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
- u64 vapic_gpfn, vapic_mfn;
u32 ctrl;
- void *vapic_va;
ctrl = __n2_exec_control(v);
if ( ctrl & CPU_BASED_TPR_SHADOW )
{
+ p2m_type_t p2mt;
+ unsigned long vapic_gpfn;
+ struct page_info *vapic_pg;
+
vapic_gpfn = __get_vvmcs(nvcpu->nv_vvmcx, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT;
- vapic_va = hvm_map_guest_frame_ro(vapic_gpfn);
- vapic_mfn = virt_to_mfn(vapic_va);
- __vmwrite(VIRTUAL_APIC_PAGE_ADDR, (vapic_mfn << PAGE_SHIFT));
- hvm_unmap_guest_frame(vapic_va);
+ vapic_pg = get_page_from_gfn(v->domain, vapic_gpfn, &p2mt, P2M_ALLOC);
+ ASSERT(vapic_pg && !p2m_is_paging(p2mt));
+ __vmwrite(VIRTUAL_APIC_PAGE_ADDR, page_to_maddr(vapic_pg));
+ put_page(vapic_pg);
}
else
__vmwrite(VIRTUAL_APIC_PAGE_ADDR, 0);
unsigned long gpa;
if ( nvmx->msrbitmap )
- hvm_unmap_guest_frame (nvmx->msrbitmap);
+ hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, MSR_BITMAP);
- nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT);
+ nvmx->msrbitmap = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
}
static void __map_io_bitmap(struct vcpu *v, u64 vmcs_reg)
index = vmcs_reg == IO_BITMAP_A ? 0 : 1;
if (nvmx->iobitmap[index])
- hvm_unmap_guest_frame (nvmx->iobitmap[index]);
+ hvm_unmap_guest_frame(nvmx->iobitmap[index], 1);
gpa = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx, vmcs_reg);
- nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT);
+ nvmx->iobitmap[index] = hvm_map_guest_frame_ro(gpa >> PAGE_SHIFT, 1);
}
static inline void map_io_bitmap_all(struct vcpu *v)
__clear_current_vvmcs(v);
if ( nvcpu->nv_vvmcxaddr != VMCX_EADDR )
- hvm_unmap_guest_frame(nvcpu->nv_vvmcx);
+ hvm_unmap_guest_frame(nvcpu->nv_vvmcx, 1);
nvcpu->nv_vvmcx = NULL;
nvcpu->nv_vvmcxaddr = VMCX_EADDR;
for (i=0; i<2; i++) {
if ( nvmx->iobitmap[i] ) {
- hvm_unmap_guest_frame(nvmx->iobitmap[i]);
+ hvm_unmap_guest_frame(nvmx->iobitmap[i], 1);
nvmx->iobitmap[i] = NULL;
}
}
if ( nvmx->msrbitmap ) {
- hvm_unmap_guest_frame(nvmx->msrbitmap);
+ hvm_unmap_guest_frame(nvmx->msrbitmap, 1);
nvmx->msrbitmap = NULL;
}
}
if ( nvcpu->nv_vvmcxaddr == VMCX_EADDR )
{
- nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
+ nvcpu->nv_vvmcx = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 1);
nvcpu->nv_vvmcxaddr = gpa;
map_io_bitmap_all (v);
__map_msr_bitmap(v);
else
{
/* Even if this VMCS isn't the current one, we must clear it. */
- vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT);
+ vvmcs = hvm_map_guest_frame_rw(gpa >> PAGE_SHIFT, 0);
if ( vvmcs )
__set_vvmcs(vvmcs, NVMX_LAUNCH_STATE, 0);
- hvm_unmap_guest_frame(vvmcs);
+ hvm_unmap_guest_frame(vvmcs, 0);
}
vmreturn(regs, VMSUCCEED);
unsigned int addr_size,
unsigned long *linear_addr);
-void *hvm_map_guest_frame_rw(unsigned long gfn);
-void *hvm_map_guest_frame_ro(unsigned long gfn);
-void hvm_unmap_guest_frame(void *p);
+void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent);
+void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
+void hvm_unmap_guest_frame(void *p, bool_t permanent);
static inline void hvm_set_info_guest(struct vcpu *v)
{