ioreq_t *p = get_ioreq(curr);
unsigned long ram_gfn = paddr_to_pfn(ram_gpa);
p2m_type_t p2mt;
- mfn_t ram_mfn;
+ struct page_info *ram_page;
int rc;
/* Check for paged out page */
- ram_mfn = get_gfn_unshare(curr->domain, ram_gfn, &p2mt);
+ ram_page = get_page_from_gfn(curr->domain, ram_gfn, &p2mt, P2M_UNSHARE);
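+ /* A non-NULL ram_page carries a page reference; every exit path below
+  * must drop it with put_page(). */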
if ( p2m_is_paging(p2mt) )
{
- put_gfn(curr->domain, ram_gfn);
+ if ( ram_page )
+ put_page(ram_page);
p2m_mem_paging_populate(curr->domain, ram_gfn);
return X86EMUL_RETRY;
}
if ( p2m_is_shared(p2mt) )
{
- put_gfn(curr->domain, ram_gfn);
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_RETRY;
}
- /* Maintain a ref on the mfn to ensure liveness. Put the gfn
- * to avoid potential deadlock wrt event channel lock, later. */
- if ( mfn_valid(mfn_x(ram_mfn)) )
- if ( !get_page(mfn_to_page(mfn_x(ram_mfn)),
- curr->domain) )
- {
- put_gfn(curr->domain, ram_gfn);
- return X86EMUL_RETRY;
- }
- put_gfn(curr->domain, ram_gfn);
-
/*
* Weird-sized accesses have undefined behaviour: we discard writes
* and read all-ones.
*/
ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
if ( dir == IOREQ_READ )
memset(p_data, ~0, size);
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_UNHANDLEABLE;
}
unsigned int bytes = vio->mmio_large_write_bytes;
if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
{
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_OKAY;
}
}
{
memcpy(p_data, &vio->mmio_large_read[addr - pa],
size);
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_OKAY;
}
}
vio->io_state = HVMIO_none;
if ( p_data == NULL )
{
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_UNHANDLEABLE;
}
goto finish_access;
(addr == (vio->mmio_large_write_pa +
vio->mmio_large_write_bytes)) )
{
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_RETRY;
}
default:
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_UNHANDLEABLE;
}
{
gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
p->state);
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_UNHANDLEABLE;
}
if ( rc != X86EMUL_OKAY )
{
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return rc;
}
}
}
- if ( mfn_valid(mfn_x(ram_mfn)) )
- put_page(mfn_to_page(mfn_x(ram_mfn)));
+ if ( ram_page )
+ put_page(ram_page);
return X86EMUL_OKAY;
}
{
struct page_info *page;
p2m_type_t p2mt;
- unsigned long mfn;
void *va;
- mfn = mfn_x(get_gfn_unshare(d, gmfn, &p2mt));
- if ( !p2m_is_ram(p2mt) )
- {
- put_gfn(d, gmfn);
- return -EINVAL;
- }
+ page = get_page_from_gfn(d, gmfn, &p2mt, P2M_UNSHARE);
if ( p2m_is_paging(p2mt) )
{
- put_gfn(d, gmfn);
+ if ( page )
+ put_page(page);
p2m_mem_paging_populate(d, gmfn);
return -ENOENT;
}
if ( p2m_is_shared(p2mt) )
{
- put_gfn(d, gmfn);
+ if ( page )
+ put_page(page);
return -ENOENT;
}
- ASSERT(mfn_valid(mfn));
+ if ( !page )
+ return -EINVAL;
- page = mfn_to_page(mfn);
- if ( !get_page_and_type(page, d, PGT_writable_page) )
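+ /* get_page_from_gfn() already holds a general reference, so only the
+  * writable type count is taken here; it is dropped again together with
+  * that reference via put_page_and_type(). */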
+ if ( !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, gmfn);
+ put_page(page);
return -EINVAL;
}
- va = map_domain_page_global(mfn);
+ va = __map_domain_page_global(page);
if ( va == NULL )
{
put_page_and_type(page);
- put_gfn(d, gmfn);
return -ENOMEM;
}
*_va = va;
*_page = page;
- put_gfn(d, gmfn);
-
return 0;
}
int hvm_set_cr0(unsigned long value)
{
struct vcpu *v = current;
- p2m_type_t p2mt;
- unsigned long gfn, mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
+ unsigned long gfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
+ struct page_info *page;
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
{
/* The guest CR3 must be pointing to the guest physical. */
gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
- mfn = mfn_x(get_gfn(v->domain, gfn, &p2mt));
- if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
- !get_page(mfn_to_page(mfn), v->domain))
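+ /* get_page_from_gfn() takes the page reference itself; NULL means the
+  * gfn did not resolve to a page we may hold a reference on. */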
+ page = get_page_from_gfn(v->domain, gfn, NULL, P2M_ALLOC);
+ if ( !page )
{
- put_gfn(v->domain, gfn);
- gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
- v->arch.hvm_vcpu.guest_cr[3], mfn);
+ gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx\n",
+ v->arch.hvm_vcpu.guest_cr[3]);
domain_crash(v->domain);
return X86EMUL_UNHANDLEABLE;
}
/* Now arch.guest_table points to machine physical. */
- v->arch.guest_table = pagetable_from_pfn(mfn);
+ v->arch.guest_table = pagetable_from_page(page);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
- v->arch.hvm_vcpu.guest_cr[3], mfn);
- put_gfn(v->domain, gfn);
+ v->arch.hvm_vcpu.guest_cr[3], page_to_mfn(page));
}
}
else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
int hvm_set_cr3(unsigned long value)
{
- unsigned long mfn;
- p2m_type_t p2mt;
struct vcpu *v = current;
+ struct page_info *page;
if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) &&
(value != v->arch.hvm_vcpu.guest_cr[3]) )
{
/* Shadow-mode CR3 change. Check PDBR and update refcounts. */
HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
- mfn = mfn_x(get_gfn(v->domain, value >> PAGE_SHIFT, &p2mt));
- if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
- !get_page(mfn_to_page(mfn), v->domain) )
- {
- put_gfn(v->domain, value >> PAGE_SHIFT);
- goto bad_cr3;
- }
+ page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
+ goto bad_cr3;
put_page(pagetable_get_page(v->arch.guest_table));
- v->arch.guest_table = pagetable_from_pfn(mfn);
- put_gfn(v->domain, value >> PAGE_SHIFT);
+ v->arch.guest_table = pagetable_from_page(page);
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
}
static void *__hvm_map_guest_frame(unsigned long gfn, bool_t writable)
{
void *map;
- unsigned long mfn;
p2m_type_t p2mt;
- struct page_info *pg;
+ struct page_info *page;
struct domain *d = current->domain;
- int rc;
- mfn = mfn_x(writable
- ? get_gfn_unshare(d, gfn, &p2mt)
- : get_gfn(d, gfn, &p2mt));
- if ( (p2m_is_shared(p2mt) && writable) || !p2m_is_ram(p2mt) )
+ page = get_page_from_gfn(d, gfn, &p2mt,
+ writable ? P2M_UNSHARE : P2M_ALLOC);
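+ /* A successful lookup already holds a page reference, including for
+  * read-only mappings of shared (dom_cow) pages. */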
+ if ( (p2m_is_shared(p2mt) && writable) || !page )
{
- put_gfn(d, gfn);
+ if ( page )
+ put_page(page);
return NULL;
}
if ( p2m_is_paging(p2mt) )
{
- put_gfn(d, gfn);
+ put_page(page);
p2m_mem_paging_populate(d, gfn);
return NULL;
}
- ASSERT(mfn_valid(mfn));
-
if ( writable )
- paging_mark_dirty(d, mfn);
-
- /* Get a ref on the page, considering that it could be shared */
- pg = mfn_to_page(mfn);
- rc = get_page(pg, d);
- if ( !rc && !writable )
- /* Page could be shared */
- rc = get_page(pg, dom_cow);
- if ( !rc )
- {
- put_gfn(d, gfn);
- return NULL;
- }
+ paging_mark_dirty(d, page_to_mfn(page));
- map = map_domain_page(mfn);
- put_gfn(d, gfn);
+ map = __map_domain_page(page);
return map;
}
void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec)
{
struct vcpu *curr = current;
- unsigned long gfn, mfn;
+ unsigned long gfn;
+ struct page_info *page;
p2m_type_t p2mt;
char *p;
int count, todo = size;
gfn = addr >> PAGE_SHIFT;
}
- mfn = mfn_x(get_gfn_unshare(curr->domain, gfn, &p2mt));
+ page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
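+ /* Getting a page back does not rule out grant or read-only mappings,
+  * hence the p2mt checks that follow. */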
if ( p2m_is_paging(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
p2m_mem_paging_populate(curr->domain, gfn);
return HVMCOPY_gfn_paged_out;
}
if ( p2m_is_shared(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
return HVMCOPY_gfn_shared;
}
if ( p2m_is_grant(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
return HVMCOPY_unhandleable;
}
- if ( !p2m_is_ram(p2mt) )
+ if ( !page )
{
- put_gfn(curr->domain, gfn);
return HVMCOPY_bad_gfn_to_mfn;
}
- ASSERT(mfn_valid(mfn));
- p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
+ p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
if ( flags & HVMCOPY_to_guest )
{
if ( xchg(&lastpage, gfn) != gfn )
gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
" memory page. gfn=%#lx, mfn=%#lx\n",
- gfn, mfn);
+ gfn, page_to_mfn(page));
}
else
{
memcpy(p, buf, count);
- paging_mark_dirty(curr->domain, mfn);
+ paging_mark_dirty(curr->domain, page_to_mfn(page));
}
}
else
addr += count;
buf += count;
todo -= count;
- put_gfn(curr->domain, gfn);
+ put_page(page);
}
return HVMCOPY_okay;
static enum hvm_copy_result __hvm_clear(paddr_t addr, int size)
{
struct vcpu *curr = current;
- unsigned long gfn, mfn;
+ unsigned long gfn;
+ struct page_info *page;
p2m_type_t p2mt;
char *p;
int count, todo = size;
return HVMCOPY_bad_gva_to_gfn;
}
- mfn = mfn_x(get_gfn_unshare(curr->domain, gfn, &p2mt));
+ page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
if ( p2m_is_paging(p2mt) )
{
+ if ( page )
+ put_page(page);
p2m_mem_paging_populate(curr->domain, gfn);
- put_gfn(curr->domain, gfn);
return HVMCOPY_gfn_paged_out;
}
if ( p2m_is_shared(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
return HVMCOPY_gfn_shared;
}
if ( p2m_is_grant(p2mt) )
{
- put_gfn(curr->domain, gfn);
+ if ( page )
+ put_page(page);
return HVMCOPY_unhandleable;
}
- if ( !p2m_is_ram(p2mt) )
+ if ( !page )
{
- put_gfn(curr->domain, gfn);
return HVMCOPY_bad_gfn_to_mfn;
}
- ASSERT(mfn_valid(mfn));
- p = (char *)map_domain_page(mfn) + (addr & ~PAGE_MASK);
+ p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
if ( p2mt == p2m_ram_ro )
{
if ( xchg(&lastpage, gfn) != gfn )
gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
" memory page. gfn=%#lx, mfn=%#lx\n",
- gfn, mfn);
+ gfn, page_to_mfn(page));
}
else
{
memset(p, 0x00, count);
- paging_mark_dirty(curr->domain, mfn);
+ paging_mark_dirty(curr->domain, page_to_mfn(page));
}
unmap_domain_page(p);
addr += count;
todo -= count;
- put_gfn(curr->domain, gfn);
+ put_page(page);
}
return HVMCOPY_okay;
for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
{
- p2m_type_t t;
- mfn_t mfn = get_gfn_unshare(d, pfn, &t);
- if ( p2m_is_paging(t) )
+ struct page_info *page;
+ page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
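+ /* pfns that do not resolve to a referenceable RAM page (paged out,
+  * still shared, ...) are simply skipped. */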
+ if ( page )
{
- put_gfn(d, pfn);
- p2m_mem_paging_populate(d, pfn);
- rc = -EINVAL;
- goto param_fail3;
- }
- if( p2m_is_shared(t) )
- {
- /* If it insists on not unsharing itself, crash the domain
- * rather than crashing the host down in mark dirty */
- gdprintk(XENLOG_WARNING,
- "shared pfn 0x%lx modified?\n", pfn);
- domain_crash(d);
- put_gfn(d, pfn);
- rc = -EINVAL;
- goto param_fail3;
- }
-
- if ( mfn_x(mfn) != INVALID_MFN )
- {
- paging_mark_dirty(d, mfn_x(mfn));
+ paging_mark_dirty(d, page_to_mfn(page));
/* These are most probably not page tables any more */
/* don't take a long time and don't die either */
- sh_remove_shadows(d->vcpu[0], mfn, 1, 0);
+ sh_remove_shadows(d->vcpu[0], _mfn(page_to_mfn(page)), 1, 0);
+ put_page(page);
}
- put_gfn(d, pfn);
}
param_fail3:
static void enable_hypercall_page(struct domain *d)
{
unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn;
- unsigned long mfn = get_gfn_untyped(d, gmfn);
+ struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
uint8_t *p;
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+ if ( !page || !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, gmfn);
- gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
+ if ( page )
+ put_page(page);
+ gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
+ page ? page_to_mfn(page) : INVALID_MFN);
return;
}
- p = map_domain_page(mfn);
+ p = __map_domain_page(page);
/*
* We set the bit 31 in %eax (reserved field in the Viridian hypercall
unmap_domain_page(p);
- put_page_and_type(mfn_to_page(mfn));
- put_gfn(d, gmfn);
+ put_page_and_type(page);
}
void initialize_apic_assist(struct vcpu *v)
{
struct domain *d = v->domain;
unsigned long gmfn = v->arch.hvm_vcpu.viridian.apic_assist.fields.pfn;
- unsigned long mfn = get_gfn_untyped(d, gmfn);
+ struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
uint8_t *p;
/*
* details of how Windows uses the page.
*/
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
+ if ( !page || !get_page_type(page, PGT_writable_page) )
{
- put_gfn(d, gmfn);
- gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
+ if ( page )
+ put_page(page);
+ gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
+ page ? page_to_mfn(page) : INVALID_MFN);
return;
}
- p = map_domain_page(mfn);
+ p = __map_domain_page(page);
*(u32 *)p = 0;
unmap_domain_page(p);
- put_page_and_type(mfn_to_page(mfn));
- put_gfn(d, gmfn);
+ put_page_and_type(page);
}
int wrmsr_viridian_regs(uint32_t idx, uint64_t val)
static int vmx_restore_cr0_cr3(
struct vcpu *v, unsigned long cr0, unsigned long cr3)
{
- unsigned long mfn = 0;
- p2m_type_t p2mt;
+ struct page_info *page = NULL;
if ( paging_mode_shadow(v->domain) )
{
if ( cr0 & X86_CR0_PG )
{
- mfn = mfn_x(get_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
- if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
+ page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
{
- put_gfn(v->domain, cr3 >> PAGE_SHIFT);
gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%lx\n", cr3);
return -EINVAL;
}
if ( hvm_paging_enabled(v) )
put_page(pagetable_get_page(v->arch.guest_table));
- v->arch.guest_table = pagetable_from_pfn(mfn);
- if ( cr0 & X86_CR0_PG )
- put_gfn(v->domain, cr3 >> PAGE_SHIFT);
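+ /* If CR0.PG is clear no lookup was done above, so page is still NULL
+  * and the null pagetable is installed. */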
+ v->arch.guest_table =
+ page ? pagetable_from_page(page) : pagetable_null();
}
v->arch.hvm_vcpu.guest_cr[0] = cr0 | X86_CR0_ET;
static void vmx_load_pdptrs(struct vcpu *v)
{
- unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3], mfn;
+ unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
uint64_t *guest_pdptrs;
+ struct page_info *page;
p2m_type_t p2mt;
char *p;
if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
goto crash;
- mfn = mfn_x(get_gfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt));
- if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
- /* If we didn't succeed in unsharing, get_page will fail
- * (page still belongs to dom_cow) */
- !get_page(mfn_to_page(mfn), v->domain) )
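+ /* A failed unshare leaves the page owned by dom_cow, in which case the
+  * lookup returns NULL as well. */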
+ page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
+ if ( !page )
{
/* Ideally you don't want to crash but rather go into a wait
* queue, but this is the wrong place. We're holding at least
* the paging lock */
gdprintk(XENLOG_ERR,
- "Bad cr3 on load pdptrs gfn %lx mfn %lx type %d\n",
- cr3 >> PAGE_SHIFT, mfn, (int) p2mt);
- put_gfn(v->domain, cr3 >> PAGE_SHIFT);
+ "Bad cr3 on load pdptrs gfn %lx type %d\n",
+ cr3 >> PAGE_SHIFT, (int) p2mt);
goto crash;
}
- put_gfn(v->domain, cr3 >> PAGE_SHIFT);
- p = map_domain_page(mfn);
+ p = __map_domain_page(page);
guest_pdptrs = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
vmx_vmcs_exit(v);
unmap_domain_page(p);
- put_page(mfn_to_page(mfn));
+ put_page(page);
return;
crash: