int rc;
/* Check for paged out page */
- ram_mfn = gfn_to_mfn(current->domain, ram_gfn, &p2mt);
+ ram_mfn = gfn_to_mfn_unshare(current->domain, ram_gfn, &p2mt, 0);
if ( p2m_is_paging(p2mt) )
{
p2m_mem_paging_populate(curr->domain, ram_gfn);
return X86EMUL_RETRY;
}
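+ /* If the gfn is still shared then gfn_to_mfn_unshare() was unable to
+ * break the share, so ask the emulator to retry the access. */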
+ if ( p2m_is_shared(p2mt) )
+ return X86EMUL_RETRY;
/*
* Weird-sized accesses have undefined behaviour: we discard writes
}
else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN )
{
- if ( pfec == PFEC_page_paged )
+ if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
return X86EMUL_RETRY;
hvm_inject_exception(TRAP_page_fault, pfec, addr);
return X86EMUL_EXCEPTION;
/* Is it contiguous with the preceding PFNs? If not then we're done. */
if ( (npfn == INVALID_GFN) || (npfn != (pfn + (reverse ? -i : i))) )
{
- if ( pfec == PFEC_page_paged )
+ if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
return X86EMUL_RETRY;
done /= bytes_per_rep;
if ( done == 0 )
return hvmemul_do_mmio(gpa, &reps, bytes, 0, IOREQ_READ, 0, p_data);
case HVMCOPY_gfn_paged_out:
return X86EMUL_RETRY;
+ case HVMCOPY_gfn_shared:
+ return X86EMUL_RETRY;
default:
break;
}
IOREQ_WRITE, 0, p_data);
case HVMCOPY_gfn_paged_out:
return X86EMUL_RETRY;
+ case HVMCOPY_gfn_shared:
+ return X86EMUL_RETRY;
default:
break;
}
if ( rc == HVMCOPY_gfn_paged_out )
return X86EMUL_RETRY;
+ if ( rc == HVMCOPY_gfn_shared )
+ return X86EMUL_RETRY;
if ( rc != HVMCOPY_okay )
{
gdprintk(XENLOG_WARNING, "Failed memory-to-memory REP MOVS: sgpa=%"
unsigned long mfn;
void *va;
- mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt));
+ mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn, &p2mt, 0));
if ( !p2m_is_ram(p2mt) )
return -EINVAL;
if ( p2m_is_paging(p2mt) )
p2m_mem_paging_populate(d, gmfn);
return -ENOENT;
}
+ if ( p2m_is_shared(p2mt) )
+ return -ENOENT;
ASSERT(mfn_valid(mfn));
page = mfn_to_page(mfn);
* we still treat it as a kernel-mode read (i.e. no access checks). */
pfec = PFEC_page_present;
gfn = paging_gva_to_gfn(current, va, &pfec);
- if ( pfec == PFEC_page_paged )
+ if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
return NULL;
mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
if ( p2m_is_paging(p2mt) )
goto out;
if ( rc == HVMCOPY_gfn_paged_out )
goto out;
+ if ( rc == HVMCOPY_gfn_shared )
+ goto out;
eflags = regs->eflags;
if ( taskswitch_reason == TSW_iret )
goto out;
if ( rc == HVMCOPY_gfn_paged_out )
goto out;
+ if ( rc == HVMCOPY_gfn_shared )
+ goto out;
rc = hvm_copy_from_guest_virt(
&tss, tr.base, sizeof(tss), PFEC_page_present);
goto out;
if ( rc == HVMCOPY_gfn_paged_out )
goto out;
+ /* Note: this could be optimised if the callee functions knew we only
+ * want read-only access. */
+ if ( rc == HVMCOPY_gfn_shared )
+ goto out;
+
if ( hvm_set_cr3(tss.cr3) )
goto out;
exn_raised = 1;
if ( rc == HVMCOPY_gfn_paged_out )
goto out;
+ if ( rc == HVMCOPY_gfn_shared )
+ goto out;
if ( (tss.trace & 1) && !exn_raised )
hvm_inject_exception(TRAP_debug, tss_sel & 0xfff8, 0);
{
if ( pfec == PFEC_page_paged )
return HVMCOPY_gfn_paged_out;
+ if ( pfec == PFEC_page_shared )
+ return HVMCOPY_gfn_shared;
if ( flags & HVMCOPY_fault )
hvm_inject_exception(TRAP_page_fault, pfec, addr);
return HVMCOPY_bad_gva_to_gfn;
gfn = addr >> PAGE_SHIFT;
}
- mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
+ mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
if ( p2m_is_paging(p2mt) )
{
p2m_mem_paging_populate(curr->domain, gfn);
return HVMCOPY_gfn_paged_out;
}
+ if ( p2m_is_shared(p2mt) )
+ return HVMCOPY_gfn_shared;
if ( p2m_is_grant(p2mt) )
return HVMCOPY_unhandleable;
if ( !p2m_is_ram(p2mt) )
{
for ( i = 0; i < p->count; i++ )
{
+ int ret;
+
rc = read_handler(v, p->addr + (sign * i * p->size), p->size,
&data);
if ( rc != X86EMUL_OKAY )
break;
- if ( hvm_copy_to_guest_phys(p->data + (sign * i * p->size), &data,
- p->size) == HVMCOPY_gfn_paged_out )
+ ret = hvm_copy_to_guest_phys(p->data + (sign * i * p->size),
+ &data,
+ p->size);
+ if ( (ret == HVMCOPY_gfn_paged_out) ||
+ (ret == HVMCOPY_gfn_shared) )
{
rc = X86EMUL_RETRY;
break;
{
for ( i = 0; i < p->count; i++ )
{
- if ( hvm_copy_from_guest_phys(&data,
- p->data + (sign * i * p->size),
- p->size) == HVMCOPY_gfn_paged_out )
+ int ret;
+
+ ret = hvm_copy_from_guest_phys(&data,
+ p->data + (sign * i * p->size),
+ p->size);
+ if ( (ret == HVMCOPY_gfn_paged_out) ||
+ (ret == HVMCOPY_gfn_shared) )
{
rc = X86EMUL_RETRY;
break;
if ( p->data_is_ptr )
{
- if ( hvm_copy_to_guest_phys(p->data + (sign * i * p->size), &data,
- p->size) == HVMCOPY_gfn_paged_out )
+ int ret;
+ ret = hvm_copy_to_guest_phys(p->data + (sign * i * p->size), &data,
+ p->size);
+ if ( (ret == HVMCOPY_gfn_paged_out) ||
+ (ret == HVMCOPY_gfn_shared) )
return X86EMUL_RETRY;
}
else
data = p->data;
if ( p->data_is_ptr )
{
- if ( hvm_copy_from_guest_phys(&data, p->data + (sign * i * p->size),
- p->size) == HVMCOPY_gfn_paged_out )
+ int ret;
+
+ ret = hvm_copy_from_guest_phys(&data,
+ p->data + (sign * i * p->size),
+ p->size);
+ if ( (ret == HVMCOPY_gfn_paged_out) ||
+ (ret == HVMCOPY_gfn_shared) )
return X86EMUL_RETRY;
}
return 0;
}
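+/* Map the page at the given gfn in domain d, attempting to unshare it
+ * first.  If the page is paged out, still shared, or not backed by RAM,
+ * return NULL and record the corresponding walk flag in *rc. */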
+static inline void *map_domain_gfn(struct domain *d,
+ gfn_t gfn,
+ mfn_t *mfn,
+ p2m_type_t *p2mt,
+ uint32_t *rc)
+{
+ /* Translate the gfn, unsharing if shared */
+ *mfn = gfn_to_mfn_unshare(d, gfn_x(gfn), p2mt, 0);
+ if ( p2m_is_paging(*p2mt) )
+ {
+ p2m_mem_paging_populate(d, gfn_x(gfn));
+
+ *rc = _PAGE_PAGED;
+ return NULL;
+ }
+ if ( p2m_is_shared(*p2mt) )
+ {
+ *rc = _PAGE_SHARED;
+ return NULL;
+ }
+ if ( !p2m_is_ram(*p2mt) )
+ {
+ *rc |= _PAGE_PRESENT;
+ return NULL;
+ }
+ ASSERT(mfn_valid(mfn_x(*mfn)));
+
+ return map_domain_page(mfn_x(*mfn));
+}
+
/* Walk the guest pagetables, after the manner of a hardware walker. */
uint32_t
if ( rc & _PAGE_PRESENT ) goto out;
/* Map the l3 table */
- gw->l3mfn = gfn_to_mfn(d, guest_l4e_get_gfn(gw->l4e), &p2mt);
- if ( p2m_is_paging(p2mt) )
- {
- p2m_mem_paging_populate(d, gfn_x(guest_l4e_get_gfn(gw->l4e)));
-
- rc = _PAGE_PAGED;
- goto out;
- }
- if ( !p2m_is_ram(p2mt) )
- {
- rc |= _PAGE_PRESENT;
+ l3p = map_domain_gfn(d,
+ guest_l4e_get_gfn(gw->l4e),
+ &gw->l3mfn,
+ &p2mt,
+ &rc);
+ if ( l3p == NULL )
goto out;
- }
- ASSERT(mfn_valid(mfn_x(gw->l3mfn)));
-
/* Get the l3e and check its flags*/
- l3p = map_domain_page(mfn_x(gw->l3mfn));
gw->l3e = l3p[guest_l3_table_offset(va)];
gflags = guest_l3e_get_flags(gw->l3e) ^ _PAGE_NX_BIT;
rc |= ((gflags & mflags) ^ mflags);
#endif /* PAE or 64... */
/* Map the l2 table */
- gw->l2mfn = gfn_to_mfn(d, guest_l3e_get_gfn(gw->l3e), &p2mt);
- if ( p2m_is_paging(p2mt) )
- {
- p2m_mem_paging_populate(d, gfn_x(guest_l3e_get_gfn(gw->l3e)));
-
- rc = _PAGE_PAGED;
- goto out;
- }
- if ( !p2m_is_ram(p2mt) )
- {
- rc |= _PAGE_PRESENT;
+ l2p = map_domain_gfn(d,
+ guest_l3e_get_gfn(gw->l3e),
+ &gw->l2mfn,
+ &p2mt,
+ &rc);
+ if ( l2p == NULL )
goto out;
- }
- ASSERT(mfn_valid(mfn_x(gw->l2mfn)));
-
/* Get the l2e */
- l2p = map_domain_page(mfn_x(gw->l2mfn));
gw->l2e = l2p[guest_l2_table_offset(va)];
#else /* 32-bit only... */
else
{
/* Not a superpage: carry on and find the l1e. */
- gw->l1mfn = gfn_to_mfn(d, guest_l2e_get_gfn(gw->l2e), &p2mt);
- if ( p2m_is_paging(p2mt) )
- {
- p2m_mem_paging_populate(d, gfn_x(guest_l2e_get_gfn(gw->l2e)));
-
- rc = _PAGE_PAGED;
+ l1p = map_domain_gfn(d,
+ guest_l2e_get_gfn(gw->l2e),
+ &gw->l1mfn,
+ &p2mt,
+ &rc);
+ if ( l1p == NULL )
goto out;
- }
- if ( !p2m_is_ram(p2mt) )
- {
- rc |= _PAGE_PRESENT;
- goto out;
- }
- ASSERT(mfn_valid(mfn_x(gw->l1mfn)));
- l1p = map_domain_page(mfn_x(gw->l1mfn));
gw->l1e = l1p[guest_l1_table_offset(va)];
gflags = guest_l1e_get_flags(gw->l1e) ^ _PAGE_NX_BIT;
rc |= ((gflags & mflags) ^ mflags);
#if GUEST_PAGING_LEVELS <= CONFIG_PAGING_LEVELS
#include <asm/guest_pt.h>
+#include <asm/p2m.h>
unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
struct vcpu *v, unsigned long gva, uint32_t *pfec)
/* Get the top-level table's MFN */
cr3 = v->arch.hvm_vcpu.guest_cr[3];
- top_mfn = gfn_to_mfn(v->domain, _gfn(cr3 >> PAGE_SHIFT), &p2mt);
+ top_mfn = gfn_to_mfn_unshare(v->domain, cr3 >> PAGE_SHIFT, &p2mt, 0);
if ( p2m_is_paging(p2mt) )
{
// if ( p2m_is_paged(p2mt) )
pfec[0] = PFEC_page_paged;
return INVALID_GFN;
}
+ if ( p2m_is_shared(p2mt) )
+ {
+ pfec[0] = PFEC_page_shared;
+ return INVALID_GFN;
+ }
if ( !p2m_is_ram(p2mt) )
{
pfec[0] &= ~PFEC_page_present;
if ( missing == 0 )
{
gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
- gfn_to_mfn(v->domain, gfn, &p2mt);
+ gfn_to_mfn_unshare(v->domain, gfn_x(gfn), &p2mt, 0);
if ( p2m_is_paging(p2mt) )
{
// if ( p2m_is_paged(p2mt) )
pfec[0] = PFEC_page_paged;
return INVALID_GFN;
}
+ if ( p2m_is_shared(p2mt) )
+ {
+ pfec[0] = PFEC_page_shared;
+ return INVALID_GFN;
+ }
return gfn_x(gfn);
}
if ( missing & _PAGE_PAGED )
pfec[0] = PFEC_page_paged;
+ if ( missing & _PAGE_SHARED )
+ pfec[0] = PFEC_page_shared;
+
return INVALID_GFN;
}
HVMCOPY_bad_gfn_to_mfn,
HVMCOPY_unhandleable,
HVMCOPY_gfn_paged_out,
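+ /* The copy touched a gfn that could not be unshared; callers retry. */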
+ HVMCOPY_gfn_shared,
};
/*
#define _PAGE_AVAIL 0xE00U
#define _PAGE_PSE_PAT 0x1000U
#define _PAGE_PAGED 0x2000U
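+/* Software-defined flag (like _PAGE_PAGED above): the guest pagetable
+ * walker found a pagetable gfn that is currently shared. */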
+#define _PAGE_SHARED 0x4000U
/*
* Debug option: Ensure that granted mappings are not implicitly unmapped.
#define PFEC_reserved_bit (1U<<3)
#define PFEC_insn_fetch (1U<<4)
#define PFEC_page_paged (1U<<5)
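+/* Software-defined error code (like PFEC_page_paged above): the
+ * translation failed because a gfn on the walk is shared. */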
+#define PFEC_page_shared (1U<<6)
#ifndef __ASSEMBLY__