when an MFN is shared. However, all existing callers either can infer the GFN
(for example, the p2m table destructor) or do not need to know the GFN for
shared pages. This patch identifies and fixes all the M2P accessors, either by
removing the translation altogether or by making the relevant modifications.
Shared MFNs have a special value, SHARED_M2P_ENTRY, stored in their M2P table
slot.
Signed-off-by: Grzegorz Milos <Grzegorz.Milos@citrix.com>
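For illustration only (not part of the patch), the pattern the fixed M2P
accessors converge on looks roughly like this; use_gfn() is a hypothetical
stand-in for whatever the caller does with the translation:

    /* Sketch, assuming the SHARED_M2P* macros introduced by this patch. */
    unsigned long gfn = get_gpfn_from_mfn(mfn);

    if ( SHARED_M2P(gfn) )
        /* A shared MFN has no single reverse translation. */
        return;                 /* or BUG_ON() where sharing is unsupported */

    if ( VALID_M2P(gfn) )
        use_gfn(gfn);           /* hypothetical consumer of the GFN */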
/* Fill vMCE# injection and vMCE# MSR virtualization
 * related data */
bank->mc_domid = result->owner;
+ /* XXX: Cannot handle shared pages yet
+ * (this should identify all domains and the GFNs mapping to
+ * the MFN in question) */
+ BUG_ON(result->owner == DOMID_COW);
if ( result->owner != DOMID_XEN ) {
d = get_domain_by_id(result->owner);
gfn =
page_list_for_each ( page, &d->page_list )
{
mfn = page_to_mfn(page);
+ BUG_ON(SHARED_M2P(get_gpfn_from_mfn(mfn)));
if ( get_gpfn_from_mfn(mfn) >= count )
{
BUG_ON(is_pv_32bit_domain(d));
gmfn = mfn_to_gmfn(owner, page_to_mfn(page));
ASSERT(VALID_M2P(gmfn));
- shadow_remove_all_shadows(owner->vcpu[0], _mfn(gmfn));
+ /* Page sharing not supported for shadowed domains */
+ if ( !SHARED_M2P(gmfn) )
+ shadow_remove_all_shadows(owner->vcpu[0], _mfn(gmfn));
}
if ( !(type & PGT_partial) )
spin_unlock(&d->grant_table->lock);
break;
case XENMAPSPACE_gmfn:
- xatp.idx = gmfn_to_mfn(d, xatp.idx);
+ {
+ p2m_type_t p2mt;
+
+ xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt, 0));
+ /* If the page is still shared, exit early */
+ if ( p2m_is_shared(p2mt) )
+ {
+ rcu_unlock_domain(d);
+ return -ENOMEM;
+ }
if ( !get_page_from_pagenr(xatp.idx, d) )
break;
mfn = xatp.idx;
page = mfn_to_page(mfn);
break;
+ }
default:
break;
}
/* Unmap from old location, if any. */
gpfn = get_gpfn_from_mfn(mfn);
+ ASSERT(gpfn != SHARED_M2P_ENTRY);
if ( gpfn != INVALID_M2P_ENTRY )
guest_physmap_remove_page(d, gpfn, mfn, 0);
goto out;
}
+ /* Update m2p entry to SHARED_M2P_ENTRY */
+ set_gpfn_from_mfn(mfn_x(mfn), SHARED_M2P_ENTRY);
+
ret = 0;
out:
printk("Could not change p2m type.\n");
BUG();
}
+ /* Update m2p entry */
+ set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), gfn);
return 0;
}
{
mfn = page_to_mfn(page);
gfn = get_gpfn_from_mfn(mfn_x(mfn));
+ /* Pages should not be shared this early */
+ ASSERT(gfn != SHARED_M2P_ENTRY);
page_count++;
if (
#ifdef __x86_64__
continue;
}
+ if ( gfn == SHARED_M2P_ENTRY )
+ {
+ P2M_PRINTK("shared mfn (%lx) on domain page list!\n",
+ mfn);
+ continue;
+ }
+
p2mfn = gfn_to_mfn_type_foreign(d, gfn, &type, p2m_query);
if ( mfn_x(p2mfn) != mfn )
{
for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++)
{
m2pfn = get_gpfn_from_mfn(mfn+i1);
- if ( m2pfn != (gfn + i1) )
+ /* Allow shared M2Ps */
+ if ( (m2pfn != (gfn + i1)) &&
+ (m2pfn != SHARED_M2P_ENTRY) )
{
pmbad++;
P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
m2pfn = get_gpfn_from_mfn(mfn);
if ( m2pfn != gfn &&
type != p2m_mmio_direct &&
- !p2m_is_grant(type) )
+ !p2m_is_grant(type) &&
+ !p2m_is_shared(type) )
{
pmbad++;
printk("mismatch: gfn %#lx -> mfn %#lx"
l1_pgentry_t *l1e;
l2_pgentry_t *l2e;
mfn_t l1mfn, l2mfn;
- int i1, i2;
+ unsigned long i1, i2, i3;
l3_pgentry_t *l3e;
- int i3;
#if CONFIG_PAGING_LEVELS == 4
l4_pgentry_t *l4e;
- int i4;
+ unsigned long i4;
#endif /* CONFIG_PAGING_LEVELS == 4 */
BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
if ( p2m_flags_to_type(flags) != ot )
continue;
mfn = l2e_get_pfn(l2e[i2]);
- gfn = get_gpfn_from_mfn(mfn);
+ /* Do not use get_gpfn_from_mfn because it may return
+ * SHARED_M2P_ENTRY */
+ gfn = (i2 + (i3 + (i4 * L3_PAGETABLE_ENTRIES))
+ * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
flags = p2m_type_to_flags(nt);
l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
paging_write_p2m_entry(d, gfn, (l1_pgentry_t *)&l2e[i2],
if ( p2m_flags_to_type(flags) != ot )
continue;
mfn = l1e_get_pfn(l1e[i1]);
- gfn = get_gpfn_from_mfn(mfn);
+ gfn = i1 + (i2 + (i3 + (i4 * L3_PAGETABLE_ENTRIES))
+ * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
/* create a new 1le entry with the new type */
flags = p2m_type_to_flags(nt);
l1e_content = l1e_from_pfn(mfn, flags);
/* We /really/ mean PFN here, even for non-translated guests. */
pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+ /* Shared MFNs should NEVER be marked dirty */
+ BUG_ON(SHARED_M2P(pfn));
/*
* Values with the MSB set denote MFNs that aren't really part of the
return;
gfn = mfn_to_gfn(d, mfn);
+ /* Page sharing not supported on shadow PTs */
+ BUG_ON(SHARED_M2P(gfn));
if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
{
return;
gfn = mfn_to_gfn(d, mfn);
+ /* Page sharing not supported on shadow PTs */
+ BUG_ON(SHARED_M2P(gfn));
if ( (gfn >= dirty_vram->begin_pfn) && (gfn < dirty_vram->end_pfn) )
{
/* We /really/ mean PFN here, even for non-translated guests. */
pfn = get_gpfn_from_mfn(mfn_x(gmfn));
+ /* Page sharing not supported for shadow domains */
+ BUG_ON(SHARED_M2P(pfn));
if ( unlikely(!VALID_M2P(pfn)) )
return 0;
break;
case 3: /* Read CR3 */
+ {
+ unsigned long mfn;
+
if ( !is_pv_32on64_vcpu(v) )
+ {
+ mfn = pagetable_get_pfn(v->arch.guest_table);
*reg = xen_pfn_to_cr3(mfn_to_gmfn(
- v->domain, pagetable_get_pfn(v->arch.guest_table)));
+ v->domain, mfn));
+ }
#ifdef CONFIG_COMPAT
else
+ {
+ mfn = l4e_get_pfn(*(l4_pgentry_t *)
+ __va(pagetable_get_paddr(v->arch.guest_table)));
*reg = compat_pfn_to_cr3(mfn_to_gmfn(
- v->domain, l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)))));
+ v->domain, mfn));
+ }
#endif
- break;
+ /* PTs should not be shared */
+ BUG_ON(page_get_owner(mfn_to_page(mfn)) == dom_cow);
+ }
+ break;
case 4: /* Read CR4 */
*reg = v->arch.guest_context.ctrlreg[4];
info->tot_pages = d->tot_pages;
info->max_pages = d->max_pages;
info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);
+ BUG_ON(SHARED_M2P(info->shared_info_frame));
memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
for ( i = 0; i < op.nr_frames; i++ )
{
gmfn = gnttab_shared_gmfn(d, d->grant_table, i);
+ /* Grant tables cannot be shared */
+ BUG_ON(SHARED_M2P(gmfn));
(void)copy_to_guest_offset(op.frame_list, i, &gmfn, 1);
}
#include <xen/tmem.h>
#include <asm/current.h>
#include <asm/hardirq.h>
+#include <asm/p2m.h>
#include <xen/numa.h>
#include <public/memory.h>
#include <xsm/xsm.h>
int guest_remove_page(struct domain *d, unsigned long gmfn)
{
struct page_info *page;
+ p2m_type_t p2mt;
unsigned long mfn;
- mfn = gmfn_to_mfn(d, gmfn);
+ mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt));
if ( unlikely(!mfn_valid(mfn)) )
{
gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
}
page = mfn_to_page(mfn);
+ /* If gmfn is shared, just drop the guest reference (which may or may not
+ * free the page) */
+ if ( p2m_is_shared(p2mt) )
+ {
+ put_page_and_type(page);
+ guest_physmap_remove_page(d, gmfn, mfn, 0);
+ return 1;
+ }
+
if ( unlikely(!get_page(page, d)) )
{
gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
for ( k = 0; k < (1UL << exch.in.extent_order); k++ )
{
- mfn = gmfn_to_mfn(d, gmfn + k);
+ p2m_type_t p2mt;
+
+ /* Shared pages cannot be exchanged */
+ mfn = mfn_x(gfn_to_mfn_unshare(d, gmfn + k, &p2mt, 0));
+ if ( p2m_is_shared(p2mt) )
+ {
+ rc = -ENOMEM;
+ goto fail;
+ }
if ( unlikely(!mfn_valid(mfn)) )
{
rc = -EINVAL;
/* Destroy final reference to each input page. */
while ( (page = page_list_remove_head(&in_chunk_list)) )
{
+ unsigned long gfn;
+
if ( !test_and_clear_bit(_PGC_allocated, &page->count_info) )
BUG();
mfn = page_to_mfn(page);
- guest_physmap_remove_page(d, mfn_to_gmfn(d, mfn), mfn, 0);
+ gfn = mfn_to_gmfn(d, mfn);
+ /* Pages were unshared above */
+ BUG_ON(SHARED_M2P(gfn));
+ guest_physmap_remove_page(d, gfn, mfn, 0);
put_page(page);
}
if ( is_hvm_domain(d) ||
(page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
{
+ BUG_ON(SHARED_M2P(mfn_to_gmfn(d, page_to_mfn(page))));
rc = hd->platform_ops->map_page(
d, mfn_to_gmfn(d, page_to_mfn(page)), page_to_mfn(page));
if (rc)
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
#define INVALID_M2P_ENTRY (~0UL)
#define VALID_M2P(_e) (!((_e) & (1UL<<(BITS_PER_LONG-1))))
+#define SHARED_M2P_ENTRY (~0UL - 1UL)
+#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY)
#ifdef CONFIG_COMPAT
#define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
-#define set_gpfn_from_mfn(mfn, pfn) \
+#define set_gpfn_from_mfn(mfn, pfn) ({ \
+ struct domain *d = page_get_owner(__mfn_to_page(mfn)); \
+ unsigned long entry = (d && (d == dom_cow)) ? \
+ SHARED_M2P_ENTRY : (pfn); \
((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
- (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(pfn))), \
- machine_to_phys_mapping[(mfn)] = (pfn))
+ (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(entry))), \
+ machine_to_phys_mapping[(mfn)] = (entry)); \
+ })
#else
-#define set_gpfn_from_mfn(mfn, pfn) (machine_to_phys_mapping[(mfn)] = (pfn))
+#define set_gpfn_from_mfn(mfn, pfn) ({ \
+ struct domain *d = page_get_owner(__mfn_to_page(mfn)); \
+ if ( d && (d == dom_cow) ) \
+ machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY; \
+ else \
+ machine_to_phys_mapping[(mfn)] = (pfn); \
+ })
#endif
#define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)])
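Usage note (illustrative, not from the patch): for a page owned by dom_cow the
updated set_gpfn_from_mfn() ignores the pfn argument and stores the sentinel,
so a subsequent lookup reports the page as shared:

    /* Sketch, assuming mfn refers to a page whose owner is dom_cow. */
    set_gpfn_from_mfn(mfn, pfn);                 /* stores SHARED_M2P_ENTRY */
    ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));  /* lookup sees the sentinel */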