Allocate the shared_info page at domain creation.
Implement arch_memory_op, handling only XENMEM_add_to_physmap with space ==
XENMAPSPACE_shared_info, so that the guest can map the shared_info page.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Committed-by: Ian Campbell <ian.campbell@citrix.com>
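
For reference, a guest would typically exercise this new path roughly as
sketched below. This is a minimal illustration, not part of the patch: the
map_shared_info helper is hypothetical, the HYPERVISOR_memory_op wrapper and
header paths follow the usual Linux-style guest layout, and the choice of
gpfn is entirely up to the guest.

    #include <xen/interface/xen.h>
    #include <xen/interface/memory.h>

    /* Hypothetical guest-side helper: ask Xen to install the domain's
     * shared_info frame at the given guest pfn. */
    static int map_shared_info(unsigned long gpfn)
    {
        struct xen_add_to_physmap xatp = {
            .domid = DOMID_SELF,              /* operate on ourselves        */
            .space = XENMAPSPACE_shared_info, /* the only space handled here */
            .idx   = 0,                       /* shared_info has one frame   */
            .gpfn  = gpfn,                    /* where the guest wants it    */
        };

        /* arch_memory_op() resolves this to the shared_info mfn and calls
         * guest_physmap_add_page() to install it in the p2m. */
        return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
    }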
    if ( (rc = p2m_init(d)) != 0 )
        goto fail;
+    if ( !is_idle_domain(d) )
+    {
+        rc = -ENOMEM;
+        if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL )
+            goto fail;
+
+        clear_page(d->shared_info);
+        share_xen_page_with_guest(
+            virt_to_page(d->shared_info), d, XENSHARE_writable);
+    }
+
    d->max_vcpus = 8;
    if ( (rc = domain_vgic_init(d)) != 0 )
#include <xen/mm.h>
#include <xen/preempt.h>
#include <xen/errno.h>
+#include <xen/guest_access.h>
#include <asm/page.h>
#include <asm/current.h>
+#include <public/memory.h>
+#include <xen/sched.h>
struct domain *dom_xen, *dom_io;
{
}
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+int donate_page(struct domain *d, struct page_info *page, unsigned int memflags)
{
+    ASSERT(0);
    return -ENOSYS;
}
-int donate_page(struct domain *d, struct page_info *page, unsigned int memflags)
+void share_xen_page_with_guest(struct page_info *page,
+                               struct domain *d, int readonly)
{
-    ASSERT(0);
-    return -ENOSYS;
+    if ( page_get_owner(page) == d )
+        return;
+
+    spin_lock(&d->page_alloc_lock);
+
+    /* The incremented type count pins as writable or read-only. */
+    page->u.inuse.type_info = (readonly ? PGT_none : PGT_writable_page);
+    page->u.inuse.type_info |= PGT_validated | 1;
+
+    page_set_owner(page, d);
+    wmb(); /* install valid domain ptr before updating refcnt. */
+    ASSERT((page->count_info & ~PGC_xen_heap) == 0);
+
+    /* Only add to the allocation list if the domain isn't dying. */
+    if ( !d->is_dying )
+    {
+        page->count_info |= PGC_allocated | 1;
+        if ( unlikely(d->xenheap_pages++ == 0) )
+            get_knownalive_domain(d);
+        page_list_add_tail(page, &d->xenpage_list);
+    }
+
+    spin_unlock(&d->page_alloc_lock);
+}
+
+
+static int xenmem_add_to_physmap_once(
+    struct domain *d,
+    const struct xen_add_to_physmap *xatp)
+{
+    unsigned long mfn = 0;
+    int rc;
+
+    switch ( xatp->space )
+    {
+    case XENMAPSPACE_shared_info:
+        if ( xatp->idx == 0 )
+            mfn = virt_to_mfn(d->shared_info);
+        break;
+    default:
+        return -ENOSYS;
+    }
+
+    domain_lock(d);
+
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp->gpfn, mfn, 0);
+
+    domain_unlock(d);
+
+    return rc;
+}
+
+static int xenmem_add_to_physmap(struct domain *d,
+                                 struct xen_add_to_physmap *xatp)
+{
+    return xenmem_add_to_physmap_once(d, xatp);
}
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+    int rc;
+
+    switch ( op )
+    {
+    case XENMEM_add_to_physmap:
+    {
+        struct xen_add_to_physmap xatp;
+        struct domain *d;
+
+        if ( copy_from_guest(&xatp, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        rc = xenmem_add_to_physmap(d, &xatp);
+
+        rcu_unlock_domain(d);
+
+        return rc;
+    }
+
+    default:
+        return -ENOSYS;
+    }
+
+    return 0;
+}
/*
 * Local variables:
 * mode: C
        }
        /* else: third already valid */
-        BUG_ON(third[third_table_offset(addr)].p2m.valid);
+        if ( third[third_table_offset(addr)].p2m.valid )
+        {
+            /* p2m entry already present */
+            free_domheap_page(
+                    mfn_to_page(third[third_table_offset(addr)].p2m.base));
+        }
        /* Allocate a new RAM page and attach */
        if (alloc)
    return create_p2m_entries(d, 0, start_gaddr, end_gaddr, maddr);
}
+int guest_physmap_add_page(struct domain *d,
+                           unsigned long gpfn,
+                           unsigned long mfn,
+                           unsigned int page_order)
+{
+    return create_p2m_entries(d, 0, gpfn << PAGE_SHIFT,
+                              (gpfn + (1<<page_order)) << PAGE_SHIFT,
+                              mfn << PAGE_SHIFT);
+}
+
+void guest_physmap_remove_page(struct domain *d,
+                               unsigned long gpfn,
+                               unsigned long mfn, unsigned int page_order)
+{
+    ASSERT(0);
+}
+
int p2m_alloc_table(struct domain *d)
{
    struct p2m_domain *p2m = &d->arch.p2m;
#ifndef __ARM_CONFIG_H__
#define __ARM_CONFIG_H__
+#define CONFIG_PAGING_ASSISTANCE 1
+
#define CONFIG_PAGING_LEVELS 3
#define CONFIG_ARM 1
#define _PGT_pinned PG_shift(5)
#define PGT_pinned PG_mask(1, 5)
+ /* Has this page been validated for use as its current type? */
+#define _PGT_validated PG_shift(6)
+#define PGT_validated PG_mask(1, 6)
+
/* Count of uses of this frame as its current type. */
#define PGT_count_width PG_shift(9)
#define PGT_count_mask ((1UL<<PGT_count_width)-1)
int map_mmio_regions(struct domain *d, paddr_t start_gaddr,
                     paddr_t end_gaddr, paddr_t maddr);
+/* Untyped version for RAM only, for compatibility */
+int guest_physmap_add_page(struct domain *d,
+                           unsigned long gfn,
+                           unsigned long mfn,
+                           unsigned int page_order);
+void guest_physmap_remove_page(struct domain *d,
+                               unsigned long gpfn,
+                               unsigned long mfn, unsigned int page_order);
+
unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn);
/*
#ifndef _XEN_PAGING_H
#define _XEN_PAGING_H
+#define paging_mode_translate(d) (0)
+#define paging_mode_external(d) (0)
+
#endif /* XEN_PAGING_H */
/*