From: Stefano Stabellini Date: Fri, 1 Jun 2012 09:20:33 +0000 (+0100) Subject: arm: shared_info page allocation and mapping X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=7d802dffd0bf0131510f8a491c722eecef027305;p=people%2Fvhanquez%2Fxen-unstable.git arm: shared_info page allocation and mapping Allocate the shared_info page at domain creation. Implement arch_memory_op, only for XENMEM_add_to_physmap with space == XENMAPSPACE_shared_info, so that the guest can map the shared_info page. Signed-off-by: Stefano Stabellini Signed-off-by: Ian Campbell Committed-by: Ian Campbell --- diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c index edaff2252a..3a726c88bf 100644 --- a/xen/arch/arm/domain.c +++ b/xen/arch/arm/domain.c @@ -192,6 +192,17 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags) if ( (rc = p2m_init(d)) != 0 ) goto fail; + if ( !is_idle_domain(d) ) + { + rc = -ENOMEM; + if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL ) + goto fail; + + clear_page(d->shared_info); + share_xen_page_with_guest( + virt_to_page(d->shared_info), d, XENSHARE_writable); + } + d->max_vcpus = 8; if ( (rc = domain_vgic_init(d)) != 0 ) diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index fa2f5ec5bf..10ff883201 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -25,8 +25,11 @@ #include #include #include +#include #include #include +#include +#include struct domain *dom_xen, *dom_io; @@ -376,17 +379,104 @@ void arch_dump_shared_mem_info(void) { } -long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg) +int donate_page(struct domain *d, struct page_info *page, unsigned int memflags) { + ASSERT(0); return -ENOSYS; } -int donate_page(struct domain *d, struct page_info *page, unsigned int memflags) +void share_xen_page_with_guest(struct page_info *page, + struct domain *d, int readonly) { - ASSERT(0); - return -ENOSYS; + if ( page_get_owner(page) == d ) + return; + + spin_lock(&d->page_alloc_lock); + + /* The incremented type 
count pins as writable or read-only. */ + page->u.inuse.type_info = (readonly ? PGT_none : PGT_writable_page); + page->u.inuse.type_info |= PGT_validated | 1; + + page_set_owner(page, d); + wmb(); /* install valid domain ptr before updating refcnt. */ + ASSERT((page->count_info & ~PGC_xen_heap) == 0); + + /* Only add to the allocation list if the domain isn't dying. */ + if ( !d->is_dying ) + { + page->count_info |= PGC_allocated | 1; + if ( unlikely(d->xenheap_pages++ == 0) ) + get_knownalive_domain(d); + page_list_add_tail(page, &d->xenpage_list); + } + + spin_unlock(&d->page_alloc_lock); +} + +static int xenmem_add_to_physmap_once( + struct domain *d, + const struct xen_add_to_physmap *xatp) +{ + unsigned long mfn = 0; + int rc; + + switch ( xatp->space ) + { + case XENMAPSPACE_shared_info: + if ( xatp->idx == 0 ) + mfn = virt_to_mfn(d->shared_info); + break; + default: + return -ENOSYS; + } + + domain_lock(d); + + /* Map at new location. */ + rc = guest_physmap_add_page(d, xatp->gpfn, mfn, 0); + + domain_unlock(d); + + return rc; +} + +static int xenmem_add_to_physmap(struct domain *d, + struct xen_add_to_physmap *xatp) +{ + return xenmem_add_to_physmap_once(d, xatp); } +long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg) +{ + int rc; + + switch ( op ) + { + case XENMEM_add_to_physmap: + { + struct xen_add_to_physmap xatp; + struct domain *d; + + if ( copy_from_guest(&xatp, arg, 1) ) + return -EFAULT; + + rc = rcu_lock_target_domain_by_id(xatp.domid, &d); + if ( rc != 0 ) + return rc; + + rc = xenmem_add_to_physmap(d, &xatp); + + rcu_unlock_domain(d); + + return rc; + } + + default: + return -ENOSYS; + } + + return 0; +} /* * Local variables: * mode: C diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c index 14614fd33c..4c94ef074a 100644 --- a/xen/arch/arm/p2m.c +++ b/xen/arch/arm/p2m.c @@ -118,7 +118,12 @@ static int create_p2m_entries(struct domain *d, } /* else: third already valid */ - BUG_ON(third[third_table_offset(addr)].p2m.valid); + if ( 
third[third_table_offset(addr)].p2m.valid )
+        {
+            /* p2m entry already present */
+            free_domheap_page(
+                    mfn_to_page(third[third_table_offset(addr)].p2m.base));
+        }

         /* Allocate a new RAM page and attach */
         if (alloc)
@@ -172,6 +177,23 @@ int map_mmio_regions(struct domain *d,
     return create_p2m_entries(d, 0, start_gaddr, end_gaddr, maddr);
 }

+int guest_physmap_add_page(struct domain *d,
+                           unsigned long gpfn,
+                           unsigned long mfn,
+                           unsigned int page_order)
+{
+    return create_p2m_entries(d, 0, gpfn << PAGE_SHIFT,
+                              (gpfn + (1 << page_order)) << PAGE_SHIFT,
+                              mfn << PAGE_SHIFT);
+}
+
+void guest_physmap_remove_page(struct domain *d,
+                               unsigned long gpfn,
+                               unsigned long mfn, unsigned int page_order)
+{
+    ASSERT(0);
+}
+
 int p2m_alloc_table(struct domain *d)
 {
     struct p2m_domain *p2m = &d->arch.p2m;
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index 1e4f108f9e..91e87e1571 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -7,6 +7,8 @@
 #ifndef __ARM_CONFIG_H__
 #define __ARM_CONFIG_H__

+#define CONFIG_PAGING_ASSISTANCE 1
+
 #define CONFIG_PAGING_LEVELS 3

 #define CONFIG_ARM 1
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index ea27e4f13e..53801b0190 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -78,6 +78,10 @@ struct page_info
 #define _PGT_pinned       PG_shift(5)
 #define PGT_pinned        PG_mask(1, 5)

+ /* Has this page been validated for use as its current type? */
+#define _PGT_validated    PG_shift(6)
+#define PGT_validated     PG_mask(1, 6)
+
 /* Count of uses of this frame as its current type. */
 #define PGT_count_width   PG_shift(9)
 #define PGT_count_mask    ((1UL<<PGT_count_width)-1)