From: Vijaya Kumar K
Date: Fri, 7 Apr 2017 10:04:14 +0000 (+0200)
Subject: boot allocator: use arch helper for virt_to_mfn on DIRECTMAP_VIRT region
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=c6fdc9696a6a6eac59bf9c81121d1f1cd5b88dcd;p=people%2Froyger%2Fxen.git

boot allocator: use arch helper for virt_to_mfn on DIRECTMAP_VIRT region

On ARM platforms with NUMA, while initializing the second memory node,
a panic is triggered from init_node_heap() when virt_to_mfn() is called
on a DIRECTMAP_VIRT region address, because the DIRECTMAP_VIRT region
is not mapped.

The virt_to_mfn() check here is used to find out whether the maximum
MFN is part of the direct mapping. The maximum MFN is obtained by
calling virt_to_mfn() on the end address of the DIRECTMAP_VIRT region,
i.e. DIRECTMAP_VIRT_END.

On ARM64, all RAM is currently direct mapped in Xen, and virt_to_mfn()
uses the hardware for address translation, so a translation fault is
raised if the virtual address is not mapped.

This patch introduces the arch helper arch_mfn_in_directmap() and calls
it instead of virt_to_mfn().

On ARM64 this helper returns true, because currently all RAM is direct
mapped in Xen. On ARM32, only a limited amount of RAM, called the
xenheap, is always mapped, and the DIRECTMAP_VIRT region is not mapped;
hence the helper returns false. On x86 the helper performs the existing
virt_to_mfn()-based check.

Signed-off-by: Vijaya Kumar K
Reviewed-by: Jan Beulich
Reviewed-by: Julien Grall
---

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 68dba19375..9e41fb4cd3 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -520,9 +520,6 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
     unsigned long needed = (sizeof(**_heap) +
                             sizeof(**avail) * NR_ZONES +
                             PAGE_SIZE - 1) >> PAGE_SHIFT;
-#ifdef DIRECTMAP_VIRT_END
-    unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
-#endif
     int i, j;
 
     if ( !first_node_initialised )
@@ -532,9 +529,8 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
         first_node_initialised = 1;
         needed = 0;
     }
-#ifdef DIRECTMAP_VIRT_END
     else if ( *use_tail && nr >= needed &&
-              (mfn + nr) <= (virt_to_mfn(eva - 1) + 1) &&
+              arch_mfn_in_directmap(mfn + nr) &&
               (!xenheap_bits ||
                !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
@@ -543,7 +539,7 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
                  PAGE_SIZE - sizeof(**avail) * NR_ZONES;
     }
     else if ( nr >= needed &&
-              (mfn + needed) <= (virt_to_mfn(eva - 1) + 1) &&
+              arch_mfn_in_directmap(mfn + needed) &&
               (!xenheap_bits ||
                !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
@@ -552,7 +548,6 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
                  PAGE_SIZE - sizeof(**avail) * NR_ZONES;
         *use_tail = 0;
     }
-#endif
     else if ( get_order_from_bytes(sizeof(**_heap)) ==
               get_order_from_pages(needed) )
     {
diff --git a/xen/include/asm-arm/arm32/mm.h b/xen/include/asm-arm/arm32/mm.h
new file mode 100644
index 0000000000..68612499bf
--- /dev/null
+++ b/xen/include/asm-arm/arm32/mm.h
@@ -0,0 +1,23 @@
+#ifndef __ARM_ARM32_MM_H__
+#define __ARM_ARM32_MM_H__
+
+/*
+ * Only a limited amount of RAM, called xenheap, is always mapped on ARM32.
+ * For convenience, always return false.
+ */
+static inline bool arch_mfn_in_directmap(unsigned long mfn)
+{
+    return false;
+}
+
+#endif /* __ARM_ARM32_MM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/mm.h b/xen/include/asm-arm/arm64/mm.h
new file mode 100644
index 0000000000..d0a3be7e15
--- /dev/null
+++ b/xen/include/asm-arm/arm64/mm.h
@@ -0,0 +1,23 @@
+#ifndef __ARM_ARM64_MM_H__
+#define __ARM_ARM64_MM_H__
+
+/*
+ * On ARM64, all the RAM is currently direct mapped in Xen.
+ * Hence always return true.
+ */
+static inline bool arch_mfn_in_directmap(unsigned long mfn)
+{
+    return true;
+}
+
+#endif /* __ARM_ARM64_MM_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 4892155596..0fef612f42 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -6,6 +6,14 @@
 #include <public/xen.h>
 #include <xen/pdx.h>
 
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/mm.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/mm.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 /* Align Xen to a 2 MiB boundary. */
 #define XEN_PADDR_ALIGN (1 << 21)
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index e22603ce98..119d7dec6b 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -602,4 +602,15 @@ extern const char zero_page[];
 /* Build a 32bit PSE page table using 4MB pages. */
 void write_32bit_pse_identmap(uint32_t *l2);
 
+/*
+ * x86 maps part of physical memory via the directmap region.
+ * Return whether the input MFN falls in that range.
+ */
+static inline bool arch_mfn_in_directmap(unsigned long mfn)
+{
+    unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
+
+    return mfn <= (virt_to_mfn(eva - 1) + 1);
+}
+
 #endif /* __ASM_X86_MM_H__ */
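
Illustrative postscript (not part of the commit): the standalone C sketch
below models the shape of the init_node_heap() decision after this change,
carving the node bookkeeping out of the tail of an MFN range when the tail
is direct mapped, else out of the head, else falling back. The cut-off
value DIRECTMAP_MAX_MFN and the choose_placement() wrapper are hypothetical
stand-ins invented for this sketch; the real code additionally consults
*use_tail and xenheap_bits, omitted here for brevity.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the per-arch helper introduced by this
 * patch: pretend the direct mapping covers MFNs up to an arbitrary
 * cut-off (here 4 GiB worth of 4 KiB pages).
 */
#define DIRECTMAP_MAX_MFN 0x100000UL

static bool arch_mfn_in_directmap(unsigned long mfn)
{
    return mfn <= DIRECTMAP_MAX_MFN;
}

/*
 * Mirrors the shape of the init_node_heap() checks: carve the node
 * bookkeeping ("needed" pages) out of the tail of [mfn, mfn + nr) if
 * the end of the range is direct mapped, else out of the head if the
 * first "needed" pages are, else neither placement works.
 */
static const char *choose_placement(unsigned long mfn, unsigned long nr,
                                    unsigned long needed)
{
    if ( nr >= needed && arch_mfn_in_directmap(mfn + nr) )
        return "tail of the range";
    if ( nr >= needed && arch_mfn_in_directmap(mfn + needed) )
        return "head of the range";
    return "fallback (neither end is direct mapped)";
}

int main(void)
{
    /* Range entirely below the cut-off: bookkeeping fits at the tail. */
    printf("%s\n", choose_placement(0x10000UL, 0x2000UL, 1));
    /* Tail crosses the cut-off but the head does not: use the head. */
    printf("%s\n", choose_placement(0xff000UL, 0x2000UL, 1));
    /* Range entirely above the cut-off: no direct-mapped placement. */
    printf("%s\n", choose_placement(0x200000UL, 0x2000UL, 1));
    return 0;
}

Built with any C99 compiler, this prints the chosen placement for a range
below, straddling, and above the hypothetical direct-map cut-off.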