arm64: override early_init_dt_add_memory_arch()
author     Ard Biesheuvel <ard.biesheuvel@linaro.org>
           Tue, 7 Apr 2015 10:23:13 +0000 (12:23 +0200)
committer  Julien Grall <julien.grall@citrix.com>
           Mon, 28 Sep 2015 11:05:14 +0000 (12:05 +0100)
Override the __weak early_init_dt_add_memory_arch() with our own
version. This allows us to relax the restrictions imposed at memory
discovery time, which is needed if we want to defer the assignment
of PHYS_OFFSET and make it independent of where the kernel Image
is placed in physical memory.
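
[Editor's note: the override relies on the generic definition in
drivers/of/fdt.c being marked __weak, so a strong definition supplied by
the architecture wins at link time. A minimal standalone sketch of that
mechanism, with hypothetical names rather than the kernel's symbols:]

/*
 * Illustration only: a symbol defined with __attribute__((weak)) is used
 * solely when no ordinary ("strong") definition of the same name is
 * linked in; another object file that defines it normally overrides it.
 */
#include <stdio.h>

__attribute__((weak)) void add_memory_region(unsigned long base,
					      unsigned long size)
{
	printf("generic fallback: base=0x%lx size=0x%lx\n", base, size);
}

int main(void)
{
	/*
	 * With only this file linked, the weak fallback runs.  Linking a
	 * second object that defines add_memory_region() without the weak
	 * attribute would silently replace it, just as arch/arm64/mm/init.c
	 * now replaces the __weak early_init_dt_add_memory_arch() from
	 * drivers/of/fdt.c.
	 */
	add_memory_region(0x80000000UL, 0x40000000UL);
	return 0;
}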

So copy the generic original, but only retain the check against
regions whose sizes become zero when clipped to page alignment.

For now, we will remove the range below PHYS_OFFSET explicitly
until we rework that logic in a subsequent patch. Any memory that
we will not be able to map due to insufficient size of the linear
region is also removed.
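
[Editor's note: the linear_region_size value computed in the first hunk
below is simply the size of the kernel's linear map window. A rough
standalone sketch of the arithmetic, assuming the arm64 layout of this
era where PAGE_OFFSET marks the start of the upper half of a
VA_BITS-wide kernel address space (VA_BITS=48 picked for illustration):]

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned va_bits = 48;	/* example configuration */
	const uint64_t page_offset = UINT64_MAX << (va_bits - 1);
	const int64_t linear_region_size = -(int64_t)page_offset;

	/* 0xffff800000000000 and 0x0000800000000000 (128 TiB) for VA_BITS=48 */
	printf("PAGE_OFFSET        = 0x%016llx\n",
	       (unsigned long long)page_offset);
	printf("linear_region_size = 0x%016llx\n",
	       (unsigned long long)linear_region_size);

	/*
	 * With memstart_addr as the base of RAM, everything below it and
	 * everything above memstart_addr + linear_region_size cannot be
	 * covered by the linear mapping and is removed from memblock.
	 */
	return 0;
}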

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Robert Richter <rrichter@cavium.com>
Signed-off-by: Vadim Lomovtsev <Vadim.Lomovtsev@caviumnetworks.com>
arch/arm64/mm/init.c
index ad87ce826cce45ec0ac710c4dc1b721d8aaa8f97..14abe50c3ff6128c078a09b97499367efc360564 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -158,6 +158,15 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
+       /*
+        * Remove the memory that we will not be able to cover
+        * with the linear mapping.
+        */
+       const s64 linear_region_size = -(s64)PAGE_OFFSET;
+
+       memblock_remove(0, memstart_addr);
+       memblock_remove(memstart_addr + linear_region_size, ULLONG_MAX);
+
        memblock_enforce_memory_limit(memory_limit);
 
        /*
@@ -374,3 +383,19 @@ static int __init keepinitrd_setup(char *__unused)
 
 __setup("keepinitrd", keepinitrd_setup);
 #endif
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+       if (!PAGE_ALIGNED(base)) {
+               if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
+                       pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
+                               base, base + size);
+                       return;
+               }
+               size -= PAGE_SIZE - (base & ~PAGE_MASK);
+               base = PAGE_ALIGN(base);
+       }
+       size &= PAGE_MASK;
+
+       memblock_add(base, size);
+}
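
[Editor's note: to see how the retained check behaves, here is a rough
user-space re-creation of the clipping logic above, assuming 4 KiB pages
and with memblock_add() replaced by a printf; it is an illustration, not
the kernel code itself:]

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	0x1000ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_ALIGNED(x)	(((x) & ~PAGE_MASK) == 0)

static void add_memory(uint64_t base, uint64_t size)
{
	if (!PAGE_ALIGNED(base)) {
		/* region vanishes entirely once clipped: ignore it */
		if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
			printf("ignoring 0x%llx - 0x%llx\n",
			       (unsigned long long)base,
			       (unsigned long long)(base + size));
			return;
		}
		/* drop the unaligned head and round base up to a page */
		size -= PAGE_SIZE - (base & ~PAGE_MASK);
		base = PAGE_ALIGN(base);
	}
	/* drop the unaligned tail */
	size &= PAGE_MASK;

	printf("memblock_add(0x%llx, 0x%llx)\n",
	       (unsigned long long)base, (unsigned long long)size);
}

int main(void)
{
	add_memory(0x40000800, 0x3000);	/* clipped to 0x40001000 + 0x2000 */
	add_memory(0x40000800, 0x400);	/* smaller than the clipped head: ignored */
	return 0;
}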