xenbits.xensource.com Git - people/aperard/linux-arndale.git/commitdiff
ARM: LPAE: support 64-bit virt_to_phys patching
author: Cyril Chemparathy <cyril@ti.com>
Fri, 21 Sep 2012 15:56:03 +0000 (11:56 -0400)
committer: Vasanth Ananthan <vasanthananthan@gmail.com>
Tue, 8 Jan 2013 10:34:43 +0000 (16:04 +0530)
This patch adds support for 64-bit physical addresses in virt_to_phys()
patching.  This does not do real 64-bit add/sub, but instead patches in the
upper 32-bits of the phys_offset directly into the output of virt_to_phys.

There is no corresponding change on the phys_to_virt() side, because
computations on the upper 32-bits would be discarded anyway.

Signed-off-by: Cyril Chemparathy <cyril@ti.com>
arch/arm/include/asm/memory.h
arch/arm/kernel/head.S
arch/arm/kernel/setup.c

index a4fc01e16481352637997dfcdb4b8dc61fc8ab31..064345455a35d4fd35690938c2b02a931c0a851c 100644 (file)
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 
 extern unsigned long   __pv_offset;
-extern unsigned long   __pv_phys_offset;
+extern phys_addr_t     __pv_phys_offset; /* 64-bit capable when CONFIG_ARM_LPAE is set */
 #define PHYS_OFFSET    __virt_to_phys(PAGE_OFFSET)
 
 static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
-       unsigned long t;
+       phys_addr_t t; /* result may exceed 32 bits under LPAE */
+
+#ifndef CONFIG_ARM_LPAE /* 32-bit phys_addr_t: a single patched add suffices */
        early_patch_imm8("add", t, x, __pv_offset, 0);
+#else
+       unsigned long __tmp;
+
+#ifndef __ARMEB__
+#define PV_PHYS_HIGH   "(__pv_phys_offset + 4)" /* LE: high 32 bits live at byte offset 4 */
+#else
+#define PV_PHYS_HIGH   "__pv_phys_offset" /* BE: high 32 bits live at byte offset 0 */
+#endif
+
+       early_patch_stub(
+       /* type */              PATCH_IMM8,
+       /* code */ /* out-of-line fallback, executed until patching occurs */
+               "ldr            %[tmp], =__pv_offset\n"
+               "ldr            %[tmp], [%[tmp]]\n"
+               "add            %Q[to], %[from], %[tmp]\n" /* %Q = low 32 bits of 'to' */
+               "ldr            %[tmp], =" PV_PHYS_HIGH "\n"
+               "ldr            %[tmp], [%[tmp]]\n"
+               "mov            %R[to], %[tmp]\n", /* %R = high 32 bits of 'to' */
+       /* pad */               4,
+       /* patch_data */ /* NOTE(review): consumed by the runtime patching code — layout must match its expectations */
+               ".long          __pv_offset\n"
+               "add            %Q[to], %[from], %[imm]\n"
+               ".long  "       PV_PHYS_HIGH "\n"
+               "mov            %R[to], %[imm]\n",
+       /* operands */
+               : [to]   "=r"   (t),
+                 [tmp]  "=&r"  (__tmp) /* earlyclobber: written before all inputs are consumed */
+               : [from] "r"    (x),
+                 [imm]  "I"    (__IMM8), /* "I": valid ARM data-processing immediate */
+                        "i"    (&__pv_offset),
+                        "i"    (&__pv_phys_offset));
+#endif
        return t;
 }
 
index 37e6da63d444008fe58df022130fa23910fb0b32..1ae52e1d896ae1a7fc8f033a071ec377dbba4009 100644 (file)
@@ -538,7 +538,11 @@ ENDPROC(__fixup_pv_offsets)
 
        .align
1:     .long   .
+#if defined(CONFIG_ARM_LPAE) && defined(__ARMEB__)
+       .long   __pv_phys_offset + 4    @ NOTE(review): BE — the 32-bit word the fixup writes appears to sit at offset 4; confirm against __fixup_pv_offsets
+#else
       .long   __pv_phys_offset
+#endif
       .long   __pv_offset
       .long   PAGE_OFFSET
 #endif
index 60c8c55acdf047c9e63e05aca3b786c4901b7daf..96bf9e5bd1ac3cb8dbfb919fb04c0179c15ec514 100644 (file)
@@ -151,7 +151,7 @@ DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
  * The initializers here prevent these from landing in the BSS section.
  */
 unsigned long __pv_offset = 0xdeadbeef;
-unsigned long __pv_phys_offset = 0xdeadbeef;
+phys_addr_t   __pv_phys_offset = 0xdeadbeef; /* widened for LPAE; 0xdeadbeef is a placeholder — presumably fixed up at early boot by head.S; the non-zero initializer keeps it out of BSS (see comment above) */
 EXPORT_SYMBOL(__pv_phys_offset);
 
 #endif