ia64/xen-unstable

view xen/arch/ia64/xen/xenpatch.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 35b2c54f59d5
children 13eb21985d3d
line source
1 /******************************************************************************
2 * xenpatch.c
3 * Copyright (c) 2006 Silicon Graphics Inc.
4 * Jes Sorensen <jes@sgi.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
17 * Place - Suite 330, Boston, MA 02111-1307 USA.
18 *
19 * Parts of this based on code from arch/ia64/kernel/patch.c
20 */
22 #include <xen/config.h>
23 #include <xen/lib.h>
24 #include <xen/init.h>
25 #include <asm/xensystem.h>
26 #include <asm/intrinsics.h>
28 /*
29 * This was adapted from code written by Tony Luck:
30 *
31 * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
32 * like this:
33 *
34 * 6 6 5 4 3 2 1
35 * 3210987654321098765432109876543210987654321098765432109876543210
36 * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
37 *
38 * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
39 * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
40 */
41 static u64
42 get_imm64 (u64 insn_addr)
43 {
44 u64 *p = (u64 *) (insn_addr & -16); /* mask out slot number */
46 return ( (p[1] & 0x0800000000000000UL) << 4) | /*A*/
47 ((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
48 ((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
49 ((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
50 ((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
51 ((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
52 ((p[1] & 0x000007f000000000UL) >> 36); /*G*/
53 }
55 /* Patch instruction with "val" where "mask" has 1 bits. */
56 void
57 ia64_patch (u64 insn_addr, u64 mask, u64 val)
58 {
59 u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
60 #define insn_mask ((1UL << 41) - 1)
61 unsigned long shift;
63 b0 = b[0]; b1 = b[1];
64 /* 5 bits of template, then 3 x 41-bit instructions */
65 shift = 5 + 41 * (insn_addr % 16);
66 if (shift >= 64) {
67 m1 = mask << (shift - 64);
68 v1 = val << (shift - 64);
69 } else {
70 m0 = mask << shift; m1 = mask >> (64 - shift);
71 v0 = val << shift; v1 = val >> (64 - shift);
72 b[0] = (b0 & ~m0) | (v0 & m0);
73 }
74 b[1] = (b1 & ~m1) | (v1 & m1);
75 }
77 void
78 ia64_patch_imm64 (u64 insn_addr, u64 val)
79 {
80 /* The assembler may generate offset pointing to either slot 1
81 or slot 2 for a long (2-slot) instruction, occupying slots 1
82 and 2. */
83 insn_addr &= -16UL;
84 ia64_patch(insn_addr + 2, 0x01fffefe000UL,
85 (((val & 0x8000000000000000UL) >> 27) | /* bit 63 -> 36 */
86 ((val & 0x0000000000200000UL) << 0) | /* bit 21 -> 21 */
87 ((val & 0x00000000001f0000UL) << 6) | /* bit 16 -> 22 */
88 ((val & 0x000000000000ff80UL) << 20) | /* bit 7 -> 27 */
89 ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */));
90 ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
91 }
93 /*
94 * Add more patch points in separate functions as appropriate
95 */
/*
 * Adjust the 64-bit immediate loaded at the "frametable_miss" patch
 * point (an asm symbol) by "offset".  Reads the current immediate with
 * get_imm64() and rewrites it via ia64_patch_imm64(); a no-op unless
 * CONFIG_VIRTUAL_FRAME_TABLE is enabled.
 */
static void __init xen_patch_frametable_miss(u64 offset)
{
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
	extern char frametable_miss;
	u64 addr, val;

	addr = (u64)&frametable_miss;
	val = get_imm64(addr) + offset;
	ia64_patch_imm64(addr, val);
#endif
}
109 /*
110 * We need sometimes to load the physical address of a kernel
111 * object. Often we can convert the virtual address to physical
112 * at execution time, but sometimes (either for performance reasons
113 * or during error recovery) we cannot do this. Patch the marked
114 * bundles to load the physical address.
115 */
116 void __init
117 ia64_patch_vtop (unsigned long start, unsigned long end)
118 {
119 s32 *offp = (s32 *)start;
120 u64 ip;
122 while (offp < (s32 *)end) {
123 ip = (u64)offp + *offp;
125 /* replace virtual address with corresponding physical address */
126 ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
127 ia64_fc((void *)ip);
128 ++offp;
129 }
130 ia64_sync_i();
131 ia64_srlz_i();
132 }
134 void __init xen_patch_kernel(void)
135 {
136 extern unsigned long xen_pstart;
137 unsigned long patch_offset;
139 patch_offset = xen_pstart - (KERNEL_START - PAGE_OFFSET);
141 printk("Xen patching physical address access by offset: "
142 "0x%lx\n", patch_offset);
144 xen_patch_frametable_miss(patch_offset);
146 ia64_sync_i();
147 ia64_srlz_i();
148 }