ia64/xen-unstable

view linux-2.6-xen-sparse/arch/xen/x86_64/kernel/head.S @ 6432:b54144915ae6

merge?
author cl349@firebug.cl.cam.ac.uk
date Thu Aug 25 16:26:30 2005 +0000 (2005-08-25)
parents 3428d58a85e1 8d31f9a9c423
children 0610add7c3fe
line source
1 /*
2 * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
3 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
6 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
7 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
8 *
9 * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
10 *
11 * Jun Nakajima <jun.nakajima@intel.com>
12 * Modified for Xen
13 */
16 #include <linux/linkage.h>
18 .section __xen_guest
19 .ascii "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=3.0,VIRT_BASE=0xffffffff80000000"
20 .ascii ",LOADER=generic"
21 /* .ascii ",PT_MODE_WRITABLE" */
22 .byte 0
25 #include <linux/threads.h>
26 #include <asm/desc.h>
27 #include <asm/segment.h>
28 #include <asm/page.h>
29 #include <asm/msr.h>
30 #include <asm/cache.h>
32 /* we are not able to switch in one step to the final KERNEL ADRESS SPACE
33 * because we need identity-mapped pages on setup so define __START_KERNEL to
34 * 0x100000 for this stage
35 *
36 */
38 .text
39 .code64
40 .globl startup_64
41 startup_64:
42 ENTRY(_start)
43 cld
44 /* Copy the necessary stuff from xen_start_info structure. */
45 movq $xen_start_info_union,%rdi
46 movq $256,%rcx
47 rep movsq
49 #ifdef CONFIG_SMP
50 ENTRY(startup_64_smp)
51 cld
52 #endif /* CONFIG_SMP */
54 movq init_rsp(%rip),%rsp
55 /* zero EFLAGS after setting rsp */
56 pushq $0
57 popfq
58 movq initial_code(%rip),%rax
59 jmp *%rax
61 /* SMP bootup changes these two */
62 .globl initial_code
63 initial_code:
64 .quad x86_64_start_kernel
65 .globl init_rsp
66 init_rsp:
67 .quad init_thread_union+THREAD_SIZE-8
69 ENTRY(early_idt_handler)
70 xorl %eax,%eax
71 movq 8(%rsp),%rsi # get rip
72 movq (%rsp),%rdx
73 leaq early_idt_msg(%rip),%rdi
74 1: hlt # generate #GP
75 jmp 1b
77 early_idt_msg:
78 .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
80 #if 0
81 ENTRY(lgdt_finish)
82 movl $(__USER_DS),%eax # DS/ES contains default USER segment
83 movw %ax,%ds
84 movw %ax,%es
85 movl $(__KERNEL_DS),%eax
86 movw %ax,%ss # after changing gdt.
87 popq %rax # get the retrun address
88 pushq $(__KERNEL_CS)
89 pushq %rax
90 lretq
91 #endif
93 ENTRY(stext)
94 ENTRY(_stext)
96 /*
97 * This default setting generates an ident mapping at address 0x100000
98 * and a mapping for the kernel that precisely maps virtual address
99 * 0xffffffff80000000 to physical address 0x000000. (always using
100 * 2Mbyte large pages provided by PAE mode)
101 */
102 .org 0x1000
103 ENTRY(init_level4_pgt)
104 .fill 512,8,0
106 /*
107 * We update two pgd entries to make kernel and user pgd consistent
108 * at pgd_populate(). It can be used for kernel modules. So we place
109 * this page here for those cases to avoid memory corruption.
110 * We also use this page to establish the initiali mapping for
111 * vsyscall area.
112 */
113 .org 0x2000
114 ENTRY(init_level4_user_pgt)
115 .fill 512,8,0
117 /*
118 * In Xen the following pre-initialized pgt entries are re-initialized.
119 */
120 .org 0x3000
121 ENTRY(level3_kernel_pgt)
122 .fill 510,8,0
123 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
124 .quad 0x0000000000105007 /* -> level2_kernel_pgt */
125 .fill 1,8,0
127 .org 0x4000
128 ENTRY(level2_ident_pgt)
129 /* 40MB for bootup. */
130 .quad 0x0000000000000283
131 .quad 0x0000000000200183
132 .quad 0x0000000000400183
133 .quad 0x0000000000600183
134 .quad 0x0000000000800183
135 .quad 0x0000000000A00183
136 .quad 0x0000000000C00183
137 .quad 0x0000000000E00183
138 .quad 0x0000000001000183
139 .quad 0x0000000001200183
140 .quad 0x0000000001400183
141 .quad 0x0000000001600183
142 .quad 0x0000000001800183
143 .quad 0x0000000001A00183
144 .quad 0x0000000001C00183
145 .quad 0x0000000001E00183
146 .quad 0x0000000002000183
147 .quad 0x0000000002200183
148 .quad 0x0000000002400183
149 .quad 0x0000000002600183
150 /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
151 .globl temp_boot_pmds
152 temp_boot_pmds:
153 .fill 492,8,0
155 .org 0x5000
156 ENTRY(level2_kernel_pgt)
157 /* 40MB kernel mapping. The kernel code cannot be bigger than that.
158 When you change this change KERNEL_TEXT_SIZE in page.h too. */
159 /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
160 .quad 0x0000000000000183
161 .quad 0x0000000000200183
162 .quad 0x0000000000400183
163 .quad 0x0000000000600183
164 .quad 0x0000000000800183
165 .quad 0x0000000000A00183
166 .quad 0x0000000000C00183
167 .quad 0x0000000000E00183
168 .quad 0x0000000001000183
169 .quad 0x0000000001200183
170 .quad 0x0000000001400183
171 .quad 0x0000000001600183
172 .quad 0x0000000001800183
173 .quad 0x0000000001A00183
174 .quad 0x0000000001C00183
175 .quad 0x0000000001E00183
176 .quad 0x0000000002000183
177 .quad 0x0000000002200183
178 .quad 0x0000000002400183
179 .quad 0x0000000002600183
180 /* Module mapping starts here */
181 .fill 492,8,0
183 /*
184 * This is used for vsyscall area mapping as we have a different
185 * level4 page table for user.
186 */
187 .org 0x6000
188 ENTRY(level3_user_pgt)
189 .fill 512,8,0
191 .org 0x7000
192 ENTRY(cpu_gdt_table)
193 /* The TLS descriptors are currently at a different place compared to i386.
194 Hopefully nobody expects them at a fixed place (Wine?) */
195 .quad 0x0000000000000000 /* NULL descriptor */
196 .quad 0x008ffa000000ffff /* __KERNEL_COMPAT32_CS */
197 .quad 0x00affa000000ffff /* __KERNEL_CS */
198 .quad 0x00cff2000000ffff /* __KERNEL_DS */
200 .quad 0x00cffa000000ffff /* __USER32_CS */
201 .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
202 .quad 0x00affa000000ffff /* __USER_CS */
203 .quad 0x00cffa000000ffff /* __KERNEL32_CS */
204 .quad 0,0 /* TSS */
205 .quad 0,0 /* LDT */
206 .quad 0,0,0 /* three TLS descriptors */
207 .quad 0 /* unused now? __KERNEL16_CS - 16bit PM for S3 wakeup. */
209 gdt_end:
210 #if 0
211 /* asm/segment.h:GDT_ENTRIES must match this */
212 /* This should be a multiple of the cache line size */
213 /* GDTs of other CPUs: */
214 .fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
215 #endif
217 .org 0x8000
218 ENTRY(empty_zero_page)
220 .org 0x9000
221 ENTRY(empty_bad_page)
223 .org 0xa000
224 ENTRY(empty_bad_pte_table)
226 .org 0xb000
227 ENTRY(empty_bad_pmd_table)
229 .org 0xc000
230 ENTRY(level3_physmem_pgt)
231 .quad 0x0000000000105007 /* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
234 .org 0xd000
235 #ifdef CONFIG_ACPI_SLEEP
236 ENTRY(wakeup_level4_pgt)
237 .quad 0x0000000000102007 /* -> level3_ident_pgt */
238 .fill 255,8,0
239 .quad 0x000000000010a007
240 .fill 254,8,0
241 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
242 .quad 0x0000000000103007 /* -> level3_kernel_pgt */
243 #endif
245 .data
247 .align 16
248 .globl cpu_gdt_descr
249 cpu_gdt_descr:
250 .word gdt_end-cpu_gdt_table
251 gdt:
252 .quad cpu_gdt_table
253 #ifdef CONFIG_SMP
254 .rept NR_CPUS-1
255 .word 0
256 .quad 0
257 .endr
258 #endif
260 /* We need valid kernel segments for data and code in long mode too
261 * IRET will check the segment types kkeil 2000/10/28
262 * Also sysret mandates a special GDT layout
263 */
265 #if 0
266 .align L1_CACHE_BYTES
267 #endif
268 .align L1_CACHE_BYTES
269 ENTRY(idt_table)
270 .rept 256
271 .quad 0
272 .quad 0
273 .endr