ia64/xen-unstable

view xen/arch/ia64/xenasm.S @ 3108:85d6a1145160

bitkeeper revision 1.1159.187.7 (41a4e12eWWEz6Rwd4YlbRFZKcBjaMQ)

Merge arcadians.cl.cam.ac.uk:/auto/groups/xeno/BK/xen-2.0-testing.bk
into arcadians.cl.cam.ac.uk:/local/scratch-2/cl349/xen-2.0-testing.bk
author cl349@arcadians.cl.cam.ac.uk
date Wed Nov 24 19:29:50 2004 +0000 (2004-11-24)
parents b7cbbc4c7a3e
children 7ef582b6c9c4
line source
1 /*
2 * Assembly support routines for Xen/ia64
3 *
4 * Copyright (C) 2004 Hewlett-Packard Co
5 * Dan Magenheimer <dan.magenheimer@hp.com>
6 */
8 #include <linux/config.h>
9 #include <asm/asmmacro.h>
10 #include <asm/processor.h>
11 #include <asm/pgtable.h>
12 #include <asm/vhpt.h>
// RunningOnHpSki(rx, ry, pn): set predicate pn iff we appear to be running
// on the HP Ski ia64 simulator.  Reads CPUID[2] into rx and CPUID[3] into
// ry; Ski is recognized by CPUID[2] == 0 and CPUID[3] == 0x7000004.
// rx and ry are scratch (clobbered); pn is the output predicate.
// NOTE: body is a \-continued #define, so no comments inside it.
14 #define RunningOnHpSki(rx,ry,pn) \
15 addl rx = 2, r0; \
16 addl ry = 3, r0; \
17 ;; \
18 mov rx = cpuid[rx]; \
19 mov ry = cpuid[ry]; \
20 ;; \
21 cmp.eq pn,p0 = 0, rx; \
22 ;; \
23 (pn) movl rx = 0x7000004 ; \
24 ;; \
25 (pn) cmp.eq pn,p0 = ry, rx; \
26 ;;
28 //int platform_is_hp_ski(void)
// Returns (in r8) 1 if running on the HP Ski simulator, else 0.
// Clobbers r3, r9 and predicate p8 (scratch for RunningOnHpSki).
29 GLOBAL_ENTRY(platform_is_hp_ski)
30 mov r8 = 0
31 RunningOnHpSki(r3,r9,p8)
32 (p8) mov r8 = 1
33 br.ret.sptk.many b0
34 END(platform_is_hp_ski)
36 // Change rr7 to the passed value while ensuring
37 // Xen is mapped into the new region
// PSR bits cleared across the rr7 switch: interrupts (I), instruction/
// data/RSE translation (IT/DT/RT), single-step/debug traps (SS/DD),
// restart instruction bits (RI), exception deferral (ED) and FP-disable
// bits (DFL/DFH) — i.e. run physical, uninterruptible, while remapping.
38 #define PSR_BITS_TO_CLEAR \
39 (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
40 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
41 IA64_PSR_DFL | IA64_PSR_DFH)
42 // FIXME? Note that this turns off the DB bit (debug)
// Bits forced on: select register bank 1.
43 #define PSR_BITS_TO_SET IA64_PSR_BN
// ia64_new_rr7(in0 = new rr7 value)
// Switch region register 7 to the passed value while keeping Xen usable:
//  1. capture physical addresses of per-cpu, VHPT and shared-info pages
//     (via tpa) BEFORE the region mapping changes;
//  2. drop to physical mode (ia64_switch_mode_phys) with PSR_BITS_TO_CLEAR
//     off / PSR_BITS_TO_SET on;
//  3. write rr[7] = in0;
//  4. re-pin the pinned translation registers (kernel text/data, current
//     stack unless it overlaps the kernel TR, per-cpu, VHPT, shared info);
//  5. return to virtual mode and restore the caller's PSR/RSE state.
// Clobbers scratch regs r2,r3,r8,r16-r18,r20,r22-r25,r28 and p7.
45 GLOBAL_ENTRY(ia64_new_rr7)
46 // not sure this unwind statement is correct...
47 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
// 1 input, 7 locals: loc0=rp, loc1=ar.pfs, loc2=percpu PA, loc3=psr,
// loc4=ar.rsc, loc5=shared-info PA, loc6=VHPT PA.
48 alloc loc1 = ar.pfs, 1, 7, 0, 0
49 1: {
50 mov r28 = in0 // copy procedure index
51 mov r8 = ip // save ip to compute branch
52 mov loc0 = rp // save rp
53 };;
54 .body
55 movl loc2=PERCPU_ADDR
56 ;;
57 tpa loc2=loc2 // grab this BEFORE changing rr7
58 ;;
59 #if VHPT_ENABLED
60 movl loc6=VHPT_ADDR
61 ;;
62 tpa loc6=loc6 // grab this BEFORE changing rr7
63 ;;
64 #endif
65 movl loc5=SHAREDINFO_ADDR
66 ;;
67 tpa loc5=loc5 // grab this BEFORE changing rr7
68 ;;
69 mov loc3 = psr // save psr
70 adds r8 = 1f-1b,r8 // calculate return address for call
71 ;;
72 tpa r8=r8 // convert rp to physical
73 ;;
74 mov loc4=ar.rsc // save RSE configuration
75 ;;
76 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
77 movl r16=PSR_BITS_TO_CLEAR
78 movl r17=PSR_BITS_TO_SET
79 ;;
80 or loc3=loc3,r17 // add in psr the bits to set
81 ;;
82 andcm r16=loc3,r16 // removes bits to clear from psr
// ia64_switch_mode_phys enters physical mode with psr in r16 and
// returns to the physical address precomputed in r8 (label 1 below).
83 br.call.sptk.many rp=ia64_switch_mode_phys
84 1:
85 // now in physical mode with psr.i/ic off so do rr7 switch
// r16 = 7 << 61: the base virtual address of region 7.
86 dep r16=-1,r0,61,3
87 ;;
88 mov rr[r16]=in0
89 srlz.d
90 ;;
92 // re-pin mappings for kernel text and data
93 mov r18=KERNEL_TR_PAGE_SHIFT<<2 // itir-format page size
94 movl r17=KERNEL_START
95 ;;
96 rsm psr.i | psr.ic
97 ;;
98 srlz.i
99 ;;
// purge any stale I/D translations for the kernel page first
100 ptr.i r17,r18
101 ptr.d r17,r18
102 ;;
103 mov cr.itir=r18
104 mov cr.ifa=r17
105 mov r16=IA64_TR_KERNEL
106 //mov r3=ip
107 movl r18=PAGE_KERNEL
// r2 = r3 truncated to the kernel TR page; r3 is assumed to still hold
// a physical address within the kernel image — TODO confirm (the
// "mov r3=ip" that would set it is commented out above).
108 ;;
109 dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
110 ;;
111 or r18=r2,r18 // PTE = physical page | PAGE_KERNEL attributes
112 ;;
113 srlz.i
114 ;;
115 itr.i itr[r16]=r18
116 ;;
117 itr.d dtr[r16]=r18
118 ;;
120 // re-pin mappings for stack (current), per-cpu, vhpt, and shared info
122 // unless overlaps with KERNEL_TR
// r13 = "current"; compare its kernel-TR-sized page against KERNEL_START
123 dep r18=0,r13,0,KERNEL_TR_PAGE_SHIFT
124 ;;
125 cmp.eq p7,p0=r17,r18
126 (p7) br.cond.sptk .stack_overlaps
127 ;;
128 movl r25=PAGE_KERNEL
129 dep r20=0,r13,50,14 // physical address of "current"
130 ;;
131 or r23=r25,r20 // construct PA | page properties
132 mov r25=IA64_GRANULE_SHIFT<<2
133 ;;
134 ptr.d r13,r25
135 ;;
136 mov cr.itir=r25
137 mov cr.ifa=r13 // VA of next task...
138 ;;
139 mov r25=IA64_TR_CURRENT_STACK
140 ;;
141 itr.d dtr[r25]=r23 // wire in new mapping...
142 ;;
143 .stack_overlaps:
// per-cpu data page, using the physical address captured in loc2
145 movl r22=PERCPU_ADDR
146 ;;
147 movl r25=PAGE_KERNEL
148 ;;
149 mov r20=loc2 // saved percpu physical address
150 ;;
151 or r23=r25,r20 // construct PA | page properties
152 mov r24=PERCPU_PAGE_SHIFT<<2
153 ;;
154 ptr.d r22,r24
155 ;;
156 mov cr.itir=r24
157 mov cr.ifa=r22
158 ;;
159 mov r25=IA64_TR_PERCPU_DATA
160 ;;
161 itr.d dtr[r25]=r23 // wire in new mapping...
162 ;;
164 #if VHPT_ENABLED
// VHPT page, using the physical address captured in loc6
165 movl r22=VHPT_ADDR
166 ;;
167 movl r25=PAGE_KERNEL
168 ;;
169 mov r20=loc6 // saved vhpt physical address
170 ;;
171 or r23=r25,r20 // construct PA | page properties
172 mov r24=VHPT_PAGE_SHIFT<<2
173 ;;
174 ptr.d r22,r24
175 ;;
176 mov cr.itir=r24
177 mov cr.ifa=r22
178 ;;
179 mov r25=IA64_TR_VHPT
180 ;;
181 itr.d dtr[r25]=r23 // wire in new mapping...
182 ;;
183 #endif
// shared-info page, using the physical address captured in loc5
185 movl r22=SHAREDINFO_ADDR
186 ;;
187 movl r25=PAGE_KERNEL
188 ;;
189 mov r20=loc5 // saved sharedinfo physical address
190 ;;
191 or r23=r25,r20 // construct PA | page properties
192 mov r24=PAGE_SHIFT<<2
193 ;;
194 ptr.d r22,r24
195 ;;
196 mov cr.itir=r24
197 mov cr.ifa=r22
198 ;;
199 mov r25=IA64_TR_SHARED_INFO
200 ;;
201 itr.d dtr[r25]=r23 // wire in new mapping...
202 ;;
204 // done, switch back to virtual and return
205 mov r16=loc3 // r16= original psr
206 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
207 mov psr.l = loc3 // restore init PSR
209 mov ar.pfs = loc1
210 mov rp = loc0
211 ;;
212 mov ar.rsc=loc4 // restore RSE configuration
213 srlz.d // serialize restoration of psr.l
214 br.ret.sptk.many rp
215 END(ia64_new_rr7)
217 #include "minstate.h"
// Trampoline from the IVT for privileged-operation faults: save the
// switch stack, call the C handler ia64_handle_privop (frame already
// set up in the ivt), restore, then branch on to ia64_leave_kernel.
219 GLOBAL_ENTRY(ia64_prepare_handle_privop)
220 .prologue
221 /*
222 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
223 */
224 mov r16=r0
225 DO_SAVE_SWITCH_STACK
226 br.call.sptk.many rp=ia64_handle_privop // stack frame setup in ivt
227 .ret22: .body
228 DO_LOAD_SWITCH_STACK
229 br.cond.sptk.many rp // goes to ia64_leave_kernel
230 END(ia64_prepare_handle_privop)
// Trampoline from the IVT for break instructions: save the switch
// stack, call the C handler ia64_handle_break (frame already set up
// in the ivt), restore, then branch on to ia64_leave_kernel.
232 GLOBAL_ENTRY(ia64_prepare_handle_break)
233 .prologue
234 /*
235 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
236 */
237 mov r16=r0
238 DO_SAVE_SWITCH_STACK
239 br.call.sptk.many rp=ia64_handle_break // stack frame setup in ivt
240 .ret23: .body
241 DO_LOAD_SWITCH_STACK
242 br.cond.sptk.many rp // goes to ia64_leave_kernel
243 END(ia64_prepare_handle_break)
// Trampoline from the IVT for faults reflected to the guest: save the
// switch stack, call the C handler ia64_handle_reflection (frame
// already set up in the ivt), restore, then on to ia64_leave_kernel.
245 GLOBAL_ENTRY(ia64_prepare_handle_reflection)
246 .prologue
247 /*
248 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
249 */
250 mov r16=r0
251 DO_SAVE_SWITCH_STACK
252 br.call.sptk.many rp=ia64_handle_reflection // stack frame setup in ivt
253 .ret24: .body
254 DO_LOAD_SWITCH_STACK
255 br.cond.sptk.many rp // goes to ia64_leave_kernel
256 END(ia64_prepare_handle_reflection)
258 // NOTE: instruction spacing must be explicit for recovery on miss
// __get_domain_bundle(r32 = guest bundle address)
// Loads the 16-byte instruction bundle at [r32] into r8 (low 8 bytes)
// and r9 (high 8 bytes).  The nops keep each ld8 at a fixed, known slot
// so that a TLB-miss handler can recognize and restart these loads
// (per the NOTE above — presumably; verify against the miss handler).
259 GLOBAL_ENTRY(__get_domain_bundle)
260 ld8 r8=[r32],8
261 nop 0
262 nop 0
263 ;;
264 ld8 r9=[r32]
265 nop 0
266 nop 0
267 ;;
268 br.ret.sptk.many rp
269 nop 0
270 nop 0
271 ;;
272 END(__get_domain_bundle)
// dorfirfi: load cr.iip / cr.ipsr / cr.ifs from fixed offsets in the
// shared-info page, then fall through to dorfi's rfi so execution
// resumes at that saved context.
// NOTE(review): the SI_CR_* offsets must match the shared_info layout
// declared elsewhere — confirm against the struct definition.
274 GLOBAL_ENTRY(dorfirfi)
275 #define SI_CR_IIP_OFFSET 0x150
276 #define SI_CR_IPSR_OFFSET 0x148
277 #define SI_CR_IFS_OFFSET 0x158
278 movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
279 movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
280 movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
281 ;;
282 ld8 r16 = [r16]
283 ld8 r17 = [r17]
284 ld8 r18 = [r18]
285 ;;
286 mov cr.iip=r16
287 mov cr.ipsr=r17
288 mov cr.ifs=r18
289 ;;
290 // fall through
291 END(dorfirfi)
// dorfi: execute rfi with whatever cr.iip/cr.ipsr/cr.ifs are already
// loaded (dorfirfi above falls through into this after setting them).
293 GLOBAL_ENTRY(dorfi)
294 rfi
295 ;;
// Fixed: was END(dorfirfi) — the END name must match GLOBAL_ENTRY(dorfi),
// otherwise the procedure-end/size directives attach to the wrong symbol.
296 END(dorfi)
298 //
299 // Long's Peak UART Offsets
300 //
// COM_TOP/COM_BOT bound the UART's MMIO window; longs_peak_putc below
// accesses registers at COM_TOP + <offset> with translation disabled.
301 #define COM_TOP 0xff5e0000
302 #define COM_BOT 0xff5e2000
304 // UART offsets (standard 16550-style register numbering)
305 #define UART_TX 0 /* Out: Transmit buffer (DLAB=0) */
306 #define UART_INT_ENB 1 /* interrupt enable (DLAB=0) */
307 #define UART_INT_ID 2 /* Interrupt ID register */
308 #define UART_LINE_CTL 3 /* Line control register */
309 #define UART_MODEM_CTL 4 /* Modem Control Register */
310 #define UART_LSR 5 /* In: Line Status Register */
311 #define UART_MSR 6 /* Modem status register */
312 #define UART_DLATCH_LOW UART_TX
313 #define UART_DLATCH_HIGH UART_INT_ENB
// Legacy PC COM port I/O bases (unused by longs_peak_putc)
314 #define COM1 0x3f8
315 #define COM2 0x2F8
316 #define COM3 0x3E8
318 /* interrupt enable bits (offset 1) */
319 #define DATA_AVAIL_INT 1
320 #define XMIT_HOLD_EMPTY_INT 2
321 #define LINE_STAT_INT 4
322 #define MODEM_STAT_INT 8
324 /* line status bits (offset 5) */
325 #define REC_DATA_READY 1
326 #define OVERRUN 2
327 #define PARITY_ERROR 4
328 #define FRAMING_ERROR 8
329 #define BREAK_INTERRUPT 0x10
330 #define XMIT_HOLD_EMPTY 0x20
331 #define XMIT_SHIFT_EMPTY 0x40
333 // Write a single character to the Long's Peak UART (polled).
334 // input: r32 = character to be written
335 // output: none
// Clobbers r16, r18 and predicate p6.  Data translation (psr.dt) is
// turned off so the MMIO registers are reached by physical address;
// the 0x8000000000000000 prefix presumably selects the uncached
// physical window — confirm against the platform memory map.
336 GLOBAL_ENTRY(longs_peak_putc)
337 rsm psr.dt
338 movl r16 = 0x8000000000000000 + COM_TOP + UART_LSR
339 ;;
340 srlz.i
341 ;;
// Busy-wait until the Line Status Register reports transmit-hold empty.
343 .Chk_THRE_p:
344 ld1.acq r18=[r16]
345 ;;
347 and r18 = XMIT_HOLD_EMPTY, r18
348 ;;
349 cmp4.eq p6,p0=0,r18
350 ;;
352 (p6) br .Chk_THRE_p
353 ;;
// THRE set: store the character into the transmit buffer register.
354 movl r16 = 0x8000000000000000 + COM_TOP + UART_TX
355 ;;
356 st1.rel [r16]=r32
357 ;;
358 ssm psr.dt // re-enable data translation before returning
359 ;;
360 srlz.i
361 ;;
362 br.ret.sptk.many b0
363 END(longs_peak_putc)
365 /* derived from linux/arch/ia64/hp/sim/boot/boot_head.S */
// pal_emulator_static(r32 = PAL function index)
// Minimal PAL procedure emulation with canned answers.  Indexes < 256
// use the static convention, < 512 the stacked convention (which here
// just returns).  Default status is r8 = -1 (unimplemented); handled
// cases overwrite r8..r11 with status 0 and their return values.
366 GLOBAL_ENTRY(pal_emulator_static)
367 mov r8=-1 /* default: unimplemented */
368 mov r9=256
369 ;;
370 cmp.gtu p7,p8=r9,r32 /* r32 <= 255? */
371 (p7) br.cond.sptk.few static
372 ;;
373 mov r9=512
374 ;;
375 cmp.gtu p7,p8=r9,r32
376 (p7) br.cond.sptk.few stacked
377 ;;
378 static: cmp.eq p7,p8=6,r32 /* PAL_PTCE_INFO */
379 (p8) br.cond.sptk.few 1f
380 ;;
381 mov r8=0 /* status = 0 */
382 movl r9=0x100000000 /* tc.base */
383 movl r10=0x0000000200000003 /* count[0], count[1] */
384 movl r11=0x1000000000002000 /* stride[0], stride[1] */
385 br.ret.sptk.few rp
386 1: cmp.eq p7,p8=14,r32 /* PAL_FREQ_RATIOS */
387 (p8) br.cond.sptk.few 1f
388 mov r8=0 /* status = 0 */
389 movl r9 =0x900000002 /* proc_ratio (1/100) */
390 movl r10=0x100000100 /* bus_ratio<<32 (1/256) */
391 movl r11=0x900000002 /* itc_ratio<<32 (1/100) */
392 ;;
// NOTE(review): no br.ret here — the FREQ_RATIOS case falls through the
// remaining compares (each fails, so their movs are skipped) and returns
// via stacked:.  r8-r11 survive, but confirm the fall-through is intended.
393 1: cmp.eq p7,p8=19,r32 /* PAL_RSE_INFO */
394 (p8) br.cond.sptk.few 1f
395 mov r8=0 /* status = 0 */
396 mov r9=96 /* num phys stacked */
397 mov r10=0 /* hints */
398 mov r11=0
399 br.ret.sptk.few rp
400 1: cmp.eq p7,p8=1,r32 /* PAL_CACHE_FLUSH */
401 (p8) br.cond.sptk.few 1f
// The whole CACHE_FLUSH / PERF_MON_INFO implementation below is compiled
// out; the assembled code is just the 1: label in the #else branch, so
// those calls fall through to stacked: and return r8 = -1.
402 #if 0
403 mov r9=ar.lc
404 movl r8=524288 /* flush 512k million cache lines (16MB) */
405 ;;
406 mov ar.lc=r8
407 movl r8=0xe000000000000000
408 ;;
409 .loop: fc r8
410 add r8=32,r8
411 br.cloop.sptk.few .loop
412 sync.i
413 ;;
414 srlz.i
415 ;;
416 mov ar.lc=r9
417 mov r8=r0
418 ;;
419 1: cmp.eq p7,p8=15,r32 /* PAL_PERF_MON_INFO */
420 (p8) br.cond.sptk.few 1f
421 mov r8=0 /* status = 0 */
422 movl r9 =0x08122f04 /* generic=4 width=47 retired=8 cycles=18 */
423 mov r10=0 /* reserved */
424 mov r11=0 /* reserved */
425 mov r16=0xffff /* implemented PMC */
426 mov r17=0x3ffff /* implemented PMD */
427 add r18=8,r29 /* second index */
428 ;;
429 st8 [r29]=r16,16 /* store implemented PMC */
430 st8 [r18]=r0,16 /* clear remaining bits */
431 ;;
432 st8 [r29]=r0,16 /* clear remaining bits */
433 st8 [r18]=r0,16 /* clear remaining bits */
434 ;;
435 st8 [r29]=r17,16 /* store implemented PMD */
436 st8 [r18]=r0,16 /* clear remaining bits */
437 mov r16=0xf0 /* cycles count capable PMC */
438 ;;
439 st8 [r29]=r0,16 /* clear remaining bits */
440 st8 [r18]=r0,16 /* clear remaining bits */
441 mov r17=0xf0 /* retired bundles capable PMC */
442 ;;
443 st8 [r29]=r16,16 /* store cycles capable */
444 st8 [r18]=r0,16 /* clear remaining bits */
445 ;;
446 st8 [r29]=r0,16 /* clear remaining bits */
447 st8 [r18]=r0,16 /* clear remaining bits */
448 ;;
449 st8 [r29]=r17,16 /* store retired bundle capable */
450 st8 [r18]=r0,16 /* clear remaining bits */
451 ;;
452 st8 [r29]=r0,16 /* clear remaining bits */
453 st8 [r18]=r0,16 /* clear remaining bits */
454 ;;
455 1: br.cond.sptk.few rp
456 #else
457 1:
458 #endif
459 stacked:
460 br.ret.sptk.few rp
461 END(pal_emulator_static)