ia64/xen-unstable

view xen/arch/ia64/xenasm.S @ 4146:f2d61710e4d9

bitkeeper revision 1.1236.25.24 (42366e9aQ71LQ8uCB-Y1IwVNqx5eqA)

Merge djm@kirby.fc.hp.com://home/djm/src/xen/xeno-unstable-ia64.bk
into sportsman.spdomain:/home/djm/xeno-unstable-ia64.bk
author djm@sportsman.spdomain
date Tue Mar 15 05:11:54 2005 +0000 (2005-03-15)
parents 205e127344e9 c77189c38646
children 48ddf32dad5b
/*
 * Assembly support routines for Xen/ia64
 *
 * Copyright (C) 2004 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 */

#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/vhpt.h>

#if 0
// FIXME: there's gotta be a better way...
// ski and spaski are different... moved to xenmisc.c
#define RunningOnHpSki(rx,ry,pn) \
addl rx = 2, r0; \
addl ry = 3, r0; \
;; \
mov rx = cpuid[rx]; \
mov ry = cpuid[ry]; \
;; \
cmp.eq pn,p0 = 0, rx; \
;; \
(pn) movl rx = 0x7000004 ; \
;; \
(pn) cmp.ge pn,p0 = ry, rx; \
;;

//int platform_is_hp_ski(void)
GLOBAL_ENTRY(platform_is_hp_ski)
mov r8 = 0
RunningOnHpSki(r3,r9,p8)
(p8) mov r8 = 1
br.ret.sptk.many b0
END(platform_is_hp_ski)
#endif

// Change rr7 to the passed value while ensuring
// Xen is mapped into the new region.
// in0: new rr7 value
// in1: Xen virtual address of shared info (to be pinned)
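//
// Overview: save PSR and RSE state, drop to physical mode via
// ia64_switch_mode_phys, program rr7, re-pin the translation registers
// (kernel text/data, current stack, per-cpu data, VHPT if enabled,
// shared info), then return to virtual mode via ia64_switch_mode_virt.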
#define PSR_BITS_TO_CLEAR \
(IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
IA64_PSR_DFL | IA64_PSR_DFH)
// FIXME? Note that this turns off the DB bit (debug)
#define PSR_BITS_TO_SET IA64_PSR_BN

GLOBAL_ENTRY(ia64_new_rr7)
// not sure this unwind statement is correct...
.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
alloc loc1 = ar.pfs, 2, 7, 0, 0
1: {
mov r28 = in0 // copy new rr7 value
mov r8 = ip // save ip to compute branch
mov loc0 = rp // save rp
};;
.body
movl loc2=PERCPU_ADDR
;;
tpa loc2=loc2 // grab this BEFORE changing rr7
;;
#if VHPT_ENABLED
movl loc6=VHPT_ADDR
;;
tpa loc6=loc6 // grab this BEFORE changing rr7
;;
#endif
mov loc5=in1
;;
tpa loc5=loc5 // grab this BEFORE changing rr7
;;
mov loc3 = psr // save psr
adds r8 = 1f-1b,r8 // calculate return address for call
;;
tpa r8=r8 // convert rp to physical
;;
mov loc4=ar.rsc // save RSE configuration
;;
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
movl r16=PSR_BITS_TO_CLEAR
movl r17=PSR_BITS_TO_SET
;;
or loc3=loc3,r17 // add in the PSR bits to set
;;
andcm r16=loc3,r16 // remove the bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode_phys
1:
// now in physical mode with psr.i/ic off so do rr7 switch
dep r16=-1,r0,61,3
;;
mov rr[r16]=in0
srlz.d
;;

// re-pin mappings for kernel text and data
mov r18=KERNEL_TR_PAGE_SHIFT<<2
movl r17=KERNEL_START
;;
rsm psr.i | psr.ic
;;
srlz.i
;;
ptr.i r17,r18
ptr.d r17,r18
;;
mov cr.itir=r18
mov cr.ifa=r17
mov r16=IA64_TR_KERNEL
//mov r3=ip
movl r18=PAGE_KERNEL
;;
dep r2=0,r3,0,KERNEL_TR_PAGE_SHIFT
;;
or r18=r2,r18
;;
srlz.i
;;
itr.i itr[r16]=r18
;;
itr.d dtr[r16]=r18
;;

// re-pin mappings for stack (current), per-cpu, vhpt, and shared info
// unless overlaps with KERNEL_TR
dep r18=0,r13,0,KERNEL_TR_PAGE_SHIFT
;;
cmp.eq p7,p0=r17,r18
(p7) br.cond.sptk .stack_overlaps
;;
movl r25=PAGE_KERNEL
dep r20=0,r13,50,14 // physical address of "current"
;;
or r23=r25,r20 // construct PA | page properties
mov r25=IA64_GRANULE_SHIFT<<2
;;
ptr.d r13,r25
;;
mov cr.itir=r25
mov cr.ifa=r13 // VA of "current" task
;;
mov r25=IA64_TR_CURRENT_STACK
;;
itr.d dtr[r25]=r23 // wire in new mapping...
;;
.stack_overlaps:

movl r22=PERCPU_ADDR
;;
movl r25=PAGE_KERNEL
;;
mov r20=loc2 // saved percpu physical address
;;
or r23=r25,r20 // construct PA | page properties
mov r24=PERCPU_PAGE_SHIFT<<2
;;
ptr.d r22,r24
;;
mov cr.itir=r24
mov cr.ifa=r22
;;
mov r25=IA64_TR_PERCPU_DATA
;;
itr.d dtr[r25]=r23 // wire in new mapping...
;;

#if VHPT_ENABLED
movl r22=VHPT_ADDR
;;
movl r25=PAGE_KERNEL
;;
mov r20=loc6 // saved vhpt physical address
;;
or r23=r25,r20 // construct PA | page properties
mov r24=VHPT_PAGE_SHIFT<<2
;;
ptr.d r22,r24
;;
mov cr.itir=r24
mov cr.ifa=r22
;;
mov r25=IA64_TR_VHPT
;;
itr.d dtr[r25]=r23 // wire in new mapping...
;;
#endif

movl r22=SHAREDINFO_ADDR
;;
movl r25=PAGE_KERNEL
;;
mov r20=loc5 // saved sharedinfo physical address
;;
or r23=r25,r20 // construct PA | page properties
mov r24=PAGE_SHIFT<<2
;;
ptr.d r22,r24
;;
mov cr.itir=r24
mov cr.ifa=r22
;;
mov r25=IA64_TR_SHARED_INFO
;;
itr.d dtr[r25]=r23 // wire in new mapping...
;;

// done, switch back to virtual and return
mov r16=loc3 // r16 = original psr
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
mov psr.l = loc3 // restore original PSR

mov ar.pfs = loc1
mov rp = loc0
;;
mov ar.rsc=loc4 // restore RSE configuration
srlz.d // serialize restoration of psr.l
br.ret.sptk.many rp
END(ia64_new_rr7)

#include "minstate.h"
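
// The three ia64_prepare_handle_* stubs below share one pattern: save a
// switch_stack frame, call the C handler (the pt_regs frame was already
// set up in the IVT), reload the switch_stack, and branch via rp to
// ia64_leave_kernel.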
GLOBAL_ENTRY(ia64_prepare_handle_privop)
.prologue
/*
 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
 */
mov r16=r0
DO_SAVE_SWITCH_STACK
br.call.sptk.many rp=ia64_handle_privop // stack frame setup in ivt
.ret22: .body
DO_LOAD_SWITCH_STACK
br.cond.sptk.many rp // goes to ia64_leave_kernel
END(ia64_prepare_handle_privop)

GLOBAL_ENTRY(ia64_prepare_handle_break)
.prologue
/*
 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
 */
mov r16=r0
DO_SAVE_SWITCH_STACK
br.call.sptk.many rp=ia64_handle_break // stack frame setup in ivt
.ret23: .body
DO_LOAD_SWITCH_STACK
br.cond.sptk.many rp // goes to ia64_leave_kernel
END(ia64_prepare_handle_break)

GLOBAL_ENTRY(ia64_prepare_handle_reflection)
.prologue
/*
 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
 */
mov r16=r0
DO_SAVE_SWITCH_STACK
br.call.sptk.many rp=ia64_handle_reflection // stack frame setup in ivt
.ret24: .body
DO_LOAD_SWITCH_STACK
br.cond.sptk.many rp // goes to ia64_leave_kernel
END(ia64_prepare_handle_reflection)
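
// Read a 16-byte instruction bundle from domain memory.
// in0 (r32): address of the bundle
// out: bundle returned in r8/r9; both are zero if either load faults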
GLOBAL_ENTRY(__get_domain_bundle)
EX(.failure_in_get_bundle,ld8 r8=[r32],8)
;;
EX(.failure_in_get_bundle,ld8 r9=[r32])
;;
br.ret.sptk.many rp
;;
.failure_in_get_bundle:
mov r8=0
;;
mov r9=0
;;
br.ret.sptk.many rp
;;
END(__get_domain_bundle)
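
// Load cr.iip, cr.ipsr and cr.ifs from the shared info page, then fall
// through to dorfi, which issues the rfi.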
GLOBAL_ENTRY(dorfirfi)
#define SI_CR_IIP_OFFSET 0x10
#define SI_CR_IPSR_OFFSET 0x08
#define SI_CR_IFS_OFFSET 0x18
movl r16 = SHAREDINFO_ADDR+SI_CR_IIP_OFFSET
movl r17 = SHAREDINFO_ADDR+SI_CR_IPSR_OFFSET
movl r18 = SHAREDINFO_ADDR+SI_CR_IFS_OFFSET
;;
ld8 r16 = [r16]
ld8 r17 = [r17]
ld8 r18 = [r18]
;;
mov cr.iip=r16
mov cr.ipsr=r17
mov cr.ifs=r18
;;
// fall through
END(dorfirfi)

GLOBAL_ENTRY(dorfi)
rfi
;;
END(dorfi)

//
// Long's Peak UART Offsets
//
#define COM_TOP 0xff5e0000
#define COM_BOT 0xff5e2000

// UART offsets
#define UART_TX 0 /* Out: Transmit buffer (DLAB=0) */
#define UART_INT_ENB 1 /* interrupt enable (DLAB=0) */
#define UART_INT_ID 2 /* Interrupt ID register */
#define UART_LINE_CTL 3 /* Line control register */
#define UART_MODEM_CTL 4 /* Modem Control Register */
#define UART_LSR 5 /* In: Line Status Register */
#define UART_MSR 6 /* Modem status register */
#define UART_DLATCH_LOW UART_TX
#define UART_DLATCH_HIGH UART_INT_ENB
#define COM1 0x3f8
#define COM2 0x2F8
#define COM3 0x3E8

/* interrupt enable bits (offset 1) */
#define DATA_AVAIL_INT 1
#define XMIT_HOLD_EMPTY_INT 2
#define LINE_STAT_INT 4
#define MODEM_STAT_INT 8

/* line status bits (offset 5) */
#define REC_DATA_READY 1
#define OVERRUN 2
#define PARITY_ERROR 4
#define FRAMING_ERROR 8
#define BREAK_INTERRUPT 0x10
#define XMIT_HOLD_EMPTY 0x20
#define XMIT_SHIFT_EMPTY 0x40

// Write a single character
// input: r32 = character to be written
// output: none
GLOBAL_ENTRY(longs_peak_putc)
rsm psr.dt
movl r16 = 0x8000000000000000 + COM_TOP + UART_LSR
;;
srlz.i
;;
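
// poll the line status register until the transmit holding register is empty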
.Chk_THRE_p:
ld1.acq r18=[r16]
;;

and r18 = XMIT_HOLD_EMPTY, r18
;;
cmp4.eq p6,p0=0,r18
;;

(p6) br .Chk_THRE_p
;;
movl r16 = 0x8000000000000000 + COM_TOP + UART_TX
;;
st1.rel [r16]=r32
;;
ssm psr.dt
;;
srlz.i
;;
br.ret.sptk.many b0
END(longs_peak_putc)

/* derived from linux/arch/ia64/hp/sim/boot/boot_head.S */
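// Minimal PAL emulation: static calls (index <= 255) are checked against
// the cases at "static" below (PTCE_INFO, FREQ_RATIOS, RSE_INFO, ...);
// stacked calls (index 256-511) and any unhandled index return the
// initial r8=-1 (unimplemented) status.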
GLOBAL_ENTRY(pal_emulator_static)
mov r8=-1
mov r9=256
;;
cmp.gtu p7,p8=r9,r32 /* r32 <= 255? */
(p7) br.cond.sptk.few static
;;
mov r9=512
;;
cmp.gtu p7,p8=r9,r32
(p7) br.cond.sptk.few stacked
;;
static: cmp.eq p7,p8=6,r32 /* PAL_PTCE_INFO */
(p8) br.cond.sptk.few 1f
;;
mov r8=0 /* status = 0 */
movl r9=0x100000000 /* tc.base */
movl r10=0x0000000200000003 /* count[0], count[1] */
movl r11=0x1000000000002000 /* stride[0], stride[1] */
br.ret.sptk.few rp
1: cmp.eq p7,p8=14,r32 /* PAL_FREQ_RATIOS */
(p8) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9 =0x900000002 /* proc_ratio (1/100) */
movl r10=0x100000100 /* bus_ratio<<32 (1/256) */
movl r11=0x900000002 /* itc_ratio<<32 (1/100) */
;;
1: cmp.eq p7,p8=19,r32 /* PAL_RSE_INFO */
(p8) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
mov r9=96 /* num phys stacked */
mov r10=0 /* hints */
mov r11=0
br.ret.sptk.few rp
1: cmp.eq p7,p8=1,r32 /* PAL_CACHE_FLUSH */
(p8) br.cond.sptk.few 1f
#if 0
mov r9=ar.lc
movl r8=524288 /* flush 512K cache lines (16MB) */
;;
mov ar.lc=r8
movl r8=0xe000000000000000
;;
.loop: fc r8
add r8=32,r8
br.cloop.sptk.few .loop
sync.i
;;
srlz.i
;;
mov ar.lc=r9
mov r8=r0
;;
1: cmp.eq p7,p8=15,r32 /* PAL_PERF_MON_INFO */
(p8) br.cond.sptk.few 1f
mov r8=0 /* status = 0 */
movl r9 =0x08122f04 /* generic=4 width=47 retired=8 cycles=18 */
mov r10=0 /* reserved */
mov r11=0 /* reserved */
mov r16=0xffff /* implemented PMC */
mov r17=0x3ffff /* implemented PMD */
add r18=8,r29 /* second index */
;;
st8 [r29]=r16,16 /* store implemented PMC */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r17,16 /* store implemented PMD */
st8 [r18]=r0,16 /* clear remaining bits */
mov r16=0xf0 /* cycles count capable PMC */
;;
st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */
mov r17=0xf0 /* retired bundles capable PMC */
;;
st8 [r29]=r16,16 /* store cycles capable */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r17,16 /* store retired bundle capable */
st8 [r18]=r0,16 /* clear remaining bits */
;;
st8 [r29]=r0,16 /* clear remaining bits */
st8 [r18]=r0,16 /* clear remaining bits */
;;
1: br.cond.sptk.few rp
#else
1:
#endif
stacked:
br.ret.sptk.few rp
END(pal_emulator_static)