ia64/xen-unstable

view xen/arch/ia64/xen/xenasm.S @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 234a7033e949
children 34a84a5306f7
line source
1 /*
2 * Assembly support routines for Xen/ia64
3 *
4 * Copyright (C) 2004 Hewlett-Packard Co
5 * Dan Magenheimer <dan.magenheimer@hp.com>
6 *
7 * Copyright (C) 2007 VA Linux Systems Japan K.K.
8 * Isaku Yamahata <yamahata at valinux co jp>
9 * ia64_copy_rbs()
10 */
12 #include <linux/config.h>
13 #include <asm/asmmacro.h>
14 #include <asm/processor.h>
15 #include <asm/pgtable.h>
16 #include <asm/vhpt.h>
17 #include <asm/asm-xsi-offsets.h>
18 #include <asm/vmmu.h>
19 #include <public/xen.h>
21 // Change rr7 to the passed value while ensuring
22 // Xen is mapped into the new region.
23 #define PSR_BITS_TO_CLEAR \
24 (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
25 IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
26 IA64_PSR_DFL | IA64_PSR_DFH | IA64_PSR_IC)
27 // FIXME? Note that this turns off the DB bit (debug)
28 #define PSR_BITS_TO_SET IA64_PSR_BN
30 //extern void ia64_new_rr7(unsigned long rid, /* in0 */
31 // void *shared_info, /* in1 */
32 // void *shared_arch_info, /* in2 */
33 // unsigned long shared_info_va, /* in3 */
34 // unsigned long va_vhpt) /* in4 */
35 //Local usage:
36 // loc0=rp, loc1=ar.pfs, loc2=percpu_paddr, loc3=psr, loc4=ar.rse
37 // loc5=pal_vaddr, loc6=xen_paddr, loc7=shared_archinfo_paddr,
38 // r16, r19, r20 are used by ia64_switch_mode_{phys, virt}()
39 GLOBAL_ENTRY(ia64_new_rr7)
// Install the RID in in0 into region register 7, then re-pin every
// translation-register mapping Xen depends on (kernel text/data,
// current stack, per-cpu area, VHPT when enabled, shared_info,
// mapped_regs, PAL code).  The rr7 write and all TR inserts are done
// in physical mode with psr.i/ic cleared so no TLB miss can occur
// while region 7 is in flux.  Argument registers are described in the
// prototype comment above; locals are listed in the usage comment.
40 // FIXME? not sure this unwind statement is correct...
41 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(1)
42 alloc loc1 = ar.pfs, 5, 8, 0, 0
43 movl loc2=PERCPU_ADDR
44 1: {
45 mov loc3 = psr // save psr
46 mov loc0 = rp // save rp
47 mov r8 = ip // save ip to compute branch
48 };;
49 .body
// All tpa (translate-to-physical) conversions must happen while the
// old rr7 mapping is still installed, hence the "BEFORE" notes below.
50 tpa loc2=loc2 // grab this BEFORE changing rr7
51 tpa in1=in1 // grab shared_info BEFORE changing rr7
52 adds r8 = 1f-1b,r8 // calculate return address for call
53 ;;
54 tpa loc7=in2 // grab arch_vcpu_info BEFORE chg rr7
55 movl r17=PSR_BITS_TO_SET
56 mov loc4=ar.rsc // save RSE configuration
57 movl r16=PSR_BITS_TO_CLEAR
58 ;;
59 tpa r8=r8 // convert rp to physical
60 mov ar.rsc=0 // put RSE in enforced lazy, LE mode
61 or loc3=loc3,r17 // add in psr the bits to set
62 ;;
63 movl loc5=pal_vaddr // get pal_vaddr
64 ;;
65 ld8 loc5=[loc5] // read pal_vaddr
66 ;;
67 andcm r16=loc3,r16 // removes bits to clear from psr
68 dep loc6=0,r8,0,KERNEL_TR_PAGE_SHIFT // Xen code paddr
69 br.call.sptk.many rp=ia64_switch_mode_phys
70 1:
71 // now in physical mode with psr.i/ic off so do rr7 switch
72 dep r16=-1,r0,61,3 // Note: belong to region 7!
73 ;;
74 mov rr[r16]=in0 // install the new RID for region 7
75 ;;
76 srlz.d // make the rr write visible before any TR insert
77 ;;
78 movl r26=PAGE_KERNEL // page properties shared by the pins below
79 ;;
81 // re-pin mappings for kernel text and data
// Pattern for each pin: ptr purges any stale TC/TR entry for the VA,
// cr.itir/cr.ifa set the page size and VA, then itr wires the new
// physical mapping into a fixed TR slot.
82 mov r24=KERNEL_TR_PAGE_SHIFT<<2
83 movl r17=KERNEL_START
84 ;;
85 ptr.i r17,r24
86 ptr.d r17,r24
87 mov r16=IA64_TR_KERNEL
88 mov cr.itir=r24
89 mov cr.ifa=r17
90 or r18=loc6,r26
91 ;;
92 itr.i itr[r16]=r18
93 ;;
94 itr.d dtr[r16]=r18
95 ;;
97 // re-pin mappings for stack (current)
98 mov r25=IA64_GRANULE_SHIFT<<2
99 dep r21=0,r13,60,4 // physical address of "current"
100 ;;
101 ptr.d r13,r25
102 or r23=r21,r26 // construct PA | page properties
103 mov cr.itir=r25
104 mov cr.ifa=r13 // VA of next task...
105 mov r21=IA64_TR_CURRENT_STACK
106 ;;
107 itr.d dtr[r21]=r23 // wire in new mapping...
109 // Per-cpu
110 mov r24=PERCPU_PAGE_SHIFT<<2
111 movl r22=PERCPU_ADDR
112 ;;
113 ptr.d r22,r24
114 or r23=loc2,r26 // construct PA | page properties
115 mov cr.itir=r24
116 mov cr.ifa=r22
117 mov r25=IA64_TR_PERCPU_DATA
118 ;;
119 itr.d dtr[r25]=r23 // wire in new mapping...
121 // VHPT
122 #if VHPT_ENABLED
123 #if IA64_GRANULE_SHIFT < VHPT_SIZE_LOG2
124 #error "it must be that VHPT_SIZE_LOG2 <= IA64_GRANULE_SHIFT"
125 #endif
126 // unless overlaps with IA64_TR_CURRENT_STACK
// Skip the VHPT pin when va_vhpt lives in the same granule as
// "current" (r13): that granule is already covered by the stack TR.
127 dep r15=0,in4,0,IA64_GRANULE_SHIFT
128 dep r21=0,r13,0,IA64_GRANULE_SHIFT
129 ;;
130 cmp.eq p8,p0=r15,r21
131 (p8) br.cond.sptk .vhpt_overlaps
132 mov r21=IA64_TR_VHPT
133 dep r22=0,r15,60,4 // physical address of
134 // va_vhpt & ~(IA64_GRANULE_SIZE - 1)
135 mov r24=IA64_GRANULE_SHIFT<<2
136 ;;
137 ptr.d r15,r24
138 or r23=r22,r26 // construct PA | page properties
139 mov cr.itir=r24
140 mov cr.ifa=r15
141 srlz.d
142 ;;
143 itr.d dtr[r21]=r23 // wire in new mapping...
144 .vhpt_overlaps:
145 #endif
147 // Shared info
// Shared-info and mapped_regs use writable privileged-level
// protections (r25) rather than PAGE_KERNEL (r26).
148 mov r24=XSI_SHIFT<<2
149 movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
150 ;;
151 ptr.d in3,r24
152 or r23=in1,r25 // construct PA | page properties
153 mov cr.itir=r24
154 mov cr.ifa=in3
155 mov r21=IA64_TR_SHARED_INFO
156 ;;
157 itr.d dtr[r21]=r23 // wire in new mapping...
159 // Map mapped_regs
160 mov r22=XMAPPEDREGS_OFS
161 mov r24=XMAPPEDREGS_SHIFT<<2
162 ;;
163 add r22=r22,in3 // mapped_regs VA = shared_info_va + offset
164 ;;
165 ptr.d r22,r24
166 or r23=loc7,r25 // construct PA | page properties
167 mov cr.itir=r24
168 mov cr.ifa=r22
169 mov r21=IA64_TR_MAPPED_REGS
170 ;;
171 itr.d dtr[r21]=r23 // wire in new mapping...
173 // Purge/insert PAL TR
174 mov r24=IA64_TR_PALCODE
175 mov r23=IA64_GRANULE_SHIFT<<2
176 dep r25=0,loc5,60,4 // convert pal vaddr to paddr
177 ;;
178 ptr.i loc5,r23
179 or r25=r25,r26 // construct PA | page properties
180 mov cr.itir=r23
181 mov cr.ifa=loc5
182 ;;
183 itr.i itr[r24]=r25
185 // done, switch back to virtual and return
186 mov r16=loc3 // r16= original psr
187 br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
188 mov psr.l = loc3 // restore init PSR
190 mov ar.pfs = loc1
191 mov rp = loc0
192 ;;
193 mov ar.rsc=loc4 // restore RSE configuration
194 srlz.d // serialize restoration of psr.l
195 br.ret.sptk.many rp
196 END(ia64_new_rr7)
198 #if 0 /* Not used */
// NOTE: everything down to the matching #endif is compiled out.  The
// three stubs below wrapped the C fault handlers with a switch_stack
// save/restore; they are kept only for reference.
199 #include "minstate.h"
201 GLOBAL_ENTRY(ia64_prepare_handle_privop)
202 .prologue
203 /*
204 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
205 */
206 mov r16=r0
207 DO_SAVE_SWITCH_STACK
208 br.call.sptk.many rp=ia64_handle_privop // stack frame setup in ivt
209 .ret22: .body
210 DO_LOAD_SWITCH_STACK
211 br.cond.sptk.many rp // goes to ia64_leave_kernel
212 END(ia64_prepare_handle_privop)
214 GLOBAL_ENTRY(ia64_prepare_handle_break)
215 .prologue
216 /*
217 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
218 */
219 mov r16=r0
220 DO_SAVE_SWITCH_STACK
221 br.call.sptk.many rp=ia64_handle_break // stack frame setup in ivt
222 .ret23: .body
223 DO_LOAD_SWITCH_STACK
224 br.cond.sptk.many rp // goes to ia64_leave_kernel
225 END(ia64_prepare_handle_break)
227 GLOBAL_ENTRY(ia64_prepare_handle_reflection)
228 .prologue
229 /*
230 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
231 */
232 mov r16=r0
233 DO_SAVE_SWITCH_STACK
234 br.call.sptk.many rp=ia64_handle_reflection // stack frame setup in ivt
235 .ret24: .body
236 DO_LOAD_SWITCH_STACK
237 br.cond.sptk.many rp // goes to ia64_leave_kernel
238 END(ia64_prepare_handle_reflection)
239 #endif
241 GLOBAL_ENTRY(__get_domain_bundle)
// Fetch the 16-byte instruction bundle at the address in r32 into
// r8 (first 8 bytes) and r9 (second 8 bytes).  EX() registers an
// exception-table fixup, so a faulting load branches to
// .failure_in_get_bundle instead of taking the fault path.
242 EX(.failure_in_get_bundle,ld8 r8=[r32],8)
243 ;;
244 EX(.failure_in_get_bundle,ld8 r9=[r32])
245 ;;
246 br.ret.sptk.many rp
247 ;;
248 .failure_in_get_bundle:
// Fault path: report failure by returning an all-zero bundle
// (r8 = r9 = 0).
249 mov r8=0
250 ;;
251 mov r9=0
252 ;;
253 br.ret.sptk.many rp
254 ;;
255 END(__get_domain_bundle)
257 /* derived from linux/arch/ia64/hp/sim/boot/boot_head.S */
258 GLOBAL_ENTRY(pal_emulator_static)
// Minimal PAL-call emulator (derived from the Linux simulator boot
// code, see comment above).  r32 holds the PAL procedure index:
// indices < 256 use the static convention, < 512 the stacked one.
// Default return status is r8 = -1 (procedure not handled); each
// recognized index overwrites r8..r11 with canned results.
259 mov r8=-1
260 mov r9=256
261 ;;
262 cmp.gtu p7,p8=r9,r32 /* r32 <= 255? */
263 (p7) br.cond.sptk.few static
264 ;;
265 mov r9=512
266 ;;
267 cmp.gtu p7,p8=r9,r32
268 (p7) br.cond.sptk.few stacked
269 ;;
// Each "1:" label below is the fall-through test for the next index.
270 static: cmp.eq p7,p8=6,r32 /* PAL_PTCE_INFO */
271 (p8) br.cond.sptk.few 1f
272 ;;
273 mov r8=0 /* status = 0 */
274 movl r9=0x100000000 /* tc.base */
275 movl r10=0x0000000200000003 /* count[0], count[1] */
276 movl r11=0x1000000000002000 /* stride[0], stride[1] */
277 br.ret.sptk.few rp
278 1: cmp.eq p7,p8=14,r32 /* PAL_FREQ_RATIOS */
279 (p8) br.cond.sptk.few 1f
280 mov r8=0 /* status = 0 */
281 movl r9 =0x900000002 /* proc_ratio (1/100) */
282 movl r10=0x100000100 /* bus_ratio<<32 (1/256) */
283 movl r11=0x900000002 /* itc_ratio<<32 (1/100) */
284 ;;
285 1: cmp.eq p7,p8=19,r32 /* PAL_RSE_INFO */
286 (p8) br.cond.sptk.few 1f
287 mov r8=0 /* status = 0 */
288 mov r9=96 /* num phys stacked */
289 mov r10=0 /* hints */
290 mov r11=0
291 br.ret.sptk.few rp
292 1: cmp.eq p7,p8=1,r32 /* PAL_CACHE_FLUSH */
293 (p8) br.cond.sptk.few 1f
294 #if 0
// Disabled: a manual flush loop plus a PAL_PERF_MON_INFO emulation
// that stored PMC/PMD masks through r29.  Compiled out; the #else
// branch below just falls through to "stacked".
295 mov r9=ar.lc
296 movl r8=524288 /* flush 512k million cache lines (16MB) */
297 ;;
298 mov ar.lc=r8
299 movl r8=0xe000000000000000
300 ;;
301 .loop: fc r8
302 add r8=32,r8
303 br.cloop.sptk.few .loop
304 sync.i
305 ;;
306 srlz.i
307 ;;
308 mov ar.lc=r9
309 mov r8=r0
310 ;;
311 1: cmp.eq p7,p8=15,r32 /* PAL_PERF_MON_INFO */
312 (p8) br.cond.sptk.few 1f
313 mov r8=0 /* status = 0 */
314 movl r9 =0x08122f04 /* generic=4 width=47 retired=8
315 * cycles=18
316 */
317 mov r10=0 /* reserved */
318 mov r11=0 /* reserved */
319 mov r16=0xffff /* implemented PMC */
320 mov r17=0x3ffff /* implemented PMD */
321 add r18=8,r29 /* second index */
322 ;;
323 st8 [r29]=r16,16 /* store implemented PMC */
324 st8 [r18]=r0,16 /* clear remaining bits */
325 ;;
326 st8 [r29]=r0,16 /* clear remaining bits */
327 st8 [r18]=r0,16 /* clear remaining bits */
328 ;;
329 st8 [r29]=r17,16 /* store implemented PMD */
330 st8 [r18]=r0,16 /* clear remaining bits */
331 mov r16=0xf0 /* cycles count capable PMC */
332 ;;
333 st8 [r29]=r0,16 /* clear remaining bits */
334 st8 [r18]=r0,16 /* clear remaining bits */
335 mov r17=0xf0 /* retired bundles capable PMC */
336 ;;
337 st8 [r29]=r16,16 /* store cycles capable */
338 st8 [r18]=r0,16 /* clear remaining bits */
339 ;;
340 st8 [r29]=r0,16 /* clear remaining bits */
341 st8 [r18]=r0,16 /* clear remaining bits */
342 ;;
343 st8 [r29]=r17,16 /* store retired bundle capable */
344 st8 [r18]=r0,16 /* clear remaining bits */
345 ;;
346 st8 [r29]=r0,16 /* clear remaining bits */
347 st8 [r18]=r0,16 /* clear remaining bits */
348 ;;
349 1: br.cond.sptk.few rp
350 #else
351 1:
352 #endif
353 stacked:
// Stacked-convention calls are not emulated; return with whatever
// status is in r8 (still -1 unless a static case above set it).
354 br.ret.sptk.few rp
355 END(pal_emulator_static)
357 // void ia64_copy_rbs(unsigned long* dst_bspstore, unsigned long* dst_rbs_size,
358 // unsigned long* dst_rnat_p,
359 // unsigned long* src_bsp, unsigned long src_rbs_size,
360 // unsigned long src_rnat);
361 // Caller must mask interruptions.
362 // Caller must ensure that src_rbs_size isn't larger than the number
363 // of physical stacked registers; otherwise loadrs faults with an
364 // Illegal Operation fault, resulting in a panic.
365 //
366 // r14 = r32 = dst_bspstore
367 // r15 = r33 = dst_rbs_size_p
368 // r16 = r34 = dst_rnat_p
369 // r17 = r35 = src_bsp
370 // r18 = r36 = src_rbs_size
371 // r19 = r37 = src_rnat
372 //
373 // r20 = saved ar.rsc
374 // r21 = saved ar.bspstore
375 //
376 // r22 = saved_ar_rnat
377 // r23 = saved_ar_rp
378 // r24 = saved_ar_pfs
379 //
380 // we save the value in this register and store it into [dst_rbs_size_p] and
381 // [dst_rnat_p] after the RSE operation is done.
382 // r30 = return value of __ia64_copy_rbs to ia64_copy_to_rbs = dst_rbs_size
383 // r31 = return value of __ia64_copy_rbs to ia64_copy_to_rbs = dst_rnat
384 //
385 #define dst_bspstore r14
386 #define dst_rbs_size_p r15
387 #define dst_rnat_p r16
388 #define src_bsp r17
389 #define src_rbs_size r18
390 #define src_rnat r19
392 #define saved_ar_rsc r20
393 #define saved_ar_bspstore r21
394 #define saved_ar_rnat r22
395 #define saved_rp r23
396 #define saved_ar_pfs r24
398 #define dst_rbs_size r30
399 #define dst_rnat r31
400 ENTRY(__ia64_copy_rbs)
// Core of the RBS copy.  Called with cfm.{sof,sol,sor,rrb}=0 (see
// ia64_copy_rbs below), so loadrs/flushrs operate on exactly the
// frames described by src_rbs_size.  Sequence: flush the current
// register stack, retarget the backing store at src_bsp, pull the
// source frames into the stacked registers with loadrs, retarget at
// dst_bspstore and flush them out again.  Results are left in the
// static registers dst_rbs_size (r30) and dst_rnat (r31); register
// aliases are the #defines above this routine.
401 .prologue
402 .fframe 0
404 // Here cfm.{sof, sol, sor, rrb}=0
405 //
406 // flush current register stack to backing store
407 {
408 flushrs // must be first insn in group
409 srlz.i
410 }
412 // switch to enforced lazy mode
// ar.rsc = 0 stops the RSE from spilling/filling on its own while we
// swap ar.bspstore underneath it.
413 mov saved_ar_rsc = ar.rsc
414 ;;
415 mov ar.rsc = 0
416 ;;
418 .save ar.bspstore, saved_ar_bspstore
419 mov saved_ar_bspstore = ar.bspstore
420 .save ar.rnat, saved_ar_rnat
421 mov saved_ar_rnat = ar.rnat
422 ;;
424 .body
425 // load from src
426 mov ar.bspstore = src_bsp
427 ;;
428 mov ar.rnat = src_rnat
429 shl src_rbs_size = src_rbs_size,16 // position size in ar.rsc.loadrs field
430 ;;
431 mov ar.rsc = src_rbs_size
432 ;;
433 {
434 loadrs // must be first insn in group
435 ;;
436 }
438 // flush to dst
439 mov ar.bspstore = dst_bspstore
440 ;;
441 {
442 flushrs // must be first insn in group
443 srlz.i
444 }
445 ;;
446 mov dst_rbs_size = ar.bsp
447 mov dst_rnat = ar.rnat
448 ;;
449 sub dst_rbs_size = dst_rbs_size, dst_bspstore // bytes actually written to dst
451 // switch back to the original backing store
452 .restorereg ar.bspstore
453 mov ar.bspstore = saved_ar_bspstore
454 ;;
455 .restorereg ar.rnat
456 mov ar.rnat = saved_ar_rnat
457 ;;
458 // restore rsc
459 mov ar.rsc = saved_ar_rsc
461 ;;
462 br.ret.sptk.many rp
463 END(__ia64_copy_rbs)
465 GLOBAL_ENTRY(ia64_copy_rbs)
// Public entry for the RBS copy (C prototype in the comment above the
// #defines).  Copies in0-in5 into static registers, then calls
// __ia64_copy_rbs with an empty frame so the callee sees
// cfm.{sof,sol,sor,rrb}=0, and finally stores the callee's results
// through the dst_rbs_size_p / dst_rnat_p pointers.  The caller-side
// contract (interrupts masked, src_rbs_size bounded by the number of
// physical stacked registers) is documented above.
466 .prologue
467 .fframe 0
468 .save ar.pfs, saved_ar_pfs
469 alloc saved_ar_pfs = ar.pfs, 6, 0, 0, 0
470 .save.b 0x1, saved_rp
471 mov saved_rp = rp
473 .body
474 // we play with register backing store so that we can't use
475 // stacked registers.
476 // save in0-in5 to static scratch registers
477 mov dst_bspstore = r32
478 mov dst_rbs_size_p = r33
479 mov dst_rnat_p = r34
480 mov src_bsp = r35
481 mov src_rbs_size = r36
482 mov src_rnat = r37
483 ;;
484 // set cfm.{sof, sol, sor, rrb}=0 to avoid nasty stacked register
485 // issues related to cover by calling void __ia64_copy_rbs(void).
486 // cfm.{sof, sol, sor, rrb}=0 makes things easy.
487 br.call.sptk.many rp = __ia64_copy_rbs
// __ia64_copy_rbs returned its results in static regs r30/r31
// (dst_rbs_size / dst_rnat); publish them to the caller's pointers.
489 st8 [dst_rbs_size_p] = dst_rbs_size
490 st8 [dst_rnat_p] = dst_rnat
492 .restorereg ar.pfs
493 mov ar.pfs = saved_ar_pfs
494 .restorereg rp
495 mov rp = saved_rp
496 ;;
497 br.ret.sptk.many rp
498 END(ia64_copy_rbs)