ia64/linux-2.6.18-xen.hg

arch/ia64/xen/xenentry.S @ 912:dd42cdb0ab89

[IA64] Build blktap2 driver by default in ia64 builds.

add CONFIG_XEN_BLKDEV_TAP2=y to buildconfigs/linux-defconfig_xen_ia64.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 12:09:16 2009 +0900 (2009-06-29)
parents a533be77c572
/*
 * ia64/xen/entry.S
 *
 * Alternate kernel routines for Xen.  Heavily leveraged from
 * ia64/kernel/entry.S
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 */

#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

#ifdef CONFIG_XEN
#include "xenminstate.h"
#else
#include "minstate.h"
#endif

/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 *	With Ingo's new scheduler, interrupts are disabled when this routine gets
 *	called.  The code starting at .map relies on this.  The rest of the code
 *	doesn't care about the interrupt masking status.
 */
#ifdef CONFIG_XEN
GLOBAL_ENTRY(xen_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	movl r22=running_on_xen;;
	ld4 r22=[r22];;
	cmp.eq p7,p0=r22,r0
(p7)	br.cond.sptk.many __ia64_switch_to;;
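	// (Every xen_* entry point in this file opens with this check:
	// when the running_on_xen flag is zero, it branches to the
	// corresponding native __ia64_* implementation.)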
#else
GLOBAL_ENTRY(ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
#endif
	DO_SAVE_SWITCH_STACK
	.body

	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
	movl r25=init_task
	mov r27=IA64_KR(CURRENT_STACK)
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	dep r20=0,in0,61,3		// physical address of "next"
	;;
	st8 [r22]=sp			// save kernel stack pointer of old task
	shr.u r26=r20,IA64_GRANULE_SHIFT
	cmp.eq p7,p6=r25,in0
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it again.
	 */
(p6)	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt .map
	;;
.done:
	ld8 sp=[r21]			// load kernel stack pointer of new task
#ifdef CONFIG_XEN
	// update "current" application register
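	// Writing a kernel register is a privileged operation under Xen,
	// so pass the register index in r8 and the new value in r9 to
	// the SET_KR hyperprivop instead.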
	mov r8=IA64_KR_CURRENT
	mov r9=in0;;
	XEN_HYPER_SET_KR
#else
	mov IA64_KR(CURRENT)=in0	// update "current" application register
#endif
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
	DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
#endif
	br.ret.sptk.many rp		// boogie on out in new context

.map:
#ifdef CONFIG_XEN
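	// psr.ic is virtualized: XSI_PSR_IC is the vpsr.ic field in the
	// shared XSI area, so a plain store stands in for the privileged
	// rsm psr.ic used in the native path below.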
	movl r25=XSI_PSR_IC		// clear psr.ic
	;;
	st4 [r25]=r0
	;;
#else
	rsm psr.ic			// interrupts (psr.i) are already disabled here
#endif
	movl r25=PAGE_KERNEL
	;;
	srlz.d
	or r23=r25,r20			// construct PA | page properties
	mov r25=IA64_GRANULE_SHIFT<<2
	;;
#ifdef CONFIG_XEN
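	// Likewise, the cr.itir and cr.ifa writes become stores into
	// their XSI save-area slots, presumably picked up by the
	// hypervisor when it handles the itr.d insertion below.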
	movl r8=XSI_ITIR
	;;
	st8 [r8]=r25
	;;
	movl r8=XSI_IFA
	;;
	st8 [r8]=in0			// VA of next task...
	;;
	mov r25=IA64_TR_CURRENT_STACK
	// remember last page we mapped...
	mov r8=IA64_KR_CURRENT_STACK
	mov r9=r26;;
	XEN_HYPER_SET_KR;;
#else
	mov cr.itir=r25
	mov cr.ifa=in0			// VA of next task...
	;;
	mov r25=IA64_TR_CURRENT_STACK
	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
#endif
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
#ifdef CONFIG_XEN
	;;
	srlz.d
	mov r9=1
	movl r8=XSI_PSR_IC
	;;
	st4 [r8]=r9
	;;
#else
	ssm psr.ic			// reenable the psr.ic bit
	;;
	srlz.d
#endif
	br.cond.sptk .done
#ifdef CONFIG_XEN
END(xen_switch_to)
#else
END(ia64_switch_to)
#endif

/*
 * Invoke a system call, but do some tracing before and after the call.
 * We MUST preserve the current register frame throughout this routine
 * because some system calls (such as ia64_execve) directly
 * manipulate ar.pfs.
 */
#ifdef CONFIG_XEN
GLOBAL_ENTRY(xen_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
	movl r16=running_on_xen;;
	ld4 r16=[r16];;
	cmp.eq p7,p0=r16,r0
(p7)	br.cond.sptk.many __ia64_trace_syscall;;
#else
GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
#endif
	/*
	 * We need to preserve the scratch registers f6-f11 in case the system
	 * call is sigreturn.
	 */
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	ldf.fill f6=[r16],32
	ldf.fill f7=[r17],32
	;;
	ldf.fill f8=[r16],32
	ldf.fill f9=[r17],32
	;;
	ldf.fill f10=[r16]
	ldf.fill f11=[r17]
	// the syscall number may have changed, so re-load it and re-calculate the
	// syscall entry-point:
	adds r15=PT(R15)+16,sp		// r15 = &pt_regs.r15 (syscall #)
	;;
	ld8 r15=[r15]
	mov r3=NR_syscalls - 1
	;;
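	// ia64 syscall numbers start at 1024; after subtracting the base,
	// the unsigned cmp.leu below sends both too-small numbers (which
	// wrap to huge values) and too-large ones to sys_ni_syscall.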
	adds r15=-1024,r15
	movl r16=sys_call_table
	;;
	shladd r20=r15,3,r16		// r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3
	;;
(p6)	ld8 r20=[r20]			// load address of syscall entry point
(p7)	movl r20=sys_ni_syscall
	;;
	mov b6=r20
	br.call.sptk.many rp=b6		// do the syscall
.strace_check_retval:
	cmp.lt p6,p0=r8,r0		// syscall failed?
	adds r2=PT(R8)+16,sp		// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp		// r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error	// syscall failed ->
	;;				// avoid RAW on r10
.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8	// store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10	// clear error indication in slot for r10
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3:
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
	br.cond.sptk .work_pending_syscall_end

strace_error:
	ld8 r3=[r2]			// load pt_regs.r8
	sub r9=0,r8			// negate return value to get errno value
	;;
	cmp.ne p6,p0=r3,r0		// is pt_regs.r8!=0?
	adds r3=16,r2			// r3=&pt_regs.r10
	;;
(p6)	mov r10=-1
(p6)	mov r8=r9
	br.cond.sptk .strace_save_retval
#ifdef CONFIG_XEN
END(xen_trace_syscall)
#else
END(ia64_trace_syscall)
#endif

#ifdef CONFIG_XEN
GLOBAL_ENTRY(xen_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
	movl r16=running_on_xen;;
	ld4 r16=[r16];;
	cmp.eq p7,p0=r16,r0
(p7)	br.cond.sptk.many __ia64_ret_from_clone;;
#else
GLOBAL_ENTRY(ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
#endif
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret8:
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]
	;;
	mov r8=0
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2
	;;
	cmp.ne p6,p0=r2,r0
(p6)	br.cond.spnt .strace_check_retval
	;;				// added stop bits to prevent r8 dependency
#ifdef CONFIG_XEN
	br.cond.sptk ia64_ret_from_syscall
END(xen_ret_from_clone)
#else
END(ia64_ret_from_clone)
#endif
/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 *	need to switch to bank 0 and doesn't restore the scratch registers.
 *	To avoid leaking kernel bits, the scratch registers are set to
 *	the following known-to-be-safe values:
 *
 *	r1: restored (global pointer)
 *	r2: cleared
 *	r3: 1 (when returning to user-level)
 *	r8-r11: restored (syscall return value(s))
 *	r12: restored (user-level stack pointer)
 *	r13: restored (user-level thread pointer)
 *	r14: set to __kernel_syscall_via_epc
 *	r15: restored (syscall #)
 *	r16-r17: cleared
 *	r18: user-level b6
 *	r19: cleared
 *	r20: user-level ar.fpsr
 *	r21: user-level b0
 *	r22: cleared
 *	r23: user-level ar.bspstore
 *	r24: user-level ar.rnat
 *	r25: user-level ar.unat
 *	r26: user-level ar.pfs
 *	r27: user-level ar.rsc
 *	r28: user-level ip
 *	r29: user-level psr
 *	r30: user-level cfm
 *	r31: user-level pr
 *	f6-f11: cleared
 *	pr: restored (user-level pr)
 *	b0: restored (user-level rp)
 *	b6: restored
 *	b7: set to __kernel_syscall_via_epc
 *	ar.unat: restored (user-level ar.unat)
 *	ar.pfs: restored (user-level ar.pfs)
 *	ar.rsc: restored (user-level ar.rsc)
 *	ar.rnat: restored (user-level ar.rnat)
 *	ar.bspstore: restored (user-level ar.bspstore)
 *	ar.fpsr: restored (user-level ar.fpsr)
 *	ar.ccv: cleared
 *	ar.csd: cleared
 *	ar.ssd: cleared
 */
#ifdef CONFIG_XEN
GLOBAL_ENTRY(xen_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
	movl r22=running_on_xen;;
	ld4 r22=[r22];;
	cmp.eq p7,p0=r22,r0
(p7)	br.cond.sptk.many __ia64_leave_syscall;;
#else
ENTRY(ia64_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
#endif
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	rsm psr.i				// disable interrupts
	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]				// r21 <- preempt_count
(pUStk)	mov r21=0				// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0			// p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
#ifdef CONFIG_XEN
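	// psr.i is virtualized as a byte-sized mask flag whose address is
	// published at XSI_PSR_I_ADDR; storing 1 marks virtual interrupts
	// disabled, standing in for the native rsm psr.i below.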
	movl r2=XSI_PSR_I_ADDR
	mov r18=1
	;;
	ld8 r2=[r2]
	;;
(pUStk)	st1 [r2]=r18
#else
(pUStk)	rsm psr.i
#endif
	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
(pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
#endif
.work_processed_syscall:
	adds r2=PT(LOADRS)+16,r12
	adds r3=PT(AR_BSPSTORE)+16,r12
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
	nop.i 0
	;;
	mov r16=ar.bsp				// M2  get existing backing store pointer
	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
(p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
	;;
	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
(p6)	cmp4.ne.unc p6,p0=r15,r0		// any special work pending?
(p6)	br.cond.spnt .work_pending_syscall
	;;
	// start restoring the state saved on the kernel stack (struct pt_regs):
	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
(pNonSys) break 0				// bug check: we shouldn't be here if pNonSys is TRUE!
	;;
	invala					// M0|1 invalidate ALAT
#ifdef CONFIG_XEN
	movl r28=XSI_PSR_I_ADDR
	movl r29=XSI_PSR_IC
	;;
	ld8 r28=[r28]
	mov r30=1
	;;
	st1 [r28]=r30
	st4 [r29]=r0				// note: clears both vpsr.i and vpsr.ic!
	;;
#else
	rsm psr.i | psr.ic			// M2 turn off interrupts and interruption collection
#endif
	cmp.eq p9,p0=r0,r0			// A set p9 to indicate that we should restore cr.ifs

	ld8 r29=[r2],16				// M0|1 load cr.ipsr
	ld8 r28=[r3],16				// M0|1 load cr.iip
	mov r22=r0				// A    clear r22
	;;
	ld8 r30=[r2],16				// M0|1 load cr.ifs
	ld8 r25=[r3],16				// M0|1 load ar.unat
(pUStk)	add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
	ld8 r26=[r2],PT(B0)-PT(AR_PFS)		// M0|1 load ar.pfs
#ifdef CONFIG_XEN
(pKStk)	mov r21=r8
(pKStk)	XEN_HYPER_GET_PSR
	;;
(pKStk)	mov r22=r8
(pKStk)	mov r8=r21
	;;
#else
(pKStk)	mov r22=psr				// M2   read PSR now that interrupts are disabled
#endif
	nop 0
	;;
	ld8 r21=[r2],PT(AR_RNAT)-PT(B0)		// M0|1 load b0
	ld8 r27=[r3],PT(PR)-PT(AR_RSC)		// M0|1 load ar.rsc
	mov f6=f0				// F    clear f6
	;;
	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// M0|1 load ar.rnat (may be garbage)
	ld8 r31=[r3],PT(R1)-PT(PR)		// M0|1 load predicates
	mov f7=f0				// F    clear f7
	;;
	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// M0|1 load ar.fpsr
	ld8.fill r1=[r3],16			// M0|1 load r1
(pUStk)	mov r17=1				// A
	;;
(pUStk)	st1 [r14]=r17				// M2|3
	ld8.fill r13=[r3],16			// M0|1
	mov f8=f0				// F    clear f8
	;;
	ld8.fill r12=[r2]			// M0|1 restore r12 (sp)
	ld8.fill r15=[r3]			// M0|1 restore r15
	mov b6=r18				// I0   restore b6

	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
	mov f9=f0				// F    clear f9
(pKStk)	br.cond.dpnt.many skip_rbs_switch	// B

	srlz.d					// M0   ensure interruption collection is off (for cover)
	shr.u r18=r19,16			// I0|1 get byte size of existing "dirty" partition
#ifdef CONFIG_XEN
	XEN_HYPER_COVER;
#else
	cover					// B    add current frame into dirty partition & set cr.ifs
#endif
	;;
(pUStk)	ld4 r17=[r17]				// M0|1 r17 = cpu_data->phys_stacked_size_p8
	mov r19=ar.bsp				// M2   get new backing store pointer
	mov f10=f0				// F    clear f10

	nop.m 0
	movl r14=__kernel_syscall_via_epc	// X
	;;
	mov.m ar.csd=r0				// M2   clear ar.csd
	mov.m ar.ccv=r0				// M2   clear ar.ccv
	mov b7=r14				// I0   clear b7 (hint with __kernel_syscall_via_epc)

	mov.m ar.ssd=r0				// M2   clear ar.ssd
	mov f11=f0				// F    clear f11
	br.cond.sptk.many rbs_switch		// B
#ifdef CONFIG_XEN
END(xen_leave_syscall)
#else
END(ia64_leave_syscall)
#endif

#ifdef CONFIG_XEN
GLOBAL_ENTRY(xen_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	movl r22=running_on_xen;;
	ld4 r22=[r22];;
	cmp.eq p7,p0=r22,r0
(p7)	br.cond.sptk.many __ia64_leave_kernel;;
#else
GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
#endif
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	rsm psr.i				// disable interrupts
	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]				// r21 <- preempt_count
(pUStk)	mov r21=0				// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0			// p6 <- pUStk || (preempt_count == 0)
#else
#ifdef CONFIG_XEN
(pUStk)	movl r17=XSI_PSR_I_ADDR
(pUStk)	mov r31=1
	;;
(pUStk)	ld8 r17=[r17]
	;;
(pUStk)	st1 [r17]=r31
	;;
#else
(pUStk)	rsm psr.i
#endif
	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
(pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
#endif
.work_processed_kernel:
	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r17]				// load current_thread_info()->flags
	adds r21=PT(PR)+16,r12
	;;

	lfetch [r21],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	;;
	lfetch [r21]
	ld8 r28=[r2],8				// load b6
	adds r29=PT(R24)+16,r12

	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
	adds r30=PT(AR_CCV)+16,r12
(p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
	;;
	ld8.fill r24=[r29]
	ld8 r15=[r30]				// load ar.ccv
(p6)	cmp4.ne.unc p6,p0=r19,r0		// any special work pending?
	;;
	ld8 r29=[r2],16				// load b7
	ld8 r30=[r3],16				// load ar.csd
(p6)	br.cond.spnt .work_pending
	;;
	ld8 r31=[r2],16				// load ar.ssd
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
#ifdef CONFIG_XEN
	movl r23=XSI_PSR_I_ADDR
	movl r22=XSI_PSR_IC
	;;
	ld8 r23=[r23]
	mov r25=1
	;;
	st1 [r23]=r25
	st4 [r22]=r0				// note: clears both vpsr.i and vpsr.ic!
	;;
#else
	rsm psr.i | psr.ic			// initiate turning off of interrupt and interruption collection
#endif
	invala					// invalidate ALAT
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],PT(F9)-PT(R31)
	adds r3=PT(F10)-PT(F6),r3
	;;
	ldf.fill f9=[r2],PT(F6)-PT(F9)
	ldf.fill f10=[r3],PT(F8)-PT(F10)
	;;
	ldf.fill f6=[r2],PT(F7)-PT(F6)
	;;
	ldf.fill f7=[r2],PT(F11)-PT(F7)
	ldf.fill f8=[r3],32
	;;
	srlz.d					// ensure that inter. collection is off (VHPT is don't care, since text is pinned)
	mov ar.ccv=r15
	;;
	ldf.fill f11=[r2]
#ifdef CONFIG_XEN
	;;
	// r16-r31 all now hold bank1 values
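	// bsw.0 is privileged under Xen.  Instead, spill the bank 1
	// registers into the XSI_BANK1_R16 save area (their collected
	// NaT bits go to XSI_B1NAT via ar.unat, which itself is preserved
	// in r15), then tell the hypervisor we are on bank 0 by zeroing
	// XSI_BANKNUM.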
	mov r15=ar.unat
	movl r2=XSI_BANK1_R16
	movl r3=XSI_BANK1_R16+8
	;;
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
	;;
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
	;;
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
	;;
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
	;;
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
	;;
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
	;;
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
	;;
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,16
	;;
	mov r3=ar.unat
	movl r2=XSI_B1NAT
	;;
	st8 [r2]=r3
	mov ar.unat=r15
	movl r2=XSI_BANKNUM;;
	st4 [r2]=r0;
#else
	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
#endif
	;;
(pUStk)	mov r18=IA64_KR(CURRENT)	// M2 (12 cycle read latency)
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12

#ifdef CONFIG_XEN
(pKStk)	mov r29=r8
(pKStk)	XEN_HYPER_GET_PSR
	;;
(pKStk)	mov r22=r8
(pKStk)	mov r8=r29
	;;
#else
(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
#endif
	nop.i 0
	nop.i 0
	;;
	ld8 r29=[r16],16	// load cr.ipsr
	ld8 r28=[r17],16	// load cr.iip
	;;
	ld8 r30=[r16],16	// load cr.ifs
	ld8 r25=[r17],16	// load ar.unat
	;;
	ld8 r26=[r16],16	// load ar.pfs
	ld8 r27=[r17],16	// load ar.rsc
	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
	;;
	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
	;;
	ld8 r31=[r16],16	// load predicates
	ld8 r21=[r17],16	// load b0
	;;
	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
	;;
	ld8 r20=[r16],16	// ar.fpsr
	ld8.fill r15=[r17],16
	;;
	ld8.fill r14=[r16],16
	ld8.fill r2=[r17]
(pUStk)	mov r17=1
	;;
	ld8.fill r3=[r16]
(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
	;;
	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
(pKStk)	br.cond.dpnt skip_rbs_switch

	/*
	 * Restore user backing store.
	 *
	 * NOTE: alloc, loadrs, and cover can't be predicated.
	 */
(pNonSys) br.cond.dpnt dont_preserve_current_frame

#ifdef CONFIG_XEN
	XEN_HYPER_COVER;
#else
	cover			// add current frame into dirty partition and set cr.ifs
#endif
	;;
	mov r19=ar.bsp		// get new backing store pointer
rbs_switch:
	sub r16=r16,r18		// krbs = old bsp - size of dirty partition
	cmp.ne p9,p0=r0,r0	// clear p9 to skip restore of cr.ifs
	;;
	sub r19=r19,r16		// calculate total byte size of dirty partition
	add r18=64,r18		// don't force in0-in7 into memory...
	;;
	shl r19=r19,16		// shift size of dirty partition into loadrs position
	;;
dont_preserve_current_frame:
	/*
	 * To prevent leaking bits between the kernel and user-space,
	 * we must clear the stacked registers in the "invalid" partition here.
	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
	 * 5 registers/cycle on McKinley).
	 */
#	define pRecurse	p6
#	define pReturn	p7
#ifdef CONFIG_ITANIUM
#	define Nregs	10
#else
#	define Nregs	14
#endif
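	// Each recursion step allocates Nregs stacked registers as locals
	// and zeroes them: out0 carries the number of bytes still to
	// clear into the recursive call, out1 counts the recursion depth,
	// and the matching chain of (pReturn) br.ret instructions unwinds
	// it once fewer than Nregs*8 bytes remain.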
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9		// RNaTslots <= floor(dirtySize / (64*8))
	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r17
	mov in1=0
	;;
	TEXT_ALIGN(32)
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
	// cycle 0
 { .mii
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
}{ .mfb
	add out1=1,in1			// increment recursion count
	nop.f 0
	nop.b 0				// can't do br.call here because of alloc (WAW on CFM)
	;;
}{ .mfi	// cycle 1
	mov loc1=0
	nop.f 0
	mov loc2=0
}{ .mib
	mov loc3=0
	mov loc4=0
(pRecurse) br.call.sptk.many b0=rse_clear_invalid

}{ .mfi	// cycle 2
	mov loc5=0
	nop.f 0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
}{ .mib
	mov loc6=0
	mov loc7=0
(pReturn) br.ret.sptk.many b0
}
#else /* !CONFIG_ITANIUM */
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1			// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0
#endif /* !CONFIG_ITANIUM */
#	undef pRecurse
#	undef pReturn
	;;
	alloc r17=ar.pfs,0,0,0,0	// drop current register frame
	;;
	loadrs
	;;
skip_rbs_switch:
	mov ar.unat=r25		// M2
(pKStk)	extr.u r22=r22,21,1	// I0 extract current value of psr.pp from r22
(pLvSys)mov r19=r0		// A  clear r19 for leave_syscall, no-op otherwise
	;;
(pUStk)	mov ar.bspstore=r23	// M2
(pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
(pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
	;;
#ifdef CONFIG_XEN
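	// The cr.ipsr/cr.ifs/cr.iip writes become st8s into successive
	// slots of the XSI save area, stepping r25 from XSI_IPSR through
	// XSI_IFS to XSI_IIP.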
	movl r25=XSI_IPSR
	;;
	st8 [r25]=r29,XSI_IFS_OFS-XSI_IPSR_OFS
	;;
#else
	mov cr.ipsr=r29		// M2
#endif
	mov ar.pfs=r26		// I0
(pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise

#ifdef CONFIG_XEN
(p9)	st8 [r25]=r30
	;;
	adds r25=XSI_IIP_OFS-XSI_IFS_OFS,r25
	;;
#else
(p9)	mov cr.ifs=r30		// M2
#endif
	mov b0=r21		// I0
(pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise

	mov ar.fpsr=r20		// M2
#ifdef CONFIG_XEN
	st8 [r25]=r28
#else
	mov cr.iip=r28		// M2
#endif
	nop 0
	;;
(pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
	nop 0
(pLvSys)mov r2=r0

	mov ar.rsc=r27		// M2
	mov pr=r31,-1		// I0
#ifdef CONFIG_XEN
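	// rfi is privileged, so the RFI hyperprivop restores the context
	// from the ipsr/ifs/iip values stored into the XSI area above.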
	;;
	XEN_HYPER_RFI;
#else
	rfi			// B
#endif

	/*
	 * On entry:
	 *	r20 = &current->thread_info->preempt_count (if CONFIG_PREEMPT)
	 *	r31 = current->thread_info->flags
	 * On exit:
	 *	p6 = TRUE if work-pending-check needs to be redone
	 */
.work_pending_syscall:
	add r2=-8,r2
	add r3=-8,r3
	;;
	st8 [r2]=r8
	st8 [r3]=r10
.work_pending:
	tbit.z p6,p0=r31,TIF_NEED_RESCHED	// current_thread_info()->need_resched==0?
(p6)	br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
(pKStk)	dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
	;;
(pKStk)	st4 [r20]=r21
	ssm psr.i		// enable interrupts
#endif
	br.call.spnt.many rp=schedule
.ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1
#ifdef CONFIG_XEN
	movl r2=XSI_PSR_I_ADDR
	mov r20=1
	;;
	ld8 r2=[r2]
	;;
	st1 [r2]=r20
#else
	rsm psr.i		// disable interrupts
#endif
	;;
#ifdef CONFIG_PREEMPT
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
(pKStk)	st4 [r20]=r0		// preempt_count() <- 0
#endif
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel	// re-check

.notify:
(pUStk)	br.call.spnt.many rp=notify_resume_user
.ret10:	cmp.ne p6,p0=r0,r0	// p6 <- 0
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel	// don't re-check

.work_pending_syscall_end:
	adds r2=PT(R8)+16,r12
	adds r3=PT(R10)+16,r12
	;;
	ld8 r8=[r2]
	ld8 r10=[r3]
	br.cond.sptk.many .work_processed_syscall	// re-check

#ifdef CONFIG_XEN
END(xen_leave_kernel)
#else
END(ia64_leave_kernel)
#endif