ia64/xen-unstable
changeset 4712:bd239611771a
bitkeeper revision 1.1327.1.14 (4272b089iYvr5Anp8Hwey0z6acqlvg)
More code cleanup
author    djm@kirby.fc.hp.com
date      Fri Apr 29 22:09:13 2005 +0000 (2005-04-29)
parents   a2f72e2d9a86
children  8368507eb193
files     .rootkeys xen/arch/ia64/Makefile xen/arch/ia64/ivt.S xen/arch/ia64/patch/linux-2.6.11/bootmem.h xen/arch/ia64/patch/linux-2.6.11/efi.c xen/arch/ia64/patch/linux-2.6.11/entry.S xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c xen/arch/ia64/patch/linux-2.6.11/ivt.S xen/arch/ia64/patch/linux-2.6.11/lds.S xen/arch/ia64/tools/mkbuildtree xen/arch/ia64/xenirq.c
line diff
1.1 --- a/.rootkeys Thu Apr 28 22:40:58 2005 +0000
1.2 +++ b/.rootkeys Fri Apr 29 22:09:13 2005 +0000
1.3 @@ -1078,21 +1078,18 @@ 421098b2ZlaBcyiuuPr3WpzaSDwg6Q xen/arch/
1.4  4239e98a_HX-FCIcXtVqY0BbrDqVug xen/arch/ia64/hypercall.c
1.5  421098b3LYAS8xJkQiGP7tiTlyBt0Q xen/arch/ia64/idle0_task.c
1.6  421098b3ys5GAr4z6_H1jD33oem82g xen/arch/ia64/irq.c
1.7 +4272a8e4lavI6DrTvqaIhXeR5RuKBw xen/arch/ia64/ivt.S
1.8  421098b3Heh72KuoVlND3CH6c0B0aA xen/arch/ia64/lib/Makefile
1.9  421098b3O0MYMUsmYVFy84VV_1gFwQ xen/arch/ia64/mm_init.c
1.10 -425ae516skiHBZU-Kfwxv2YWXfNRWQ xen/arch/ia64/patch/linux-2.6.11/bootmem.h
1.11  425ae516maKAsHBJVSzs19cdRgt3Nw xen/arch/ia64/patch/linux-2.6.11/cpumask.h
1.12  425ae516cGqvMzGtihTEsQXAXsuOhQ xen/arch/ia64/patch/linux-2.6.11/efi.c
1.13  425ae516Y1A4q4_Kfre3qnDj7lbHJg xen/arch/ia64/patch/linux-2.6.11/entry.S
1.14  425ae516txAP-owjzpTJ7ThfzWR8nw xen/arch/ia64/patch/linux-2.6.11/hardirq.h
1.15  425ae516PDO1ESDHXHVeDNvlqUfmdQ xen/arch/ia64/patch/linux-2.6.11/head.S
1.16 -425ae516JR7HWvt1zxJ-wLvEWmJGgg xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h
1.17  425ae516AHRNmaVuZjJY-9YjmKRDqg xen/arch/ia64/patch/linux-2.6.11/interrupt.h
1.18  425ae516U2wFUzrUJQUpy3z38jZHsQ xen/arch/ia64/patch/linux-2.6.11/io.h
1.19  425ae516GGRmXijPBLC5ii6yWOn0rg xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
1.20 -425ae516qQA5dHuIybqfN3nEzM_Zvg xen/arch/ia64/patch/linux-2.6.11/ivt.S
1.21  425ae516atiECmpn_6nZDw4kkmbJ6g xen/arch/ia64/patch/linux-2.6.11/kregs.h
1.22 -425ae516lwlYwHG1Jv93kC3tfU5caw xen/arch/ia64/patch/linux-2.6.11/lds.S
1.23  425ae516Je2zI-Iw30_uGhvUYdlCZQ xen/arch/ia64/patch/linux-2.6.11/mca_asm.h
1.24  425ae5160-9wHxh0tOnIjavEjt6W0A xen/arch/ia64/patch/linux-2.6.11/minstate.h
1.25  425ae516N7SaORdbodDr90tmtCzYXw xen/arch/ia64/patch/linux-2.6.11/mm_contig.c
1.26 @@ -1168,6 +1165,7 @@ 4252ace74lKUPFnO8PmF0Dtpk7Xkng xen/arch/
1.27  41a26ebc--sjlYZQxmIxyCx3jw70qA xen/arch/ia64/vcpu.c
1.28  421098b6M2WhsJ_ZMzFamAQcdc5gzw xen/arch/ia64/vhpt.c
1.29  41a26ebc4jSBGQOuyNIPDST58mNbBw xen/arch/ia64/xenasm.S
1.30 +4272adaeit9raZ9KnjO_wR4Ii9LJNQ xen/arch/ia64/xenirq.c
1.31  427162263zDUiPmTj-lP4eGyXs5eIg xen/arch/ia64/xenmem.c
1.32  421098b6mWyFPtkhPz9h1LCmKpoCLg xen/arch/ia64/xenmisc.c
1.33  421098b6lY2JzrV1oFDbrt7XQhtElg xen/arch/ia64/xensetup.c
2.1 --- a/xen/arch/ia64/Makefile Thu Apr 28 22:40:58 2005 +0000
2.2 +++ b/xen/arch/ia64/Makefile Fri Apr 29 22:09:13 2005 +0000
2.3 @@ -7,7 +7,7 @@ OBJS = xensetup.o setup.o time.o irq.o i
2.4  	machvec.o dom0_ops.o domain.o \
2.5  	idle0_task.o pal.o hpsim.o efi.o efi_stub.o ivt.o mm_contig.o \
2.6  	xenmem.o sal.o cmdline.o mm_init.o tlb.o smpboot.o \
2.7 -	extable.o linuxextable.o \
2.8 +	extable.o linuxextable.o xenirq.o \
2.9  	regionreg.o entry.o unaligned.o privop.o vcpu.o \
2.10 	irq_ia64.o irq_lsapic.o vhpt.o xenasm.o dom_fw.o
2.11 # perfmon.o
3.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 3.2 +++ b/xen/arch/ia64/ivt.S Fri Apr 29 22:09:13 2005 +0000 3.3 @@ -0,0 +1,1870 @@ 3.4 + 3.5 +#ifdef XEN 3.6 +//#define CONFIG_DISABLE_VHPT // FIXME: change when VHPT is enabled?? 3.7 +// these are all hacked out for now as the entire IVT 3.8 +// will eventually be replaced... just want to use it 3.9 +// for startup code to handle TLB misses 3.10 +//#define ia64_leave_kernel 0 3.11 +//#define ia64_ret_from_syscall 0 3.12 +//#define ia64_handle_irq 0 3.13 +//#define ia64_fault 0 3.14 +#define ia64_illegal_op_fault 0 3.15 +#define ia64_prepare_handle_unaligned 0 3.16 +#define ia64_bad_break 0 3.17 +#define ia64_trace_syscall 0 3.18 +#define sys_call_table 0 3.19 +#define sys_ni_syscall 0 3.20 +#include <asm/vhpt.h> 3.21 +#endif 3.22 +/* 3.23 + * arch/ia64/kernel/ivt.S 3.24 + * 3.25 + * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co 3.26 + * Stephane Eranian <eranian@hpl.hp.com> 3.27 + * David Mosberger <davidm@hpl.hp.com> 3.28 + * Copyright (C) 2000, 2002-2003 Intel Co 3.29 + * Asit Mallick <asit.k.mallick@intel.com> 3.30 + * Suresh Siddha <suresh.b.siddha@intel.com> 3.31 + * Kenneth Chen <kenneth.w.chen@intel.com> 3.32 + * Fenghua Yu <fenghua.yu@intel.com> 3.33 + * 3.34 + * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP 3.35 + * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT. 3.36 + */ 3.37 +/* 3.38 + * This file defines the interruption vector table used by the CPU. 3.39 + * It does not include one entry per possible cause of interruption. 3.40 + * 3.41 + * The first 20 entries of the table contain 64 bundles each while the 3.42 + * remaining 48 entries contain only 16 bundles each. 3.43 + * 3.44 + * The 64 bundles are used to allow inlining the whole handler for critical 3.45 + * interruptions like TLB misses. 3.46 + * 3.47 + * For each entry, the comment is as follows: 3.48 + * 3.49 + * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) 3.50 + * entry offset ----/ / / / / 3.51 + * entry number ---------/ / / / 3.52 + * size of the entry -------------/ / / 3.53 + * vector name -------------------------------------/ / 3.54 + * interruptions triggering this vector ----------------------/ 3.55 + * 3.56 + * The table is 32KB in size and must be aligned on 32KB boundary. 3.57 + * (The CPU ignores the 15 lower bits of the address) 3.58 + * 3.59 + * Table is based upon EAS2.6 (Oct 1999) 3.60 + */ 3.61 + 3.62 +#include <linux/config.h> 3.63 + 3.64 +#include <asm/asmmacro.h> 3.65 +#include <asm/break.h> 3.66 +#include <asm/ia32.h> 3.67 +#include <asm/kregs.h> 3.68 +#include <asm/offsets.h> 3.69 +#include <asm/pgtable.h> 3.70 +#include <asm/processor.h> 3.71 +#include <asm/ptrace.h> 3.72 +#include <asm/system.h> 3.73 +#include <asm/thread_info.h> 3.74 +#include <asm/unistd.h> 3.75 +#include <asm/errno.h> 3.76 + 3.77 +#if 1 3.78 +# define PSR_DEFAULT_BITS psr.ac 3.79 +#else 3.80 +# define PSR_DEFAULT_BITS 0 3.81 +#endif 3.82 + 3.83 +#if 0 3.84 + /* 3.85 + * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't 3.86 + * needed for something else before enabling this... 
3.87 + */ 3.88 +# define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16 3.89 +#else 3.90 +# define DBG_FAULT(i) 3.91 +#endif 3.92 + 3.93 +#define MINSTATE_VIRT /* needed by minstate.h */ 3.94 +#include "minstate.h" 3.95 + 3.96 +#define FAULT(n) \ 3.97 + mov r31=pr; \ 3.98 + mov r19=n;; /* prepare to save predicates */ \ 3.99 + br.sptk.many dispatch_to_fault_handler 3.100 + 3.101 +#ifdef XEN 3.102 +#define REFLECT(n) \ 3.103 + mov r31=pr; \ 3.104 + mov r19=n;; /* prepare to save predicates */ \ 3.105 + br.sptk.many dispatch_reflection 3.106 +#endif 3.107 + 3.108 + .section .text.ivt,"ax" 3.109 + 3.110 + .align 32768 // align on 32KB boundary 3.111 + .global ia64_ivt 3.112 +ia64_ivt: 3.113 +///////////////////////////////////////////////////////////////////////////////////////// 3.114 +// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47) 3.115 +ENTRY(vhpt_miss) 3.116 + DBG_FAULT(0) 3.117 + /* 3.118 + * The VHPT vector is invoked when the TLB entry for the virtual page table 3.119 + * is missing. This happens only as a result of a previous 3.120 + * (the "original") TLB miss, which may either be caused by an instruction 3.121 + * fetch or a data access (or non-access). 3.122 + * 3.123 + * What we do here is normal TLB miss handing for the _original_ miss, followed 3.124 + * by inserting the TLB entry for the virtual page table page that the VHPT 3.125 + * walker was attempting to access. The latter gets inserted as long 3.126 + * as both L1 and L2 have valid mappings for the faulting address. 3.127 + * The TLB entry for the original miss gets inserted only if 3.128 + * the L3 entry indicates that the page is present. 3.129 + * 3.130 + * do_page_fault gets invoked in the following cases: 3.131 + * - the faulting virtual address uses unimplemented address bits 3.132 + * - the faulting virtual address has no L1, L2, or L3 mapping 3.133 + */ 3.134 + mov r16=cr.ifa // get address that caused the TLB miss 3.135 +#ifdef CONFIG_HUGETLB_PAGE 3.136 + movl r18=PAGE_SHIFT 3.137 + mov r25=cr.itir 3.138 +#endif 3.139 + ;; 3.140 + rsm psr.dt // use physical addressing for data 3.141 + mov r31=pr // save the predicate registers 3.142 + mov r19=IA64_KR(PT_BASE) // get page table base address 3.143 + shl r21=r16,3 // shift bit 60 into sign bit 3.144 + shr.u r17=r16,61 // get the region number into r17 3.145 + ;; 3.146 + shr r22=r21,3 3.147 +#ifdef CONFIG_HUGETLB_PAGE 3.148 + extr.u r26=r25,2,6 3.149 + ;; 3.150 + cmp.ne p8,p0=r18,r26 3.151 + sub r27=r26,r18 3.152 + ;; 3.153 +(p8) dep r25=r18,r25,2,6 3.154 +(p8) shr r22=r22,r27 3.155 +#endif 3.156 + ;; 3.157 + cmp.eq p6,p7=5,r17 // is IFA pointing into to region 5? 3.158 + shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address 3.159 + ;; 3.160 +(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place 3.161 + 3.162 + srlz.d 3.163 + LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir 3.164 + 3.165 + .pred.rel "mutex", p6, p7 3.166 +(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT 3.167 +(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3 3.168 + ;; 3.169 +(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8 3.170 +(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8) 3.171 + cmp.eq p7,p6=0,r21 // unused address bits all zeroes? 3.172 + shr.u r18=r22,PMD_SHIFT // shift L2 index into position 3.173 + ;; 3.174 + ld8 r17=[r17] // fetch the L1 entry (may be 0) 3.175 + ;; 3.176 +(p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL? 
3.177 + dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry 3.178 + ;; 3.179 +(p7) ld8 r20=[r17] // fetch the L2 entry (may be 0) 3.180 + shr.u r19=r22,PAGE_SHIFT // shift L3 index into position 3.181 + ;; 3.182 +(p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL? 3.183 + dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page table entry 3.184 + ;; 3.185 +(p7) ld8 r18=[r21] // read the L3 PTE 3.186 + mov r19=cr.isr // cr.isr bit 0 tells us if this is an insn miss 3.187 + ;; 3.188 +(p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared? 3.189 + mov r22=cr.iha // get the VHPT address that caused the TLB miss 3.190 + ;; // avoid RAW on p7 3.191 +(p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss? 3.192 + dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address 3.193 + ;; 3.194 +(p10) itc.i r18 // insert the instruction TLB entry 3.195 +(p11) itc.d r18 // insert the data TLB entry 3.196 +(p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault) 3.197 + mov cr.ifa=r22 3.198 + 3.199 +#ifdef CONFIG_HUGETLB_PAGE 3.200 +(p8) mov cr.itir=r25 // change to default page-size for VHPT 3.201 +#endif 3.202 + 3.203 + /* 3.204 + * Now compute and insert the TLB entry for the virtual page table. We never 3.205 + * execute in a page table page so there is no need to set the exception deferral 3.206 + * bit. 3.207 + */ 3.208 + adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23 3.209 + ;; 3.210 +(p7) itc.d r24 3.211 + ;; 3.212 +#ifdef CONFIG_SMP 3.213 + /* 3.214 + * Tell the assemblers dependency-violation checker that the above "itc" instructions 3.215 + * cannot possibly affect the following loads: 3.216 + */ 3.217 + dv_serialize_data 3.218 + 3.219 + /* 3.220 + * Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g 3.221 + * between reading the pagetable and the "itc". If so, flush the entry we 3.222 + * inserted and retry. 3.223 + */ 3.224 + ld8 r25=[r21] // read L3 PTE again 3.225 + ld8 r26=[r17] // read L2 entry again 3.226 + ;; 3.227 + cmp.ne p6,p7=r26,r20 // did L2 entry change 3.228 + mov r27=PAGE_SHIFT<<2 3.229 + ;; 3.230 +(p6) ptc.l r22,r27 // purge PTE page translation 3.231 +(p7) cmp.ne.or.andcm p6,p7=r25,r18 // did L3 PTE change 3.232 + ;; 3.233 +(p6) ptc.l r16,r27 // purge translation 3.234 +#endif 3.235 + 3.236 + mov pr=r31,-1 // restore predicate registers 3.237 + rfi 3.238 +END(vhpt_miss) 3.239 + 3.240 + .org ia64_ivt+0x400 3.241 +///////////////////////////////////////////////////////////////////////////////////////// 3.242 +// 0x0400 Entry 1 (size 64 bundles) ITLB (21) 3.243 +ENTRY(itlb_miss) 3.244 + DBG_FAULT(1) 3.245 +#ifdef XEN 3.246 + VHPT_CCHAIN_LOOKUP(itlb_miss,i) 3.247 +#ifdef VHPT_GLOBAL 3.248 + br.cond.sptk page_fault 3.249 + ;; 3.250 +#endif 3.251 +#endif 3.252 + /* 3.253 + * The ITLB handler accesses the L3 PTE via the virtually mapped linear 3.254 + * page table. If a nested TLB miss occurs, we switch into physical 3.255 + * mode, walk the page table, and then re-execute the L3 PTE read 3.256 + * and go on normally after that. 3.257 + */ 3.258 + mov r16=cr.ifa // get virtual address 3.259 + mov r29=b0 // save b0 3.260 + mov r31=pr // save predicates 3.261 +.itlb_fault: 3.262 + mov r17=cr.iha // get virtual address of L3 PTE 3.263 + movl r30=1f // load nested fault continuation point 3.264 + ;; 3.265 +1: ld8 r18=[r17] // read L3 PTE 3.266 + ;; 3.267 + mov b0=r29 3.268 + tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? 
3.269 +(p6) br.cond.spnt page_fault 3.270 + ;; 3.271 + itc.i r18 3.272 + ;; 3.273 +#ifdef CONFIG_SMP 3.274 + /* 3.275 + * Tell the assemblers dependency-violation checker that the above "itc" instructions 3.276 + * cannot possibly affect the following loads: 3.277 + */ 3.278 + dv_serialize_data 3.279 + 3.280 + ld8 r19=[r17] // read L3 PTE again and see if same 3.281 + mov r20=PAGE_SHIFT<<2 // setup page size for purge 3.282 + ;; 3.283 + cmp.ne p7,p0=r18,r19 3.284 + ;; 3.285 +(p7) ptc.l r16,r20 3.286 +#endif 3.287 + mov pr=r31,-1 3.288 + rfi 3.289 +END(itlb_miss) 3.290 + 3.291 + .org ia64_ivt+0x0800 3.292 +///////////////////////////////////////////////////////////////////////////////////////// 3.293 +// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) 3.294 +ENTRY(dtlb_miss) 3.295 + DBG_FAULT(2) 3.296 +#ifdef XEN 3.297 + VHPT_CCHAIN_LOOKUP(dtlb_miss,d) 3.298 +#ifdef VHPT_GLOBAL 3.299 + br.cond.sptk page_fault 3.300 + ;; 3.301 +#endif 3.302 +#endif 3.303 + /* 3.304 + * The DTLB handler accesses the L3 PTE via the virtually mapped linear 3.305 + * page table. If a nested TLB miss occurs, we switch into physical 3.306 + * mode, walk the page table, and then re-execute the L3 PTE read 3.307 + * and go on normally after that. 3.308 + */ 3.309 + mov r16=cr.ifa // get virtual address 3.310 + mov r29=b0 // save b0 3.311 + mov r31=pr // save predicates 3.312 +dtlb_fault: 3.313 + mov r17=cr.iha // get virtual address of L3 PTE 3.314 + movl r30=1f // load nested fault continuation point 3.315 + ;; 3.316 +1: ld8 r18=[r17] // read L3 PTE 3.317 + ;; 3.318 + mov b0=r29 3.319 + tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared? 3.320 +(p6) br.cond.spnt page_fault 3.321 + ;; 3.322 + itc.d r18 3.323 + ;; 3.324 +#ifdef CONFIG_SMP 3.325 + /* 3.326 + * Tell the assemblers dependency-violation checker that the above "itc" instructions 3.327 + * cannot possibly affect the following loads: 3.328 + */ 3.329 + dv_serialize_data 3.330 + 3.331 + ld8 r19=[r17] // read L3 PTE again and see if same 3.332 + mov r20=PAGE_SHIFT<<2 // setup page size for purge 3.333 + ;; 3.334 + cmp.ne p7,p0=r18,r19 3.335 + ;; 3.336 +(p7) ptc.l r16,r20 3.337 +#endif 3.338 + mov pr=r31,-1 3.339 + rfi 3.340 +END(dtlb_miss) 3.341 + 3.342 + .org ia64_ivt+0x0c00 3.343 +///////////////////////////////////////////////////////////////////////////////////////// 3.344 +// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) 3.345 +ENTRY(alt_itlb_miss) 3.346 + DBG_FAULT(3) 3.347 +#ifdef XEN 3.348 +//#ifdef VHPT_GLOBAL 3.349 +// VHPT_CCHAIN_LOOKUP(alt_itlb_miss,i) 3.350 +// br.cond.sptk page_fault 3.351 +// ;; 3.352 +//#endif 3.353 +#endif 3.354 + mov r16=cr.ifa // get address that caused the TLB miss 3.355 + movl r17=PAGE_KERNEL 3.356 + mov r21=cr.ipsr 3.357 + movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) 3.358 + mov r31=pr 3.359 + ;; 3.360 +#ifdef CONFIG_DISABLE_VHPT 3.361 + shr.u r22=r16,61 // get the region number into r21 3.362 + ;; 3.363 + cmp.gt p8,p0=6,r22 // user mode 3.364 + ;; 3.365 +(p8) thash r17=r16 3.366 + ;; 3.367 +(p8) mov cr.iha=r17 3.368 +(p8) mov r29=b0 // save b0 3.369 +(p8) br.cond.dptk .itlb_fault 3.370 +#endif 3.371 + extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl 3.372 + and r19=r19,r16 // clear ed, reserved bits, and PTE control bits 3.373 + shr.u r18=r16,57 // move address bit 61 to bit 4 3.374 + ;; 3.375 + andcm r18=0x10,r18 // bit 4=~address-bit(61) 3.376 + cmp.ne p8,p0=r0,r23 // psr.cpl != 0? 
3.377 + or r19=r17,r19 // insert PTE control bits into r19 3.378 + ;; 3.379 + or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 3.380 +(p8) br.cond.spnt page_fault 3.381 + ;; 3.382 + itc.i r19 // insert the TLB entry 3.383 + mov pr=r31,-1 3.384 + rfi 3.385 +END(alt_itlb_miss) 3.386 + 3.387 + .org ia64_ivt+0x1000 3.388 +///////////////////////////////////////////////////////////////////////////////////////// 3.389 +// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) 3.390 +ENTRY(alt_dtlb_miss) 3.391 + DBG_FAULT(4) 3.392 +#ifdef XEN 3.393 +//#ifdef VHPT_GLOBAL 3.394 +// VHPT_CCHAIN_LOOKUP(alt_dtlb_miss,d) 3.395 +// br.cond.sptk page_fault 3.396 +// ;; 3.397 +//#endif 3.398 +#endif 3.399 + mov r16=cr.ifa // get address that caused the TLB miss 3.400 + movl r17=PAGE_KERNEL 3.401 + mov r20=cr.isr 3.402 + movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff) 3.403 + mov r21=cr.ipsr 3.404 + mov r31=pr 3.405 + ;; 3.406 +#ifdef CONFIG_DISABLE_VHPT 3.407 + shr.u r22=r16,61 // get the region number into r21 3.408 + ;; 3.409 + cmp.gt p8,p0=6,r22 // access to region 0-5 3.410 + ;; 3.411 +(p8) thash r17=r16 3.412 + ;; 3.413 +(p8) mov cr.iha=r17 3.414 +(p8) mov r29=b0 // save b0 3.415 +(p8) br.cond.dptk dtlb_fault 3.416 +#endif 3.417 + extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl 3.418 + and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field 3.419 + tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on? 3.420 + shr.u r18=r16,57 // move address bit 61 to bit 4 3.421 + and r19=r19,r16 // clear ed, reserved bits, and PTE control bits 3.422 + tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on? 3.423 + ;; 3.424 + andcm r18=0x10,r18 // bit 4=~address-bit(61) 3.425 + cmp.ne p8,p0=r0,r23 3.426 +(p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field 3.427 +(p8) br.cond.spnt page_fault 3.428 +#ifdef XEN 3.429 + ;; 3.430 + // FIXME: inadequate test, this is where we test for Xen address 3.431 + // note that 0xf000 (cached) and 0xd000 (uncached) addresses 3.432 + // should be OK. (Though no I/O is done in Xen, EFI needs uncached 3.433 + // addresses and some domain EFI calls are passed through) 3.434 + tbit.nz p0,p8=r16,60 3.435 +(p8) br.cond.spnt page_fault 3.436 +//(p8) br.cond.spnt 0 3.437 + ;; 3.438 +#endif 3.439 + 3.440 + dep r21=-1,r21,IA64_PSR_ED_BIT,1 3.441 + or r19=r19,r17 // insert PTE control bits into r19 3.442 + ;; 3.443 + or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6 3.444 +(p6) mov cr.ipsr=r21 3.445 + ;; 3.446 +(p7) itc.d r19 // insert the TLB entry 3.447 + mov pr=r31,-1 3.448 + rfi 3.449 +END(alt_dtlb_miss) 3.450 + 3.451 + .org ia64_ivt+0x1400 3.452 +///////////////////////////////////////////////////////////////////////////////////////// 3.453 +// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45) 3.454 +ENTRY(nested_dtlb_miss) 3.455 + /* 3.456 + * In the absence of kernel bugs, we get here when the virtually mapped linear 3.457 + * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction 3.458 + * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page 3.459 + * table is missing, a nested TLB miss fault is triggered and control is 3.460 + * transferred to this point. When this happens, we lookup the pte for the 3.461 + * faulting address by walking the page table in physical mode and return to the 3.462 + * continuation point passed in register r30 (or call page_fault if the address is 3.463 + * not mapped). 
3.464 + * 3.465 + * Input: r16: faulting address 3.466 + * r29: saved b0 3.467 + * r30: continuation address 3.468 + * r31: saved pr 3.469 + * 3.470 + * Output: r17: physical address of L3 PTE of faulting address 3.471 + * r29: saved b0 3.472 + * r30: continuation address 3.473 + * r31: saved pr 3.474 + * 3.475 + * Clobbered: b0, r18, r19, r21, psr.dt (cleared) 3.476 + */ 3.477 + rsm psr.dt // switch to using physical data addressing 3.478 + mov r19=IA64_KR(PT_BASE) // get the page table base address 3.479 + shl r21=r16,3 // shift bit 60 into sign bit 3.480 + ;; 3.481 + shr.u r17=r16,61 // get the region number into r17 3.482 + ;; 3.483 + cmp.eq p6,p7=5,r17 // is faulting address in region 5? 3.484 + shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address 3.485 + ;; 3.486 +(p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place 3.487 + 3.488 + srlz.d 3.489 + LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir 3.490 + 3.491 + .pred.rel "mutex", p6, p7 3.492 +(p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT 3.493 +(p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3 3.494 + ;; 3.495 +(p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8 3.496 +(p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8) 3.497 + cmp.eq p7,p6=0,r21 // unused address bits all zeroes? 3.498 + shr.u r18=r16,PMD_SHIFT // shift L2 index into position 3.499 + ;; 3.500 + ld8 r17=[r17] // fetch the L1 entry (may be 0) 3.501 + ;; 3.502 +(p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL? 3.503 + dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry 3.504 + ;; 3.505 +(p7) ld8 r17=[r17] // fetch the L2 entry (may be 0) 3.506 + shr.u r19=r16,PAGE_SHIFT // shift L3 index into position 3.507 + ;; 3.508 +(p7) cmp.eq.or.andcm p6,p7=r17,r0 // was L2 entry NULL? 
3.509 + dep r17=r19,r17,3,(PAGE_SHIFT-3) // compute address of L3 page table entry 3.510 +(p6) br.cond.spnt page_fault 3.511 + mov b0=r30 3.512 + br.sptk.many b0 // return to continuation point 3.513 +END(nested_dtlb_miss) 3.514 + 3.515 + .org ia64_ivt+0x1800 3.516 +///////////////////////////////////////////////////////////////////////////////////////// 3.517 +// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) 3.518 +ENTRY(ikey_miss) 3.519 +#ifdef XEN 3.520 + REFLECT(6) 3.521 +#endif 3.522 + DBG_FAULT(6) 3.523 + FAULT(6) 3.524 +END(ikey_miss) 3.525 + 3.526 + //----------------------------------------------------------------------------------- 3.527 + // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address) 3.528 +ENTRY(page_fault) 3.529 + ssm psr.dt 3.530 + ;; 3.531 + srlz.i 3.532 + ;; 3.533 + SAVE_MIN_WITH_COVER 3.534 +#ifdef XEN 3.535 + alloc r15=ar.pfs,0,0,4,0 3.536 + mov out0=cr.ifa 3.537 + mov out1=cr.isr 3.538 + mov out3=cr.itir 3.539 +#else 3.540 + alloc r15=ar.pfs,0,0,3,0 3.541 + mov out0=cr.ifa 3.542 + mov out1=cr.isr 3.543 +#endif 3.544 + adds r3=8,r2 // set up second base pointer 3.545 + ;; 3.546 + ssm psr.ic | PSR_DEFAULT_BITS 3.547 + ;; 3.548 + srlz.i // guarantee that interruption collectin is on 3.549 + ;; 3.550 +(p15) ssm psr.i // restore psr.i 3.551 + movl r14=ia64_leave_kernel 3.552 + ;; 3.553 + SAVE_REST 3.554 + mov rp=r14 3.555 + ;; 3.556 + adds out2=16,r12 // out2 = pointer to pt_regs 3.557 + br.call.sptk.many b6=ia64_do_page_fault // ignore return address 3.558 +END(page_fault) 3.559 + 3.560 + .org ia64_ivt+0x1c00 3.561 +///////////////////////////////////////////////////////////////////////////////////////// 3.562 +// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) 3.563 +ENTRY(dkey_miss) 3.564 +#ifdef XEN 3.565 + REFLECT(7) 3.566 +#endif 3.567 + DBG_FAULT(7) 3.568 + FAULT(7) 3.569 +END(dkey_miss) 3.570 + 3.571 + .org ia64_ivt+0x2000 3.572 +///////////////////////////////////////////////////////////////////////////////////////// 3.573 +// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) 3.574 +ENTRY(dirty_bit) 3.575 +#ifdef XEN 3.576 + REFLECT(8) 3.577 +#endif 3.578 + DBG_FAULT(8) 3.579 + /* 3.580 + * What we do here is to simply turn on the dirty bit in the PTE. We need to 3.581 + * update both the page-table and the TLB entry. To efficiently access the PTE, 3.582 + * we address it through the virtual page table. Most likely, the TLB entry for 3.583 + * the relevant virtual page table page is still present in the TLB so we can 3.584 + * normally do this without additional TLB misses. In case the necessary virtual 3.585 + * page table TLB entry isn't present, we take a nested TLB miss hit where we look 3.586 + * up the physical address of the L3 PTE and then continue at label 1 below. 
3.587 + */ 3.588 + mov r16=cr.ifa // get the address that caused the fault 3.589 + movl r30=1f // load continuation point in case of nested fault 3.590 + ;; 3.591 + thash r17=r16 // compute virtual address of L3 PTE 3.592 + mov r29=b0 // save b0 in case of nested fault 3.593 + mov r31=pr // save pr 3.594 +#ifdef CONFIG_SMP 3.595 + mov r28=ar.ccv // save ar.ccv 3.596 + ;; 3.597 +1: ld8 r18=[r17] 3.598 + ;; // avoid RAW on r18 3.599 + mov ar.ccv=r18 // set compare value for cmpxchg 3.600 + or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits 3.601 + ;; 3.602 + cmpxchg8.acq r26=[r17],r25,ar.ccv 3.603 + mov r24=PAGE_SHIFT<<2 3.604 + ;; 3.605 + cmp.eq p6,p7=r26,r18 3.606 + ;; 3.607 +(p6) itc.d r25 // install updated PTE 3.608 + ;; 3.609 + /* 3.610 + * Tell the assemblers dependency-violation checker that the above "itc" instructions 3.611 + * cannot possibly affect the following loads: 3.612 + */ 3.613 + dv_serialize_data 3.614 + 3.615 + ld8 r18=[r17] // read PTE again 3.616 + ;; 3.617 + cmp.eq p6,p7=r18,r25 // is it same as the newly installed 3.618 + ;; 3.619 +(p7) ptc.l r16,r24 3.620 + mov b0=r29 // restore b0 3.621 + mov ar.ccv=r28 3.622 +#else 3.623 + ;; 3.624 +1: ld8 r18=[r17] 3.625 + ;; // avoid RAW on r18 3.626 + or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits 3.627 + mov b0=r29 // restore b0 3.628 + ;; 3.629 + st8 [r17]=r18 // store back updated PTE 3.630 + itc.d r18 // install updated PTE 3.631 +#endif 3.632 + mov pr=r31,-1 // restore pr 3.633 + rfi 3.634 +END(dirty_bit) 3.635 + 3.636 + .org ia64_ivt+0x2400 3.637 +///////////////////////////////////////////////////////////////////////////////////////// 3.638 +// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) 3.639 +ENTRY(iaccess_bit) 3.640 +#ifdef XEN 3.641 + REFLECT(9) 3.642 +#endif 3.643 + DBG_FAULT(9) 3.644 + // Like Entry 8, except for instruction access 3.645 + mov r16=cr.ifa // get the address that caused the fault 3.646 + movl r30=1f // load continuation point in case of nested fault 3.647 + mov r31=pr // save predicates 3.648 +#ifdef CONFIG_ITANIUM 3.649 + /* 3.650 + * Erratum 10 (IFA may contain incorrect address) has "NoFix" status. 3.651 + */ 3.652 + mov r17=cr.ipsr 3.653 + ;; 3.654 + mov r18=cr.iip 3.655 + tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set? 
3.656 + ;; 3.657 +(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa 3.658 +#endif /* CONFIG_ITANIUM */ 3.659 + ;; 3.660 + thash r17=r16 // compute virtual address of L3 PTE 3.661 + mov r29=b0 // save b0 in case of nested fault) 3.662 +#ifdef CONFIG_SMP 3.663 + mov r28=ar.ccv // save ar.ccv 3.664 + ;; 3.665 +1: ld8 r18=[r17] 3.666 + ;; 3.667 + mov ar.ccv=r18 // set compare value for cmpxchg 3.668 + or r25=_PAGE_A,r18 // set the accessed bit 3.669 + ;; 3.670 + cmpxchg8.acq r26=[r17],r25,ar.ccv 3.671 + mov r24=PAGE_SHIFT<<2 3.672 + ;; 3.673 + cmp.eq p6,p7=r26,r18 3.674 + ;; 3.675 +(p6) itc.i r25 // install updated PTE 3.676 + ;; 3.677 + /* 3.678 + * Tell the assemblers dependency-violation checker that the above "itc" instructions 3.679 + * cannot possibly affect the following loads: 3.680 + */ 3.681 + dv_serialize_data 3.682 + 3.683 + ld8 r18=[r17] // read PTE again 3.684 + ;; 3.685 + cmp.eq p6,p7=r18,r25 // is it same as the newly installed 3.686 + ;; 3.687 +(p7) ptc.l r16,r24 3.688 + mov b0=r29 // restore b0 3.689 + mov ar.ccv=r28 3.690 +#else /* !CONFIG_SMP */ 3.691 + ;; 3.692 +1: ld8 r18=[r17] 3.693 + ;; 3.694 + or r18=_PAGE_A,r18 // set the accessed bit 3.695 + mov b0=r29 // restore b0 3.696 + ;; 3.697 + st8 [r17]=r18 // store back updated PTE 3.698 + itc.i r18 // install updated PTE 3.699 +#endif /* !CONFIG_SMP */ 3.700 + mov pr=r31,-1 3.701 + rfi 3.702 +END(iaccess_bit) 3.703 + 3.704 + .org ia64_ivt+0x2800 3.705 +///////////////////////////////////////////////////////////////////////////////////////// 3.706 +// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) 3.707 +ENTRY(daccess_bit) 3.708 +#ifdef XEN 3.709 + REFLECT(10) 3.710 +#endif 3.711 + DBG_FAULT(10) 3.712 + // Like Entry 8, except for data access 3.713 + mov r16=cr.ifa // get the address that caused the fault 3.714 + movl r30=1f // load continuation point in case of nested fault 3.715 + ;; 3.716 + thash r17=r16 // compute virtual address of L3 PTE 3.717 + mov r31=pr 3.718 + mov r29=b0 // save b0 in case of nested fault) 3.719 +#ifdef CONFIG_SMP 3.720 + mov r28=ar.ccv // save ar.ccv 3.721 + ;; 3.722 +1: ld8 r18=[r17] 3.723 + ;; // avoid RAW on r18 3.724 + mov ar.ccv=r18 // set compare value for cmpxchg 3.725 + or r25=_PAGE_A,r18 // set the dirty bit 3.726 + ;; 3.727 + cmpxchg8.acq r26=[r17],r25,ar.ccv 3.728 + mov r24=PAGE_SHIFT<<2 3.729 + ;; 3.730 + cmp.eq p6,p7=r26,r18 3.731 + ;; 3.732 +(p6) itc.d r25 // install updated PTE 3.733 + /* 3.734 + * Tell the assemblers dependency-violation checker that the above "itc" instructions 3.735 + * cannot possibly affect the following loads: 3.736 + */ 3.737 + dv_serialize_data 3.738 + ;; 3.739 + ld8 r18=[r17] // read PTE again 3.740 + ;; 3.741 + cmp.eq p6,p7=r18,r25 // is it same as the newly installed 3.742 + ;; 3.743 +(p7) ptc.l r16,r24 3.744 + mov ar.ccv=r28 3.745 +#else 3.746 + ;; 3.747 +1: ld8 r18=[r17] 3.748 + ;; // avoid RAW on r18 3.749 + or r18=_PAGE_A,r18 // set the accessed bit 3.750 + ;; 3.751 + st8 [r17]=r18 // store back updated PTE 3.752 + itc.d r18 // install updated PTE 3.753 +#endif 3.754 + mov b0=r29 // restore b0 3.755 + mov pr=r31,-1 3.756 + rfi 3.757 +END(daccess_bit) 3.758 + 3.759 + .org ia64_ivt+0x2c00 3.760 +///////////////////////////////////////////////////////////////////////////////////////// 3.761 +// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33) 3.762 +ENTRY(break_fault) 3.763 + /* 3.764 + * The streamlined system call entry/exit paths only save/restore the initial part 3.765 + * of pt_regs. 
This implies that the callers of system-calls must adhere to the 3.766 + * normal procedure calling conventions. 3.767 + * 3.768 + * Registers to be saved & restored: 3.769 + * CR registers: cr.ipsr, cr.iip, cr.ifs 3.770 + * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr 3.771 + * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15 3.772 + * Registers to be restored only: 3.773 + * r8-r11: output value from the system call. 3.774 + * 3.775 + * During system call exit, scratch registers (including r15) are modified/cleared 3.776 + * to prevent leaking bits from kernel to user level. 3.777 + */ 3.778 + DBG_FAULT(11) 3.779 +#ifdef XEN 3.780 + mov r16=cr.isr 3.781 + mov r17=cr.iim 3.782 + mov r31=pr 3.783 + ;; 3.784 + cmp.eq p7,p0=r0,r17 // is this a psuedo-cover? 3.785 + // FIXME: may also need to check slot==2? 3.786 +(p7) br.sptk.many dispatch_privop_fault 3.787 + br.sptk.many dispatch_break_fault 3.788 +#endif 3.789 + mov r16=IA64_KR(CURRENT) // r16 = current task; 12 cycle read lat. 3.790 + mov r17=cr.iim 3.791 + mov r18=__IA64_BREAK_SYSCALL 3.792 + mov r21=ar.fpsr 3.793 + mov r29=cr.ipsr 3.794 + mov r19=b6 3.795 + mov r25=ar.unat 3.796 + mov r27=ar.rsc 3.797 + mov r26=ar.pfs 3.798 + mov r28=cr.iip 3.799 +#ifndef XEN 3.800 + mov r31=pr // prepare to save predicates 3.801 +#endif 3.802 + mov r20=r1 3.803 + ;; 3.804 + adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 3.805 + cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so) 3.806 +(p7) br.cond.spnt non_syscall 3.807 + ;; 3.808 + ld1 r17=[r16] // load current->thread.on_ustack flag 3.809 + st1 [r16]=r0 // clear current->thread.on_ustack flag 3.810 + add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT 3.811 + ;; 3.812 + invala 3.813 + 3.814 + /* adjust return address so we skip over the break instruction: */ 3.815 + 3.816 + extr.u r8=r29,41,2 // extract ei field from cr.ipsr 3.817 + ;; 3.818 + cmp.eq p6,p7=2,r8 // isr.ei==2? 3.819 + mov r2=r1 // setup r2 for ia64_syscall_setup 3.820 + ;; 3.821 +(p6) mov r8=0 // clear ei to 0 3.822 +(p6) adds r28=16,r28 // switch cr.iip to next bundle cr.ipsr.ei wrapped 3.823 +(p7) adds r8=1,r8 // increment ei to next slot 3.824 + ;; 3.825 + cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already? 3.826 + dep r29=r8,r29,41,2 // insert new ei into cr.ipsr 3.827 + ;; 3.828 + 3.829 + // switch from user to kernel RBS: 3.830 + MINSTATE_START_SAVE_MIN_VIRT 3.831 + br.call.sptk.many b7=ia64_syscall_setup 3.832 + ;; 3.833 + MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1 3.834 + ssm psr.ic | PSR_DEFAULT_BITS 3.835 + ;; 3.836 + srlz.i // guarantee that interruption collection is on 3.837 + mov r3=NR_syscalls - 1 3.838 + ;; 3.839 +(p15) ssm psr.i // restore psr.i 3.840 + // p10==true means out registers are more than 8 or r15's Nat is true 3.841 +(p10) br.cond.spnt.many ia64_ret_from_syscall 3.842 + ;; 3.843 + movl r16=sys_call_table 3.844 + 3.845 + adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024 3.846 + movl r2=ia64_ret_from_syscall 3.847 + ;; 3.848 + shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024) 3.849 + cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 + NR_syscalls) ? 
3.850 + mov rp=r2 // set the real return addr 3.851 + ;; 3.852 +(p6) ld8 r20=[r20] // load address of syscall entry point 3.853 +(p7) movl r20=sys_ni_syscall 3.854 + 3.855 + add r2=TI_FLAGS+IA64_TASK_SIZE,r13 3.856 + ;; 3.857 + ld4 r2=[r2] // r2 = current_thread_info()->flags 3.858 + ;; 3.859 + and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit 3.860 + ;; 3.861 + cmp.eq p8,p0=r2,r0 3.862 + mov b6=r20 3.863 + ;; 3.864 +(p8) br.call.sptk.many b6=b6 // ignore this return addr 3.865 + br.cond.sptk ia64_trace_syscall 3.866 + // NOT REACHED 3.867 +END(break_fault) 3.868 + 3.869 + .org ia64_ivt+0x3000 3.870 +///////////////////////////////////////////////////////////////////////////////////////// 3.871 +// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4) 3.872 +ENTRY(interrupt) 3.873 + DBG_FAULT(12) 3.874 + mov r31=pr // prepare to save predicates 3.875 + ;; 3.876 + SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 3.877 + ssm psr.ic | PSR_DEFAULT_BITS 3.878 + ;; 3.879 + adds r3=8,r2 // set up second base pointer for SAVE_REST 3.880 + srlz.i // ensure everybody knows psr.ic is back on 3.881 + ;; 3.882 + SAVE_REST 3.883 + ;; 3.884 + alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group 3.885 + mov out0=cr.ivr // pass cr.ivr as first arg 3.886 + add out1=16,sp // pass pointer to pt_regs as second arg 3.887 + ;; 3.888 + srlz.d // make sure we see the effect of cr.ivr 3.889 + movl r14=ia64_leave_kernel 3.890 + ;; 3.891 + mov rp=r14 3.892 + br.call.sptk.many b6=ia64_handle_irq 3.893 +END(interrupt) 3.894 + 3.895 + .org ia64_ivt+0x3400 3.896 +///////////////////////////////////////////////////////////////////////////////////////// 3.897 +// 0x3400 Entry 13 (size 64 bundles) Reserved 3.898 + DBG_FAULT(13) 3.899 + FAULT(13) 3.900 + 3.901 +#ifdef XEN 3.902 + // There is no particular reason for this code to be here, other than that 3.903 + // there happens to be space here that would go unused otherwise. If this 3.904 + // fault ever gets "unreserved", simply moved the following code to a more 3.905 + // suitable spot... 3.906 + 3.907 +ENTRY(dispatch_break_fault) 3.908 + SAVE_MIN_WITH_COVER 3.909 + ;; 3.910 + alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) 3.911 + mov out0=cr.ifa 3.912 + adds out1=16,sp 3.913 + mov out2=cr.isr // FIXME: pity to make this slow access twice 3.914 + mov out3=cr.iim // FIXME: pity to make this slow access twice 3.915 + 3.916 + ssm psr.ic | PSR_DEFAULT_BITS 3.917 + ;; 3.918 + srlz.i // guarantee that interruption collection is on 3.919 + ;; 3.920 +(p15) ssm psr.i // restore psr.i 3.921 + adds r3=8,r2 // set up second base pointer 3.922 + ;; 3.923 + SAVE_REST 3.924 + movl r14=ia64_leave_kernel 3.925 + ;; 3.926 + mov rp=r14 3.927 + br.sptk.many ia64_prepare_handle_break 3.928 +END(dispatch_break_fault) 3.929 +#endif 3.930 + 3.931 + .org ia64_ivt+0x3800 3.932 +///////////////////////////////////////////////////////////////////////////////////////// 3.933 +// 0x3800 Entry 14 (size 64 bundles) Reserved 3.934 + DBG_FAULT(14) 3.935 + FAULT(14) 3.936 + 3.937 + /* 3.938 + * There is no particular reason for this code to be here, other than that 3.939 + * there happens to be space here that would go unused otherwise. If this 3.940 + * fault ever gets "unreserved", simply moved the following code to a more 3.941 + * suitable spot... 3.942 + * 3.943 + * ia64_syscall_setup() is a separate subroutine so that it can 3.944 + * allocate stacked registers so it can safely demine any 3.945 + * potential NaT values from the input registers. 
3.946 + * 3.947 + * On entry: 3.948 + * - executing on bank 0 or bank 1 register set (doesn't matter) 3.949 + * - r1: stack pointer 3.950 + * - r2: current task pointer 3.951 + * - r3: preserved 3.952 + * - r11: original contents (saved ar.pfs to be saved) 3.953 + * - r12: original contents (sp to be saved) 3.954 + * - r13: original contents (tp to be saved) 3.955 + * - r15: original contents (syscall # to be saved) 3.956 + * - r18: saved bsp (after switching to kernel stack) 3.957 + * - r19: saved b6 3.958 + * - r20: saved r1 (gp) 3.959 + * - r21: saved ar.fpsr 3.960 + * - r22: kernel's register backing store base (krbs_base) 3.961 + * - r23: saved ar.bspstore 3.962 + * - r24: saved ar.rnat 3.963 + * - r25: saved ar.unat 3.964 + * - r26: saved ar.pfs 3.965 + * - r27: saved ar.rsc 3.966 + * - r28: saved cr.iip 3.967 + * - r29: saved cr.ipsr 3.968 + * - r31: saved pr 3.969 + * - b0: original contents (to be saved) 3.970 + * On exit: 3.971 + * - executing on bank 1 registers 3.972 + * - psr.ic enabled, interrupts restored 3.973 + * - p10: TRUE if syscall is invoked with more than 8 out 3.974 + * registers or r15's Nat is true 3.975 + * - r1: kernel's gp 3.976 + * - r3: preserved (same as on entry) 3.977 + * - r8: -EINVAL if p10 is true 3.978 + * - r12: points to kernel stack 3.979 + * - r13: points to current task 3.980 + * - p15: TRUE if interrupts need to be re-enabled 3.981 + * - ar.fpsr: set to kernel settings 3.982 + */ 3.983 +GLOBAL_ENTRY(ia64_syscall_setup) 3.984 +#ifndef XEN 3.985 +#if PT(B6) != 0 3.986 +# error This code assumes that b6 is the first field in pt_regs. 3.987 +#endif 3.988 +#endif 3.989 + st8 [r1]=r19 // save b6 3.990 + add r16=PT(CR_IPSR),r1 // initialize first base pointer 3.991 + add r17=PT(R11),r1 // initialize second base pointer 3.992 + ;; 3.993 + alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable 3.994 + st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr 3.995 + tnat.nz p8,p0=in0 3.996 + 3.997 + st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11 3.998 + tnat.nz p9,p0=in1 3.999 +(pKStk) mov r18=r0 // make sure r18 isn't NaT 3.1000 + ;; 3.1001 + 3.1002 + st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs 3.1003 + st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip 3.1004 + mov r28=b0 // save b0 (2 cyc) 3.1005 + ;; 3.1006 + 3.1007 + st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat 3.1008 + dep r19=0,r19,38,26 // clear all bits but 0..37 [I0] 3.1009 +(p8) mov in0=-1 3.1010 + ;; 3.1011 + 3.1012 + st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs 3.1013 + extr.u r11=r19,7,7 // I0 // get sol of ar.pfs 3.1014 + and r8=0x7f,r19 // A // get sof of ar.pfs 3.1015 + 3.1016 + st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc 3.1017 + tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0 3.1018 +(p9) mov in1=-1 3.1019 + ;; 3.1020 + 3.1021 +(pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8 3.1022 + tnat.nz p10,p0=in2 3.1023 + add r11=8,r11 3.1024 + ;; 3.1025 +(pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field 3.1026 +(pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field 3.1027 + tnat.nz p11,p0=in3 3.1028 + ;; 3.1029 +(p10) mov in2=-1 3.1030 + tnat.nz p12,p0=in4 // [I0] 3.1031 +(p11) mov in3=-1 3.1032 + ;; 3.1033 +(pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat 3.1034 +(pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore 3.1035 + shl r18=r18,16 // compute ar.rsc to be used for "loadrs" 3.1036 + ;; 3.1037 + st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates 3.1038 + st8 [r17]=r28,PT(R1)-PT(B0) // save b0 
3.1039 + tnat.nz p13,p0=in5 // [I0] 3.1040 + ;; 3.1041 + st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs" 3.1042 + st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1 3.1043 +(p12) mov in4=-1 3.1044 + ;; 3.1045 + 3.1046 +.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12 3.1047 +.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13 3.1048 +(p13) mov in5=-1 3.1049 + ;; 3.1050 + st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr 3.1051 + tnat.nz p14,p0=in6 3.1052 + cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8 3.1053 + ;; 3.1054 + stf8 [r16]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error) 3.1055 +(p9) tnat.nz p10,p0=r15 3.1056 + adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch) 3.1057 + 3.1058 + st8.spill [r17]=r15 // save r15 3.1059 + tnat.nz p8,p0=in7 3.1060 + nop.i 0 3.1061 + 3.1062 + mov r13=r2 // establish `current' 3.1063 + movl r1=__gp // establish kernel global pointer 3.1064 + ;; 3.1065 +(p14) mov in6=-1 3.1066 +(p8) mov in7=-1 3.1067 + nop.i 0 3.1068 + 3.1069 + cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 3.1070 + movl r17=FPSR_DEFAULT 3.1071 + ;; 3.1072 + mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value 3.1073 +(p10) mov r8=-EINVAL 3.1074 + br.ret.sptk.many b7 3.1075 +END(ia64_syscall_setup) 3.1076 + 3.1077 + .org ia64_ivt+0x3c00 3.1078 +///////////////////////////////////////////////////////////////////////////////////////// 3.1079 +// 0x3c00 Entry 15 (size 64 bundles) Reserved 3.1080 + DBG_FAULT(15) 3.1081 + FAULT(15) 3.1082 + 3.1083 + /* 3.1084 + * Squatting in this space ... 3.1085 + * 3.1086 + * This special case dispatcher for illegal operation faults allows preserved 3.1087 + * registers to be modified through a callback function (asm only) that is handed 3.1088 + * back from the fault handler in r8. Up to three arguments can be passed to the 3.1089 + * callback function by returning an aggregate with the callback as its first 3.1090 + * element, followed by the arguments. 3.1091 + */ 3.1092 +ENTRY(dispatch_illegal_op_fault) 3.1093 + SAVE_MIN_WITH_COVER 3.1094 + ssm psr.ic | PSR_DEFAULT_BITS 3.1095 + ;; 3.1096 + srlz.i // guarantee that interruption collection is on 3.1097 + ;; 3.1098 +(p15) ssm psr.i // restore psr.i 3.1099 + adds r3=8,r2 // set up second base pointer for SAVE_REST 3.1100 + ;; 3.1101 + alloc r14=ar.pfs,0,0,1,0 // must be first in insn group 3.1102 + mov out0=ar.ec 3.1103 + ;; 3.1104 + SAVE_REST 3.1105 + ;; 3.1106 + br.call.sptk.many rp=ia64_illegal_op_fault 3.1107 +.ret0: ;; 3.1108 + alloc r14=ar.pfs,0,0,3,0 // must be first in insn group 3.1109 + mov out0=r9 3.1110 + mov out1=r10 3.1111 + mov out2=r11 3.1112 + movl r15=ia64_leave_kernel 3.1113 + ;; 3.1114 + mov rp=r15 3.1115 + mov b6=r8 3.1116 + ;; 3.1117 + cmp.ne p6,p0=0,r8 3.1118 +(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel 3.1119 + br.sptk.many ia64_leave_kernel 3.1120 +END(dispatch_illegal_op_fault) 3.1121 + 3.1122 + .org ia64_ivt+0x4000 3.1123 +///////////////////////////////////////////////////////////////////////////////////////// 3.1124 +// 0x4000 Entry 16 (size 64 bundles) Reserved 3.1125 + DBG_FAULT(16) 3.1126 + FAULT(16) 3.1127 + 3.1128 +#ifdef XEN 3.1129 + // There is no particular reason for this code to be here, other than that 3.1130 + // there happens to be space here that would go unused otherwise. If this 3.1131 + // fault ever gets "unreserved", simply moved the following code to a more 3.1132 + // suitable spot... 
3.1133 + 3.1134 +ENTRY(dispatch_privop_fault) 3.1135 + SAVE_MIN_WITH_COVER 3.1136 + ;; 3.1137 + alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) 3.1138 + mov out0=cr.ifa 3.1139 + adds out1=16,sp 3.1140 + mov out2=cr.isr // FIXME: pity to make this slow access twice 3.1141 + mov out3=cr.itir 3.1142 + 3.1143 + ssm psr.ic | PSR_DEFAULT_BITS 3.1144 + ;; 3.1145 + srlz.i // guarantee that interruption collection is on 3.1146 + ;; 3.1147 +(p15) ssm psr.i // restore psr.i 3.1148 + adds r3=8,r2 // set up second base pointer 3.1149 + ;; 3.1150 + SAVE_REST 3.1151 + movl r14=ia64_leave_kernel 3.1152 + ;; 3.1153 + mov rp=r14 3.1154 + br.sptk.many ia64_prepare_handle_privop 3.1155 +END(dispatch_privop_fault) 3.1156 +#endif 3.1157 + 3.1158 + 3.1159 + .org ia64_ivt+0x4400 3.1160 +///////////////////////////////////////////////////////////////////////////////////////// 3.1161 +// 0x4400 Entry 17 (size 64 bundles) Reserved 3.1162 + DBG_FAULT(17) 3.1163 + FAULT(17) 3.1164 + 3.1165 +ENTRY(non_syscall) 3.1166 + SAVE_MIN_WITH_COVER 3.1167 + 3.1168 + // There is no particular reason for this code to be here, other than that 3.1169 + // there happens to be space here that would go unused otherwise. If this 3.1170 + // fault ever gets "unreserved", simply moved the following code to a more 3.1171 + // suitable spot... 3.1172 + 3.1173 + alloc r14=ar.pfs,0,0,2,0 3.1174 + mov out0=cr.iim 3.1175 + add out1=16,sp 3.1176 + adds r3=8,r2 // set up second base pointer for SAVE_REST 3.1177 + 3.1178 + ssm psr.ic | PSR_DEFAULT_BITS 3.1179 + ;; 3.1180 + srlz.i // guarantee that interruption collection is on 3.1181 + ;; 3.1182 +(p15) ssm psr.i // restore psr.i 3.1183 + movl r15=ia64_leave_kernel 3.1184 + ;; 3.1185 + SAVE_REST 3.1186 + mov rp=r15 3.1187 + ;; 3.1188 + br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr 3.1189 +END(non_syscall) 3.1190 + 3.1191 + .org ia64_ivt+0x4800 3.1192 +///////////////////////////////////////////////////////////////////////////////////////// 3.1193 +// 0x4800 Entry 18 (size 64 bundles) Reserved 3.1194 + DBG_FAULT(18) 3.1195 + FAULT(18) 3.1196 + 3.1197 + /* 3.1198 + * There is no particular reason for this code to be here, other than that 3.1199 + * there happens to be space here that would go unused otherwise. If this 3.1200 + * fault ever gets "unreserved", simply moved the following code to a more 3.1201 + * suitable spot... 3.1202 + */ 3.1203 + 3.1204 +ENTRY(dispatch_unaligned_handler) 3.1205 + SAVE_MIN_WITH_COVER 3.1206 + ;; 3.1207 + alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) 3.1208 + mov out0=cr.ifa 3.1209 + adds out1=16,sp 3.1210 + 3.1211 + ssm psr.ic | PSR_DEFAULT_BITS 3.1212 + ;; 3.1213 + srlz.i // guarantee that interruption collection is on 3.1214 + ;; 3.1215 +(p15) ssm psr.i // restore psr.i 3.1216 + adds r3=8,r2 // set up second base pointer 3.1217 + ;; 3.1218 + SAVE_REST 3.1219 + movl r14=ia64_leave_kernel 3.1220 + ;; 3.1221 + mov rp=r14 3.1222 + br.sptk.many ia64_prepare_handle_unaligned 3.1223 +END(dispatch_unaligned_handler) 3.1224 + 3.1225 + .org ia64_ivt+0x4c00 3.1226 +///////////////////////////////////////////////////////////////////////////////////////// 3.1227 +// 0x4c00 Entry 19 (size 64 bundles) Reserved 3.1228 + DBG_FAULT(19) 3.1229 + FAULT(19) 3.1230 + 3.1231 + /* 3.1232 + * There is no particular reason for this code to be here, other than that 3.1233 + * there happens to be space here that would go unused otherwise. 
If this 3.1234 + * fault ever gets "unreserved", simply moved the following code to a more 3.1235 + * suitable spot... 3.1236 + */ 3.1237 + 3.1238 +ENTRY(dispatch_to_fault_handler) 3.1239 + /* 3.1240 + * Input: 3.1241 + * psr.ic: off 3.1242 + * r19: fault vector number (e.g., 24 for General Exception) 3.1243 + * r31: contains saved predicates (pr) 3.1244 + */ 3.1245 + SAVE_MIN_WITH_COVER_R19 3.1246 + alloc r14=ar.pfs,0,0,5,0 3.1247 + mov out0=r15 3.1248 + mov out1=cr.isr 3.1249 + mov out2=cr.ifa 3.1250 + mov out3=cr.iim 3.1251 + mov out4=cr.itir 3.1252 + ;; 3.1253 + ssm psr.ic | PSR_DEFAULT_BITS 3.1254 + ;; 3.1255 + srlz.i // guarantee that interruption collection is on 3.1256 + ;; 3.1257 +(p15) ssm psr.i // restore psr.i 3.1258 + adds r3=8,r2 // set up second base pointer for SAVE_REST 3.1259 + ;; 3.1260 + SAVE_REST 3.1261 + movl r14=ia64_leave_kernel 3.1262 + ;; 3.1263 + mov rp=r14 3.1264 + br.call.sptk.many b6=ia64_fault 3.1265 +END(dispatch_to_fault_handler) 3.1266 + 3.1267 +// 3.1268 +// --- End of long entries, Beginning of short entries 3.1269 +// 3.1270 + 3.1271 + .org ia64_ivt+0x5000 3.1272 +///////////////////////////////////////////////////////////////////////////////////////// 3.1273 +// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) 3.1274 +ENTRY(page_not_present) 3.1275 +#ifdef XEN 3.1276 + REFLECT(20) 3.1277 +#endif 3.1278 + DBG_FAULT(20) 3.1279 + mov r16=cr.ifa 3.1280 + rsm psr.dt 3.1281 + /* 3.1282 + * The Linux page fault handler doesn't expect non-present pages to be in 3.1283 + * the TLB. Flush the existing entry now, so we meet that expectation. 3.1284 + */ 3.1285 + mov r17=PAGE_SHIFT<<2 3.1286 + ;; 3.1287 + ptc.l r16,r17 3.1288 + ;; 3.1289 + mov r31=pr 3.1290 + srlz.d 3.1291 + br.sptk.many page_fault 3.1292 +END(page_not_present) 3.1293 + 3.1294 + .org ia64_ivt+0x5100 3.1295 +///////////////////////////////////////////////////////////////////////////////////////// 3.1296 +// 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) 3.1297 +ENTRY(key_permission) 3.1298 +#ifdef XEN 3.1299 + REFLECT(21) 3.1300 +#endif 3.1301 + DBG_FAULT(21) 3.1302 + mov r16=cr.ifa 3.1303 + rsm psr.dt 3.1304 + mov r31=pr 3.1305 + ;; 3.1306 + srlz.d 3.1307 + br.sptk.many page_fault 3.1308 +END(key_permission) 3.1309 + 3.1310 + .org ia64_ivt+0x5200 3.1311 +///////////////////////////////////////////////////////////////////////////////////////// 3.1312 +// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) 3.1313 +ENTRY(iaccess_rights) 3.1314 +#ifdef XEN 3.1315 + REFLECT(22) 3.1316 +#endif 3.1317 + DBG_FAULT(22) 3.1318 + mov r16=cr.ifa 3.1319 + rsm psr.dt 3.1320 + mov r31=pr 3.1321 + ;; 3.1322 + srlz.d 3.1323 + br.sptk.many page_fault 3.1324 +END(iaccess_rights) 3.1325 + 3.1326 + .org ia64_ivt+0x5300 3.1327 +///////////////////////////////////////////////////////////////////////////////////////// 3.1328 +// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) 3.1329 +ENTRY(daccess_rights) 3.1330 +#ifdef XEN 3.1331 + REFLECT(23) 3.1332 +#endif 3.1333 + DBG_FAULT(23) 3.1334 + mov r16=cr.ifa 3.1335 + rsm psr.dt 3.1336 + mov r31=pr 3.1337 + ;; 3.1338 + srlz.d 3.1339 + br.sptk.many page_fault 3.1340 +END(daccess_rights) 3.1341 + 3.1342 + .org ia64_ivt+0x5400 3.1343 +///////////////////////////////////////////////////////////////////////////////////////// 3.1344 +// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) 3.1345 +ENTRY(general_exception) 3.1346 + DBG_FAULT(24) 3.1347 + mov r16=cr.isr 3.1348 + mov r31=pr 3.1349 + ;; 3.1350 
+#ifdef XEN 3.1351 + cmp4.ge p6,p0=0x20,r16 3.1352 +(p6) br.sptk.many dispatch_privop_fault 3.1353 +#else 3.1354 + cmp4.eq p6,p0=0,r16 3.1355 +(p6) br.sptk.many dispatch_illegal_op_fault 3.1356 +#endif 3.1357 + ;; 3.1358 + mov r19=24 // fault number 3.1359 + br.sptk.many dispatch_to_fault_handler 3.1360 +END(general_exception) 3.1361 + 3.1362 + .org ia64_ivt+0x5500 3.1363 +///////////////////////////////////////////////////////////////////////////////////////// 3.1364 +// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) 3.1365 +ENTRY(disabled_fp_reg) 3.1366 +#ifdef XEN 3.1367 + REFLECT(25) 3.1368 +#endif 3.1369 + DBG_FAULT(25) 3.1370 + rsm psr.dfh // ensure we can access fph 3.1371 + ;; 3.1372 + srlz.d 3.1373 + mov r31=pr 3.1374 + mov r19=25 3.1375 + br.sptk.many dispatch_to_fault_handler 3.1376 +END(disabled_fp_reg) 3.1377 + 3.1378 + .org ia64_ivt+0x5600 3.1379 +///////////////////////////////////////////////////////////////////////////////////////// 3.1380 +// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) 3.1381 +ENTRY(nat_consumption) 3.1382 +#ifdef XEN 3.1383 + REFLECT(26) 3.1384 +#endif 3.1385 + DBG_FAULT(26) 3.1386 + FAULT(26) 3.1387 +END(nat_consumption) 3.1388 + 3.1389 + .org ia64_ivt+0x5700 3.1390 +///////////////////////////////////////////////////////////////////////////////////////// 3.1391 +// 0x5700 Entry 27 (size 16 bundles) Speculation (40) 3.1392 +ENTRY(speculation_vector) 3.1393 +#ifdef XEN 3.1394 + // this probably need not reflect... 3.1395 + REFLECT(27) 3.1396 +#endif 3.1397 + DBG_FAULT(27) 3.1398 + /* 3.1399 + * A [f]chk.[as] instruction needs to take the branch to the recovery code but 3.1400 + * this part of the architecture is not implemented in hardware on some CPUs, such 3.1401 + * as Itanium. Thus, in general we need to emulate the behavior. IIM contains 3.1402 + * the relative target (not yet sign extended). So after sign extending it we 3.1403 + * simply add it to IIP. We also need to reset the EI field of the IPSR to zero, 3.1404 + * i.e., the slot to restart into. 
3.1405 + * 3.1406 + * cr.imm contains zero_ext(imm21) 3.1407 + */ 3.1408 + mov r18=cr.iim 3.1409 + ;; 3.1410 + mov r17=cr.iip 3.1411 + shl r18=r18,43 // put sign bit in position (43=64-21) 3.1412 + ;; 3.1413 + 3.1414 + mov r16=cr.ipsr 3.1415 + shr r18=r18,39 // sign extend (39=43-4) 3.1416 + ;; 3.1417 + 3.1418 + add r17=r17,r18 // now add the offset 3.1419 + ;; 3.1420 + mov cr.iip=r17 3.1421 + dep r16=0,r16,41,2 // clear EI 3.1422 + ;; 3.1423 + 3.1424 + mov cr.ipsr=r16 3.1425 + ;; 3.1426 + 3.1427 + rfi // and go back 3.1428 +END(speculation_vector) 3.1429 + 3.1430 + .org ia64_ivt+0x5800 3.1431 +///////////////////////////////////////////////////////////////////////////////////////// 3.1432 +// 0x5800 Entry 28 (size 16 bundles) Reserved 3.1433 + DBG_FAULT(28) 3.1434 + FAULT(28) 3.1435 + 3.1436 + .org ia64_ivt+0x5900 3.1437 +///////////////////////////////////////////////////////////////////////////////////////// 3.1438 +// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) 3.1439 +ENTRY(debug_vector) 3.1440 +#ifdef XEN 3.1441 + REFLECT(29) 3.1442 +#endif 3.1443 + DBG_FAULT(29) 3.1444 + FAULT(29) 3.1445 +END(debug_vector) 3.1446 + 3.1447 + .org ia64_ivt+0x5a00 3.1448 +///////////////////////////////////////////////////////////////////////////////////////// 3.1449 +// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) 3.1450 +ENTRY(unaligned_access) 3.1451 +#ifdef XEN 3.1452 + REFLECT(30) 3.1453 +#endif 3.1454 + DBG_FAULT(30) 3.1455 + mov r16=cr.ipsr 3.1456 + mov r31=pr // prepare to save predicates 3.1457 + ;; 3.1458 + br.sptk.many dispatch_unaligned_handler 3.1459 +END(unaligned_access) 3.1460 + 3.1461 + .org ia64_ivt+0x5b00 3.1462 +///////////////////////////////////////////////////////////////////////////////////////// 3.1463 +// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) 3.1464 +ENTRY(unsupported_data_reference) 3.1465 +#ifdef XEN 3.1466 + REFLECT(31) 3.1467 +#endif 3.1468 + DBG_FAULT(31) 3.1469 + FAULT(31) 3.1470 +END(unsupported_data_reference) 3.1471 + 3.1472 + .org ia64_ivt+0x5c00 3.1473 +///////////////////////////////////////////////////////////////////////////////////////// 3.1474 +// 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64) 3.1475 +ENTRY(floating_point_fault) 3.1476 +#ifdef XEN 3.1477 + REFLECT(32) 3.1478 +#endif 3.1479 + DBG_FAULT(32) 3.1480 + FAULT(32) 3.1481 +END(floating_point_fault) 3.1482 + 3.1483 + .org ia64_ivt+0x5d00 3.1484 +///////////////////////////////////////////////////////////////////////////////////////// 3.1485 +// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) 3.1486 +ENTRY(floating_point_trap) 3.1487 +#ifdef XEN 3.1488 + REFLECT(33) 3.1489 +#endif 3.1490 + DBG_FAULT(33) 3.1491 + FAULT(33) 3.1492 +END(floating_point_trap) 3.1493 + 3.1494 + .org ia64_ivt+0x5e00 3.1495 +///////////////////////////////////////////////////////////////////////////////////////// 3.1496 +// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) 3.1497 +ENTRY(lower_privilege_trap) 3.1498 +#ifdef XEN 3.1499 + REFLECT(34) 3.1500 +#endif 3.1501 + DBG_FAULT(34) 3.1502 + FAULT(34) 3.1503 +END(lower_privilege_trap) 3.1504 + 3.1505 + .org ia64_ivt+0x5f00 3.1506 +///////////////////////////////////////////////////////////////////////////////////////// 3.1507 +// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) 3.1508 +ENTRY(taken_branch_trap) 3.1509 +#ifdef XEN 3.1510 + REFLECT(35) 3.1511 +#endif 3.1512 + DBG_FAULT(35) 3.1513 + FAULT(35) 3.1514 +END(taken_branch_trap) 3.1515 + 3.1516 + .org ia64_ivt+0x6000 
3.1517 +///////////////////////////////////////////////////////////////////////////////////////// 3.1518 +// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) 3.1519 +ENTRY(single_step_trap) 3.1520 +#ifdef XEN 3.1521 + REFLECT(36) 3.1522 +#endif 3.1523 + DBG_FAULT(36) 3.1524 + FAULT(36) 3.1525 +END(single_step_trap) 3.1526 + 3.1527 + .org ia64_ivt+0x6100 3.1528 +///////////////////////////////////////////////////////////////////////////////////////// 3.1529 +// 0x6100 Entry 37 (size 16 bundles) Reserved 3.1530 + DBG_FAULT(37) 3.1531 + FAULT(37) 3.1532 + 3.1533 + .org ia64_ivt+0x6200 3.1534 +///////////////////////////////////////////////////////////////////////////////////////// 3.1535 +// 0x6200 Entry 38 (size 16 bundles) Reserved 3.1536 + DBG_FAULT(38) 3.1537 + FAULT(38) 3.1538 + 3.1539 + .org ia64_ivt+0x6300 3.1540 +///////////////////////////////////////////////////////////////////////////////////////// 3.1541 +// 0x6300 Entry 39 (size 16 bundles) Reserved 3.1542 + DBG_FAULT(39) 3.1543 + FAULT(39) 3.1544 + 3.1545 + .org ia64_ivt+0x6400 3.1546 +///////////////////////////////////////////////////////////////////////////////////////// 3.1547 +// 0x6400 Entry 40 (size 16 bundles) Reserved 3.1548 + DBG_FAULT(40) 3.1549 + FAULT(40) 3.1550 + 3.1551 + .org ia64_ivt+0x6500 3.1552 +///////////////////////////////////////////////////////////////////////////////////////// 3.1553 +// 0x6500 Entry 41 (size 16 bundles) Reserved 3.1554 + DBG_FAULT(41) 3.1555 + FAULT(41) 3.1556 + 3.1557 + .org ia64_ivt+0x6600 3.1558 +///////////////////////////////////////////////////////////////////////////////////////// 3.1559 +// 0x6600 Entry 42 (size 16 bundles) Reserved 3.1560 + DBG_FAULT(42) 3.1561 + FAULT(42) 3.1562 + 3.1563 + .org ia64_ivt+0x6700 3.1564 +///////////////////////////////////////////////////////////////////////////////////////// 3.1565 +// 0x6700 Entry 43 (size 16 bundles) Reserved 3.1566 + DBG_FAULT(43) 3.1567 + FAULT(43) 3.1568 + 3.1569 + .org ia64_ivt+0x6800 3.1570 +///////////////////////////////////////////////////////////////////////////////////////// 3.1571 +// 0x6800 Entry 44 (size 16 bundles) Reserved 3.1572 + DBG_FAULT(44) 3.1573 + FAULT(44) 3.1574 + 3.1575 + .org ia64_ivt+0x6900 3.1576 +///////////////////////////////////////////////////////////////////////////////////////// 3.1577 +// 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) 3.1578 +ENTRY(ia32_exception) 3.1579 +#ifdef XEN 3.1580 + REFLECT(45) 3.1581 +#endif 3.1582 + DBG_FAULT(45) 3.1583 + FAULT(45) 3.1584 +END(ia32_exception) 3.1585 + 3.1586 + .org ia64_ivt+0x6a00 3.1587 +///////////////////////////////////////////////////////////////////////////////////////// 3.1588 +// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) 3.1589 +ENTRY(ia32_intercept) 3.1590 +#ifdef XEN 3.1591 + REFLECT(46) 3.1592 +#endif 3.1593 + DBG_FAULT(46) 3.1594 +#ifdef CONFIG_IA32_SUPPORT 3.1595 + mov r31=pr 3.1596 + mov r16=cr.isr 3.1597 + ;; 3.1598 + extr.u r17=r16,16,8 // get ISR.code 3.1599 + mov r18=ar.eflag 3.1600 + mov r19=cr.iim // old eflag value 3.1601 + ;; 3.1602 + cmp.ne p6,p0=2,r17 3.1603 +(p6) br.cond.spnt 1f // not a system flag fault 3.1604 + xor r16=r18,r19 3.1605 + ;; 3.1606 + extr.u r17=r16,18,1 // get the eflags.ac bit 3.1607 + ;; 3.1608 + cmp.eq p6,p0=0,r17 3.1609 +(p6) br.cond.spnt 1f // eflags.ac bit didn't change 3.1610 + ;; 3.1611 + mov pr=r31,-1 // restore predicate registers 3.1612 + rfi 3.1613 + 3.1614 +1: 3.1615 +#endif // CONFIG_IA32_SUPPORT 3.1616 + 
FAULT(46) 3.1617 +END(ia32_intercept) 3.1618 + 3.1619 + .org ia64_ivt+0x6b00 3.1620 +///////////////////////////////////////////////////////////////////////////////////////// 3.1621 +// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) 3.1622 +ENTRY(ia32_interrupt) 3.1623 +#ifdef XEN 3.1624 + REFLECT(47) 3.1625 +#endif 3.1626 + DBG_FAULT(47) 3.1627 +#ifdef CONFIG_IA32_SUPPORT 3.1628 + mov r31=pr 3.1629 + br.sptk.many dispatch_to_ia32_handler 3.1630 +#else 3.1631 + FAULT(47) 3.1632 +#endif 3.1633 +END(ia32_interrupt) 3.1634 + 3.1635 + .org ia64_ivt+0x6c00 3.1636 +///////////////////////////////////////////////////////////////////////////////////////// 3.1637 +// 0x6c00 Entry 48 (size 16 bundles) Reserved 3.1638 + DBG_FAULT(48) 3.1639 + FAULT(48) 3.1640 + 3.1641 + .org ia64_ivt+0x6d00 3.1642 +///////////////////////////////////////////////////////////////////////////////////////// 3.1643 +// 0x6d00 Entry 49 (size 16 bundles) Reserved 3.1644 + DBG_FAULT(49) 3.1645 + FAULT(49) 3.1646 + 3.1647 + .org ia64_ivt+0x6e00 3.1648 +///////////////////////////////////////////////////////////////////////////////////////// 3.1649 +// 0x6e00 Entry 50 (size 16 bundles) Reserved 3.1650 + DBG_FAULT(50) 3.1651 + FAULT(50) 3.1652 + 3.1653 + .org ia64_ivt+0x6f00 3.1654 +///////////////////////////////////////////////////////////////////////////////////////// 3.1655 +// 0x6f00 Entry 51 (size 16 bundles) Reserved 3.1656 + DBG_FAULT(51) 3.1657 + FAULT(51) 3.1658 + 3.1659 + .org ia64_ivt+0x7000 3.1660 +///////////////////////////////////////////////////////////////////////////////////////// 3.1661 +// 0x7000 Entry 52 (size 16 bundles) Reserved 3.1662 + DBG_FAULT(52) 3.1663 + FAULT(52) 3.1664 + 3.1665 + .org ia64_ivt+0x7100 3.1666 +///////////////////////////////////////////////////////////////////////////////////////// 3.1667 +// 0x7100 Entry 53 (size 16 bundles) Reserved 3.1668 + DBG_FAULT(53) 3.1669 + FAULT(53) 3.1670 + 3.1671 + .org ia64_ivt+0x7200 3.1672 +///////////////////////////////////////////////////////////////////////////////////////// 3.1673 +// 0x7200 Entry 54 (size 16 bundles) Reserved 3.1674 + DBG_FAULT(54) 3.1675 + FAULT(54) 3.1676 + 3.1677 + .org ia64_ivt+0x7300 3.1678 +///////////////////////////////////////////////////////////////////////////////////////// 3.1679 +// 0x7300 Entry 55 (size 16 bundles) Reserved 3.1680 + DBG_FAULT(55) 3.1681 + FAULT(55) 3.1682 + 3.1683 + .org ia64_ivt+0x7400 3.1684 +///////////////////////////////////////////////////////////////////////////////////////// 3.1685 +// 0x7400 Entry 56 (size 16 bundles) Reserved 3.1686 + DBG_FAULT(56) 3.1687 + FAULT(56) 3.1688 + 3.1689 + .org ia64_ivt+0x7500 3.1690 +///////////////////////////////////////////////////////////////////////////////////////// 3.1691 +// 0x7500 Entry 57 (size 16 bundles) Reserved 3.1692 + DBG_FAULT(57) 3.1693 + FAULT(57) 3.1694 + 3.1695 + .org ia64_ivt+0x7600 3.1696 +///////////////////////////////////////////////////////////////////////////////////////// 3.1697 +// 0x7600 Entry 58 (size 16 bundles) Reserved 3.1698 + DBG_FAULT(58) 3.1699 + FAULT(58) 3.1700 + 3.1701 + .org ia64_ivt+0x7700 3.1702 +///////////////////////////////////////////////////////////////////////////////////////// 3.1703 +// 0x7700 Entry 59 (size 16 bundles) Reserved 3.1704 + DBG_FAULT(59) 3.1705 + FAULT(59) 3.1706 + 3.1707 + .org ia64_ivt+0x7800 3.1708 +///////////////////////////////////////////////////////////////////////////////////////// 3.1709 +// 0x7800 Entry 60 (size 16 bundles) Reserved 3.1710 + DBG_FAULT(60) 3.1711 + 
FAULT(60) 3.1712 + 3.1713 + .org ia64_ivt+0x7900 3.1714 +///////////////////////////////////////////////////////////////////////////////////////// 3.1715 +// 0x7900 Entry 61 (size 16 bundles) Reserved 3.1716 + DBG_FAULT(61) 3.1717 + FAULT(61) 3.1718 + 3.1719 + .org ia64_ivt+0x7a00 3.1720 +///////////////////////////////////////////////////////////////////////////////////////// 3.1721 +// 0x7a00 Entry 62 (size 16 bundles) Reserved 3.1722 + DBG_FAULT(62) 3.1723 + FAULT(62) 3.1724 + 3.1725 + .org ia64_ivt+0x7b00 3.1726 +///////////////////////////////////////////////////////////////////////////////////////// 3.1727 +// 0x7b00 Entry 63 (size 16 bundles) Reserved 3.1728 + DBG_FAULT(63) 3.1729 + FAULT(63) 3.1730 + 3.1731 + .org ia64_ivt+0x7c00 3.1732 +///////////////////////////////////////////////////////////////////////////////////////// 3.1733 +// 0x7c00 Entry 64 (size 16 bundles) Reserved 3.1734 + DBG_FAULT(64) 3.1735 + FAULT(64) 3.1736 + 3.1737 + .org ia64_ivt+0x7d00 3.1738 +///////////////////////////////////////////////////////////////////////////////////////// 3.1739 +// 0x7d00 Entry 65 (size 16 bundles) Reserved 3.1740 + DBG_FAULT(65) 3.1741 + FAULT(65) 3.1742 + 3.1743 + .org ia64_ivt+0x7e00 3.1744 +///////////////////////////////////////////////////////////////////////////////////////// 3.1745 +// 0x7e00 Entry 66 (size 16 bundles) Reserved 3.1746 + DBG_FAULT(66) 3.1747 + FAULT(66) 3.1748 + 3.1749 + .org ia64_ivt+0x7f00 3.1750 +///////////////////////////////////////////////////////////////////////////////////////// 3.1751 +// 0x7f00 Entry 67 (size 16 bundles) Reserved 3.1752 + DBG_FAULT(67) 3.1753 + FAULT(67) 3.1754 + 3.1755 +#ifdef XEN 3.1756 + .org ia64_ivt+0x8000 3.1757 +ENTRY(dispatch_reflection) 3.1758 + /* 3.1759 + * Input: 3.1760 + * psr.ic: off 3.1761 + * r19: intr type (offset into ivt, see ia64_int.h) 3.1762 + * r31: contains saved predicates (pr) 3.1763 + */ 3.1764 + SAVE_MIN_WITH_COVER_R19 3.1765 + alloc r14=ar.pfs,0,0,5,0 3.1766 + mov out4=r15 3.1767 + mov out0=cr.ifa 3.1768 + adds out1=16,sp 3.1769 + mov out2=cr.isr 3.1770 + mov out3=cr.iim 3.1771 +// mov out3=cr.itir 3.1772 + 3.1773 + ssm psr.ic | PSR_DEFAULT_BITS 3.1774 + ;; 3.1775 + srlz.i // guarantee that interruption collection is on 3.1776 + ;; 3.1777 +(p15) ssm psr.i // restore psr.i 3.1778 + adds r3=8,r2 // set up second base pointer 3.1779 + ;; 3.1780 + SAVE_REST 3.1781 + movl r14=ia64_leave_kernel 3.1782 + ;; 3.1783 + mov rp=r14 3.1784 + br.sptk.many ia64_prepare_handle_reflection 3.1785 +END(dispatch_reflection) 3.1786 +#endif 3.1787 + 3.1788 +#ifdef CONFIG_IA32_SUPPORT 3.1789 + 3.1790 + /* 3.1791 + * There is no particular reason for this code to be here, other than that 3.1792 + * there happens to be space here that would go unused otherwise. If this 3.1793 + * fault ever gets "unreserved", simply moved the following code to a more 3.1794 + * suitable spot... 
3.1795 + */ 3.1796 + 3.1797 + // IA32 interrupt entry point 3.1798 + 3.1799 +ENTRY(dispatch_to_ia32_handler) 3.1800 + SAVE_MIN 3.1801 + ;; 3.1802 + mov r14=cr.isr 3.1803 + ssm psr.ic | PSR_DEFAULT_BITS 3.1804 + ;; 3.1805 + srlz.i // guarantee that interruption collection is on 3.1806 + ;; 3.1807 +(p15) ssm psr.i 3.1808 + adds r3=8,r2 // Base pointer for SAVE_REST 3.1809 + ;; 3.1810 + SAVE_REST 3.1811 + ;; 3.1812 + mov r15=0x80 3.1813 + shr r14=r14,16 // Get interrupt number 3.1814 + ;; 3.1815 + cmp.ne p6,p0=r14,r15 3.1816 +(p6) br.call.dpnt.many b6=non_ia32_syscall 3.1817 + 3.1818 + adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions 3.1819 + adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp 3.1820 + ;; 3.1821 + cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 3.1822 + ld8 r8=[r14] // get r8 3.1823 + ;; 3.1824 + st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP) 3.1825 + ;; 3.1826 + alloc r15=ar.pfs,0,0,6,0 // must first in an insn group 3.1827 + ;; 3.1828 + ld4 r8=[r14],8 // r8 == eax (syscall number) 3.1829 + mov r15=IA32_NR_syscalls 3.1830 + ;; 3.1831 + cmp.ltu.unc p6,p7=r8,r15 3.1832 + ld4 out1=[r14],8 // r9 == ecx 3.1833 + ;; 3.1834 + ld4 out2=[r14],8 // r10 == edx 3.1835 + ;; 3.1836 + ld4 out0=[r14] // r11 == ebx 3.1837 + adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp 3.1838 + ;; 3.1839 + ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp 3.1840 + ;; 3.1841 + ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi 3.1842 + adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 3.1843 + ;; 3.1844 + ld4 out4=[r14] // r15 == edi 3.1845 + movl r16=ia32_syscall_table 3.1846 + ;; 3.1847 +(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number 3.1848 + ld4 r2=[r2] // r2 = current_thread_info()->flags 3.1849 + ;; 3.1850 + ld8 r16=[r16] 3.1851 + and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit 3.1852 + ;; 3.1853 + mov b6=r16 3.1854 + movl r15=ia32_ret_from_syscall 3.1855 + cmp.eq p8,p0=r2,r0 3.1856 + ;; 3.1857 + mov rp=r15 3.1858 +(p8) br.call.sptk.many b6=b6 3.1859 + br.cond.sptk ia32_trace_syscall 3.1860 + 3.1861 +non_ia32_syscall: 3.1862 + alloc r15=ar.pfs,0,0,2,0 3.1863 + mov out0=r14 // interrupt # 3.1864 + add out1=16,sp // pointer to pt_regs 3.1865 + ;; // avoid WAW on CFM 3.1866 + br.call.sptk.many rp=ia32_bad_interrupt 3.1867 +.ret1: movl r15=ia64_leave_kernel 3.1868 + ;; 3.1869 + mov rp=r15 3.1870 + br.ret.sptk.many rp 3.1871 +END(dispatch_to_ia32_handler) 3.1872 + 3.1873 +#endif /* CONFIG_IA32_SUPPORT */
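The speculation_vector handler above emulates the chk.a/chk.s recovery branch: cr.iim holds the zero-extended imm21 displacement, which is sign-extended, scaled to bundle bytes, and added to cr.iip, after which the EI field of cr.ipsr is cleared so execution restarts in slot 0. A minimal C sketch of that arithmetic, for illustration only (the helper names are invented and are not part of this changeset; the right shift is assumed to be arithmetic, as gcc does for signed types):

  #include <stdint.h>

  /* shl 43 then shr 39: sign-extend imm21 (43 = 64 - 21) and multiply by
   * 16 in one step (39 = 43 - 4), one bundle being 16 bytes. */
  static uint64_t chk_recovery_iip(uint64_t iip, uint64_t iim)
  {
          int64_t off = (int64_t)(iim << 43) >> 39;
          return iip + (uint64_t)off;
  }

  /* dep r16=0,r16,41,2: clear the two-bit EI field (bits 41-42) of ipsr
   * so the restarted instruction begins at slot 0. */
  static uint64_t clear_ipsr_ei(uint64_t ipsr)
  {
          return ipsr & ~(3ULL << 41);
  }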
4.1 --- a/xen/arch/ia64/patch/linux-2.6.11/bootmem.h Thu Apr 28 22:40:58 2005 +0000 4.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 4.3 @@ -1,17 +0,0 @@ 4.4 - bootmem.h | 2 ++ 4.5 - 1 files changed, 2 insertions(+) 4.6 - 4.7 -Index: linux-2.6.11/include/linux/bootmem.h 4.8 -=================================================================== 4.9 ---- linux-2.6.11.orig/include/linux/bootmem.h 2005-03-02 01:38:25.000000000 -0600 4.10 -+++ linux-2.6.11/include/linux/bootmem.h 2005-03-19 12:39:36.915887729 -0600 4.11 -@@ -41,7 +41,9 @@ extern unsigned long __init init_bootmem 4.12 - extern void __init free_bootmem (unsigned long addr, unsigned long size); 4.13 - extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal); 4.14 - #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE 4.15 -+#ifndef XEN 4.16 - extern void __init reserve_bootmem (unsigned long addr, unsigned long size); 4.17 -+#endif 4.18 - #define alloc_bootmem(x) \ 4.19 - __alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) 4.20 - #define alloc_bootmem_low(x) \
5.1 --- a/xen/arch/ia64/patch/linux-2.6.11/efi.c Thu Apr 28 22:40:58 2005 +0000 5.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/efi.c Fri Apr 29 22:09:13 2005 +0000 5.3 @@ -1,29 +1,6 @@ 5.4 - efi.c | 32 ++++++++++++++++++++++++++++++++ 5.5 - 1 files changed, 32 insertions(+) 5.6 - 5.7 -Index: linux-2.6.11-xendiffs/arch/ia64/kernel/efi.c 5.8 -=================================================================== 5.9 ---- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/efi.c 2005-04-07 12:22:08.230781400 -0500 5.10 -+++ linux-2.6.11-xendiffs/arch/ia64/kernel/efi.c 2005-04-07 12:25:11.875195997 -0500 5.11 -@@ -25,6 +25,9 @@ 5.12 - #include <linux/types.h> 5.13 - #include <linux/time.h> 5.14 - #include <linux/efi.h> 5.15 -+#ifdef XEN 5.16 -+#include <xen/sched.h> 5.17 -+#endif 5.18 - 5.19 - #include <asm/io.h> 5.20 - #include <asm/kregs.h> 5.21 -@@ -218,6 +221,7 @@ efi_gettimeofday (struct timespec *ts) 5.22 - if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) 5.23 - return; 5.24 - 5.25 -+ dummy(); 5.26 - ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second); 5.27 - ts->tv_nsec = tm.nanosecond; 5.28 - } 5.29 -@@ -320,6 +324,10 @@ efi_memmap_walk (efi_freemem_callback_t 5.30 +--- ../../linux-2.6.11/arch/ia64/kernel/efi.c 2005-03-02 00:37:47.000000000 -0700 5.31 ++++ arch/ia64/efi.c 2005-04-29 14:09:24.000000000 -0600 5.32 +@@ -320,6 +320,10 @@ 5.33 if (!(md->attribute & EFI_MEMORY_WB)) 5.34 continue; 5.35 5.36 @@ -34,7 +11,7 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 5.37 /* 5.38 * granule_addr is the base of md's first granule. 5.39 * [granule_addr - first_non_wb_addr) is guaranteed to 5.40 -@@ -719,6 +727,30 @@ efi_get_iobase (void) 5.41 +@@ -719,6 +723,30 @@ 5.42 return 0; 5.43 } 5.44
6.1 --- a/xen/arch/ia64/patch/linux-2.6.11/entry.S Thu Apr 28 22:40:58 2005 +0000 6.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/entry.S Fri Apr 29 22:09:13 2005 +0000 6.3 @@ -1,86 +1,34 @@ 6.4 - entry.S | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 6.5 - 1 files changed, 85 insertions(+), 1 deletion(-) 6.6 - 6.7 -Index: linux-2.6.11-xendiffs/arch/ia64/kernel/entry.S 6.8 -=================================================================== 6.9 ---- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/entry.S 2005-04-08 13:32:07.636308237 -0500 6.10 -+++ linux-2.6.11-xendiffs/arch/ia64/kernel/entry.S 2005-04-08 13:37:04.612542509 -0500 6.11 -@@ -35,7 +35,9 @@ 6.12 - 6.13 - #include <asm/asmmacro.h> 6.14 - #include <asm/cache.h> 6.15 -+#ifndef XEN 6.16 - #include <asm/errno.h> 6.17 -+#endif 6.18 - #include <asm/kregs.h> 6.19 - #include <asm/offsets.h> 6.20 - #include <asm/pgtable.h> 6.21 -@@ -46,6 +48,25 @@ 6.22 +--- ../../linux-2.6.11/arch/ia64/kernel/entry.S 2005-03-02 00:37:50.000000000 -0700 6.23 ++++ arch/ia64/entry.S 2005-04-29 14:54:13.000000000 -0600 6.24 +@@ -46,6 +46,7 @@ 6.25 6.26 #include "minstate.h" 6.27 6.28 -+#ifdef XEN 6.29 -+#define sys_execve 0 6.30 -+#define do_fork 0 6.31 -+#define syscall_trace_enter 0 6.32 -+#define syscall_trace_leave 0 6.33 -+#define schedule 0 6.34 -+#define do_notify_resume_user 0 6.35 -+#define ia64_rt_sigsuspend 0 6.36 -+#define ia64_rt_sigreturn 0 6.37 -+#define ia64_handle_unaligned 0 6.38 -+#define errno 0 6.39 -+#define sys_ni_syscall 0 6.40 -+#define unw_init_frame_info 0 6.41 -+#define sys_call_table 0 6.42 -+#define do_sigdelayed 0 6.43 -+#endif 6.44 -+ 6.45 -+ /* 6.46 -+ 6.47 ++#ifndef XEN 6.48 /* 6.49 * execve() is special because in case of success, we need to 6.50 * setup a null register window frame. 6.51 -@@ -187,11 +208,14 @@ GLOBAL_ENTRY(ia64_switch_to) 6.52 - DO_SAVE_SWITCH_STACK 6.53 - .body 6.54 +@@ -174,6 +175,7 @@ 6.55 + mov rp=loc0 6.56 + br.ret.sptk.many rp 6.57 + END(sys_clone) 6.58 ++#endif /* !XEN */ 6.59 6.60 -+#ifdef XEN 6.61 -+//#undef IA64_TASK_THREAD_KSP_OFFSET 6.62 -+//#define IA64_TASK_THREAD_KSP_OFFSET 0x38 6.63 - adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13 6.64 + /* 6.65 + * prev_task <- ia64_switch_to(struct task_struct *next) 6.66 +@@ -191,7 +193,11 @@ 6.67 movl r25=init_task 6.68 mov r27=IA64_KR(CURRENT_STACK) 6.69 adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0 6.70 -- dep r20=0,in0,61,3 // physical address of "next" 6.71 ++#ifdef XEN 6.72 + dep r20=0,in0,60,4 // physical address of "next" 6.73 ++#else 6.74 + dep r20=0,in0,61,3 // physical address of "next" 6.75 ++#endif 6.76 ;; 6.77 st8 [r22]=sp // save kernel stack pointer of old task 6.78 shr.u r26=r20,IA64_GRANULE_SHIFT 6.79 -@@ -203,6 +227,22 @@ GLOBAL_ENTRY(ia64_switch_to) 6.80 - (p6) cmp.eq p7,p6=r26,r27 6.81 - (p6) br.cond.dpnt .map 6.82 - ;; 6.83 -+#else 6.84 -+ adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13 6.85 -+ mov r27=IA64_KR(CURRENT_STACK) 6.86 -+ dep r20=0,in0,61,3 // physical address of "current" 6.87 -+ ;; 6.88 -+ st8 [r22]=sp // save kernel stack pointer of old task 6.89 -+ shr.u r26=r20,IA64_GRANULE_SHIFT 6.90 -+ adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0 6.91 -+ ;; 6.92 -+ /* 6.93 -+ * If we've already mapped this task's page, we can skip doing it again. 6.94 -+ */ 6.95 -+ cmp.eq p7,p6=r26,r27 6.96 -+(p6) br.cond.dpnt .map 6.97 -+ ;; 6.98 -+#endif 6.99 - .done: 6.100 - (p6) ssm psr.ic // if we had to map, reenable the psr.ic bit FIRST!!! 
6.101 - ;; 6.102 -@@ -220,6 +260,16 @@ GLOBAL_ENTRY(ia64_switch_to) 6.103 +@@ -220,6 +226,16 @@ 6.104 br.ret.sptk.many rp // boogie on out in new context 6.105 6.106 .map: 6.107 @@ -97,7 +45,7 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 6.108 rsm psr.ic // interrupts (psr.i) are already disabled here 6.109 movl r25=PAGE_KERNEL 6.110 ;; 6.111 -@@ -376,7 +426,11 @@ END(save_switch_stack) 6.112 +@@ -376,7 +392,11 @@ 6.113 * - b7 holds address to return to 6.114 * - must not touch r8-r11 6.115 */ 6.116 @@ -109,7 +57,23 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 6.117 .prologue 6.118 .altrp b7 6.119 6.120 -@@ -604,6 +658,11 @@ GLOBAL_ENTRY(ia64_ret_from_clone) 6.121 +@@ -470,6 +490,7 @@ 6.122 + br.cond.sptk.many b7 6.123 + END(load_switch_stack) 6.124 + 6.125 ++#ifndef XEN 6.126 + GLOBAL_ENTRY(__ia64_syscall) 6.127 + .regstk 6,0,0,0 6.128 + mov r15=in5 // put syscall number in place 6.129 +@@ -588,6 +609,7 @@ 6.130 + } 6.131 + .ret4: br.cond.sptk ia64_leave_kernel 6.132 + END(ia64_strace_leave_kernel) 6.133 ++#endif 6.134 + 6.135 + GLOBAL_ENTRY(ia64_ret_from_clone) 6.136 + PT_REGS_UNWIND_INFO(0) 6.137 +@@ -604,6 +626,11 @@ 6.138 */ 6.139 br.call.sptk.many rp=ia64_invoke_schedule_tail 6.140 } 6.141 @@ -121,7 +85,7 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 6.142 .ret8: 6.143 adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 6.144 ;; 6.145 -@@ -614,6 +673,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone) 6.146 +@@ -614,6 +641,7 @@ 6.147 ;; 6.148 cmp.ne p6,p0=r2,r0 6.149 (p6) br.cond.spnt .strace_check_retval 6.150 @@ -129,14 +93,11 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 6.151 ;; // added stop bits to prevent r8 dependency 6.152 END(ia64_ret_from_clone) 6.153 // fall through 6.154 -@@ -700,9 +760,14 @@ ENTRY(ia64_leave_syscall) 6.155 +@@ -700,19 +728,25 @@ 6.156 .work_processed_syscall: 6.157 adds r2=PT(LOADRS)+16,r12 6.158 adds r3=PT(AR_BSPSTORE)+16,r12 6.159 -+#ifdef XEN 6.160 -+ mov r31=r0 6.161 -+ ;; 6.162 -+#else 6.163 ++#ifndef XEN 6.164 adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 6.165 ;; 6.166 (p6) ld4 r31=[r18] // load current_thread_info()->flags 6.167 @@ -144,7 +105,21 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 6.168 ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs" 6.169 mov b7=r0 // clear b7 6.170 ;; 6.171 -@@ -757,7 +822,11 @@ ENTRY(ia64_leave_syscall) 6.172 + ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage) 6.173 + ld8 r18=[r2],PT(R9)-PT(B6) // load b6 6.174 ++#ifndef XEN 6.175 + (p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE? 6.176 ++#endif 6.177 + ;; 6.178 + mov r16=ar.bsp // M2 get existing backing store pointer 6.179 ++#ifndef XEN 6.180 + (p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending? 
6.181 + (p6) br.cond.spnt .work_pending_syscall 6.182 ++#endif 6.183 + ;; 6.184 + // start restoring the state saved on the kernel stack (struct pt_regs): 6.185 + ld8 r9=[r2],PT(CR_IPSR)-PT(R9) 6.186 +@@ -757,7 +791,11 @@ 6.187 ;; 6.188 ld8.fill r12=[r2] // restore r12 (sp) 6.189 ld8.fill r15=[r3] // restore r15 6.190 @@ -156,7 +131,7 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 6.191 ;; 6.192 (pUStk) ld4 r3=[r3] // r3 = cpu_data->phys_stacked_size_p8 6.193 (pUStk) st1 [r14]=r17 6.194 -@@ -814,9 +883,18 @@ GLOBAL_ENTRY(ia64_leave_kernel) 6.195 +@@ -814,9 +852,18 @@ 6.196 (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk 6.197 #endif 6.198 .work_processed_kernel: 6.199 @@ -175,7 +150,17 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 6.200 adds r21=PT(PR)+16,r12 6.201 ;; 6.202 6.203 -@@ -934,7 +1012,11 @@ GLOBAL_ENTRY(ia64_leave_kernel) 6.204 +@@ -838,7 +885,9 @@ 6.205 + ;; 6.206 + ld8 r29=[r2],16 // load b7 6.207 + ld8 r30=[r3],16 // load ar.csd 6.208 ++#ifndef XEN 6.209 + (p6) br.cond.spnt .work_pending 6.210 ++#endif 6.211 + ;; 6.212 + ld8 r31=[r2],16 // load ar.ssd 6.213 + ld8.fill r8=[r3],16 6.214 +@@ -934,7 +983,11 @@ 6.215 shr.u r18=r19,16 // get byte size of existing "dirty" partition 6.216 ;; 6.217 mov r16=ar.bsp // get existing backing store pointer 6.218 @@ -187,15 +172,47 @@ Index: linux-2.6.11-xendiffs/arch/ia64/k 6.219 ;; 6.220 ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8 6.221 (pKStk) br.cond.dpnt skip_rbs_switch 6.222 -@@ -1323,6 +1405,7 @@ GLOBAL_ENTRY(unw_init_running) 6.223 +@@ -1069,6 +1122,7 @@ 6.224 + mov pr=r31,-1 // I0 6.225 + rfi // B 6.226 + 6.227 ++#ifndef XEN 6.228 + /* 6.229 + * On entry: 6.230 + * r20 = ¤t->thread_info->pre_count (if CONFIG_PREEMPT) 6.231 +@@ -1130,6 +1184,7 @@ 6.232 + ld8 r8=[r2] 6.233 + ld8 r10=[r3] 6.234 + br.cond.sptk.many .work_processed_syscall // re-check 6.235 ++#endif 6.236 + 6.237 + END(ia64_leave_kernel) 6.238 + 6.239 +@@ -1166,6 +1221,7 @@ 6.240 br.ret.sptk.many rp 6.241 - END(unw_init_running) 6.242 + END(ia64_invoke_schedule_tail) 6.243 6.244 +#ifndef XEN 6.245 - .rodata 6.246 - .align 8 6.247 - .globl sys_call_table 6.248 -@@ -1585,3 +1668,4 @@ sys_call_table: 6.249 + /* 6.250 + * Setup stack and call do_notify_resume_user(). Note that pSys and pNonSys need to 6.251 + * be set up by the caller. We declare 8 input registers so the system call 6.252 +@@ -1264,6 +1320,7 @@ 6.253 + mov ar.unat=r9 6.254 + br.many b7 6.255 + END(sys_rt_sigreturn) 6.256 ++#endif 6.257 + 6.258 + GLOBAL_ENTRY(ia64_prepare_handle_unaligned) 6.259 + .prologue 6.260 +@@ -1278,6 +1335,7 @@ 6.261 + br.cond.sptk.many rp // goes to ia64_leave_kernel 6.262 + END(ia64_prepare_handle_unaligned) 6.263 + 6.264 ++#ifndef XEN 6.265 + // 6.266 + // unw_init_running(void (*callback)(info, arg), void *arg) 6.267 + // 6.268 +@@ -1585,3 +1643,4 @@ 6.269 data8 sys_ni_syscall 6.270 6.271 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
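One detail of the ia64_switch_to hunk in the entry.S patch above: when building for Xen, the physical address of "next" is formed with dep r20=0,in0,60,4 rather than the stock dep r20=0,in0,61,3, i.e. four high bits are cleared instead of the usual three region bits. The two deposits are equivalent to the masks below, shown only as an illustration (the helper names are invented, and the reason for the wider clear is not stated by the patch itself):

  #include <stdint.h>

  /* dep rX=0,va,61,3: deposit zero into bits 61-63 (stock Linux) */
  static uint64_t phys_of_va_linux(uint64_t va)
  {
          return va & ((1ULL << 61) - 1);
  }

  /* dep rX=0,va,60,4: deposit zero into bits 60-63 (the XEN case) */
  static uint64_t phys_of_va_xen(uint64_t va)
  {
          return va & ((1ULL << 60) - 1);
  }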
7.1 --- a/xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h Thu Apr 28 22:40:58 2005 +0000 7.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 7.3 @@ -1,31 +0,0 @@ 7.4 - hpsim_ssc.h | 19 +++++++++++++++++++ 7.5 - 1 files changed, 19 insertions(+) 7.6 - 7.7 -Index: linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h 7.8 -=================================================================== 7.9 ---- linux-2.6.11.orig/arch/ia64/hp/sim/hpsim_ssc.h 2005-03-02 01:38:17.000000000 -0600 7.10 -+++ linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h 2005-03-19 13:34:01.705520375 -0600 7.11 -@@ -33,4 +33,23 @@ 7.12 - */ 7.13 - extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr); 7.14 - 7.15 -+#ifdef XEN 7.16 -+/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong 7.17 -+ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */ 7.18 -+#define SSC_OPEN 50 7.19 -+#define SSC_CLOSE 51 7.20 -+#define SSC_READ 52 7.21 -+#define SSC_WRITE 53 7.22 -+#define SSC_GET_COMPLETION 54 7.23 -+#define SSC_WAIT_COMPLETION 55 7.24 -+ 7.25 -+#define SSC_WRITE_ACCESS 2 7.26 -+#define SSC_READ_ACCESS 1 7.27 -+ 7.28 -+struct ssc_disk_req { 7.29 -+ unsigned long addr; 7.30 -+ unsigned long len; 7.31 -+}; 7.32 -+#endif 7.33 -+ 7.34 - #endif /* _IA64_PLATFORM_HPSIM_SSC_H */
8.1 --- a/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c Thu Apr 28 22:40:58 2005 +0000 8.2 +++ b/xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c Fri Apr 29 22:09:13 2005 +0000 8.3 @@ -1,111 +1,22 @@ 8.4 - irq_ia64.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 8.5 - 1 files changed, 67 insertions(+) 8.6 - 8.7 -Index: linux-2.6.11-xendiffs/arch/ia64/kernel/irq_ia64.c 8.8 -=================================================================== 8.9 ---- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/irq_ia64.c 2005-04-08 13:30:16.777174938 -0500 8.10 -+++ linux-2.6.11-xendiffs/arch/ia64/kernel/irq_ia64.c 2005-04-08 14:15:47.398616472 -0500 8.11 -@@ -17,18 +17,26 @@ 8.12 - #include <linux/config.h> 8.13 - #include <linux/module.h> 8.14 - 8.15 -+#ifndef XEN 8.16 - #include <linux/jiffies.h> 8.17 -+#endif 8.18 - #include <linux/errno.h> 8.19 - #include <linux/init.h> 8.20 - #include <linux/interrupt.h> 8.21 - #include <linux/ioport.h> 8.22 -+#ifndef XEN 8.23 - #include <linux/kernel_stat.h> 8.24 -+#endif 8.25 - #include <linux/slab.h> 8.26 -+#ifndef XEN 8.27 - #include <linux/ptrace.h> 8.28 - #include <linux/random.h> /* for rand_initialize_irq() */ 8.29 - #include <linux/signal.h> 8.30 -+#endif 8.31 - #include <linux/smp.h> 8.32 -+#ifndef XEN 8.33 - #include <linux/smp_lock.h> 8.34 -+#endif 8.35 - #include <linux/threads.h> 8.36 - #include <linux/bitops.h> 8.37 - 8.38 -@@ -104,6 +112,24 @@ void 8.39 - ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) 8.40 - { 8.41 +--- ../../linux-2.6.11/arch/ia64/kernel/irq_ia64.c 2005-03-02 00:38:07.000000000 -0700 8.42 ++++ arch/ia64/irq_ia64.c 2005-04-29 16:05:30.000000000 -0600 8.43 +@@ -106,6 +106,9 @@ 8.44 unsigned long saved_tpr; 8.45 -+#if 0 8.46 -+//FIXME: For debug only, can be removed 8.47 -+ static char firstirq = 1; 8.48 -+ static char firsttime[256]; 8.49 -+ static char firstpend[256]; 8.50 -+ if (firstirq) { 8.51 -+ int i; 8.52 -+ for (i=0;i<256;i++) firsttime[i] = 1; 8.53 -+ for (i=0;i<256;i++) firstpend[i] = 1; 8.54 -+ firstirq = 0; 8.55 -+ } 8.56 -+ if (firsttime[vector]) { 8.57 -+ printf("**** (entry) First received int on vector=%d,itc=%lx\n", 8.58 -+ (unsigned long) vector, ia64_get_itc()); 8.59 -+ firsttime[vector] = 0; 8.60 -+ } 8.61 -+#endif 8.62 -+ 8.63 8.64 #if IRQ_DEBUG 8.65 ++#ifdef XEN 8.66 ++ xen_debug_irq(vector, regs); 8.67 ++#endif 8.68 { 8.69 -@@ -148,6 +174,27 @@ ia64_handle_irq (ia64_vector vector, str 8.70 + unsigned long bsp, sp; 8.71 + 8.72 +@@ -148,6 +151,9 @@ 8.73 ia64_setreg(_IA64_REG_CR_TPR, vector); 8.74 ia64_srlz_d(); 8.75 8.76 +#ifdef XEN 8.77 -+ if (vector != 0xef) { 8.78 -+ extern void vcpu_pend_interrupt(void *, int); 8.79 -+#if 0 8.80 -+ if (firsttime[vector]) { 8.81 -+ printf("**** (iterate) First received int on vector=%d,itc=%lx\n", 8.82 -+ (unsigned long) vector, ia64_get_itc()); 8.83 -+ firsttime[vector] = 0; 8.84 -+ } 8.85 -+ if (firstpend[vector]) { 8.86 -+ printf("**** First pended int on vector=%d,itc=%lx\n", 8.87 -+ (unsigned long) vector,ia64_get_itc()); 8.88 -+ firstpend[vector] = 0; 8.89 -+ } 8.90 -+#endif 8.91 -+ //FIXME: TEMPORARY HACK!!!! 
8.92 -+ vcpu_pend_interrupt(dom0->exec_domain[0],vector); 8.93 -+ domain_wake(dom0->exec_domain[0]); 8.94 -+ } 8.95 -+ else 8.96 ++ if (!xen_do_IRQ(vector)) 8.97 +#endif 8.98 __do_IRQ(local_vector_to_irq(vector), regs); 8.99 8.100 /* 8.101 -@@ -276,3 +323,23 @@ ia64_send_ipi (int cpu, int vector, int 8.102 - 8.103 - writeq(ipi_data, ipi_addr); 8.104 - } 8.105 -+ 8.106 -+/* From linux/kernel/softirq.c */ 8.107 -+#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED 8.108 -+# define invoke_softirq() __do_softirq() 8.109 -+#else 8.110 -+# define invoke_softirq() do_softirq() 8.111 -+#endif 8.112 -+ 8.113 -+/* 8.114 -+ * Exit an interrupt context. Process softirqs if needed and possible: 8.115 -+ */ 8.116 -+void irq_exit(void) 8.117 -+{ 8.118 -+ account_system_vtime(current); 8.119 -+ sub_preempt_count(IRQ_EXIT_OFFSET); 8.120 -+ if (!in_interrupt() && local_softirq_pending()) 8.121 -+ invoke_softirq(); 8.122 -+ preempt_enable_no_resched(); 8.123 -+} 8.124 -+/* end from linux/kernel/softirq.c */
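With this change the irq_ia64.c patch no longer open-codes the dom0 forwarding; it only hooks xen_debug_irq() and xen_do_IRQ(), whose bodies move to the new xen/arch/ia64/xenirq.c later in this changeset. The contract at the call site is that xen_do_IRQ() returns nonzero once it has pended the vector to dom0, in which case the generic __do_IRQ() path is skipped. A condensed paraphrase of the patched dispatch, for illustration only (prototypes approximated; deliver_vector is a made-up stand-in for the corresponding part of ia64_handle_irq):

  struct pt_regs;
  extern int xen_do_IRQ(unsigned long vector);
  extern unsigned int local_vector_to_irq(unsigned long vector);
  extern void __do_IRQ(unsigned int irq, struct pt_regs *regs);

  static void deliver_vector(unsigned long vector, struct pt_regs *regs)
  {
  #ifdef XEN
          if (!xen_do_IRQ(vector))        /* nonzero: already pended to dom0 */
  #endif
                  __do_IRQ(local_vector_to_irq(vector), regs);
          /* the remainder of ia64_handle_irq() continues unchanged */
  }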
9.1 --- a/xen/arch/ia64/patch/linux-2.6.11/ivt.S Thu Apr 28 22:40:58 2005 +0000 9.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 9.3 @@ -1,533 +0,0 @@ 9.4 - ivt.S | 254 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 9.5 - 1 files changed, 254 insertions(+) 9.6 - 9.7 -Index: linux-2.6.11-xendiffs/arch/ia64/kernel/ivt.S 9.8 -=================================================================== 9.9 ---- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/ivt.S 2005-04-07 10:29:00.565766924 -0500 9.10 -+++ linux-2.6.11-xendiffs/arch/ia64/kernel/ivt.S 2005-04-07 10:29:50.923594750 -0500 9.11 -@@ -1,3 +1,21 @@ 9.12 -+ 9.13 -+#ifdef XEN 9.14 -+//#define CONFIG_DISABLE_VHPT // FIXME: change when VHPT is enabled?? 9.15 -+// these are all hacked out for now as the entire IVT 9.16 -+// will eventually be replaced... just want to use it 9.17 -+// for startup code to handle TLB misses 9.18 -+//#define ia64_leave_kernel 0 9.19 -+//#define ia64_ret_from_syscall 0 9.20 -+//#define ia64_handle_irq 0 9.21 -+//#define ia64_fault 0 9.22 -+#define ia64_illegal_op_fault 0 9.23 -+#define ia64_prepare_handle_unaligned 0 9.24 -+#define ia64_bad_break 0 9.25 -+#define ia64_trace_syscall 0 9.26 -+#define sys_call_table 0 9.27 -+#define sys_ni_syscall 0 9.28 -+#include <asm/vhpt.h> 9.29 -+#endif 9.30 - /* 9.31 - * arch/ia64/kernel/ivt.S 9.32 - * 9.33 -@@ -77,6 +95,13 @@ 9.34 - mov r19=n;; /* prepare to save predicates */ \ 9.35 - br.sptk.many dispatch_to_fault_handler 9.36 - 9.37 -+#ifdef XEN 9.38 -+#define REFLECT(n) \ 9.39 -+ mov r31=pr; \ 9.40 -+ mov r19=n;; /* prepare to save predicates */ \ 9.41 -+ br.sptk.many dispatch_reflection 9.42 -+#endif 9.43 -+ 9.44 - .section .text.ivt,"ax" 9.45 - 9.46 - .align 32768 // align on 32KB boundary 9.47 -@@ -214,6 +239,13 @@ END(vhpt_miss) 9.48 - // 0x0400 Entry 1 (size 64 bundles) ITLB (21) 9.49 - ENTRY(itlb_miss) 9.50 - DBG_FAULT(1) 9.51 -+#ifdef XEN 9.52 -+ VHPT_CCHAIN_LOOKUP(itlb_miss,i) 9.53 -+#ifdef VHPT_GLOBAL 9.54 -+ br.cond.sptk page_fault 9.55 -+ ;; 9.56 -+#endif 9.57 -+#endif 9.58 - /* 9.59 - * The ITLB handler accesses the L3 PTE via the virtually mapped linear 9.60 - * page table. If a nested TLB miss occurs, we switch into physical 9.61 -@@ -258,6 +290,13 @@ END(itlb_miss) 9.62 - // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48) 9.63 - ENTRY(dtlb_miss) 9.64 - DBG_FAULT(2) 9.65 -+#ifdef XEN 9.66 -+ VHPT_CCHAIN_LOOKUP(dtlb_miss,d) 9.67 -+#ifdef VHPT_GLOBAL 9.68 -+ br.cond.sptk page_fault 9.69 -+ ;; 9.70 -+#endif 9.71 -+#endif 9.72 - /* 9.73 - * The DTLB handler accesses the L3 PTE via the virtually mapped linear 9.74 - * page table. 
If a nested TLB miss occurs, we switch into physical 9.75 -@@ -302,6 +341,13 @@ END(dtlb_miss) 9.76 - // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19) 9.77 - ENTRY(alt_itlb_miss) 9.78 - DBG_FAULT(3) 9.79 -+#ifdef XEN 9.80 -+//#ifdef VHPT_GLOBAL 9.81 -+// VHPT_CCHAIN_LOOKUP(alt_itlb_miss,i) 9.82 -+// br.cond.sptk page_fault 9.83 -+// ;; 9.84 -+//#endif 9.85 -+#endif 9.86 - mov r16=cr.ifa // get address that caused the TLB miss 9.87 - movl r17=PAGE_KERNEL 9.88 - mov r21=cr.ipsr 9.89 -@@ -340,6 +386,13 @@ END(alt_itlb_miss) 9.90 - // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46) 9.91 - ENTRY(alt_dtlb_miss) 9.92 - DBG_FAULT(4) 9.93 -+#ifdef XEN 9.94 -+//#ifdef VHPT_GLOBAL 9.95 -+// VHPT_CCHAIN_LOOKUP(alt_dtlb_miss,d) 9.96 -+// br.cond.sptk page_fault 9.97 -+// ;; 9.98 -+//#endif 9.99 -+#endif 9.100 - mov r16=cr.ifa // get address that caused the TLB miss 9.101 - movl r17=PAGE_KERNEL 9.102 - mov r20=cr.isr 9.103 -@@ -369,6 +422,17 @@ ENTRY(alt_dtlb_miss) 9.104 - cmp.ne p8,p0=r0,r23 9.105 - (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field 9.106 - (p8) br.cond.spnt page_fault 9.107 -+#ifdef XEN 9.108 -+ ;; 9.109 -+ // FIXME: inadequate test, this is where we test for Xen address 9.110 -+ // note that 0xf000 (cached) and 0xd000 (uncached) addresses 9.111 -+ // should be OK. (Though no I/O is done in Xen, EFI needs uncached 9.112 -+ // addresses and some domain EFI calls are passed through) 9.113 -+ tbit.nz p0,p8=r16,60 9.114 -+(p8) br.cond.spnt page_fault 9.115 -+//(p8) br.cond.spnt 0 9.116 -+ ;; 9.117 -+#endif 9.118 - 9.119 - dep r21=-1,r21,IA64_PSR_ED_BIT,1 9.120 - or r19=r19,r17 // insert PTE control bits into r19 9.121 -@@ -449,6 +513,9 @@ END(nested_dtlb_miss) 9.122 - ///////////////////////////////////////////////////////////////////////////////////////// 9.123 - // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24) 9.124 - ENTRY(ikey_miss) 9.125 -+#ifdef XEN 9.126 -+ REFLECT(6) 9.127 -+#endif 9.128 - DBG_FAULT(6) 9.129 - FAULT(6) 9.130 - END(ikey_miss) 9.131 -@@ -461,9 +528,16 @@ ENTRY(page_fault) 9.132 - srlz.i 9.133 - ;; 9.134 - SAVE_MIN_WITH_COVER 9.135 -+#ifdef XEN 9.136 -+ alloc r15=ar.pfs,0,0,4,0 9.137 -+ mov out0=cr.ifa 9.138 -+ mov out1=cr.isr 9.139 -+ mov out3=cr.itir 9.140 -+#else 9.141 - alloc r15=ar.pfs,0,0,3,0 9.142 - mov out0=cr.ifa 9.143 - mov out1=cr.isr 9.144 -+#endif 9.145 - adds r3=8,r2 // set up second base pointer 9.146 - ;; 9.147 - ssm psr.ic | PSR_DEFAULT_BITS 9.148 -@@ -484,6 +558,9 @@ END(page_fault) 9.149 - ///////////////////////////////////////////////////////////////////////////////////////// 9.150 - // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51) 9.151 - ENTRY(dkey_miss) 9.152 -+#ifdef XEN 9.153 -+ REFLECT(7) 9.154 -+#endif 9.155 - DBG_FAULT(7) 9.156 - FAULT(7) 9.157 - END(dkey_miss) 9.158 -@@ -492,6 +569,9 @@ END(dkey_miss) 9.159 - ///////////////////////////////////////////////////////////////////////////////////////// 9.160 - // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54) 9.161 - ENTRY(dirty_bit) 9.162 -+#ifdef XEN 9.163 -+ REFLECT(8) 9.164 -+#endif 9.165 - DBG_FAULT(8) 9.166 - /* 9.167 - * What we do here is to simply turn on the dirty bit in the PTE. 
We need to 9.168 -@@ -554,6 +634,9 @@ END(dirty_bit) 9.169 - ///////////////////////////////////////////////////////////////////////////////////////// 9.170 - // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27) 9.171 - ENTRY(iaccess_bit) 9.172 -+#ifdef XEN 9.173 -+ REFLECT(9) 9.174 -+#endif 9.175 - DBG_FAULT(9) 9.176 - // Like Entry 8, except for instruction access 9.177 - mov r16=cr.ifa // get the address that caused the fault 9.178 -@@ -619,6 +702,9 @@ END(iaccess_bit) 9.179 - ///////////////////////////////////////////////////////////////////////////////////////// 9.180 - // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55) 9.181 - ENTRY(daccess_bit) 9.182 -+#ifdef XEN 9.183 -+ REFLECT(10) 9.184 -+#endif 9.185 - DBG_FAULT(10) 9.186 - // Like Entry 8, except for data access 9.187 - mov r16=cr.ifa // get the address that caused the fault 9.188 -@@ -687,6 +773,16 @@ ENTRY(break_fault) 9.189 - * to prevent leaking bits from kernel to user level. 9.190 - */ 9.191 - DBG_FAULT(11) 9.192 -+#ifdef XEN 9.193 -+ mov r16=cr.isr 9.194 -+ mov r17=cr.iim 9.195 -+ mov r31=pr 9.196 -+ ;; 9.197 -+ cmp.eq p7,p0=r0,r17 // is this a psuedo-cover? 9.198 -+ // FIXME: may also need to check slot==2? 9.199 -+(p7) br.sptk.many dispatch_privop_fault 9.200 -+ br.sptk.many dispatch_break_fault 9.201 -+#endif 9.202 - mov r16=IA64_KR(CURRENT) // r16 = current task; 12 cycle read lat. 9.203 - mov r17=cr.iim 9.204 - mov r18=__IA64_BREAK_SYSCALL 9.205 -@@ -697,7 +793,9 @@ ENTRY(break_fault) 9.206 - mov r27=ar.rsc 9.207 - mov r26=ar.pfs 9.208 - mov r28=cr.iip 9.209 -+#ifndef XEN 9.210 - mov r31=pr // prepare to save predicates 9.211 -+#endif 9.212 - mov r20=r1 9.213 - ;; 9.214 - adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 9.215 -@@ -797,6 +895,36 @@ END(interrupt) 9.216 - DBG_FAULT(13) 9.217 - FAULT(13) 9.218 - 9.219 -+#ifdef XEN 9.220 -+ // There is no particular reason for this code to be here, other than that 9.221 -+ // there happens to be space here that would go unused otherwise. If this 9.222 -+ // fault ever gets "unreserved", simply moved the following code to a more 9.223 -+ // suitable spot... 9.224 -+ 9.225 -+ENTRY(dispatch_break_fault) 9.226 -+ SAVE_MIN_WITH_COVER 9.227 -+ ;; 9.228 -+ alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) 9.229 -+ mov out0=cr.ifa 9.230 -+ adds out1=16,sp 9.231 -+ mov out2=cr.isr // FIXME: pity to make this slow access twice 9.232 -+ mov out3=cr.iim // FIXME: pity to make this slow access twice 9.233 -+ 9.234 -+ ssm psr.ic | PSR_DEFAULT_BITS 9.235 -+ ;; 9.236 -+ srlz.i // guarantee that interruption collection is on 9.237 -+ ;; 9.238 -+(p15) ssm psr.i // restore psr.i 9.239 -+ adds r3=8,r2 // set up second base pointer 9.240 -+ ;; 9.241 -+ SAVE_REST 9.242 -+ movl r14=ia64_leave_kernel 9.243 -+ ;; 9.244 -+ mov rp=r14 9.245 -+ br.sptk.many ia64_prepare_handle_break 9.246 -+END(dispatch_break_fault) 9.247 -+#endif 9.248 -+ 9.249 - .org ia64_ivt+0x3800 9.250 - ///////////////////////////////////////////////////////////////////////////////////////// 9.251 - // 0x3800 Entry 14 (size 64 bundles) Reserved 9.252 -@@ -850,9 +978,11 @@ END(interrupt) 9.253 - * - ar.fpsr: set to kernel settings 9.254 - */ 9.255 - GLOBAL_ENTRY(ia64_syscall_setup) 9.256 -+#ifndef XEN 9.257 - #if PT(B6) != 0 9.258 - # error This code assumes that b6 is the first field in pt_regs. 
9.259 - #endif 9.260 -+#endif 9.261 - st8 [r1]=r19 // save b6 9.262 - add r16=PT(CR_IPSR),r1 // initialize first base pointer 9.263 - add r17=PT(R11),r1 // initialize second base pointer 9.264 -@@ -992,6 +1122,37 @@ END(dispatch_illegal_op_fault) 9.265 - DBG_FAULT(16) 9.266 - FAULT(16) 9.267 - 9.268 -+#ifdef XEN 9.269 -+ // There is no particular reason for this code to be here, other than that 9.270 -+ // there happens to be space here that would go unused otherwise. If this 9.271 -+ // fault ever gets "unreserved", simply moved the following code to a more 9.272 -+ // suitable spot... 9.273 -+ 9.274 -+ENTRY(dispatch_privop_fault) 9.275 -+ SAVE_MIN_WITH_COVER 9.276 -+ ;; 9.277 -+ alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) 9.278 -+ mov out0=cr.ifa 9.279 -+ adds out1=16,sp 9.280 -+ mov out2=cr.isr // FIXME: pity to make this slow access twice 9.281 -+ mov out3=cr.itir 9.282 -+ 9.283 -+ ssm psr.ic | PSR_DEFAULT_BITS 9.284 -+ ;; 9.285 -+ srlz.i // guarantee that interruption collection is on 9.286 -+ ;; 9.287 -+(p15) ssm psr.i // restore psr.i 9.288 -+ adds r3=8,r2 // set up second base pointer 9.289 -+ ;; 9.290 -+ SAVE_REST 9.291 -+ movl r14=ia64_leave_kernel 9.292 -+ ;; 9.293 -+ mov rp=r14 9.294 -+ br.sptk.many ia64_prepare_handle_privop 9.295 -+END(dispatch_privop_fault) 9.296 -+#endif 9.297 -+ 9.298 -+ 9.299 - .org ia64_ivt+0x4400 9.300 - ///////////////////////////////////////////////////////////////////////////////////////// 9.301 - // 0x4400 Entry 17 (size 64 bundles) Reserved 9.302 -@@ -1108,6 +1269,9 @@ END(dispatch_to_fault_handler) 9.303 - ///////////////////////////////////////////////////////////////////////////////////////// 9.304 - // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) 9.305 - ENTRY(page_not_present) 9.306 -+#ifdef XEN 9.307 -+ REFLECT(20) 9.308 -+#endif 9.309 - DBG_FAULT(20) 9.310 - mov r16=cr.ifa 9.311 - rsm psr.dt 9.312 -@@ -1128,6 +1292,9 @@ END(page_not_present) 9.313 - ///////////////////////////////////////////////////////////////////////////////////////// 9.314 - // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) 9.315 - ENTRY(key_permission) 9.316 -+#ifdef XEN 9.317 -+ REFLECT(21) 9.318 -+#endif 9.319 - DBG_FAULT(21) 9.320 - mov r16=cr.ifa 9.321 - rsm psr.dt 9.322 -@@ -1141,6 +1308,9 @@ END(key_permission) 9.323 - ///////////////////////////////////////////////////////////////////////////////////////// 9.324 - // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) 9.325 - ENTRY(iaccess_rights) 9.326 -+#ifdef XEN 9.327 -+ REFLECT(22) 9.328 -+#endif 9.329 - DBG_FAULT(22) 9.330 - mov r16=cr.ifa 9.331 - rsm psr.dt 9.332 -@@ -1154,6 +1324,9 @@ END(iaccess_rights) 9.333 - ///////////////////////////////////////////////////////////////////////////////////////// 9.334 - // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) 9.335 - ENTRY(daccess_rights) 9.336 -+#ifdef XEN 9.337 -+ REFLECT(23) 9.338 -+#endif 9.339 - DBG_FAULT(23) 9.340 - mov r16=cr.ifa 9.341 - rsm psr.dt 9.342 -@@ -1171,8 +1344,13 @@ ENTRY(general_exception) 9.343 - mov r16=cr.isr 9.344 - mov r31=pr 9.345 - ;; 9.346 -+#ifdef XEN 9.347 -+ cmp4.ge p6,p0=0x20,r16 9.348 -+(p6) br.sptk.many dispatch_privop_fault 9.349 -+#else 9.350 - cmp4.eq p6,p0=0,r16 9.351 - (p6) br.sptk.many dispatch_illegal_op_fault 9.352 -+#endif 9.353 - ;; 9.354 - mov r19=24 // fault number 9.355 - br.sptk.many dispatch_to_fault_handler 9.356 -@@ -1182,6 +1360,9 @@ END(general_exception) 9.357 - 
///////////////////////////////////////////////////////////////////////////////////////// 9.358 - // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) 9.359 - ENTRY(disabled_fp_reg) 9.360 -+#ifdef XEN 9.361 -+ REFLECT(25) 9.362 -+#endif 9.363 - DBG_FAULT(25) 9.364 - rsm psr.dfh // ensure we can access fph 9.365 - ;; 9.366 -@@ -1195,6 +1376,9 @@ END(disabled_fp_reg) 9.367 - ///////////////////////////////////////////////////////////////////////////////////////// 9.368 - // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) 9.369 - ENTRY(nat_consumption) 9.370 -+#ifdef XEN 9.371 -+ REFLECT(26) 9.372 -+#endif 9.373 - DBG_FAULT(26) 9.374 - FAULT(26) 9.375 - END(nat_consumption) 9.376 -@@ -1203,6 +1387,10 @@ END(nat_consumption) 9.377 - ///////////////////////////////////////////////////////////////////////////////////////// 9.378 - // 0x5700 Entry 27 (size 16 bundles) Speculation (40) 9.379 - ENTRY(speculation_vector) 9.380 -+#ifdef XEN 9.381 -+ // this probably need not reflect... 9.382 -+ REFLECT(27) 9.383 -+#endif 9.384 - DBG_FAULT(27) 9.385 - /* 9.386 - * A [f]chk.[as] instruction needs to take the branch to the recovery code but 9.387 -@@ -1246,6 +1434,9 @@ END(speculation_vector) 9.388 - ///////////////////////////////////////////////////////////////////////////////////////// 9.389 - // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) 9.390 - ENTRY(debug_vector) 9.391 -+#ifdef XEN 9.392 -+ REFLECT(29) 9.393 -+#endif 9.394 - DBG_FAULT(29) 9.395 - FAULT(29) 9.396 - END(debug_vector) 9.397 -@@ -1254,6 +1445,9 @@ END(debug_vector) 9.398 - ///////////////////////////////////////////////////////////////////////////////////////// 9.399 - // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) 9.400 - ENTRY(unaligned_access) 9.401 -+#ifdef XEN 9.402 -+ REFLECT(30) 9.403 -+#endif 9.404 - DBG_FAULT(30) 9.405 - mov r16=cr.ipsr 9.406 - mov r31=pr // prepare to save predicates 9.407 -@@ -1265,6 +1459,9 @@ END(unaligned_access) 9.408 - ///////////////////////////////////////////////////////////////////////////////////////// 9.409 - // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) 9.410 - ENTRY(unsupported_data_reference) 9.411 -+#ifdef XEN 9.412 -+ REFLECT(31) 9.413 -+#endif 9.414 - DBG_FAULT(31) 9.415 - FAULT(31) 9.416 - END(unsupported_data_reference) 9.417 -@@ -1273,6 +1470,9 @@ END(unsupported_data_reference) 9.418 - ///////////////////////////////////////////////////////////////////////////////////////// 9.419 - // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64) 9.420 - ENTRY(floating_point_fault) 9.421 -+#ifdef XEN 9.422 -+ REFLECT(32) 9.423 -+#endif 9.424 - DBG_FAULT(32) 9.425 - FAULT(32) 9.426 - END(floating_point_fault) 9.427 -@@ -1281,6 +1481,9 @@ END(floating_point_fault) 9.428 - ///////////////////////////////////////////////////////////////////////////////////////// 9.429 - // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) 9.430 - ENTRY(floating_point_trap) 9.431 -+#ifdef XEN 9.432 -+ REFLECT(33) 9.433 -+#endif 9.434 - DBG_FAULT(33) 9.435 - FAULT(33) 9.436 - END(floating_point_trap) 9.437 -@@ -1289,6 +1492,9 @@ END(floating_point_trap) 9.438 - ///////////////////////////////////////////////////////////////////////////////////////// 9.439 - // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) 9.440 - ENTRY(lower_privilege_trap) 9.441 -+#ifdef XEN 9.442 -+ REFLECT(34) 9.443 -+#endif 9.444 - DBG_FAULT(34) 9.445 - FAULT(34) 9.446 - END(lower_privilege_trap) 9.447 -@@ -1297,6 +1503,9 @@ 
END(lower_privilege_trap) 9.448 - ///////////////////////////////////////////////////////////////////////////////////////// 9.449 - // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) 9.450 - ENTRY(taken_branch_trap) 9.451 -+#ifdef XEN 9.452 -+ REFLECT(35) 9.453 -+#endif 9.454 - DBG_FAULT(35) 9.455 - FAULT(35) 9.456 - END(taken_branch_trap) 9.457 -@@ -1305,6 +1514,9 @@ END(taken_branch_trap) 9.458 - ///////////////////////////////////////////////////////////////////////////////////////// 9.459 - // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) 9.460 - ENTRY(single_step_trap) 9.461 -+#ifdef XEN 9.462 -+ REFLECT(36) 9.463 -+#endif 9.464 - DBG_FAULT(36) 9.465 - FAULT(36) 9.466 - END(single_step_trap) 9.467 -@@ -1361,6 +1573,9 @@ END(single_step_trap) 9.468 - ///////////////////////////////////////////////////////////////////////////////////////// 9.469 - // 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) 9.470 - ENTRY(ia32_exception) 9.471 -+#ifdef XEN 9.472 -+ REFLECT(45) 9.473 -+#endif 9.474 - DBG_FAULT(45) 9.475 - FAULT(45) 9.476 - END(ia32_exception) 9.477 -@@ -1369,6 +1584,9 @@ END(ia32_exception) 9.478 - ///////////////////////////////////////////////////////////////////////////////////////// 9.479 - // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) 9.480 - ENTRY(ia32_intercept) 9.481 -+#ifdef XEN 9.482 -+ REFLECT(46) 9.483 -+#endif 9.484 - DBG_FAULT(46) 9.485 - #ifdef CONFIG_IA32_SUPPORT 9.486 - mov r31=pr 9.487 -@@ -1399,6 +1617,9 @@ END(ia32_intercept) 9.488 - ///////////////////////////////////////////////////////////////////////////////////////// 9.489 - // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) 9.490 - ENTRY(ia32_interrupt) 9.491 -+#ifdef XEN 9.492 -+ REFLECT(47) 9.493 -+#endif 9.494 - DBG_FAULT(47) 9.495 - #ifdef CONFIG_IA32_SUPPORT 9.496 - mov r31=pr 9.497 -@@ -1528,6 +1749,39 @@ END(ia32_interrupt) 9.498 - DBG_FAULT(67) 9.499 - FAULT(67) 9.500 - 9.501 -+#ifdef XEN 9.502 -+ .org ia64_ivt+0x8000 9.503 -+ENTRY(dispatch_reflection) 9.504 -+ /* 9.505 -+ * Input: 9.506 -+ * psr.ic: off 9.507 -+ * r19: intr type (offset into ivt, see ia64_int.h) 9.508 -+ * r31: contains saved predicates (pr) 9.509 -+ */ 9.510 -+ SAVE_MIN_WITH_COVER_R19 9.511 -+ alloc r14=ar.pfs,0,0,5,0 9.512 -+ mov out4=r15 9.513 -+ mov out0=cr.ifa 9.514 -+ adds out1=16,sp 9.515 -+ mov out2=cr.isr 9.516 -+ mov out3=cr.iim 9.517 -+// mov out3=cr.itir 9.518 -+ 9.519 -+ ssm psr.ic | PSR_DEFAULT_BITS 9.520 -+ ;; 9.521 -+ srlz.i // guarantee that interruption collection is on 9.522 -+ ;; 9.523 -+(p15) ssm psr.i // restore psr.i 9.524 -+ adds r3=8,r2 // set up second base pointer 9.525 -+ ;; 9.526 -+ SAVE_REST 9.527 -+ movl r14=ia64_leave_kernel 9.528 -+ ;; 9.529 -+ mov rp=r14 9.530 -+ br.sptk.many ia64_prepare_handle_reflection 9.531 -+END(dispatch_reflection) 9.532 -+#endif 9.533 -+ 9.534 - #ifdef CONFIG_IA32_SUPPORT 9.535 - 9.536 - /*
10.1 --- a/xen/arch/ia64/patch/linux-2.6.11/lds.S Thu Apr 28 22:40:58 2005 +0000 10.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 10.3 @@ -1,22 +0,0 @@ 10.4 - vmlinux.lds.S | 2 ++ 10.5 - 1 files changed, 2 insertions(+) 10.6 - 10.7 -Index: linux-2.6.11/arch/ia64/kernel/vmlinux.lds.S 10.8 -=================================================================== 10.9 ---- linux-2.6.11.orig/arch/ia64/kernel/vmlinux.lds.S 2005-03-02 01:38:25.000000000 -0600 10.10 -+++ linux-2.6.11/arch/ia64/kernel/vmlinux.lds.S 2005-03-19 13:44:28.746368232 -0600 10.11 -@@ -11,12 +11,14 @@ 10.12 - OUTPUT_FORMAT("elf64-ia64-little") 10.13 - OUTPUT_ARCH(ia64) 10.14 - ENTRY(phys_start) 10.15 -+#ifndef XEN 10.16 - jiffies = jiffies_64; 10.17 - PHDRS { 10.18 - code PT_LOAD; 10.19 - percpu PT_LOAD; 10.20 - data PT_LOAD; 10.21 - } 10.22 -+#endif 10.23 - SECTIONS 10.24 - { 10.25 - /* Sections to be discarded */
11.1 --- a/xen/arch/ia64/tools/mkbuildtree Thu Apr 28 22:40:58 2005 +0000 11.2 +++ b/xen/arch/ia64/tools/mkbuildtree Fri Apr 29 22:09:13 2005 +0000 11.3 @@ -85,7 +85,7 @@ cp_patch arch/ia64/kernel/entry.S arch/i 11.4 cp_patch arch/ia64/kernel/head.S arch/ia64/head.S head.S 11.5 #cp_patch arch/ia64/kernel/init_task.c arch/ia64/init_task.c init_task.c 11.6 cp_patch arch/ia64/kernel/irq_ia64.c arch/ia64/irq_ia64.c irq_ia64.c 11.7 -cp_patch arch/ia64/kernel/ivt.S arch/ia64/ivt.S ivt.S 11.8 +#cp_patch arch/ia64/kernel/ivt.S arch/ia64/ivt.S ivt.S 11.9 #cp_patch arch/ia64/kernel/minstate.h arch/ia64/minstate.h minstate.h 11.10 cp_patch arch/ia64/kernel/setup.c arch/ia64/setup.c setup.c 11.11 cp_patch arch/ia64/kernel/time.c arch/ia64/time.c time.c 11.12 @@ -317,6 +317,11 @@ null include/asm-ia64/linux/profile.h 11.13 null include/asm-ia64/linux/seqlock.h 11.14 null include/asm-ia64/linux/smp_lock.h 11.15 null include/asm-ia64/linux/tty.h 11.16 +null include/asm-ia64/linux/jiffies.h 11.17 +null include/asm-ia64/linux/kernel_stat.h 11.18 +null include/asm-ia64/linux/ptrace.h 11.19 +null include/asm-ia64/linux/random.h 11.20 +null include/asm-ia64/linux/signal.h 11.21 11.22 softlink include/linux/byteorder/generic.h include/asm-ia64/linux/byteorder/generic.h 11.23 softlink include/linux/byteorder/little_endian.h include/asm-ia64/linux/byteorder/little_endian.h
12.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 12.2 +++ b/xen/arch/ia64/xenirq.c Fri Apr 29 22:09:13 2005 +0000 12.3 @@ -0,0 +1,77 @@ 12.4 +/* 12.5 + * Xen irq routines 12.6 + * 12.7 + * Copyright (C) 2005 Hewlett-Packard Co. 12.8 + * Dan Magenheimer (dan.magenheimer@hp.com) 12.9 + * 12.10 + */ 12.11 + 12.12 +#include <asm/ptrace.h> 12.13 +#include <asm/hw_irq.h> 12.14 + 12.15 + 12.16 +void 12.17 +xen_debug_irq(ia64_vector vector, struct pt_regs *regs) 12.18 +{ 12.19 +//FIXME: For debug only, can be removed 12.20 + static char firstirq = 1; 12.21 + static char firsttime[256]; 12.22 + static char firstpend[256]; 12.23 + if (firstirq) { 12.24 + int i; 12.25 + for (i=0;i<256;i++) firsttime[i] = 1; 12.26 + for (i=0;i<256;i++) firstpend[i] = 1; 12.27 + firstirq = 0; 12.28 + } 12.29 + if (firsttime[vector]) { 12.30 + printf("**** (entry) First received int on vector=%d,itc=%lx\n", 12.31 + (unsigned long) vector, ia64_get_itc()); 12.32 + firsttime[vector] = 0; 12.33 + } 12.34 +} 12.35 + 12.36 + 12.37 +int 12.38 +xen_do_IRQ(ia64_vector vector) 12.39 +{ 12.40 + if (vector != 0xef) { 12.41 + extern void vcpu_pend_interrupt(void *, int); 12.42 +#if 0 12.43 + if (firsttime[vector]) { 12.44 + printf("**** (iterate) First received int on vector=%d,itc=%lx\n", 12.45 + (unsigned long) vector, ia64_get_itc()); 12.46 + firsttime[vector] = 0; 12.47 + } 12.48 + if (firstpend[vector]) { 12.49 + printf("**** First pended int on vector=%d,itc=%lx\n", 12.50 + (unsigned long) vector,ia64_get_itc()); 12.51 + firstpend[vector] = 0; 12.52 + } 12.53 +#endif 12.54 + //FIXME: TEMPORARY HACK!!!! 12.55 + vcpu_pend_interrupt(dom0->exec_domain[0],vector); 12.56 + domain_wake(dom0->exec_domain[0]); 12.57 + return(1); 12.58 + } 12.59 + return(0); 12.60 +} 12.61 + 12.62 +/* From linux/kernel/softirq.c */ 12.63 +#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED 12.64 +# define invoke_softirq() __do_softirq() 12.65 +#else 12.66 +# define invoke_softirq() do_softirq() 12.67 +#endif 12.68 + 12.69 +/* 12.70 + * Exit an interrupt context. Process softirqs if needed and possible: 12.71 + */ 12.72 +void irq_exit(void) 12.73 +{ 12.74 + //account_system_vtime(current); 12.75 + //sub_preempt_count(IRQ_EXIT_OFFSET); 12.76 + if (!in_interrupt() && local_softirq_pending()) 12.77 + invoke_softirq(); 12.78 + //preempt_enable_no_resched(); 12.79 +} 12.80 +/* end from linux/kernel/softirq.c */