direct-io.hg
changeset 9849:ced37bea0647
[IA64] FPH enabling + cleanup
Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors. FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Cleanup (unused functions/variables/fields, debug printf...)
vmx_ia64_switch_to removed (was unused).
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors. FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Cleanup (unused functions/variables/fields, debug printf...)
vmx_ia64_switch_to removed (was unused).
Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author | awilliam@xenbuild.aw |
---|---|
date | Tue Apr 25 22:35:41 2006 -0600 (2006-04-25) |
parents | d23c088eac6d |
children | 9d88feed1189 |
files | xen/arch/ia64/linux-xen/setup.c xen/arch/ia64/vmx/vmx_entry.S xen/arch/ia64/xen/domain.c xen/arch/ia64/xen/process.c xen/arch/ia64/xen/xenmisc.c xen/arch/ia64/xen/xensetup.c xen/include/asm-ia64/domain.h xen/include/asm-ia64/linux-xen/asm/io.h xen/include/asm-ia64/linux-xen/asm/page.h xen/include/asm-ia64/linux-xen/asm/pgtable.h xen/include/asm-ia64/linux-xen/asm/system.h xen/include/asm-ia64/xenpage.h xen/include/asm-ia64/xensystem.h |
line diff
1.1 --- a/xen/arch/ia64/linux-xen/setup.c Tue Apr 25 22:32:14 2006 -0600 1.2 +++ b/xen/arch/ia64/linux-xen/setup.c Tue Apr 25 22:35:41 2006 -0600 1.3 @@ -384,7 +384,9 @@ setup_arch (char **cmdline_p) 1.4 { 1.5 unw_init(); 1.6 1.7 +#ifndef XEN 1.8 ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); 1.9 +#endif 1.10 1.11 *cmdline_p = __va(ia64_boot_param->command_line); 1.12 #ifndef XEN 1.13 @@ -870,6 +872,11 @@ cpu_init (void) 1.14 #endif 1.15 BUG(); 1.16 1.17 +#ifdef XEN 1.18 + ia64_fph_enable(); 1.19 + __ia64_init_fpu(); 1.20 +#endif 1.21 + 1.22 ia64_mmu_init(ia64_imva(cpu_data)); 1.23 ia64_mca_cpu_init(ia64_imva(cpu_data)); 1.24 1.25 @@ -931,9 +938,11 @@ cpu_init (void) 1.26 #endif 1.27 } 1.28 1.29 +#ifndef XEN 1.30 void 1.31 check_bugs (void) 1.32 { 1.33 ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, 1.34 (unsigned long) __end___mckinley_e9_bundles); 1.35 } 1.36 +#endif
2.1 --- a/xen/arch/ia64/vmx/vmx_entry.S Tue Apr 25 22:32:14 2006 -0600 2.2 +++ b/xen/arch/ia64/vmx/vmx_entry.S Tue Apr 25 22:35:41 2006 -0600 2.3 @@ -37,48 +37,6 @@ 2.4 #include <asm/vmmu.h> 2.5 #include "vmx_minstate.h" 2.6 2.7 -/* 2.8 - * prev_task <- vmx_ia64_switch_to(struct task_struct *next) 2.9 - * With Ingo's new scheduler, interrupts are disabled when this routine gets 2.10 - * called. The code starting at .map relies on this. The rest of the code 2.11 - * doesn't care about the interrupt masking status. 2.12 - * 2.13 - * Since we allocate domain stack in xenheap, there's no need to map new 2.14 - * domain's stack since all xenheap is mapped by TR. Another different task 2.15 - * for vmx_ia64_switch_to is to switch to bank0 and change current pointer. 2.16 - */ 2.17 -GLOBAL_ENTRY(vmx_ia64_switch_to) 2.18 - .prologue 2.19 - alloc r16=ar.pfs,1,0,0,0 2.20 - DO_SAVE_SWITCH_STACK 2.21 - .body 2.22 - 2.23 - bsw.0 // Switch to bank0, because bank0 r21 is current pointer 2.24 - ;; 2.25 - adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13 2.26 - movl r25=init_task 2.27 - adds r26=IA64_TASK_THREAD_KSP_OFFSET,in0 2.28 - ;; 2.29 - st8 [r22]=sp // save kernel stack pointer of old task 2.30 - ;; 2.31 - /* 2.32 - * TR always mapped this task's page, we can skip doing it again. 2.33 - */ 2.34 - ld8 sp=[r26] // load kernel stack pointer of new task 2.35 - mov r21=in0 // update "current" application register 2.36 - mov r8=r13 // return pointer to previously running task 2.37 - mov r13=in0 // set "current" pointer 2.38 - ;; 2.39 - bsw.1 2.40 - ;; 2.41 - DO_LOAD_SWITCH_STACK 2.42 - 2.43 -#ifdef CONFIG_SMP 2.44 - sync.i // ensure "fc"s done by this CPU are visible on other CPUs 2.45 -#endif 2.46 - br.ret.sptk.many rp // boogie on out in new context 2.47 -END(vmx_ia64_switch_to) 2.48 - 2.49 GLOBAL_ENTRY(ia64_leave_nested) 2.50 rsm psr.i 2.51 ;;
3.1 --- a/xen/arch/ia64/xen/domain.c Tue Apr 25 22:32:14 2006 -0600 3.2 +++ b/xen/arch/ia64/xen/domain.c Tue Apr 25 22:35:41 2006 -0600 3.3 @@ -72,11 +72,8 @@ extern unsigned long running_on_sim; 3.4 #define IS_XEN_ADDRESS(d,a) ((a >= d->xen_vastart) && (a <= d->xen_vaend)) 3.5 3.6 /* FIXME: where these declarations should be there ? */ 3.7 -extern void domain_pend_keyboard_interrupt(int); 3.8 extern long platform_is_hp_ski(void); 3.9 -extern void sync_split_caches(void); 3.10 extern void serial_input_init(void); 3.11 - 3.12 static void init_switch_stack(struct vcpu *v); 3.13 void build_physmap_table(struct domain *d); 3.14 3.15 @@ -145,23 +142,6 @@ void startup_cpu_idle_loop(void) 3.16 /* Just some sanity to ensure that the scheduler is set up okay. */ 3.17 ASSERT(current->domain == IDLE_DOMAIN_ID); 3.18 raise_softirq(SCHEDULE_SOFTIRQ); 3.19 -#if 0 3.20 -//do we have to ensure the idle task has a shared page so that, for example, 3.21 -//region registers can be loaded from it. Apparently not... 3.22 - idle0_task.shared_info = (void *)alloc_xenheap_page(); 3.23 - memset(idle0_task.shared_info, 0, PAGE_SIZE); 3.24 - /* pin mapping */ 3.25 - // FIXME: Does this belong here? Or do only at domain switch time? 
3.26 - { 3.27 - /* WARNING: following must be inlined to avoid nested fault */ 3.28 - unsigned long psr = ia64_clear_ic(); 3.29 - ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR, 3.30 - pte_val(pfn_pte(ia64_tpa(idle0_task.shared_info) >> PAGE_SHIFT, PAGE_KERNEL)), 3.31 - PAGE_SHIFT); 3.32 - ia64_set_psr(psr); 3.33 - ia64_srlz_i(); 3.34 - } 3.35 -#endif 3.36 3.37 continue_cpu_idle_loop(); 3.38 } 3.39 @@ -304,7 +284,6 @@ void arch_getdomaininfo_ctxt(struct vcpu 3.40 { 3.41 struct pt_regs *regs = vcpu_regs (v); 3.42 3.43 - printf("arch_getdomaininfo_ctxt\n"); 3.44 c->regs = *regs; 3.45 c->vcpu.evtchn_vector = v->vcpu_info->arch.evtchn_vector; 3.46 3.47 @@ -316,7 +295,6 @@ int arch_set_info_guest(struct vcpu *v, 3.48 struct pt_regs *regs = vcpu_regs (v); 3.49 struct domain *d = v->domain; 3.50 3.51 - printf("arch_set_info_guest\n"); 3.52 if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) ) 3.53 return 0; 3.54 if (c->flags & VGCF_VMX_GUEST) { 3.55 @@ -1237,9 +1215,8 @@ void alloc_dom0(void) 3.56 dom0_start = alloc_boot_pages(dom0_size >> PAGE_SHIFT, dom0_align >> PAGE_SHIFT); 3.57 dom0_start <<= PAGE_SHIFT; 3.58 if (!dom0_start) { 3.59 - printf("alloc_dom0: can't allocate contiguous memory size=%lu\n", 3.60 + panic("alloc_dom0: can't allocate contiguous memory size=%lu\n", 3.61 dom0_size); 3.62 - while(1); 3.63 } 3.64 printf("alloc_dom0: dom0_start=0x%lx\n", dom0_start); 3.65 #else 3.66 @@ -1495,17 +1472,6 @@ void dummy_called(char *function) 3.67 while(1); 3.68 } 3.69 3.70 - 3.71 -#if 0 3.72 -void switch_to(struct vcpu *prev, struct vcpu *next) 3.73 -{ 3.74 - struct vcpu *last; 3.75 - 3.76 - __switch_to(prev,next,last); 3.77 - //set_current(next); 3.78 -} 3.79 -#endif 3.80 - 3.81 void domain_pend_keyboard_interrupt(int irq) 3.82 { 3.83 vcpu_pend_interrupt(dom0->vcpu[0],irq); 3.84 @@ -1513,13 +1479,9 @@ void domain_pend_keyboard_interrupt(int 3.85 3.86 void sync_vcpu_execstate(struct vcpu *v) 3.87 { 3.88 - ia64_save_fpu(v->arch._thread.fph); 3.89 + 
__ia64_save_fpu(v->arch._thread.fph); 3.90 if (VMX_DOMAIN(v)) 3.91 vmx_save_state(v); 3.92 - else { 3.93 - if (IA64_HAS_EXTRA_STATE(v)) 3.94 - ia64_save_extra(v); 3.95 - } 3.96 // FIXME SMP: Anything else needed here for SMP? 3.97 } 3.98
4.1 --- a/xen/arch/ia64/xen/process.c Tue Apr 25 22:32:14 2006 -0600 4.2 +++ b/xen/arch/ia64/xen/process.c Tue Apr 25 22:35:41 2006 -0600 4.3 @@ -219,9 +219,6 @@ void reflect_interruption(unsigned long 4.4 4.5 regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL; 4.6 regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET; 4.7 -#ifdef CONFIG_SMP 4.8 -#warning "SMP FIXME: sharedinfo doesn't handle smp yet, need page per vcpu" 4.9 -#endif 4.10 regs->r31 = XSI_IPSR; 4.11 4.12 v->vcpu_info->evtchn_upcall_mask = 1;
5.1 --- a/xen/arch/ia64/xen/xenmisc.c Tue Apr 25 22:32:14 2006 -0600 5.2 +++ b/xen/arch/ia64/xen/xenmisc.c Tue Apr 25 22:35:41 2006 -0600 5.3 @@ -1,6 +1,6 @@ 5.4 /* 5.5 * Xen misc 5.6 - * 5.7 + * 5.8 * Functions/decls that are/may be needed to link with Xen because 5.9 * of x86 dependencies 5.10 * 5.11 @@ -21,11 +21,8 @@ 5.12 #include <asm/debugger.h> 5.13 #include <asm/vmx.h> 5.14 #include <asm/vmx_vcpu.h> 5.15 +#include <asm/vcpu.h> 5.16 5.17 -efi_memory_desc_t ia64_efi_io_md; 5.18 -EXPORT_SYMBOL(ia64_efi_io_md); 5.19 -unsigned long wait_init_idle; 5.20 -int phys_proc_id[NR_CPUS]; 5.21 unsigned long loops_per_jiffy = (1<<12); // from linux/init/main.c 5.22 5.23 /* FIXME: where these declarations should be there ? */ 5.24 @@ -33,8 +30,6 @@ extern void show_registers(struct pt_reg 5.25 5.26 void ia64_mca_init(void) { printf("ia64_mca_init() skipped (Machine check abort handling)\n"); } 5.27 void ia64_mca_cpu_init(void *x) { } 5.28 -void ia64_patch_mckinley_e9(unsigned long a, unsigned long b) { } 5.29 -void ia64_patch_vtop(unsigned long a, unsigned long b) { } 5.30 void hpsim_setup(char **x) 5.31 { 5.32 #ifdef CONFIG_SMP 5.33 @@ -68,22 +63,9 @@ platform_is_hp_ski(void) 5.34 return running_on_sim; 5.35 } 5.36 5.37 -/* calls in xen/common code that are unused on ia64 */ 5.38 - 5.39 -void sync_lazy_execstate_cpu(unsigned int cpu) {} 5.40 - 5.41 -#if 0 5.42 -int grant_table_create(struct domain *d) { return 0; } 5.43 -void grant_table_destroy(struct domain *d) { return; } 5.44 -#endif 5.45 5.46 struct pt_regs *guest_cpu_user_regs(void) { return vcpu_regs(current); } 5.47 5.48 -void raise_actimer_softirq(void) 5.49 -{ 5.50 - raise_softirq(TIMER_SOFTIRQ); 5.51 -} 5.52 - 5.53 unsigned long 5.54 gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn) 5.55 { 5.56 @@ -127,15 +109,12 @@ u32 tlbflush_time[NR_CPUS]; 5.57 /////////////////////////////// 5.58 5.59 5.60 -void free_page_type(struct page_info *page, u32 type) 5.61 +static void free_page_type(struct page_info 
*page, u32 type) 5.62 { 5.63 -// dummy(); 5.64 - return; 5.65 } 5.66 5.67 -int alloc_page_type(struct page_info *page, u32 type) 5.68 +static int alloc_page_type(struct page_info *page, u32 type) 5.69 { 5.70 -// dummy(); 5.71 return 1; 5.72 } 5.73 5.74 @@ -161,7 +140,7 @@ void *pgtable_quicklist_alloc(void) 5.75 { 5.76 void *p; 5.77 p = alloc_xenheap_pages(0); 5.78 - if (p) 5.79 + if (p) 5.80 clear_page(p); 5.81 return p; 5.82 } 5.83 @@ -276,12 +255,10 @@ void *search_module_extables(unsigned lo 5.84 void *__module_text_address(unsigned long addr) { return NULL; } 5.85 void *module_text_address(unsigned long addr) { return NULL; } 5.86 5.87 -void cs10foo(void) {} 5.88 -void cs01foo(void) {} 5.89 - 5.90 unsigned long context_switch_count = 0; 5.91 5.92 -#include <asm/vcpu.h> 5.93 +extern struct vcpu *ia64_switch_to (struct vcpu *next_task); 5.94 + 5.95 5.96 void context_switch(struct vcpu *prev, struct vcpu *next) 5.97 { 5.98 @@ -289,14 +266,20 @@ void context_switch(struct vcpu *prev, s 5.99 uint64_t pta; 5.100 5.101 local_irq_save(spsr); 5.102 -// if(VMX_DOMAIN(prev)){ 5.103 -// vtm_domain_out(prev); 5.104 -// } 5.105 - context_switch_count++; 5.106 - switch_to(prev,next,prev); 5.107 -// if(VMX_DOMAIN(current)){ 5.108 -// vtm_domain_in(current); 5.109 -// } 5.110 + context_switch_count++; 5.111 + 5.112 + __ia64_save_fpu(prev->arch._thread.fph); 5.113 + __ia64_load_fpu(next->arch._thread.fph); 5.114 + if (VMX_DOMAIN(prev)) 5.115 + vmx_save_state(prev); 5.116 + if (VMX_DOMAIN(next)) 5.117 + vmx_load_state(next); 5.118 + /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/ 5.119 + prev = ia64_switch_to(next); 5.120 + if (!VMX_DOMAIN(current)){ 5.121 + vcpu_set_next_timer(current); 5.122 + } 5.123 + 5.124 5.125 // leave this debug for now: it acts as a heartbeat when more than 5.126 // one domain is active 5.127 @@ -309,28 +292,26 @@ if (!i--) { printk("+"); i = 1000000; } 5.128 } 5.129 5.130 if (VMX_DOMAIN(current)){ 5.131 -// 
vtm_domain_in(current); 5.132 vmx_load_all_rr(current); 5.133 }else{ 5.134 extern char ia64_ivt; 5.135 ia64_set_iva(&ia64_ivt); 5.136 if (!is_idle_domain(current->domain)) { 5.137 ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | 5.138 - VHPT_ENABLED); 5.139 + VHPT_ENABLED); 5.140 load_region_regs(current); 5.141 vcpu_load_kernel_regs(current); 5.142 - if (vcpu_timer_expired(current)) 5.143 - vcpu_pend_timer(current); 5.144 + if (vcpu_timer_expired(current)) 5.145 + vcpu_pend_timer(current); 5.146 }else { 5.147 - /* When switching to idle domain, only need to disable vhpt 5.148 - * walker. Then all accesses happen within idle context will 5.149 - * be handled by TR mapping and identity mapping. 5.150 - */ 5.151 - pta = ia64_get_pta(); 5.152 - ia64_set_pta(pta & ~VHPT_ENABLED); 5.153 + /* When switching to idle domain, only need to disable vhpt 5.154 + * walker. Then all accesses happen within idle context will 5.155 + * be handled by TR mapping and identity mapping. 5.156 + */ 5.157 + pta = ia64_get_pta(); 5.158 + ia64_set_pta(pta & ~VHPT_ENABLED); 5.159 } 5.160 } 5.161 - 5.162 local_irq_restore(spsr); 5.163 context_saved(prev); 5.164 } 5.165 @@ -349,9 +330,9 @@ void panic_domain(struct pt_regs *regs, 5.166 va_list args; 5.167 char buf[128]; 5.168 struct vcpu *v = current; 5.169 - 5.170 + 5.171 printf("$$$$$ PANIC in domain %d (k6=0x%lx): ", 5.172 - v->domain->domain_id, 5.173 + v->domain->domain_id, 5.174 __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT]); 5.175 va_start(args, fmt); 5.176 (void)vsnprintf(buf, sizeof(buf), fmt, args); 5.177 @@ -395,19 +376,19 @@ void put_page_type(struct page_info *pag 5.178 ASSERT((x & PGT_count_mask) != 0); 5.179 5.180 /* 5.181 - * The page should always be validated while a reference is held. The 5.182 - * exception is during domain destruction, when we forcibly invalidate 5.183 + * The page should always be validated while a reference is held. 
The 5.184 + * exception is during domain destruction, when we forcibly invalidate 5.185 * page-table pages if we detect a referential loop. 5.186 * See domain.c:relinquish_list(). 5.187 */ 5.188 - ASSERT((x & PGT_validated) || 5.189 + ASSERT((x & PGT_validated) || 5.190 test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags)); 5.191 5.192 if ( unlikely((nx & PGT_count_mask) == 0) ) 5.193 { 5.194 /* Record TLB information for flush later. Races are harmless. */ 5.195 page->tlbflush_timestamp = tlbflush_current_time(); 5.196 - 5.197 + 5.198 if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) && 5.199 likely(nx & PGT_validated) ) 5.200 { 5.201 @@ -416,7 +397,7 @@ void put_page_type(struct page_info *pag 5.202 * 'free' is safe because the refcnt is non-zero and validated 5.203 * bit is clear => other ops will spin or fail. 5.204 */ 5.205 - if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, 5.206 + if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, 5.207 x & ~PGT_validated)) != x) ) 5.208 goto again; 5.209 /* We cleared the 'valid bit' so we do the clean up. */ 5.210 @@ -426,7 +407,7 @@ void put_page_type(struct page_info *pag 5.211 nx &= ~PGT_validated; 5.212 } 5.213 } 5.214 - else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) == 5.215 + else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) == 5.216 (PGT_pinned | 1)) && 5.217 ((nx & PGT_type_mask) != PGT_writable_page)) ) 5.218 {
6.1 --- a/xen/arch/ia64/xen/xensetup.c Tue Apr 25 22:32:14 2006 -0600 6.2 +++ b/xen/arch/ia64/xen/xensetup.c Tue Apr 25 22:35:41 2006 -0600 6.3 @@ -319,9 +319,6 @@ void start_kernel(void) 6.4 6.5 init_frametable(); 6.6 6.7 - ia64_fph_enable(); 6.8 - __ia64_init_fpu(); 6.9 - 6.10 alloc_dom0(); 6.11 6.12 end_boot_allocator();
7.1 --- a/xen/include/asm-ia64/domain.h Tue Apr 25 22:32:14 2006 -0600 7.2 +++ b/xen/include/asm-ia64/domain.h Tue Apr 25 22:35:41 2006 -0600 7.3 @@ -63,7 +63,6 @@ struct arch_domain { 7.4 offsetof(vcpu_info_t, evtchn_upcall_mask)) 7.5 7.6 struct arch_vcpu { 7.7 -#if 1 7.8 TR_ENTRY itrs[NITRS]; 7.9 TR_ENTRY dtrs[NDTRS]; 7.10 TR_ENTRY itlb; 7.11 @@ -81,8 +80,11 @@ struct arch_vcpu { 7.12 unsigned long domain_itm; 7.13 unsigned long domain_itm_last; 7.14 unsigned long xen_itm; 7.15 -#endif 7.16 + 7.17 mapped_regs_t *privregs; /* save the state of vcpu */ 7.18 + 7.19 + /* These fields are copied from arch_domain to make access easier/faster 7.20 + in assembly code. */ 7.21 unsigned long metaphysical_rr0; // from arch_domain (so is pinned) 7.22 unsigned long metaphysical_rr4; // from arch_domain (so is pinned) 7.23 unsigned long metaphysical_saved_rr0; // from arch_domain (so is pinned) 7.24 @@ -90,6 +92,7 @@ struct arch_vcpu { 7.25 int breakimm; // from arch_domain (so is pinned) 7.26 int starting_rid; /* first RID assigned to domain */ 7.27 int ending_rid; /* one beyond highest RID assigned to domain */ 7.28 + 7.29 struct thread_struct _thread; // this must be last 7.30 7.31 thash_cb_t vtlb; 7.32 @@ -108,58 +111,9 @@ struct arch_vcpu { 7.33 // FOLLOWING FROM linux-2.6.7/include/sched.h 7.34 7.35 struct mm_struct { 7.36 - struct vm_area_struct * mmap; /* list of VMAs */ 7.37 -#ifndef XEN 7.38 - struct rb_root mm_rb; 7.39 -#endif 7.40 - struct vm_area_struct * mmap_cache; /* last find_vma result */ 7.41 - unsigned long free_area_cache; /* first hole */ 7.42 pgd_t * pgd; 7.43 - atomic_t mm_users; /* How many users with user space? 
*/ 7.44 - atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ 7.45 - int map_count; /* number of VMAs */ 7.46 -#ifndef XEN 7.47 - struct rw_semaphore mmap_sem; 7.48 -#endif 7.49 - spinlock_t page_table_lock; /* Protects task page tables and mm->rss */ 7.50 - 7.51 + // atomic_t mm_users; /* How many users with user space? */ 7.52 struct list_head pt_list; /* List of pagetable */ 7.53 - 7.54 - struct list_head mmlist; /* List of all active mm's. These are globally strung 7.55 - * together off init_mm.mmlist, and are protected 7.56 - * by mmlist_lock 7.57 - */ 7.58 - 7.59 -#ifndef XEN 7.60 - unsigned long start_code, end_code, start_data, end_data; 7.61 - unsigned long start_brk, brk, start_stack; 7.62 - unsigned long arg_start, arg_end, env_start, env_end; 7.63 - unsigned long rss, total_vm, locked_vm; 7.64 - unsigned long def_flags; 7.65 - 7.66 - unsigned long saved_auxv[40]; /* for /proc/PID/auxv */ 7.67 - 7.68 - unsigned dumpable:1; 7.69 -#endif 7.70 -#ifdef CONFIG_HUGETLB_PAGE 7.71 - int used_hugetlb; 7.72 -#endif 7.73 -#ifndef XEN 7.74 - cpumask_t cpu_vm_mask; 7.75 - 7.76 - /* Architecture-specific MM context */ 7.77 - mm_context_t context; 7.78 - 7.79 - /* coredumping support */ 7.80 - int core_waiters; 7.81 - struct completion *core_startup_done, core_done; 7.82 - 7.83 - /* aio bits */ 7.84 - rwlock_t ioctx_list_lock; 7.85 - struct kioctx *ioctx_list; 7.86 - 7.87 - struct kioctx default_kioctx; 7.88 -#endif 7.89 }; 7.90 7.91 extern struct mm_struct init_mm;
8.1 --- a/xen/include/asm-ia64/linux-xen/asm/io.h Tue Apr 25 22:32:14 2006 -0600 8.2 +++ b/xen/include/asm-ia64/linux-xen/asm/io.h Tue Apr 25 22:35:41 2006 -0600 8.3 @@ -23,7 +23,9 @@ 8.4 #define __SLOW_DOWN_IO do { } while (0) 8.5 #define SLOW_DOWN_IO do { } while (0) 8.6 8.7 -#ifndef XEN 8.8 +#ifdef XEN 8.9 +#include <asm/xensystem.h> 8.10 +#else 8.11 #define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */ 8.12 #endif 8.13
9.1 --- a/xen/include/asm-ia64/linux-xen/asm/page.h Tue Apr 25 22:32:14 2006 -0600 9.2 +++ b/xen/include/asm-ia64/linux-xen/asm/page.h Tue Apr 25 22:35:41 2006 -0600 9.3 @@ -119,6 +119,7 @@ typedef union ia64_va { 9.4 void *p; 9.5 } ia64_va; 9.6 9.7 +#ifndef XEN 9.8 /* 9.9 * Note: These macros depend on the fact that PAGE_OFFSET has all 9.10 * region bits set to 1 and all other bits set to zero. They are 9.11 @@ -127,6 +128,7 @@ typedef union ia64_va { 9.12 */ 9.13 #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;}) 9.14 #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;}) 9.15 +#endif /* XEN */ 9.16 9.17 #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;}) 9.18 #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) 9.19 @@ -198,6 +200,7 @@ get_order (unsigned long size) 9.20 # define __pgprot(x) (x) 9.21 #endif /* !STRICT_MM_TYPECHECKS */ 9.22 9.23 +#ifndef XEN 9.24 #define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000) 9.25 9.26 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ 9.27 @@ -205,7 +208,7 @@ get_order (unsigned long size) 9.28 (((current->personality & READ_IMPLIES_EXEC) != 0) \ 9.29 ? VM_EXEC : 0)) 9.30 9.31 -#ifdef XEN 9.32 +#else 9.33 #include <asm/xenpage.h> 9.34 #endif 9.35
10.1 --- a/xen/include/asm-ia64/linux-xen/asm/pgtable.h Tue Apr 25 22:32:14 2006 -0600 10.2 +++ b/xen/include/asm-ia64/linux-xen/asm/pgtable.h Tue Apr 25 22:35:41 2006 -0600 10.3 @@ -349,6 +349,7 @@ pgd_offset (struct mm_struct *mm, unsign 10.4 #define pte_unmap(pte) do { } while (0) 10.5 #define pte_unmap_nested(pte) do { } while (0) 10.6 10.7 +#ifndef XEN 10.8 /* atomic versions of the some PTE manipulations: */ 10.9 10.10 static inline int 10.11 @@ -418,6 +419,7 @@ pte_same (pte_t a, pte_t b) 10.12 } 10.13 10.14 #define update_mmu_cache(vma, address, pte) do { } while (0) 10.15 +#endif /* XEN */ 10.16 10.17 extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 10.18 extern void paging_init (void);
11.1 --- a/xen/include/asm-ia64/linux-xen/asm/system.h Tue Apr 25 22:32:14 2006 -0600 11.2 +++ b/xen/include/asm-ia64/linux-xen/asm/system.h Tue Apr 25 22:35:41 2006 -0600 11.3 @@ -187,7 +187,9 @@ do { \ 11.4 (__ia64_id_flags & IA64_PSR_I) == 0; \ 11.5 }) 11.6 11.7 -#ifndef XEN 11.8 +#ifdef XEN 11.9 +#define local_irq_is_enabled() (!irqs_disabled()) 11.10 +#else 11.11 #ifdef __KERNEL__ 11.12 11.13 #ifdef CONFIG_IA32_SUPPORT
12.1 --- a/xen/include/asm-ia64/xenpage.h Tue Apr 25 22:32:14 2006 -0600 12.2 +++ b/xen/include/asm-ia64/xenpage.h Tue Apr 25 22:35:41 2006 -0600 12.3 @@ -66,7 +66,4 @@ static inline int get_order_from_pages(u 12.4 /* It is sometimes very useful to have unsigned long as result. */ 12.5 #define __va_ul(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;}) 12.6 12.7 -#undef PAGE_OFFSET 12.8 -#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000) 12.9 - 12.10 #endif /* _ASM_IA64_XENPAGE_H */
13.1 --- a/xen/include/asm-ia64/xensystem.h Tue Apr 25 22:32:14 2006 -0600 13.2 +++ b/xen/include/asm-ia64/xensystem.h Tue Apr 25 22:35:41 2006 -0600 13.3 @@ -25,9 +25,9 @@ 13.4 #define HYPERVISOR_VIRT_START 0xf000000000000000 13.5 #define KERNEL_START 0xf000000004000000 13.6 #define SHAREDINFO_ADDR 0xf100000000000000 13.7 -#define SHARED_ARCHINFO_ADDR (SHAREDINFO_ADDR + PAGE_SIZE) 13.8 +#define XSI_OFS PAGE_SIZE 13.9 +#define SHARED_ARCHINFO_ADDR (SHAREDINFO_ADDR + XSI_OFS) 13.10 #define PERCPU_ADDR (SHAREDINFO_ADDR - PERCPU_PAGE_SIZE) 13.11 -#define XSI_OFS (SHARED_ARCHINFO_ADDR - SHAREDINFO_ADDR) 13.12 #define VHPT_ADDR 0xf200000000000000 13.13 #ifdef CONFIG_VIRTUAL_FRAME_TABLE 13.14 #define VIRT_FRAME_TABLE_ADDR 0xf300000000000000 13.15 @@ -35,45 +35,8 @@ 13.16 #endif 13.17 #define XEN_END_ADDR 0xf400000000000000 13.18 13.19 +#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000) 13.20 + 13.21 #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1) 13.22 13.23 -#ifndef __ASSEMBLY__ 13.24 - 13.25 -#define IA64_HAS_EXTRA_STATE(t) 0 13.26 - 13.27 -struct vcpu; 13.28 -extern void ia64_save_extra (struct vcpu *v); 13.29 -extern void ia64_load_extra (struct vcpu *v); 13.30 - 13.31 -extern struct vcpu *vmx_ia64_switch_to (struct vcpu *next_task); 13.32 -extern struct vcpu *ia64_switch_to (struct vcpu *next_task); 13.33 - 13.34 -#define __switch_to(prev,next,last) do { \ 13.35 - ia64_save_fpu(prev->arch._thread.fph); \ 13.36 - ia64_load_fpu(next->arch._thread.fph); \ 13.37 - if (VMX_DOMAIN(prev)) \ 13.38 - vmx_save_state(prev); \ 13.39 - else { \ 13.40 - if (IA64_HAS_EXTRA_STATE(prev)) \ 13.41 - ia64_save_extra(prev); \ 13.42 - } \ 13.43 - if (VMX_DOMAIN(next)) \ 13.44 - vmx_load_state(next); \ 13.45 - else { \ 13.46 - if (IA64_HAS_EXTRA_STATE(next)) \ 13.47 - ia64_save_extra(next); \ 13.48 - } \ 13.49 - /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/ \ 13.50 - (last) = ia64_switch_to((next)); \ 13.51 - if (!VMX_DOMAIN(current)){ 
\ 13.52 - vcpu_set_next_timer(current); \ 13.53 - } \ 13.54 -} while (0) 13.55 - 13.56 -// FIXME SMP... see system.h, does this need to be different? 13.57 -#define switch_to(prev,next,last) __switch_to(prev, next, last) 13.58 - 13.59 -#define local_irq_is_enabled() (!irqs_disabled()) 13.60 - 13.61 -#endif // __ASSEMBLY__ 13.62 #endif // _ASM_IA64_XENSYSTEM_H