ia64/xen-unstable
changeset 7332:54b112b314fe
Initial SMP support
Signed-off-by: Tristan Gingold <Tristan.Gingold@bull.net>
--- a/xen/arch/ia64/linux-xen/head.S	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/linux-xen/head.S	Wed Oct 12 17:12:59 2005 -0600
@@ -324,6 +324,9 @@ 1:	// now we are in virtual mode
 	mov r16=-1
 (isBP)	br.cond.dpnt .load_current // BP stack is on region 5 --- no need to map it
 
+#ifndef XEN
+	// XEN: stack is allocated in xenheap, which is currently always
+	//  mapped.
 	// load mapping for stack (virtaddr in r2, physaddr in r3)
 	rsm psr.ic
 	movl r17=PAGE_KERNEL
@@ -353,7 +356,8 @@ 1:	// now we are in virtual mode
 	ssm psr.ic
 	srlz.d
 	;;
-
+#endif
+
 .load_current:
 	// load the "current" pointer (r13) and ar.k6 with the current task
 #if defined(XEN) && defined(VALIDATE_VT)
--- a/xen/arch/ia64/linux-xen/irq_ia64.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/linux-xen/irq_ia64.c	Wed Oct 12 17:12:59 2005 -0600
@@ -281,5 +281,8 @@ ia64_send_ipi (int cpu, int vector, int
 	ipi_data = (delivery_mode << 8) | (vector & 0xff);
 	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
 
+#ifdef XEN
+	printf ("send_ipi to %d (%x)\n", cpu, phys_cpu_id);
+#endif
 	writeq(ipi_data, ipi_addr);
 }
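
The encoding above is worth spelling out: the data word packs the delivery mode and vector, while the target CPU is selected purely by address — each physical CPU id maps to a 16-byte-aligned slot in the platform's IPI block, with bit 3 selecting redirection. A standalone illustration of the arithmetic; the base address here is a placeholder, the real one is discovered from the ACPI MADT:

    #include <stdio.h>

    int main(void)
    {
        /* Placeholder base; the real value comes from the MADT. */
        unsigned long ipi_base_addr = 0xfee00000UL;
        int phys_cpu_id = 3, vector = 0xfd, delivery_mode = 0, redirect = 0;

        /* Same expressions as in ia64_send_ipi() above. */
        unsigned long ipi_data = (delivery_mode << 8) | (vector & 0xff);
        unsigned long ipi_addr = ipi_base_addr +
                ((phys_cpu_id << 4) | ((redirect & 1) << 3));

        /* Prints: write 0xfd to 0xfee00030 (CPU 3's 16-byte slot). */
        printf("write 0x%lx to 0x%lx\n", ipi_data, ipi_addr);
        return 0;
    }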
--- a/xen/arch/ia64/linux-xen/mm_contig.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/linux-xen/mm_contig.c	Wed Oct 12 17:12:59 2005 -0600
@@ -193,8 +193,8 @@ per_cpu_init (void)
 	 */
 	if (smp_processor_id() == 0) {
 #ifdef XEN
-		cpu_data = alloc_xenheap_pages(PERCPU_PAGE_SHIFT -
-			PAGE_SHIFT + get_order(NR_CPUS));
+		cpu_data = alloc_xenheap_pages(get_order(NR_CPUS
+							 * PERCPU_PAGE_SIZE));
 #else
 		cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
 					   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
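
The size fix matters for SMP: get_order() takes a size in bytes, so the old expression's get_order(NR_CPUS) term was 0 for any plausible CPU count, and the allocation only covered a single per-CPU area instead of NR_CPUS of them. A standalone sketch of the arithmetic, assuming the usual ia64 values (PAGE_SHIFT 14, PERCPU_PAGE_SHIFT 16 — 16KB pages, 64KB per-CPU areas) and the classic Linux get_order() helper:

    #include <stdio.h>

    #define PAGE_SHIFT        14                         /* assumed: 16KB pages */
    #define PERCPU_PAGE_SHIFT 16                         /* assumed: 64KB per-CPU area */
    #define PERCPU_PAGE_SIZE  (1UL << PERCPU_PAGE_SHIFT)
    #define NR_CPUS           8                          /* matches the config.h change below */

    /* Classic Linux helper: smallest order with (1 << order) pages >= size bytes. */
    static int get_order(unsigned long size)
    {
        int order = -1;
        size = (size - 1) >> (PAGE_SHIFT - 1);
        do {
            size >>= 1;
            order++;
        } while (size);
        return order;
    }

    int main(void)
    {
        /* Old: get_order() is fed a CPU *count* as if it were bytes, so it
         * contributes 0 and only one per-CPU area's worth is reserved. */
        int old_order = PERCPU_PAGE_SHIFT - PAGE_SHIFT + get_order(NR_CPUS);
        /* New: order of the total size in bytes. */
        int new_order = get_order(NR_CPUS * PERCPU_PAGE_SIZE);

        printf("old order %d -> %lu KB\n", old_order,
               (1UL << (old_order + PAGE_SHIFT)) / 1024);   /* 64 KB   */
        printf("new order %d -> %lu KB\n", new_order,
               (1UL << (new_order + PAGE_SHIFT)) / 1024);   /* 512 KB  */
        return 0;
    }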
--- a/xen/arch/ia64/linux-xen/setup.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/linux-xen/setup.c	Wed Oct 12 17:12:59 2005 -0600
@@ -366,6 +366,7 @@ check_for_logical_procs (void)
 }
 #endif
 
+void __init
 #ifdef XEN
 early_setup_arch (char **cmdline_p)
 #else
@@ -377,14 +378,12 @@ setup_arch (char **cmdline_p)
 	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
 
 	*cmdline_p = __va(ia64_boot_param->command_line);
-#ifdef XEN
-	efi_init();
-#else
+#ifndef XEN
 	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+#endif
 
 	efi_init();
 	io_port_init();
-#endif
 
 #ifdef CONFIG_IA64_GENERIC
 	{
@@ -414,11 +413,17 @@ setup_arch (char **cmdline_p)
 #ifdef XEN
 	early_cmdline_parse(cmdline_p);
 	cmdline_parse(*cmdline_p);
-#undef CONFIG_ACPI_BOOT
 #endif
 	if (early_console_setup(*cmdline_p) == 0)
 		mark_bsp_online();
 
+#ifdef XEN
+}
+
+void __init
+late_setup_arch (char **cmdline_p)
+{
+#endif
 #ifdef CONFIG_ACPI_BOOT
 	/* Initialize the ACPI boot-time table parser */
 	acpi_table_init();
@@ -433,20 +438,16 @@ setup_arch (char **cmdline_p)
 
 #ifndef XEN
 	find_memory();
-#else
-	io_port_init();
-}
+#endif
 
-void __init
-late_setup_arch (char **cmdline_p)
-{
-#undef CONFIG_ACPI_BOOT
-	acpi_table_init();
-#endif
 	/* process SAL system table: */
 	ia64_sal_init(efi.sal_systab);
 
 #ifdef CONFIG_SMP
+#ifdef XEN
+	init_smp_config ();
+#endif
+
 	cpu_physical_id(0) = hard_smp_processor_id();
 
 	cpu_set(0, cpu_sibling_map[0]);
@@ -768,6 +769,11 @@ cpu_init (void)
 
 	cpu_data = per_cpu_init();
 
+#ifdef XEN
+	printf ("cpu_init: current=%p, current->domain->arch.mm=%p\n",
+		current, current->domain->arch.mm);
+#endif
+
 	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
@@ -887,6 +893,16 @@ cpu_init (void)
 #ifndef XEN
 	pm_idle = default_idle;
 #endif
+
+#ifdef XEN
+	/* surrender usage of kernel registers to domain, use percpu area instead */
+	__get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
+	__get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
+	__get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
+	__get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
+	__get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
+	__get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
+#endif
 }
 
 void
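
Condensed, the split these hunks produce looks roughly as follows — a reconstruction of the Xen-only paths, not the literal file, with elisions marked. The early phase now ends before any ACPI table parsing; the late phase does table parsing and SMP discovery:

    /* Xen (#ifdef XEN) paths only, reconstructed from the hunks above. */
    void __init early_setup_arch(char **cmdline_p)
    {
        ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
        *cmdline_p = __va(ia64_boot_param->command_line);
        efi_init();                      /* now called exactly once */
        io_port_init();                  /* moved up from the old late path */
        /* ... machine-vector setup elided ... */
        early_cmdline_parse(cmdline_p);
        cmdline_parse(*cmdline_p);       /* CONFIG_ACPI_BOOT is no longer #undef'd */
        if (early_console_setup(*cmdline_p) == 0)
            mark_bsp_online();
    }

    void __init late_setup_arch(char **cmdline_p)
    {
        acpi_table_init();               /* under CONFIG_ACPI_BOOT */
        ia64_sal_init(efi.sal_systab);   /* process SAL system table */
    #ifdef CONFIG_SMP
        init_smp_config();               /* Xen-only: discover the APs */
        cpu_physical_id(0) = hard_smp_processor_id();
        /* ... sibling/core maps elided ... */
    #endif
    }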
--- a/xen/arch/ia64/linux-xen/smp.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/linux-xen/smp.c	Wed Oct 12 17:12:59 2005 -0600
@@ -63,9 +63,18 @@ void flush_tlb_mask(cpumask_t mask)
 //Huh? This seems to be used on ia64 even if !CONFIG_SMP
 void smp_send_event_check_mask(cpumask_t mask)
 {
-    printf("smp_send_event_check_mask called\n");
-    //dummy();
-    //send_IPI_mask(cpu_mask, EVENT_CHECK_VECTOR);
+    int cpu;
+
+    /*  Not for me.  */
+    cpu_clear(smp_processor_id(), mask);
+    if (cpus_empty(mask))
+        return;
+
+    printf("smp_send_event_check_mask called\n");
+
+    for (cpu = 0; cpu < NR_CPUS; ++cpu)
+        if (cpu_isset(cpu, mask))
+            platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 
 
@@ -249,6 +258,7 @@ send_IPI_self (int op)
 	send_IPI_single(smp_processor_id(), op);
 }
 
+#ifndef XEN
 /*
  * Called with preeemption disabled.
  */
@@ -257,6 +267,7 @@ smp_send_reschedule (int cpu)
 {
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
+#endif
 
 void
 smp_flush_tlb_all (void)
@@ -395,15 +406,14 @@ smp_call_function (void (*func) (void *i
 	if (wait)
 		atomic_set(&data.finished, 0);
 
-	printk("smp_call_function: about to spin_lock \n");
 	spin_lock(&call_lock);
-	printk("smp_call_function: done with spin_lock \n");
+#if 0 //def XEN
+	printk("smp_call_function: %d lock\n", smp_processor_id ());
+#endif
 
 	call_data = &data;
 	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	printk("smp_call_function: about to send_IPI \n");
 	send_IPI_allbutself(IPI_CALL_FUNC);
-	printk("smp_call_function: done with send_IPI \n");
 
 	/* Wait for response */
 	while (atomic_read(&data.started) != cpus)
@@ -414,9 +424,10 @@ smp_call_function (void (*func) (void *i
 		cpu_relax();
 	call_data = NULL;
 
-	printk("smp_call_function: about to spin_unlock \n");
 	spin_unlock(&call_lock);
+#if 0 //def XEN
 	printk("smp_call_function: DONE WITH spin_unlock, returning \n");
+#endif
 	return 0;
 }
 EXPORT_SYMBOL(smp_call_function);
--- a/xen/arch/ia64/linux-xen/smpboot.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/linux-xen/smpboot.c	Wed Oct 12 17:12:59 2005 -0600
@@ -477,6 +477,22 @@ do_boot_cpu (int sapicid, int cpu)
 
 do_rest:
 	task_for_booting_cpu = c_idle.idle;
+#else
+	struct domain *idle;
+	struct vcpu *v;
+	void *stack;
+
+	if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
+		panic("failed 'createdomain' for CPU %d", cpu);
+	set_bit(_DOMF_idle_domain, &idle->domain_flags);
+	v = idle->vcpu[0];
+
+	printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v);
+
+	task_for_booting_cpu = v;
+
+	/* Set cpu number.  */
+	get_thread_info(v)->cpu = cpu;
 #endif
 
 	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
--- a/xen/arch/ia64/xen/acpi.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/xen/acpi.c	Wed Oct 12 17:12:59 2005 -0600
@@ -121,6 +121,7 @@ acpi_get_sysname (void)
 #ifdef CONFIG_ACPI_BOOT
 
 #define ACPI_MAX_PLATFORM_INTERRUPTS	256
+#define NR_IOSAPICS 4
 
 #if 0
 /* Array to record platform interrupt vectors for generic interrupt routing. */
@@ -162,7 +163,6 @@ static int available_cpus __initdata;
 struct acpi_table_madt *	acpi_madt __initdata;
 static u8			has_8259;
 
-#if 0
 static int __init
 acpi_parse_lapic_addr_ovr (
 	acpi_table_entry_header *header, const unsigned long end)
@@ -247,12 +247,13 @@ acpi_parse_iosapic (acpi_table_entry_hea
 
 	acpi_table_print_madt_entry(header);
 
+#if 0
 	iosapic_init(iosapic->address, iosapic->global_irq_base);
+#endif
 
 	return 0;
 }
 
-
 static int __init
 acpi_parse_plat_int_src (
 	acpi_table_entry_header *header, const unsigned long end)
@@ -267,6 +268,7 @@ acpi_parse_plat_int_src (
 
 	acpi_table_print_madt_entry(header);
 
+#if 0
 	/*
	 * Get vector assignment for this interrupt, set attributes,
	 * and program the IOSAPIC routing table.
@@ -280,6 +282,7 @@ acpi_parse_plat_int_src (
 				(plintsrc->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
 
 	platform_intr_list[plintsrc->type] = vector;
+#endif
 	return 0;
 }
 
@@ -297,13 +300,14 @@ acpi_parse_int_src_ovr (
 
 	acpi_table_print_madt_entry(header);
 
+#if 0
 	iosapic_override_isa_irq(p->bus_irq, p->global_irq,
 				 (p->flags.polarity == 1) ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW,
 				 (p->flags.trigger == 1) ? IOSAPIC_EDGE : IOSAPIC_LEVEL);
+#endif
 	return 0;
 }
 
-
 static int __init
 acpi_parse_nmi_src (acpi_table_entry_header *header, const unsigned long end)
 {
@@ -331,8 +335,10 @@ void __init acpi_madt_oem_check(char *oe
 		 */
 		sal_platform_features |= IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT;
 
+#if 0
 		/*Start cyclone clock*/
 		cyclone_setup(0);
+#endif
 	}
 }
 
@@ -350,7 +356,9 @@ acpi_parse_madt (unsigned long phys_addr
 #else
 	has_8259 = acpi_madt->flags.pcat_compat;
 #endif
+#if 0
 	iosapic_system_init(has_8259);
+#endif
 
 	/* Get base address of IPI Message Block */
 
@@ -364,7 +372,6 @@ acpi_parse_madt (unsigned long phys_addr
 
 	return 0;
 }
-#endif
 
 #ifdef CONFIG_ACPI_NUMA
 
@@ -529,6 +536,7 @@ acpi_register_gsi (u32 gsi, int polarity
 	return acpi_register_irq(gsi, polarity, trigger);
 }
 EXPORT_SYMBOL(acpi_register_gsi);
+#endif
 static int __init
 acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
 {
@@ -550,10 +558,11 @@ acpi_parse_fadt (unsigned long phys_addr
 	if (fadt->iapc_boot_arch & BAF_LEGACY_DEVICES)
 		acpi_legacy_devices = 1;
 
+#if 0
 	acpi_register_gsi(fadt->sci_int, ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
+#endif
 	return 0;
 }
-#endif
 
 unsigned long __init
 acpi_find_rsdp (void)
@@ -567,7 +576,6 @@ acpi_find_rsdp (void)
 	return rsdp_phys;
 }
 
-#if 0
 int __init
 acpi_boot_init (void)
 {
@@ -646,6 +654,7 @@ acpi_boot_init (void)
 	printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
 	return 0;
 }
+#if 0
 int
 acpi_gsi_to_irq (u32 gsi, unsigned int *irq)
 {
--- a/xen/arch/ia64/xen/domain.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/xen/domain.c	Wed Oct 12 17:12:59 2005 -0600
@@ -23,6 +23,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
+#include <asm/hw_irq.h>
 //#include <asm/mpspec.h>
 #include <xen/irq.h>
 #include <xen/event.h>
@@ -75,35 +76,21 @@ void free_perdomain_pt(struct domain *d)
 	//free_page((unsigned long)d->mm.perdomain_pt);
 }
 
-int hlt_counter;
-
-void disable_hlt(void)
+static void default_idle(void)
 {
-	hlt_counter++;
-}
-
-void enable_hlt(void)
-{
-	hlt_counter--;
+	int cpu = smp_processor_id();
+	local_irq_disable();
+	if ( !softirq_pending(cpu))
+	    safe_halt();
+	local_irq_enable();
 }
 
-static void default_idle(void)
-{
-	if ( hlt_counter == 0 )
-	{
-	local_irq_disable();
-	    if ( !softirq_pending(smp_processor_id()) )
-	        safe_halt();
-	    //else
-	        local_irq_enable();
-	}
-}
-
-void continue_cpu_idle_loop(void)
+static void continue_cpu_idle_loop(void)
 {
 	int cpu = smp_processor_id();
 	for ( ; ; )
 	{
+	printf ("idle%dD\n", cpu);
 #ifdef IA64
 //        __IRQ_STAT(cpu, idle_timestamp) = jiffies
 #else
@@ -111,23 +98,32 @@ void continue_cpu_idle_loop(void)
 #endif
 	    while ( !softirq_pending(cpu) )
 	        default_idle();
+	    add_preempt_count(SOFTIRQ_OFFSET);
 	    raise_softirq(SCHEDULE_SOFTIRQ);
 	    do_softirq();
+	    sub_preempt_count(SOFTIRQ_OFFSET);
 	}
 }
 
 void startup_cpu_idle_loop(void)
 {
+	int cpu = smp_processor_id ();
 	/* Just some sanity to ensure that the scheduler is set up okay. */
 	ASSERT(current->domain == IDLE_DOMAIN_ID);
+	printf ("idle%dA\n", cpu);
 	raise_softirq(SCHEDULE_SOFTIRQ);
+#if 0 /* All this work is done within continue_cpu_idle_loop */
+	printf ("idle%dB\n", cpu);
+	asm volatile ("mov ar.k2=r0");
 	do_softirq();
+	printf ("idle%dC\n", cpu);
 
 	/*
	 * Declares CPU setup done to the boot processor.
	 * Therefore memory barrier to ensure state is visible.
	 */
 	smp_mb();
+#endif
 #if 0
 //do we have to ensure the idle task has a shared page so that, for example,
 //region registers can be loaded from it. Apparently not...
@@ -229,17 +225,21 @@ void arch_do_createdomain(struct vcpu *v
 	v->arch.breakimm = d->arch.breakimm;
 
 	d->arch.sys_pgnr = 0;
-	d->arch.mm = xmalloc(struct mm_struct);
-	if (unlikely(!d->arch.mm)) {
-		printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
-		return -ENOMEM;
-	}
-	memset(d->arch.mm, 0, sizeof(*d->arch.mm));
-	d->arch.mm->pgd = pgd_alloc(d->arch.mm);
-	if (unlikely(!d->arch.mm->pgd)) {
-		printk("Can't allocate pgd for domain %d\n",d->domain_id);
-		return -ENOMEM;
-	}
+	if (d->domain_id != IDLE_DOMAIN_ID) {
+		d->arch.mm = xmalloc(struct mm_struct);
+		if (unlikely(!d->arch.mm)) {
+			printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
+			return -ENOMEM;
+		}
+		memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+		d->arch.mm->pgd = pgd_alloc(d->arch.mm);
+		if (unlikely(!d->arch.mm->pgd)) {
+			printk("Can't allocate pgd for domain %d\n",d->domain_id);
+			return -ENOMEM;
+		}
+	} else
+		d->arch.mm = NULL;
+	printf ("arch_do_create_domain: domain=%p\n", d);
 }
 
 void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
--- a/xen/arch/ia64/xen/process.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/xen/process.c	Wed Oct 12 17:12:59 2005 -0600
@@ -62,11 +62,23 @@ long do_iopl(domid_t domain, unsigned in
 	return 0;
 }
 
+#include <xen/sched-if.h>
+
+extern struct schedule_data schedule_data[NR_CPUS];
+
 void schedule_tail(struct vcpu *next)
 {
 	unsigned long rr7;
 	//printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
 	//printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
+
+	// TG: Real HACK FIXME.
+	// This is currently necessary because when a new domain is started,
+	// the context_switch function of xen/common/schedule.c(__enter_scheduler)
+	// never returns.  Therefore, the lock must be released.
+	// schedule_tail is only called when a domain is started.
+	spin_unlock_irq(&schedule_data[current->processor].schedule_lock);
+
 	/* rr7 will be postponed to last point when resuming back to guest */
 	if(VMX_DOMAIN(current)){
 		vmx_load_all_rr(current);
--- a/xen/arch/ia64/xen/xenirq.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/xen/xenirq.c	Wed Oct 12 17:12:59 2005 -0600
@@ -35,7 +35,7 @@ xen_debug_irq(ia64_vector vector, struct
 int
 xen_do_IRQ(ia64_vector vector)
 {
-	if (vector != 0xef) {
+	if (vector != IA64_TIMER_VECTOR && vector != IA64_IPI_VECTOR) {
 		extern void vcpu_pend_interrupt(void *, int);
 #if 0
 		if (firsttime[vector]) {
@@ -57,22 +57,18 @@ xen_do_IRQ(ia64_vector vector)
 	return(0);
 }
 
-/* From linux/kernel/softirq.c */
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
-# define invoke_softirq()	__do_softirq()
-#else
-# define invoke_softirq()	do_softirq()
-#endif
-
 /*
  * Exit an interrupt context. Process softirqs if needed and possible:
  */
 void irq_exit(void)
 {
 	//account_system_vtime(current);
-	//sub_preempt_count(IRQ_EXIT_OFFSET);
-	if (!in_interrupt() && local_softirq_pending())
-		invoke_softirq();
+	sub_preempt_count(IRQ_EXIT_OFFSET);
+	if (!in_interrupt() && local_softirq_pending()) {
+		add_preempt_count(SOFTIRQ_OFFSET);
+		do_softirq();
+		sub_preempt_count(SOFTIRQ_OFFSET);
+	}
 	//preempt_enable_no_resched();
 }
 /* end from linux/kernel/softirq.c */
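
This restores Linux's interrupt bookkeeping: the hardirq count taken on entry is dropped, and do_softirq() runs with the softirq count raised so that in_interrupt() — made real again by the hardirq.h hunk below — stops a nested interrupt from re-entering softirq processing. A userspace mock of the counter discipline; the offsets follow the usual 2.6-era layout (exact bit widths vary by config), and with no preemption accounting IRQ_EXIT_OFFSET equals HARDIRQ_OFFSET:

    #include <stdio.h>

    #define SOFTIRQ_OFFSET  0x100           /* one softirq nesting level */
    #define HARDIRQ_OFFSET  0x10000         /* one hardirq nesting level */
    #define IRQ_EXIT_OFFSET HARDIRQ_OFFSET  /* no preemption accounting here */

    static unsigned int preempt_count;

    /* Mock: with no preemption bits in use, any nonzero count means
     * we are in hardirq or softirq context. */
    static int in_interrupt(void) { return preempt_count != 0; }

    static void do_softirq(void)
    {
        /* While this runs, in_interrupt() is true, so a nested interrupt
         * taking irq_exit() will NOT re-enter softirq processing. */
        printf("do_softirq: in_interrupt()=%d\n", in_interrupt());
    }

    static void irq_exit(void)
    {
        preempt_count -= IRQ_EXIT_OFFSET;        /* leave hardirq context */
        if (!in_interrupt()) {                   /* not nested in irq/softirq */
            preempt_count += SOFTIRQ_OFFSET;     /* enter softirq context */
            do_softirq();
            preempt_count -= SOFTIRQ_OFFSET;
        }
    }

    int main(void)
    {
        preempt_count += HARDIRQ_OFFSET;         /* what irq_enter() would do */
        irq_exit();
        return 0;
    }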
--- a/xen/arch/ia64/xen/xenmisc.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/xen/xenmisc.c	Wed Oct 12 17:12:59 2005 -0600
@@ -280,6 +280,8 @@ void cs01foo(void) {}
 
 unsigned long context_switch_count = 0;
 
+#include <asm/vcpu.h>
+
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
 //printk("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n");
@@ -287,7 +289,8 @@ void context_switch(struct vcpu *prev, s
 //prev->domain->domain_id,(long)prev&0xffffff,next->domain->domain_id,(long)next&0xffffff);
 //if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo();
 //if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo();
-//printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id);
+printk("@@sw%d/%x %d->%d\n",smp_processor_id(), hard_smp_processor_id (),
+       prev->domain->domain_id,next->domain->domain_id);
 if(VMX_DOMAIN(prev)){
 	vtm_domain_out(prev);
 }
--- a/xen/arch/ia64/xen/xensetup.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/xen/xensetup.c	Wed Oct 12 17:12:59 2005 -0600
@@ -253,11 +253,11 @@ void start_kernel(void)
 printk("About to call scheduler_init()\n");
     scheduler_init();
     local_irq_disable();
+    init_IRQ ();
 printk("About to call init_xen_time()\n");
     init_xen_time(); /* initialise the time */
 printk("About to call ac_timer_init()\n");
     ac_timer_init();
-// init_xen_time(); ???
 
 #ifdef CONFIG_SMP
     if ( opt_nosmp )
@@ -276,6 +276,9 @@ printk("About to call ac_timer_init()\n"
 
     //BUG_ON(!local_irq_is_enabled());
 
+    /* Enable IRQ to receive IPI (needed for ITC sync).  */
+    local_irq_enable();
+
 printk("num_online_cpus=%d, max_cpus=%d\n",num_online_cpus(),max_cpus);
     for_each_present_cpu ( i )
     {
@@ -287,24 +290,16 @@ printk("About to call __cpu_up(%d)\n",i)
         }
     }
 
+    local_irq_disable();
+
     printk("Brought up %ld CPUs\n", (long)num_online_cpus());
     smp_cpus_done(max_cpus);
 #endif
 
-
-    // FIXME: Should the following be swapped and moved later?
-    schedulers_start();
     do_initcalls();
 printk("About to call sort_main_extable()\n");
     sort_main_extable();
 
-    /* surrender usage of kernel registers to domain, use percpu area instead */
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_IO_BASE] = ia64_get_kr(IA64_KR_IO_BASE);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_PER_CPU_DATA] = ia64_get_kr(IA64_KR_PER_CPU_DATA);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT_STACK] = ia64_get_kr(IA64_KR_CURRENT_STACK);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_FPU_OWNER] = ia64_get_kr(IA64_KR_FPU_OWNER);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_CURRENT] = ia64_get_kr(IA64_KR_CURRENT);
-    __get_cpu_var(cpu_kr)._kr[IA64_KR_PT_BASE] = ia64_get_kr(IA64_KR_PT_BASE);
 
     /* Create initial domain 0. */
 printk("About to call do_createdomain()\n");
@@ -342,6 +337,11 @@ printk("About to call construct_dom0()\n
                         0,
                         0) != 0)
         panic("Could not set up DOM0 guest OS\n");
+
+    /* PIN domain0 on CPU 0.  */
+    dom0->vcpu[0]->cpumap=1;
+    set_bit(_VCPUF_cpu_pinned, &dom0->vcpu[0]->vcpu_flags);
+
 #ifdef CLONE_DOMAIN0
     {
     int i;
@@ -379,9 +379,16 @@ printk("About to call init_trace_bufs()\
         domain_unpause_by_systemcontroller(clones[i]);
     }
 #endif
+    domain0_ready = 1;
+
+    local_irq_enable();
+
+    printf("About to call schedulers_start dom0=%p, idle0_dom=%p\n",
+           dom0, &idle0_domain);
+    schedulers_start();
+
     domain_unpause_by_systemcontroller(dom0);
-    domain0_ready = 1;
-    local_irq_enable();
+
 printk("About to call startup_cpu_idle_loop()\n");
     startup_cpu_idle_loop();
 }
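
Assembled from the hunks above, the resulting BSP boot order is roughly as follows (a condensed outline, not the literal function):

    /*
     * scheduler_init();
     * local_irq_disable();
     * init_IRQ();
     * init_xen_time();
     * ac_timer_init();
     * local_irq_enable();          -- APs need IPIs during bring-up (ITC sync)
     * for_each_present_cpu(i) __cpu_up(i);
     * local_irq_disable();
     * smp_cpus_done(max_cpus);
     * do_initcalls();              -- kernel-register save now lives in cpu_init()
     * ...create and construct dom0, pinned on CPU 0 (cpumap = 1, i.e. bit 0)...
     * domain0_ready = 1;
     * local_irq_enable();
     * schedulers_start();          -- deferred until every CPU is online
     * domain_unpause_by_systemcontroller(dom0);
     * startup_cpu_idle_loop();
     */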
--- a/xen/arch/ia64/xen/xentime.c	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/arch/ia64/xen/xentime.c	Wed Oct 12 17:12:59 2005 -0600
@@ -103,10 +103,10 @@ xen_timer_interrupt (int irq, void *dev_
 #ifdef HEARTBEAT_FREQ
 	static long count = 0;
 	if (!(++count & ((HEARTBEAT_FREQ*1024)-1))) {
-		printf("Heartbeat... iip=%p,psr.i=%d,pend=%d\n",
-			regs->cr_iip,
+		printf("Heartbeat... iip=%p\n", /*",psr.i=%d,pend=%d\n", */
+			regs->cr_iip /*,
 			VCPU(current,interrupt_delivery_enabled),
-			VCPU(current,pending_interruption));
+			VCPU(current,pending_interruption) */);
 		count = 0;
 	}
 #endif
--- a/xen/include/asm-ia64/config.h	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/include/asm-ia64/config.h	Wed Oct 12 17:12:59 2005 -0600
@@ -28,8 +28,8 @@
 
 #ifdef CONFIG_XEN_SMP
 #define CONFIG_SMP 1
-#define NR_CPUS 2
-#define CONFIG_NR_CPUS 2
+#define NR_CPUS 8
+#define CONFIG_NR_CPUS 8
 #else
 #undef CONFIG_SMP
 #define NR_CPUS 1
@@ -123,8 +123,7 @@ extern char _end[]; /* standard ELF symb
 #ifdef CONFIG_SMP
 #warning "Lots of things to fix to enable CONFIG_SMP!"
 #endif
-// FIXME SMP
-#define get_cpu() 0
+#define get_cpu() smp_processor_id()
 #define put_cpu() do {} while(0)
 
 // needed for common/dom0_ops.c until hyperthreading is supported
@@ -140,6 +139,7 @@ struct page;
 // function calls; see decl in xen/include/xen/sched.h
 #undef free_task_struct
 #undef alloc_task_struct
+#define get_thread_info(v) alloc_thread_info(v)
 
 // initial task has a different name in Xen
 //#define idle0_task init_task
@@ -299,7 +299,11 @@ extern int ht_per_core;
 #endif /* __XEN_IA64_CONFIG_H__ */
 
 // needed for include/xen/smp.h
+#ifdef CONFIG_SMP
+#define __smp_processor_id() current_thread_info()->cpu
+#else
 #define __smp_processor_id() 0
+#endif
 
 
 // FOLLOWING ADDED FOR XEN POST-NGIO and/or LINUX 2.6.7
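
With these definitions, smp_processor_id() is no longer hardwired to 0: it reads the cpu field of the current thread_info, which the smpboot.c hunk above seeds with get_thread_info(v)->cpu = cpu before the AP is woken. A minimal userspace sketch of the mechanism — the structure and the "current" pointer are illustrative stand-ins, not the real Xen/ia64 definitions:

    #include <stdio.h>

    /* Illustrative stand-in for the real thread_info. */
    struct thread_info {
        int cpu;                     /* seeded by do_boot_cpu() before the AP wakes */
    };

    static struct thread_info boot_ti = { .cpu = 0 };
    static struct thread_info *current_ti = &boot_ti;  /* per-CPU in reality */

    #define current_thread_info()  (current_ti)
    #define smp_processor_id()     (current_thread_info()->cpu)

    int main(void)
    {
        struct thread_info ap_ti;

        ap_ti.cpu = 1;               /* what get_thread_info(v)->cpu = cpu does */
        printf("BSP sees cpu %d\n", smp_processor_id());

        current_ti = &ap_ti;         /* pretend we now run on the AP */
        printf("AP  sees cpu %d\n", smp_processor_id());
        return 0;
    }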
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Wed Oct 12 17:12:59 2005 -0600
@@ -17,11 +17,16 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
+#define DEBUG_SPINLOCK
+
 typedef struct {
 	volatile unsigned int lock;
 #ifdef CONFIG_PREEMPT
 	unsigned int break_lock;
 #endif
+#ifdef DEBUG_SPINLOCK
+	void *locker;
+#endif
 #ifdef XEN
 	unsigned char recurse_cpu;
 	unsigned char recurse_cnt;
@@ -96,6 +101,10 @@ static inline void
 	: "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
 # endif /* CONFIG_MCKINLEY */
 #endif
+
+#ifdef DEBUG_SPINLOCK
+	asm volatile ("mov %0=ip" : "=r" (lock->locker));
+#endif
 }
 #define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
 #else /* !ASM_SUPPORTED */
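
The same debugging idea in portable form, for comparison: record a code address in the lock as it is taken, so a wedged lock can be attributed after the fact. This sketch substitutes GCC builtins for the ia64 "mov %0=ip" read above; it is illustrative only, not the hypervisor's lock:

    #define DEBUG_SPINLOCK

    typedef struct {
        volatile unsigned int lock;
    #ifdef DEBUG_SPINLOCK
        void *locker;                /* code address that took the lock */
    #endif
    } demo_spinlock_t;

    static inline void demo_spin_lock(demo_spinlock_t *l)
    {
        while (__sync_lock_test_and_set(&l->lock, 1))
            ;                        /* spin */
    #ifdef DEBUG_SPINLOCK
        /* On a hang, a debugger can read l->locker to see which call
         * site currently holds the lock. */
        l->locker = __builtin_return_address(0);
    #endif
    }

    static inline void demo_spin_unlock(demo_spinlock_t *l)
    {
        __sync_lock_release(&l->lock);
    }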
--- a/xen/include/asm-ia64/linux-xen/linux/hardirq.h	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/include/asm-ia64/linux-xen/linux/hardirq.h	Wed Oct 12 17:12:59 2005 -0600
@@ -67,11 +67,7 @@
  */
 #define in_irq()		(hardirq_count())
 #define in_softirq()		(softirq_count())
-#ifdef XEN
-#define in_interrupt()		0		// FIXME SMP LATER
-#else
 #define in_interrupt()		(irq_count())
-#endif
 
 #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
 # define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
--- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h	Wed Oct 12 10:56:14 2005 -0600
+++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h	Wed Oct 12 17:12:59 2005 -0600
@@ -88,6 +88,7 @@ static inline void __deprecated save_and
 #define save_and_cli(x)	save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
+#ifndef XEN
 /* SoftIRQ primitives.  */
 #define local_bh_disable() \
 		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
@@ -95,6 +96,7 @@ static inline void __deprecated save_and
 		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
 
 extern void local_bh_enable(void);
+#endif
 
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes