ia64/xen-unstable
changeset 8536:a4ce0ba0f8ff: Merged.

author:    emellor@leeni.uk.xensource.com
date:      Tue Jan 10 14:24:12 2006 +0000 (2006-01-10)
parents:   a51fcb5de470 da7873110bbb
children:  d39fa3f09cb2 fbee8d9fbaba
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_dev.c	Tue Jan 10 14:24:12 2006 +0000
@@ -109,7 +109,7 @@ static ssize_t xenbus_dev_write(struct f
 			 size_t len, loff_t *ppos)
 {
 	struct xenbus_dev_data *u = filp->private_data;
-	struct xenbus_dev_transaction *trans;
+	struct xenbus_dev_transaction *trans = NULL;
 	void *reply;
 
 	if ((len + u->len) > sizeof(u->u.buffer))
@@ -134,14 +134,19 @@ static ssize_t xenbus_dev_write(struct f
 	case XS_MKDIR:
 	case XS_RM:
 	case XS_SET_PERMS:
-		reply = xenbus_dev_request_and_reply(&u->u.msg);
-		if (IS_ERR(reply))
-			return PTR_ERR(reply);
-
 		if (u->u.msg.type == XS_TRANSACTION_START) {
 			trans = kmalloc(sizeof(*trans), GFP_KERNEL);
 			if (!trans)
 				return -ENOMEM;
+		}
+
+		reply = xenbus_dev_request_and_reply(&u->u.msg);
+		if (IS_ERR(reply)) {
+			kfree(trans);
+			return PTR_ERR(reply);
+		}
+
+		if (u->u.msg.type == XS_TRANSACTION_START) {
 			trans->handle = (struct xenbus_transaction *)
 				simple_strtoul(reply, NULL, 0);
 			list_add(&trans->list, &u->transactions);
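Note on the hunk above: it reorders the XS_TRANSACTION_START path so the local bookkeeping structure is allocated before the xenstore request is issued, and freed again if the request fails, so neither the allocation nor a started transaction is leaked. A minimal sketch of that ordering pattern, where struct record and do_request() are hypothetical stand-ins for the xenstore specifics:

    #include <stdlib.h>

    struct record { int handle; };

    /* Hypothetical stand-in for the side-effecting request. */
    static int do_request(int *out_handle) { *out_handle = 1; return 0; }

    int start_tracked_request(struct record **out)
    {
        struct record *rec = malloc(sizeof(*rec)); /* allocate before the request */
        if (rec == NULL)
            return -1;                             /* nothing to undo yet */

        if (do_request(&rec->handle) < 0) {
            free(rec);                             /* request failed: drop local state */
            return -1;
        }

        *out = rec;                                /* success: both resources tracked */
        return 0;
    }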
--- a/tools/xm-test/tests/block-destroy/06_block-destroy_check_list_pos.py	Tue Jan 10 14:23:56 2006 +0000
+++ b/tools/xm-test/tests/block-destroy/06_block-destroy_check_list_pos.py	Tue Jan 10 14:24:12 2006 +0000
@@ -46,6 +46,8 @@ if not checkBlockList(domain):
 if not checkXmLongList(domain):
     FAIL("xm long list does not show that hda1 was attached")
 
+time.sleep(2)
+
 s, o = traceCommand("xm block-detach %s hda1" % domain.getName())
 if s != 0:
     FAIL("block-detach failed")
--- a/xen/arch/ia64/linux-xen/smpboot.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/ia64/linux-xen/smpboot.c	Tue Jan 10 14:24:12 2006 +0000
@@ -484,7 +484,6 @@ do_rest:
 
 	if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
 		panic("failed 'createdomain' for CPU %d", cpu);
-	set_bit(_DOMF_idle_domain, &idle->domain_flags);
 	v = idle->vcpu[0];
 
 	printf ("do_boot_cpu: cpu=%d, domain=%p, vcpu=%p\n", cpu, idle, v);
--- a/xen/arch/ia64/xen/idle0_task.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/ia64/xen/idle0_task.c	Tue Jan 10 14:24:12 2006 +0000
@@ -22,7 +22,6 @@
 #define IDLE0_DOMAIN(_t)              \
 {                                     \
     domain_id:    IDLE_DOMAIN_ID,     \
-    domain_flags: DOMF_idle_domain,   \
     refcnt:       ATOMIC_INIT(1)      \
 }
 
--- a/xen/arch/ia64/xen/xensetup.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/ia64/xen/xensetup.c	Tue Jan 10 14:24:12 2006 +0000
@@ -26,7 +26,7 @@ unsigned long xenheap_phys_end;
 
 char saved_command_line[COMMAND_LINE_SIZE];
 
-struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu };
+struct vcpu *idle_vcpu[NR_CPUS] = { &idle0_vcpu };
 
 cpumask_t cpu_present_map;
 
--- a/xen/arch/x86/domain.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/x86/domain.c	Tue Jan 10 14:24:12 2006 +0000
@@ -91,11 +91,9 @@ void startup_cpu_idle_loop(void)
 {
     struct vcpu *v = current;
 
-    ASSERT(is_idle_domain(v->domain));
-    percpu_ctxt[smp_processor_id()].curr_vcpu = v;
+    ASSERT(is_idle_vcpu(v));
     cpu_set(smp_processor_id(), v->domain->domain_dirty_cpumask);
     cpu_set(smp_processor_id(), v->vcpu_dirty_cpumask);
-    v->arch.schedule_tail = continue_idle_domain;
 
     reset_stack_and_jump(idle_loop);
 }
@@ -217,14 +215,20 @@ struct vcpu *alloc_vcpu_struct(struct do
 
     memset(v, 0, sizeof(*v));
 
-    memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));
+    memcpy(&v->arch, &idle_vcpu[0]->arch, sizeof(v->arch));
     v->arch.flags = TF_kernel_mode;
 
+    if ( is_idle_domain(d) )
+    {
+        percpu_ctxt[vcpu_id].curr_vcpu = v;
+        v->arch.schedule_tail = continue_idle_domain;
+    }
+
     if ( (v->vcpu_id = vcpu_id) != 0 )
    {
         v->arch.schedule_tail  = d->vcpu[0]->arch.schedule_tail;
         v->arch.perdomain_ptes =
-            d->arch.mm_perdomain_pt + (vcpu_id << PDPT_VCPU_SHIFT);
+            d->arch.mm_perdomain_pt + (vcpu_id << GDT_LDT_VCPU_SHIFT);
     }
 
     return v;
@@ -259,31 +263,11 @@ int arch_do_createdomain(struct vcpu *v)
     int i;
 #endif
 
-    if ( is_idle_domain(d) )
-        return 0;
-
-    d->arch.ioport_caps =
-        rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
-    if ( d->arch.ioport_caps == NULL )
-        return -ENOMEM;
-
-    if ( (d->shared_info = alloc_xenheap_page()) == NULL )
-        return -ENOMEM;
-
-    if ( (rc = ptwr_init(d)) != 0 )
-    {
-        free_xenheap_page(d->shared_info);
-        return rc;
-    }
-
-    v->arch.schedule_tail = continue_nonidle_domain;
-
-    memset(d->shared_info, 0, PAGE_SIZE);
-    v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
-    SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
-
     pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
     d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
+    if ( d->arch.mm_perdomain_pt == NULL )
+        goto fail_nomem;
+
     memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);
     v->arch.perdomain_ptes = d->arch.mm_perdomain_pt;
 
@@ -296,34 +280,75 @@ int arch_do_createdomain(struct vcpu *v)
      */
     gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
     for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
-        d->arch.mm_perdomain_pt[
-            (vcpuid << PDPT_VCPU_SHIFT) + FIRST_RESERVED_GDT_PAGE] = gdt_l1e;
+        d->arch.mm_perdomain_pt[((vcpuid << GDT_LDT_VCPU_SHIFT) +
+                                 FIRST_RESERVED_GDT_PAGE)] = gdt_l1e;
 
     v->arch.guest_vtable  = __linear_l2_table;
     v->arch.shadow_vtable = __shadow_linear_l2_table;
 
-#ifdef __x86_64__
+#if defined(__i386__)
+
+    d->arch.mapcache.l1tab = d->arch.mm_perdomain_pt +
+        (GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
+    spin_lock_init(&d->arch.mapcache.lock);
+
+#else /* __x86_64__ */
+
     v->arch.guest_vl3table = __linear_l3_table;
     v->arch.guest_vl4table = __linear_l4_table;
 
     d->arch.mm_perdomain_l2 = alloc_xenheap_page();
+    d->arch.mm_perdomain_l3 = alloc_xenheap_page();
+    if ( (d->arch.mm_perdomain_l2 == NULL) ||
+         (d->arch.mm_perdomain_l3 == NULL) )
+        goto fail_nomem;
+
     memset(d->arch.mm_perdomain_l2, 0, PAGE_SIZE);
     for ( i = 0; i < (1 << pdpt_order); i++ )
         d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+i] =
             l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
                           __PAGE_HYPERVISOR);
 
-    d->arch.mm_perdomain_l3 = alloc_xenheap_page();
     memset(d->arch.mm_perdomain_l3, 0, PAGE_SIZE);
     d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
         l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
                       __PAGE_HYPERVISOR);
-#endif
+
+#endif /* __x86_64__ */
 
     shadow_lock_init(d);
     INIT_LIST_HEAD(&d->arch.free_shadow_frames);
 
+    if ( !is_idle_domain(d) )
+    {
+        d->arch.ioport_caps =
+            rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
+        if ( d->arch.ioport_caps == NULL )
+            goto fail_nomem;
+
+        if ( (d->shared_info = alloc_xenheap_page()) == NULL )
+            goto fail_nomem;
+
+        if ( (rc = ptwr_init(d)) != 0 )
+            goto fail_nomem;
+
+        memset(d->shared_info, 0, PAGE_SIZE);
+        v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
+        SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
+
+        v->arch.schedule_tail = continue_nonidle_domain;
+    }
+
     return 0;
+
+ fail_nomem:
+    free_xenheap_page(d->shared_info);
+#ifdef __x86_64__
+    free_xenheap_page(d->arch.mm_perdomain_l2);
+    free_xenheap_page(d->arch.mm_perdomain_l3);
+#endif
+    free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
+    return -ENOMEM;
 }
 
 /* This is called by arch_final_setup_guest and do_boot_vcpu */
@@ -689,7 +714,10 @@ static void __context_switch(void)
     struct vcpu          *p = percpu_ctxt[cpu].curr_vcpu;
     struct vcpu          *n = current;
 
-    if ( !is_idle_domain(p->domain) )
+    ASSERT(p != n);
+    ASSERT(cpus_empty(n->vcpu_dirty_cpumask));
+
+    if ( !is_idle_vcpu(p) )
     {
         memcpy(&p->arch.guest_context.user_regs,
                stack_regs,
@@ -698,7 +726,7 @@ static void __context_switch(void)
         save_segments(p);
     }
 
-    if ( !is_idle_domain(n->domain) )
+    if ( !is_idle_vcpu(n) )
     {
         memcpy(stack_regs,
                &n->arch.guest_context.user_regs,
@@ -748,24 +776,31 @@ static void __context_switch(void)
 void context_switch(struct vcpu *prev, struct vcpu *next)
 {
     unsigned int cpu = smp_processor_id();
+    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
 
     ASSERT(local_irq_is_enabled());
 
+    /* Allow at most one CPU at a time to be dirty. */
+    ASSERT(cpus_weight(dirty_mask) <= 1);
+    if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) )
+    {
+        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
+        flush_tlb_mask(dirty_mask);
+    }
+
+    local_irq_disable();
+
     set_current(next);
 
-    if ( (percpu_ctxt[cpu].curr_vcpu != next) &&
-         !is_idle_domain(next->domain) )
+    if ( (percpu_ctxt[cpu].curr_vcpu == next) || is_idle_vcpu(next) )
     {
-        /* This may happen if next has been migrated by the scheduler. */
-        if ( unlikely(!cpus_empty(next->vcpu_dirty_cpumask)) )
-        {
-            ASSERT(!cpu_isset(cpu, next->vcpu_dirty_cpumask));
-            sync_vcpu_execstate(next);
-            ASSERT(cpus_empty(next->vcpu_dirty_cpumask));
-        }
+        local_irq_enable();
+    }
+    else
+    {
+        __context_switch();
 
-        local_irq_disable();
-        __context_switch();
+        /* Re-enable interrupts before restoring state which may fault. */
         local_irq_enable();
 
         if ( VMX_DOMAIN(next) )
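Note on the context_switch() hunk above: the rework keeps at most one CPU holding a vcpu's register state lazily, flushes that CPU via IPI if it is not the local one, and skips the full state switch when the incoming vcpu is already resident or is an idle vcpu. A condensed restatement of that control flow, using simplified stand-in types rather than the real Xen primitives:

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long cpumask_t;               /* one bit per CPU (sketch only) */
    #define cpu_isset(c, m)  (((m) >> (c)) & 1UL)
    #define cpus_empty(m)    ((m) == 0UL)
    #define cpus_weight(m)   __builtin_popcountl(m)

    struct vcpu { cpumask_t vcpu_dirty_cpumask; };

    /* Stand-ins for flush_tlb_mask() and __context_switch(). */
    static void flush_tlb_mask(cpumask_t m) { (void)m; }  /* remote CPUs sync their lazy state */
    static void do_full_switch(void)        { }

    void context_switch_sketch(unsigned int cpu, struct vcpu *next,
                               bool next_resident_or_idle)
    {
        cpumask_t dirty = next->vcpu_dirty_cpumask;

        /* Invariant: at most one CPU may hold next's state. */
        assert(cpus_weight(dirty) <= 1);

        /* If some other CPU still holds it, make that CPU write it back. */
        if (!cpu_isset(cpu, dirty) && !cpus_empty(dirty))
            flush_tlb_mask(dirty);

        /* Only load new register state when next is neither resident nor idle. */
        if (!next_resident_or_idle)
            do_full_switch();
    }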
--- a/xen/arch/x86/domain_build.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/x86/domain_build.c	Tue Jan 10 14:24:12 2006 +0000
@@ -366,27 +366,20 @@ int construct_dom0(struct domain *d,
         l2tab[(LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT)+i] =
             l2e_from_paddr((u32)l2tab + i*PAGE_SIZE, __PAGE_HYPERVISOR);
     }
-    {
-        unsigned long va;
-        for (va = PERDOMAIN_VIRT_START; va < PERDOMAIN_VIRT_END;
-             va += (1 << L2_PAGETABLE_SHIFT)) {
-            l2tab[va >> L2_PAGETABLE_SHIFT] =
-                l2e_from_paddr(__pa(d->arch.mm_perdomain_pt) +
-                               (va-PERDOMAIN_VIRT_START),
-                               __PAGE_HYPERVISOR);
-        }
-    }
     v->arch.guest_table = mk_pagetable((unsigned long)l3start);
 #else
     l2start = l2tab = (l2_pgentry_t *)mpt_alloc; mpt_alloc += PAGE_SIZE;
     memcpy(l2tab, &idle_pg_table[0], PAGE_SIZE);
     l2tab[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
         l2e_from_paddr((unsigned long)l2start, __PAGE_HYPERVISOR);
-    l2tab[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT] =
-        l2e_from_paddr(__pa(d->arch.mm_perdomain_pt), __PAGE_HYPERVISOR);
     v->arch.guest_table = mk_pagetable((unsigned long)l2start);
 #endif
 
+    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
+        l2tab[l2_linear_offset(PERDOMAIN_VIRT_START) + i] =
+            l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt) + i,
+                          __PAGE_HYPERVISOR);
+
     l2tab += l2_linear_offset(dsi.v_start);
     mfn = alloc_spfn;
     for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
--- a/xen/arch/x86/idle0_task.c	Tue Jan 10 14:23:56 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
-
-#include <xen/config.h>
-#include <xen/sched.h>
-#include <asm/desc.h>
-
-struct domain idle0_domain = {
-    domain_id:    IDLE_DOMAIN_ID,
-    domain_flags: DOMF_idle_domain,
-    refcnt:       ATOMIC_INIT(1)
-};
-
-struct vcpu idle0_vcpu = {
-    processor:    0,
-    cpu_affinity: CPU_MASK_CPU0,
-    domain:       &idle0_domain
-};
-
-struct tss_struct init_tss[NR_CPUS];
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
--- a/xen/arch/x86/mm.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/x86/mm.c	Tue Jan 10 14:24:12 2006 +0000
@@ -841,10 +841,11 @@ static int alloc_l2_table(struct pfn_inf
            L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
     pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
         l2e_from_pfn(pfn, __PAGE_HYPERVISOR);
-    pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
-        l2e_from_page(
-            virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt),
-            __PAGE_HYPERVISOR);
+    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
+        pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
+            l2e_from_page(
+                virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt) + i,
+                __PAGE_HYPERVISOR);
 #endif
 
     unmap_domain_page(pl2e);
--- a/xen/arch/x86/setup.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/x86/setup.c	Tue Jan 10 14:24:12 2006 +0000
@@ -81,6 +81,10 @@ extern void early_time_init(void);
 extern void initialize_keytable(void);
 extern void early_cpu_init(void);
 
+struct tss_struct init_tss[NR_CPUS];
+
+struct vcpu *idle_vcpu[NR_CPUS];
+
 extern unsigned long cpu0_stack[];
 
 struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
@@ -92,8 +96,6 @@ unsigned long mmu_cr4_features = X86_CR4
 #endif
 EXPORT_SYMBOL(mmu_cr4_features);
 
-struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu };
-
 int acpi_disabled;
 
 int acpi_force;
@@ -144,8 +146,8 @@ static struct e820entry e820_raw[E820MAX
 
 void __init __start_xen(multiboot_info_t *mbi)
 {
-    unsigned long vgdt, gdt_pfn;
     char *cmdline;
+    struct domain *idle_domain;
     unsigned long _initrd_start = 0, _initrd_len = 0;
     unsigned int initrdidx = 1;
     module_t *mod = (module_t *)__va(mbi->mods_addr);
@@ -163,9 +165,8 @@ void __init __start_xen(multiboot_info_t
     if ( (mbi->flags & MBI_CMDLINE) && (mbi->cmdline != 0) )
         cmdline_parse(__va(mbi->cmdline));
 
-    /* Must do this early -- e.g., spinlocks rely on get_current(). */
-    set_current(&idle0_vcpu);
-    set_processor_id(0);
+    set_current((struct vcpu *)0xfffff000); /* debug sanity */
+    set_processor_id(0); /* needed early, for smp_processor_id() */
 
     smp_prepare_boot_cpu();
 
@@ -382,6 +383,14 @@ void __init __start_xen(multiboot_info_t
 
     early_cpu_init();
 
+    scheduler_init();
+
+    idle_domain = do_createdomain(IDLE_DOMAIN_ID, 0);
+    BUG_ON(idle_domain == NULL);
+
+    set_current(idle_domain->vcpu[0]);
+    idle_vcpu[0] = current;
+
     paging_init();
 
     /* Unmap the first page of CPU0's stack. */
@@ -394,21 +403,6 @@ void __init __start_xen(multiboot_info_t
 
     sort_exception_tables();
 
-    if ( arch_do_createdomain(current) != 0 )
-        BUG();
-
-    /*
-     * Map default GDT into its final positions in the idle page table. As
-     * noted in arch_do_createdomain(), we must map for every possible VCPU#.
-     */
-    vgdt = GDT_VIRT_START(current) + FIRST_RESERVED_GDT_BYTE;
-    gdt_pfn = virt_to_phys(gdt_table) >> PAGE_SHIFT;
-    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
-    {
-        map_pages_to_xen(vgdt, gdt_pfn, 1, PAGE_HYPERVISOR);
-        vgdt += 1 << PDPT_VCPU_VA_SHIFT;
-    }
-
     find_smp_config();
 
     smp_alloc_memory();
@@ -435,8 +429,6 @@ void __init __start_xen(multiboot_info_t
 
     arch_init_memory();
 
-    scheduler_init();
-
     identify_cpu(&boot_cpu_data);
     if ( cpu_has_fxsr )
         set_in_cr4(X86_CR4_OSFXSR);
--- a/xen/arch/x86/smpboot.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/x86/smpboot.c	Tue Jan 10 14:24:12 2006 +0000
@@ -435,7 +435,7 @@ void __init start_secondary(void *unused
 
 	extern void percpu_traps_init(void);
 
-	set_current(idle_domain[cpu]);
+	set_current(idle_vcpu[cpu]);
 	set_processor_id(cpu);
 
 	percpu_traps_init();
@@ -761,7 +761,6 @@ static int __init do_boot_cpu(int apicid
  * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
  */
 {
-	struct domain *idle;
 	struct vcpu *v;
 	unsigned long boot_error;
 	int timeout, cpu;
@@ -770,14 +769,10 @@ static int __init do_boot_cpu(int apicid
 
 	cpu = ++cpucount;
 
-	if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL )
-		panic("failed 'createdomain' for CPU %d", cpu);
+	v = idle_vcpu[cpu] = alloc_vcpu(idle_vcpu[0]->domain, cpu, cpu);
+	BUG_ON(v == NULL);
 
-	v = idle_domain[cpu] = idle->vcpu[0];
-
-	set_bit(_DOMF_idle_domain, &idle->domain_flags);
-
-	v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+	v->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
 
 	/* start_eip had better be page-aligned! */
 	start_eip = setup_trampoline();
--- a/xen/arch/x86/traps.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/x86/traps.c	Tue Jan 10 14:24:12 2006 +0000
@@ -427,7 +427,7 @@ void propagate_page_fault(unsigned long
     tb->flags |= TBF_INTERRUPT;
 }
 
-static int handle_perdomain_mapping_fault(
+static int handle_gdt_ldt_mapping_fault(
     unsigned long offset, struct cpu_user_regs *regs)
 {
     extern int map_ldt_shadow_page(unsigned int);
@@ -437,14 +437,14 @@ static int handle_perdomain_mapping_faul
     int ret;
 
     /* Which vcpu's area did we fault in, and is it in the ldt sub-area? */
-    unsigned int is_ldt_area = (offset >> (PDPT_VCPU_VA_SHIFT-1)) & 1;
-    unsigned int vcpu_area   = (offset >> PDPT_VCPU_VA_SHIFT);
+    unsigned int is_ldt_area = (offset >> (GDT_LDT_VCPU_VA_SHIFT-1)) & 1;
+    unsigned int vcpu_area   = (offset >> GDT_LDT_VCPU_VA_SHIFT);
 
     /* Should never fault in another vcpu's area. */
     BUG_ON(vcpu_area != current->vcpu_id);
 
     /* Byte offset within the gdt/ldt sub-area. */
-    offset &= (1UL << (PDPT_VCPU_VA_SHIFT-1)) - 1UL;
+    offset &= (1UL << (GDT_LDT_VCPU_VA_SHIFT-1)) - 1UL;
 
     if ( likely(is_ldt_area) )
     {
@@ -490,9 +490,9 @@ static int fixup_page_fault(unsigned lon
     {
         if ( shadow_mode_external(d) && GUEST_CONTEXT(v, regs) )
             return shadow_fault(addr, regs);
-        if ( (addr >= PERDOMAIN_VIRT_START) && (addr < PERDOMAIN_VIRT_END) )
-            return handle_perdomain_mapping_fault(
-                addr - PERDOMAIN_VIRT_START, regs);
+        if ( (addr >= GDT_LDT_VIRT_START) && (addr < GDT_LDT_VIRT_END) )
+            return handle_gdt_ldt_mapping_fault(
+                addr - GDT_LDT_VIRT_START, regs);
     }
     else if ( unlikely(shadow_mode_enabled(d)) )
     {
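Note on the traps.c hunk above: a fault offset inside the renamed GDT/LDT area is decoded into the owning vcpu slot, the GDT-or-LDT half, and the byte offset within that half. A standalone restatement of that decoding, where the value 17 for GDT_LDT_VCPU_VA_SHIFT is an assumed example, not taken from this changeset:

    #define GDT_LDT_VCPU_VA_SHIFT_EG 17   /* assumed example value */

    static void decode_gdt_ldt_offset(unsigned long offset,
                                      unsigned int *vcpu_area,
                                      unsigned int *is_ldt_area,
                                      unsigned long *sub_offset)
    {
        *vcpu_area   = offset >> GDT_LDT_VCPU_VA_SHIFT_EG;              /* which vcpu's slot */
        *is_ldt_area = (offset >> (GDT_LDT_VCPU_VA_SHIFT_EG - 1)) & 1;  /* GDT half or LDT half */
        *sub_offset  = offset & ((1UL << (GDT_LDT_VCPU_VA_SHIFT_EG - 1)) - 1UL);
    }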
--- a/xen/arch/x86/x86_32/domain_page.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/x86/x86_32/domain_page.c	Tue Jan 10 14:24:12 2006 +0000
@@ -23,28 +23,24 @@
 #define MAPCACHE_ORDER    10
 #define MAPCACHE_ENTRIES  (1 << MAPCACHE_ORDER)
 
-l1_pgentry_t *mapcache;
-static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS];
-static spinlock_t map_lock = SPIN_LOCK_UNLOCKED;
-
 /* Use a spare PTE bit to mark entries ready for recycling. */
 #define READY_FOR_TLB_FLUSH (1<<10)
 
 static void flush_all_ready_maps(void)
 {
-    l1_pgentry_t *cache = mapcache;
+    struct mapcache *cache = &current->domain->arch.mapcache;
     unsigned int i;
 
     for ( i = 0; i < MAPCACHE_ENTRIES; i++ )
-        if ( (l1e_get_flags(cache[i]) & READY_FOR_TLB_FLUSH) )
-            cache[i] = l1e_empty();
+        if ( (l1e_get_flags(cache->l1tab[i]) & READY_FOR_TLB_FLUSH) )
+            cache->l1tab[i] = l1e_empty();
 }
 
 void *map_domain_pages(unsigned long pfn, unsigned int order)
 {
     unsigned long va;
-    unsigned int idx, i, flags, cpu = smp_processor_id();
-    l1_pgentry_t *cache = mapcache;
+    unsigned int idx, i, flags, vcpu = current->vcpu_id;
+    struct mapcache *cache = &current->domain->arch.mapcache;
 #ifndef NDEBUG
     unsigned int flush_count = 0;
 #endif
@@ -52,37 +48,41 @@ void *map_domain_pages(unsigned long pfn
     ASSERT(!in_irq());
     perfc_incrc(map_domain_page_count);
 
-    spin_lock(&map_lock);
+    /* If we are the idle domain, ensure that we run on our own page tables. */
+    if ( unlikely(is_idle_vcpu(current)) )
+        __sync_lazy_execstate();
+
+    spin_lock(&cache->lock);
 
     /* Has some other CPU caused a wrap? We must flush if so. */
-    if ( epoch != shadow_epoch[cpu] )
+    if ( cache->epoch != cache->shadow_epoch[vcpu] )
     {
         perfc_incrc(domain_page_tlb_flush);
         local_flush_tlb();
-        shadow_epoch[cpu] = epoch;
+        cache->shadow_epoch[vcpu] = cache->epoch;
     }
 
     do {
-        idx = map_idx = (map_idx + 1) & (MAPCACHE_ENTRIES - 1);
+        idx = cache->cursor = (cache->cursor + 1) & (MAPCACHE_ENTRIES - 1);
         if ( unlikely(idx == 0) )
         {
             ASSERT(flush_count++ == 0);
             flush_all_ready_maps();
             perfc_incrc(domain_page_tlb_flush);
             local_flush_tlb();
-            shadow_epoch[cpu] = ++epoch;
+            cache->shadow_epoch[vcpu] = ++cache->epoch;
         }
 
         flags = 0;
         for ( i = 0; i < (1U << order); i++ )
-            flags |= l1e_get_flags(cache[idx+i]);
+            flags |= l1e_get_flags(cache->l1tab[idx+i]);
     }
     while ( flags & _PAGE_PRESENT );
 
     for ( i = 0; i < (1U << order); i++ )
-        cache[idx+i] = l1e_from_pfn(pfn+i, __PAGE_HYPERVISOR);
+        cache->l1tab[idx+i] = l1e_from_pfn(pfn+i, __PAGE_HYPERVISOR);
 
-    spin_unlock(&map_lock);
+    spin_unlock(&cache->lock);
 
     va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT);
     return (void *)va;
@@ -91,9 +91,13 @@ void *map_domain_pages(unsigned long pfn
 void unmap_domain_pages(void *va, unsigned int order)
 {
     unsigned int idx, i;
+    struct mapcache *cache = &current->domain->arch.mapcache;
+
     ASSERT((void *)MAPCACHE_VIRT_START <= va);
     ASSERT(va < (void *)MAPCACHE_VIRT_END);
+
     idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
+
     for ( i = 0; i < (1U << order); i++ )
-        l1e_add_flags(mapcache[idx+i], READY_FOR_TLB_FLUSH);
+        l1e_add_flags(cache->l1tab[idx+i], READY_FOR_TLB_FLUSH);
 }
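Note on the domain_page.c hunk above: the global map_domain_page() cache becomes a per-domain structure, but the map/unmap calling pattern is unchanged. A caller-side sketch, with prototypes declared as in this file and a 4KB page size assumed for the memset length:

    #include <string.h>

    /* Prototypes as used in this file; the implementations live in Xen. */
    void *map_domain_pages(unsigned long pfn, unsigned int order);
    void unmap_domain_pages(void *va, unsigned int order);

    void zero_frame(unsigned long pfn)
    {
        void *va = map_domain_pages(pfn, 0);  /* claims a slot in the per-domain mapcache */
        memset(va, 0, 4096);                  /* PAGE_SIZE assumed to be 4KB here */
        unmap_domain_pages(va, 0);            /* marks the slot READY_FOR_TLB_FLUSH */
    }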
--- a/xen/arch/x86/x86_32/mm.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/x86/x86_32/mm.c	Tue Jan 10 14:24:12 2006 +0000
@@ -29,8 +29,6 @@
 #include <asm/fixmap.h>
 #include <public/memory.h>
 
-extern l1_pgentry_t *mapcache;
-
 unsigned int PAGE_HYPERVISOR         = __PAGE_HYPERVISOR;
 unsigned int PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
 
@@ -68,7 +66,7 @@ void __init paging_init(void)
     void *ioremap_pt;
     unsigned long v;
     struct pfn_info *pg;
-    int i, mapcache_order;
+    int i;
 
 #ifdef CONFIG_X86_PAE
     printk("PAE enabled, limit: %d GB\n", MACHPHYS_MBYTES);
@@ -76,7 +74,7 @@ void __init paging_init(void)
     printk("PAE disabled.\n");
 #endif
 
-    idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
 
     if ( cpu_has_pge )
     {
@@ -121,14 +119,12 @@ void __init paging_init(void)
             l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
     }
 
-    /* Set up mapping cache for domain pages. */
-    mapcache_order = get_order_from_bytes(
-        MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
-    mapcache = alloc_xenheap_pages(mapcache_order);
-    memset(mapcache, 0, PAGE_SIZE << mapcache_order);
-    for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
-        idle_pg_table_l2[l2_linear_offset(MAPCACHE_VIRT_START) + i] =
-            l2e_from_page(virt_to_page(mapcache) + i, __PAGE_HYPERVISOR);
+    /* Install per-domain mappings for idle domain. */
+    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
+        idle_pg_table_l2[l2_linear_offset(PERDOMAIN_VIRT_START) + i] =
+            l2e_from_page(virt_to_page(idle_vcpu[0]->domain->
+                                       arch.mm_perdomain_pt) + i,
+                          __PAGE_HYPERVISOR);
 }
 
 void __init zap_low_mappings(l2_pgentry_t *base)
--- a/xen/arch/x86/x86_64/mm.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/arch/x86/x86_64/mm.c	Tue Jan 10 14:24:12 2006 +0000
@@ -80,7 +80,7 @@ void __init paging_init(void)
     l2_pgentry_t *l2_ro_mpt;
     struct pfn_info *pg;
 
-    idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
+    idle_vcpu[0]->arch.monitor_table = mk_pagetable(__pa(idle_pg_table));
 
     /* Create user-accessible L2 directory to map the MPT for guests. */
     l3_ro_mpt = alloc_xenheap_page();
@@ -119,6 +119,12 @@ void __init paging_init(void)
     /* Set up linear page table mapping. */
     idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
         l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);
+
+    /* Install per-domain mappings for idle domain. */
+    idle_pg_table[l4_table_offset(PERDOMAIN_VIRT_START)] =
+        l4e_from_page(
+            virt_to_page(idle_vcpu[0]->domain->arch.mm_perdomain_l3),
+            __PAGE_HYPERVISOR);
 }
 
 void __init zap_low_mappings(void)
--- a/xen/common/domain.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/common/domain.c	Tue Jan 10 14:24:12 2006 +0000
@@ -46,9 +46,7 @@ struct domain *do_createdomain(domid_t d
     INIT_LIST_HEAD(&d->page_list);
     INIT_LIST_HEAD(&d->xenpage_list);
 
-    if ( d->domain_id == IDLE_DOMAIN_ID )
-        set_bit(_DOMF_idle_domain, &d->domain_flags);
-    else
+    if ( !is_idle_domain(d) )
         set_bit(_DOMF_ctrl_pause, &d->domain_flags);
 
     if ( !is_idle_domain(d) &&
--- a/xen/common/sched_bvt.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/common/sched_bvt.c	Tue Jan 10 14:24:12 2006 +0000
@@ -220,7 +220,7 @@ static void bvt_add_task(struct vcpu *v)
 
     einf->vcpu = v;
 
-    if ( is_idle_domain(v->domain) )
+    if ( is_idle_vcpu(v) )
     {
         einf->avt = einf->evt = ~0U;
         BUG_ON(__task_on_runqueue(v));
@@ -268,7 +268,7 @@ static void bvt_wake(struct vcpu *v)
         ((einf->evt - curr_evt) / BVT_INFO(curr->domain)->mcu_advance) +
         ctx_allow;
 
-    if ( is_idle_domain(curr->domain) || (einf->evt <= curr_evt) )
+    if ( is_idle_vcpu(curr) || (einf->evt <= curr_evt) )
         cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
     else if ( schedule_data[cpu].s_timer.expires > r_time )
         set_ac_timer(&schedule_data[cpu].s_timer, r_time);
@@ -399,7 +399,7 @@ static struct task_slice bvt_do_schedule
     ASSERT(prev_einf != NULL);
     ASSERT(__task_on_runqueue(prev));
 
-    if ( likely(!is_idle_domain(prev->domain)) )
+    if ( likely(!is_idle_vcpu(prev)) )
     {
         prev_einf->avt = calc_avt(prev, now);
         prev_einf->evt = calc_evt(prev, prev_einf->avt);
@@ -490,13 +490,13 @@ static struct task_slice bvt_do_schedule
     }
 
     /* work out time for next run through scheduler */
-    if ( is_idle_domain(next->domain) )
+    if ( is_idle_vcpu(next) )
     {
         r_time = ctx_allow;
         goto sched_done;
     }
 
-    if ( (next_prime == NULL) || is_idle_domain(next_prime->domain) )
+    if ( (next_prime == NULL) || is_idle_vcpu(next_prime) )
     {
         /* We have only one runnable task besides the idle task. */
         r_time = 10 * ctx_allow; /* RN: random constant */
--- a/xen/common/sched_sedf.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/common/sched_sedf.c	Tue Jan 10 14:24:12 2006 +0000
@@ -396,7 +396,7 @@ static void sedf_add_task(struct vcpu *d
         INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q]));
         INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q]));
 
-        if ( !is_idle_domain(d->domain) )
+        if ( !is_idle_vcpu(d) )
         {
             extraq_check(d);
         }
@@ -777,7 +777,7 @@ static struct task_slice sedf_do_schedul
     struct task_slice      ret;
 
     /*idle tasks don't need any of the following stuf*/
-    if (is_idle_domain(current->domain))
+    if ( is_idle_vcpu(current) )
         goto check_waitq;
 
     /* create local state of the status of the domain, in order to avoid
@@ -874,7 +874,7 @@ static void sedf_sleep(struct vcpu *d)
     PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",
           d->domain->domain_id, d->vcpu_id);
 
-    if ( is_idle_domain(d->domain) )
+    if ( is_idle_vcpu(d) )
         return;
 
     EDOM_INFO(d)->status |= SEDF_ASLEEP;
@@ -1194,7 +1194,7 @@ static void unblock_long_burst(struct se
 static inline int get_run_type(struct vcpu* d)
 {
     struct sedf_vcpu_info* inf = EDOM_INFO(d);
-    if (is_idle_domain(d->domain))
+    if (is_idle_vcpu(d))
         return DOMAIN_IDLE;
     if (inf->status & EXTRA_RUN_PEN)
         return DOMAIN_EXTRA_PEN;
@@ -1258,7 +1258,7 @@ void sedf_wake(struct vcpu *d)
     PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id,
           d->vcpu_id);
 
-    if ( unlikely(is_idle_domain(d->domain)) )
+    if ( unlikely(is_idle_vcpu(d)) )
         return;
 
     if ( unlikely(__task_on_queue(d)) )
--- a/xen/common/schedule.c	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/common/schedule.c	Tue Jan 10 14:24:12 2006 +0000
@@ -140,12 +140,10 @@ struct domain *alloc_domain(void)
  */
 void sched_add_domain(struct vcpu *v)
 {
-    struct domain *d = v->domain;
-
     /* Initialise the per-domain timer. */
     init_ac_timer(&v->timer, dom_timer_fn, v, v->processor);
 
-    if ( is_idle_domain(d) )
+    if ( is_idle_vcpu(v) )
     {
         schedule_data[v->processor].curr = v;
         schedule_data[v->processor].idle = v;
@@ -153,7 +151,7 @@ void sched_add_domain(struct vcpu *v)
     }
 
     SCHED_OP(add_task, v);
-    TRACE_2D(TRC_SCHED_DOM_ADD, d->domain_id, v->vcpu_id);
+    TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id);
 }
 
 void sched_rem_domain(struct vcpu *v)
@@ -435,7 +433,7 @@ static void __enter_scheduler(void)
         prev->wokenup = now;
 
 #if defined(WAKE_HISTO)
-    if ( !is_idle_domain(next->domain) && next->wokenup )
+    if ( !is_idle_vcpu(next) && next->wokenup )
     {
         ulong diff = (ulong)(now - next->wokenup);
         diff /= (ulong)MILLISECS(1);
@@ -445,7 +443,7 @@ static void __enter_scheduler(void)
     next->wokenup = (s_time_t)0;
 #elif defined(BLOCKTIME_HISTO)
     prev->lastdeschd = now;
-    if ( !is_idle_domain(next->domain) )
+    if ( !is_idle_vcpu(next) )
     {
         ulong diff = (ulong)((now - next->lastdeschd) / MILLISECS(10));
         if (diff <= BUCKETS-2)  schedule_data[cpu].hist[diff]++;
@@ -462,7 +460,7 @@ static void __enter_scheduler(void)
     prev->sleep_tick = schedule_data[cpu].tick;
 
     /* Ensure that the domain has an up-to-date time base. */
-    if ( !is_idle_domain(next->domain) )
+    if ( !is_idle_vcpu(next) )
     {
         update_dom_time(next);
         if ( next->sleep_tick != schedule_data[cpu].tick )
@@ -499,7 +497,7 @@ static void t_timer_fn(void *unused)
 
     schedule_data[cpu].tick++;
 
-    if ( !is_idle_domain(v->domain) )
+    if ( !is_idle_vcpu(v) )
     {
         update_dom_time(v);
         send_guest_virq(v, VIRQ_TIMER);
@@ -533,9 +531,6 @@ void __init scheduler_init(void)
         init_ac_timer(&t_timer[i], t_timer_fn, NULL, i);
     }
 
-    schedule_data[0].curr = idle_domain[0];
-    schedule_data[0].idle = idle_domain[0];
-
     for ( i = 0; schedulers[i] != NULL; i++ )
     {
         ops = *schedulers[i];
@@ -548,10 +543,16 @@ void __init scheduler_init(void)
 
     printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
 
-    rc = SCHED_OP(alloc_task, idle_domain[0]);
-    BUG_ON(rc < 0);
+    if ( idle_vcpu[0] != NULL )
+    {
+        schedule_data[0].curr = idle_vcpu[0];
+        schedule_data[0].idle = idle_vcpu[0];
 
-    sched_add_domain(idle_domain[0]);
+        rc = SCHED_OP(alloc_task, idle_vcpu[0]);
+        BUG_ON(rc < 0);
+
+        sched_add_domain(idle_vcpu[0]);
+    }
 }
 
 /*
--- a/xen/include/asm-x86/config.h	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/include/asm-x86/config.h	Tue Jan 10 14:24:12 2006 +0000
@@ -148,7 +148,8 @@ extern unsigned long _end; /* standard E
 #define SH_LINEAR_PT_VIRT_END   (SH_LINEAR_PT_VIRT_START + PML4_ENTRY_BYTES)
 /* Slot 260: per-domain mappings. */
 #define PERDOMAIN_VIRT_START    (PML4_ADDR(260))
-#define PERDOMAIN_VIRT_END      (PERDOMAIN_VIRT_START + PML4_ENTRY_BYTES)
+#define PERDOMAIN_VIRT_END      (PERDOMAIN_VIRT_START + (PERDOMAIN_MBYTES<<20))
+#define PERDOMAIN_MBYTES        ((unsigned long)GDT_LDT_MBYTES)
 /* Slot 261: machine-to-phys conversion table (16GB). */
 #define RDWR_MPT_VIRT_START     (PML4_ADDR(261))
 #define RDWR_MPT_VIRT_END       (RDWR_MPT_VIRT_START + (16UL<<30))
@@ -195,8 +196,7 @@ extern unsigned long _end; /* standard E
  *  ------ ------
  *  I/O remapping area                                    ( 4MB)
  *  Direct-map (1:1) area [Xen code/data/heap]            (12MB)
- *  map_domain_page cache                                 ( 4MB)
- *  Per-domain mappings                                   ( 4MB)
+ *  Per-domain mappings (inc. 4MB map_domain_page cache)  ( 4MB)
 *  Shadow linear pagetable                                ( 4MB) ( 8MB)
 *  Guest linear pagetable                                 ( 4MB) ( 8MB)
 *  Machine-to-physical translation table [writable]       ( 4MB) (16MB)
@@ -209,7 +209,7 @@ extern unsigned long _end; /* standard E
 #define IOREMAP_MBYTES   4
 #define DIRECTMAP_MBYTES 12
 #define MAPCACHE_MBYTES  4
-#define PERDOMAIN_MBYTES 4
+#define PERDOMAIN_MBYTES 8
 
 #ifdef CONFIG_X86_PAE
 # define LINEARPT_MBYTES 8
@@ -227,7 +227,7 @@ extern unsigned long _end; /* standard E
 #define DIRECTMAP_VIRT_START  (DIRECTMAP_VIRT_END - (DIRECTMAP_MBYTES<<20))
 #define MAPCACHE_VIRT_END     DIRECTMAP_VIRT_START
 #define MAPCACHE_VIRT_START   (MAPCACHE_VIRT_END - (MAPCACHE_MBYTES<<20))
-#define PERDOMAIN_VIRT_END    MAPCACHE_VIRT_START
+#define PERDOMAIN_VIRT_END    DIRECTMAP_VIRT_START
 #define PERDOMAIN_VIRT_START  (PERDOMAIN_VIRT_END - (PERDOMAIN_MBYTES<<20))
 #define SH_LINEAR_PT_VIRT_END PERDOMAIN_VIRT_START
 #define SH_LINEAR_PT_VIRT_START (SH_LINEAR_PT_VIRT_END - (LINEARPT_MBYTES<<20))
@@ -282,14 +282,21 @@ extern unsigned long _end; /* standard E
 extern unsigned long xenheap_phys_end; /* user-configurable */
 #endif
 
-#define GDT_VIRT_START(ed)    \
-    (PERDOMAIN_VIRT_START + ((ed)->vcpu_id << PDPT_VCPU_VA_SHIFT))
-#define LDT_VIRT_START(ed)    \
-    (GDT_VIRT_START(ed) + (64*1024))
+/* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
+#define GDT_LDT_VCPU_SHIFT       5
+#define GDT_LDT_VCPU_VA_SHIFT    (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT)
+#define GDT_LDT_MBYTES           (MAX_VIRT_CPUS >> (20-GDT_LDT_VCPU_VA_SHIFT))
+#define GDT_LDT_VIRT_START       PERDOMAIN_VIRT_START
+#define GDT_LDT_VIRT_END         (GDT_LDT_VIRT_START + (GDT_LDT_MBYTES << 20))
 
-#define PDPT_VCPU_SHIFT       5
-#define PDPT_VCPU_VA_SHIFT    (PDPT_VCPU_SHIFT + PAGE_SHIFT)
-#define PDPT_L1_ENTRIES       (MAX_VIRT_CPUS << PDPT_VCPU_SHIFT)
+/* The address of a particular VCPU's GDT or LDT. */
+#define GDT_VIRT_START(v)    \
+    (PERDOMAIN_VIRT_START + ((v)->vcpu_id << GDT_LDT_VCPU_VA_SHIFT))
+#define LDT_VIRT_START(v)    \
+    (GDT_VIRT_START(v) + (64*1024))
+
+#define PDPT_L1_ENTRIES       \
+    ((PERDOMAIN_VIRT_END - PERDOMAIN_VIRT_START) >> PAGE_SHIFT)
 #define PDPT_L2_ENTRIES       \
     ((PDPT_L1_ENTRIES + (1 << PAGETABLE_ORDER) - 1) >> PAGETABLE_ORDER)
 
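Note on the config.h hunk above: working the new constants through with concrete values shows how the per-domain area is laid out. MAX_VIRT_CPUS = 32, PAGE_SHIFT = 12, and a 1024-entry (non-PAE) L1 page table are assumed here for the example; they are not part of this changeset:

    /* Worked example of the new per-domain-mapping arithmetic (assumed values). */
    enum {
        GDT_LDT_VCPU_SHIFT_eg    = 5,
        GDT_LDT_VCPU_VA_SHIFT_eg = GDT_LDT_VCPU_SHIFT_eg + 12,            /* 17: 128KB of VA per VCPU  */
        GDT_LDT_MBYTES_eg        = 32 >> (20 - GDT_LDT_VCPU_VA_SHIFT_eg), /* 4MB GDT/LDT sub-area      */
        PERDOMAIN_MBYTES_x86_32  = 8,                                     /* 4MB GDT/LDT + 4MB mapcache */
        PDPT_L1_ENTRIES_eg       = (PERDOMAIN_MBYTES_x86_32 << 20) >> 12, /* 2048 L1 entries           */
        PDPT_L2_ENTRIES_eg       = (PDPT_L1_ENTRIES_eg + 1023) / 1024     /* 2 L2 entries              */
    };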
--- a/xen/include/asm-x86/domain.h	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/include/asm-x86/domain.h	Tue Jan 10 14:24:12 2006 +0000
@@ -13,6 +13,13 @@ struct trap_bounce {
     unsigned long eip;
 };
 
+struct mapcache {
+    l1_pgentry_t *l1tab;
+    unsigned int cursor;
+    unsigned int epoch, shadow_epoch[MAX_VIRT_CPUS];
+    spinlock_t lock;
+};
+
 struct arch_domain
 {
     l1_pgentry_t *mm_perdomain_pt;
@@ -21,6 +28,11 @@ struct arch_domain
     l3_pgentry_t *mm_perdomain_l3;
 #endif
 
+#ifdef CONFIG_X86_32
+    /* map_domain_page() mapping cache. */
+    struct mapcache mapcache;
+#endif
+
     /* Writable pagetables. */
     struct ptwr_info ptwr[2];
 
--- a/xen/include/xen/sched.h	Tue Jan 10 14:23:56 2006 +0000
+++ b/xen/include/xen/sched.h	Tue Jan 10 14:24:12 2006 +0000
@@ -172,12 +172,10 @@ struct domain_setup_info
     char *xen_section_string;
 };
 
-extern struct domain idle0_domain;
-extern struct vcpu   idle0_vcpu;
-
-extern struct vcpu *idle_domain[NR_CPUS];
+extern struct vcpu *idle_vcpu[NR_CPUS];
 #define IDLE_DOMAIN_ID   (0x7FFFU)
-#define is_idle_domain(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags))
+#define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
+#define is_idle_vcpu(v)   (is_idle_domain((v)->domain))
 
 struct vcpu *alloc_vcpu(
     struct domain *d, unsigned int vcpu_id, unsigned int cpu_id);
@@ -367,23 +365,20 @@ extern struct domain *domain_list;
 /*
  * Per-domain flags (domain_flags).
  */
- /* Is this one of the per-CPU idle domains? */
-#define _DOMF_idle_domain      0
-#define DOMF_idle_domain       (1UL<<_DOMF_idle_domain)
 /* Is this domain privileged? */
-#define _DOMF_privileged       1
+#define _DOMF_privileged       0
 #define DOMF_privileged        (1UL<<_DOMF_privileged)
 /* Guest shut itself down for some reason. */
-#define _DOMF_shutdown         2
+#define _DOMF_shutdown         1
 #define DOMF_shutdown          (1UL<<_DOMF_shutdown)
 /* Death rattle. */
-#define _DOMF_dying            3
+#define _DOMF_dying            2
 #define DOMF_dying             (1UL<<_DOMF_dying)
 /* Domain is paused by controller software. */
-#define _DOMF_ctrl_pause       4
+#define _DOMF_ctrl_pause       3
 #define DOMF_ctrl_pause        (1UL<<_DOMF_ctrl_pause)
 /* Domain is being debugged by controller software. */
-#define _DOMF_debugging        5
+#define _DOMF_debugging        4
 #define DOMF_debugging         (1UL<<_DOMF_debugging)
 

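Note on the sched.h hunk above: the DOMF_idle_domain flag bit disappears; the idle property is now derived from the reserved domain id, and an is_idle_vcpu() helper is layered on top. Restated as standalone inline functions, with the struct definitions simplified for illustration only:

    #define IDLE_DOMAIN_ID 0x7FFFU

    struct domain { unsigned int domain_id; };
    struct vcpu   { struct domain *domain; };

    static inline int is_idle_domain(const struct domain *d)
    {
        return d->domain_id == IDLE_DOMAIN_ID;   /* no per-domain flag bit needed */
    }

    static inline int is_idle_vcpu(const struct vcpu *v)
    {
        return is_idle_domain(v->domain);
    }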