direct-io.hg
changeset 8499:84cf56328ce0
Clean up xen-internal representation of per-vcpu
physical cpu affinity. Rename idle_task variables and
macros to idle_domain.
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Fri Jan 06 12:25:47 2006 +0100 (2006-01-06) |
parents | 07306e35a5fc |
children | dd5649730b32 |
files | xen/arch/ia64/linux-xen/process-linux-xen.c xen/arch/ia64/vmx/vlsapic.c xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/xen/process.c xen/arch/ia64/xen/vcpu.c xen/arch/ia64/xen/xenmisc.c xen/arch/ia64/xen/xensetup.c xen/arch/ia64/xen/xentime.c xen/arch/x86/domain.c xen/arch/x86/idle0_task.c xen/arch/x86/setup.c xen/arch/x86/smpboot.c xen/common/dom0_ops.c xen/common/domain.c xen/common/sched_bvt.c xen/common/sched_sedf.c xen/common/schedule.c xen/include/xen/sched.h |
line diff
1.1 --- a/xen/arch/ia64/linux-xen/process-linux-xen.c Thu Jan 05 12:19:12 2006 +0100 1.2 +++ b/xen/arch/ia64/linux-xen/process-linux-xen.c Fri Jan 06 12:25:47 2006 +0100 1.3 @@ -241,7 +241,7 @@ static inline void play_dead(void) 1.4 1.5 max_xtp(); 1.6 local_irq_disable(); 1.7 - idle_task_exit(); 1.8 + idle_domain_exit(); 1.9 ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]); 1.10 /* 1.11 * The above is a point of no-return, the processor is
2.1 --- a/xen/arch/ia64/vmx/vlsapic.c Thu Jan 05 12:19:12 2006 +0100 2.2 +++ b/xen/arch/ia64/vmx/vlsapic.c Fri Jan 06 12:25:47 2006 +0100 2.3 @@ -218,7 +218,7 @@ void vtm_interruption_update(VCPU *vcpu, 2.4 */ 2.5 void vtm_domain_out(VCPU *vcpu) 2.6 { 2.7 - if(!is_idle_task(vcpu->domain)) 2.8 + if(!is_idle_domain(vcpu->domain)) 2.9 rem_ac_timer(&vcpu->arch.arch_vmx.vtm.vtm_timer); 2.10 } 2.11 2.12 @@ -230,7 +230,7 @@ void vtm_domain_in(VCPU *vcpu) 2.13 { 2.14 vtime_t *vtm; 2.15 2.16 - if(!is_idle_task(vcpu->domain)) { 2.17 + if(!is_idle_domain(vcpu->domain)) { 2.18 vtm=&(vcpu->arch.arch_vmx.vtm); 2.19 vtm_interruption_update(vcpu, vtm); 2.20 }
3.1 --- a/xen/arch/ia64/vmx/vmx_process.c Thu Jan 05 12:19:12 2006 +0100 3.2 +++ b/xen/arch/ia64/vmx/vmx_process.c Fri Jan 06 12:25:47 2006 +0100 3.3 @@ -231,7 +231,7 @@ void leave_hypervisor_tail(struct pt_reg 3.4 struct domain *d = current->domain; 3.5 struct vcpu *v = current; 3.6 // FIXME: Will this work properly if doing an RFI??? 3.7 - if (!is_idle_task(d) ) { // always comes from guest 3.8 + if (!is_idle_domain(d) ) { // always comes from guest 3.9 extern void vmx_dorfirfi(void); 3.10 struct pt_regs *user_regs = vcpu_regs(current); 3.11 if (local_softirq_pending())
4.1 --- a/xen/arch/ia64/xen/process.c Thu Jan 05 12:19:12 2006 +0100 4.2 +++ b/xen/arch/ia64/xen/process.c Fri Jan 06 12:25:47 2006 +0100 4.3 @@ -252,7 +252,7 @@ void deliver_pending_interrupt(struct pt 4.4 struct domain *d = current->domain; 4.5 struct vcpu *v = current; 4.6 // FIXME: Will this work properly if doing an RFI??? 4.7 - if (!is_idle_task(d) && user_mode(regs)) { 4.8 + if (!is_idle_domain(d) && user_mode(regs)) { 4.9 //vcpu_poke_timer(v); 4.10 if (vcpu_deliverable_interrupts(v)) 4.11 reflect_extint(regs);
5.1 --- a/xen/arch/ia64/xen/vcpu.c Thu Jan 05 12:19:12 2006 +0100 5.2 +++ b/xen/arch/ia64/xen/vcpu.c Fri Jan 06 12:25:47 2006 +0100 5.3 @@ -1085,7 +1085,7 @@ void vcpu_set_next_timer(VCPU *vcpu) 5.4 /* gloss over the wraparound problem for now... we know it exists 5.5 * but it doesn't matter right now */ 5.6 5.7 - if (is_idle_task(vcpu->domain)) { 5.8 + if (is_idle_domain(vcpu->domain)) { 5.9 // printf("****** vcpu_set_next_timer called during idle!!\n"); 5.10 vcpu_safe_set_itm(s); 5.11 return;
6.1 --- a/xen/arch/ia64/xen/xenmisc.c Thu Jan 05 12:19:12 2006 +0100 6.2 +++ b/xen/arch/ia64/xen/xenmisc.c Fri Jan 06 12:25:47 2006 +0100 6.3 @@ -320,7 +320,7 @@ if (!i--) { printk("+",id); i = 1000000; 6.4 ia64_set_iva(&ia64_ivt); 6.5 ia64_set_pta(VHPT_ADDR | (1 << 8) | (VHPT_SIZE_LOG2 << 2) | 6.6 VHPT_ENABLED); 6.7 - if (!is_idle_task(current->domain)) { 6.8 + if (!is_idle_domain(current->domain)) { 6.9 load_region_regs(current); 6.10 vcpu_load_kernel_regs(current); 6.11 if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
7.1 --- a/xen/arch/ia64/xen/xensetup.c Thu Jan 05 12:19:12 2006 +0100 7.2 +++ b/xen/arch/ia64/xen/xensetup.c Fri Jan 06 12:25:47 2006 +0100 7.3 @@ -26,7 +26,7 @@ unsigned long xenheap_phys_end; 7.4 7.5 char saved_command_line[COMMAND_LINE_SIZE]; 7.6 7.7 -struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu }; 7.8 +struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu }; 7.9 7.10 cpumask_t cpu_present_map; 7.11 7.12 @@ -382,8 +382,7 @@ printk("About to call do_createdomain()\ 7.13 panic("Could not set up DOM0 guest OS\n"); 7.14 7.15 /* PIN domain0 on CPU 0. */ 7.16 - dom0->vcpu[0]->cpumap=1; 7.17 - set_bit(_VCPUF_cpu_pinned, &dom0->vcpu[0]->vcpu_flags); 7.18 + dom0->vcpu[0]->cpumask = cpumask_of_cpu(0); 7.19 7.20 #ifdef CLONE_DOMAIN0 7.21 {
8.1 --- a/xen/arch/ia64/xen/xentime.c Thu Jan 05 12:19:12 2006 +0100 8.2 +++ b/xen/arch/ia64/xen/xentime.c Fri Jan 06 12:25:47 2006 +0100 8.3 @@ -127,7 +127,7 @@ xen_timer_interrupt (int irq, void *dev_ 8.4 vcpu_wake(dom0->vcpu[0]); 8.5 } 8.6 } 8.7 - if (!is_idle_task(current->domain)) { 8.8 + if (!is_idle_domain(current->domain)) { 8.9 if (vcpu_timer_expired(current)) { 8.10 vcpu_pend_timer(current); 8.11 // ensure another timer interrupt happens even if domain doesn't
9.1 --- a/xen/arch/x86/domain.c Thu Jan 05 12:19:12 2006 +0100 9.2 +++ b/xen/arch/x86/domain.c Fri Jan 06 12:25:47 2006 +0100 9.3 @@ -51,12 +51,12 @@ struct percpu_ctxt { 9.4 } __cacheline_aligned; 9.5 static struct percpu_ctxt percpu_ctxt[NR_CPUS]; 9.6 9.7 -static void continue_idle_task(struct vcpu *v) 9.8 +static void continue_idle_domain(struct vcpu *v) 9.9 { 9.10 reset_stack_and_jump(idle_loop); 9.11 } 9.12 9.13 -static void continue_nonidle_task(struct vcpu *v) 9.14 +static void continue_nonidle_domain(struct vcpu *v) 9.15 { 9.16 reset_stack_and_jump(ret_from_intr); 9.17 } 9.18 @@ -92,10 +92,10 @@ void startup_cpu_idle_loop(void) 9.19 { 9.20 struct vcpu *v = current; 9.21 9.22 - ASSERT(is_idle_task(v->domain)); 9.23 + ASSERT(is_idle_domain(v->domain)); 9.24 percpu_ctxt[smp_processor_id()].curr_vcpu = v; 9.25 cpu_set(smp_processor_id(), v->domain->cpumask); 9.26 - v->arch.schedule_tail = continue_idle_task; 9.27 + v->arch.schedule_tail = continue_idle_domain; 9.28 9.29 reset_stack_and_jump(idle_loop); 9.30 } 9.31 @@ -259,7 +259,7 @@ int arch_do_createdomain(struct vcpu *v) 9.32 int i; 9.33 #endif 9.34 9.35 - if ( is_idle_task(d) ) 9.36 + if ( is_idle_domain(d) ) 9.37 return 0; 9.38 9.39 d->arch.ioport_caps = 9.40 @@ -276,11 +276,10 @@ int arch_do_createdomain(struct vcpu *v) 9.41 return rc; 9.42 } 9.43 9.44 - v->arch.schedule_tail = continue_nonidle_task; 9.45 + v->arch.schedule_tail = continue_nonidle_domain; 9.46 9.47 memset(d->shared_info, 0, PAGE_SIZE); 9.48 v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id]; 9.49 - v->cpumap = CPUMAP_RUNANYWHERE; 9.50 SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d); 9.51 9.52 pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t)); 9.53 @@ -705,7 +704,7 @@ static void __context_switch(void) 9.54 struct vcpu *p = percpu_ctxt[cpu].curr_vcpu; 9.55 struct vcpu *n = current; 9.56 9.57 - if ( !is_idle_task(p->domain) ) 9.58 + if ( !is_idle_domain(p->domain) ) 9.59 { 9.60 
memcpy(&p->arch.guest_context.user_regs, 9.61 stack_regs, 9.62 @@ -714,7 +713,7 @@ static void __context_switch(void) 9.63 save_segments(p); 9.64 } 9.65 9.66 - if ( !is_idle_task(n->domain) ) 9.67 + if ( !is_idle_domain(n->domain) ) 9.68 { 9.69 memcpy(stack_regs, 9.70 &n->arch.guest_context.user_regs, 9.71 @@ -767,7 +766,8 @@ void context_switch(struct vcpu *prev, s 9.72 9.73 set_current(next); 9.74 9.75 - if ( (percpu_ctxt[cpu].curr_vcpu != next) && !is_idle_task(next->domain) ) 9.76 + if ( (percpu_ctxt[cpu].curr_vcpu != next) && 9.77 + !is_idle_domain(next->domain) ) 9.78 { 9.79 __context_switch(); 9.80 percpu_ctxt[cpu].context_not_finalised = 1;
10.1 --- a/xen/arch/x86/idle0_task.c Thu Jan 05 12:19:12 2006 +0100 10.2 +++ b/xen/arch/x86/idle0_task.c Fri Jan 06 12:25:47 2006 +0100 10.3 @@ -11,6 +11,7 @@ struct domain idle0_domain = { 10.4 10.5 struct vcpu idle0_vcpu = { 10.6 processor: 0, 10.7 + cpu_affinity:CPU_MASK_CPU0, 10.8 domain: &idle0_domain 10.9 }; 10.10
11.1 --- a/xen/arch/x86/setup.c Thu Jan 05 12:19:12 2006 +0100 11.2 +++ b/xen/arch/x86/setup.c Fri Jan 06 12:25:47 2006 +0100 11.3 @@ -92,7 +92,7 @@ unsigned long mmu_cr4_features = X86_CR4 11.4 #endif 11.5 EXPORT_SYMBOL(mmu_cr4_features); 11.6 11.7 -struct vcpu *idle_task[NR_CPUS] = { &idle0_vcpu }; 11.8 +struct vcpu *idle_domain[NR_CPUS] = { &idle0_vcpu }; 11.9 11.10 int acpi_disabled; 11.11
12.1 --- a/xen/arch/x86/smpboot.c Thu Jan 05 12:19:12 2006 +0100 12.2 +++ b/xen/arch/x86/smpboot.c Fri Jan 06 12:25:47 2006 +0100 12.3 @@ -435,7 +435,7 @@ void __init start_secondary(void *unused 12.4 12.5 extern void percpu_traps_init(void); 12.6 12.7 - set_current(idle_task[cpu]); 12.8 + set_current(idle_domain[cpu]); 12.9 set_processor_id(cpu); 12.10 12.11 percpu_traps_init(); 12.12 @@ -773,7 +773,7 @@ static int __init do_boot_cpu(int apicid 12.13 if ( (idle = do_createdomain(IDLE_DOMAIN_ID, cpu)) == NULL ) 12.14 panic("failed 'createdomain' for CPU %d", cpu); 12.15 12.16 - v = idle_task[cpu] = idle->vcpu[0]; 12.17 + v = idle_domain[cpu] = idle->vcpu[0]; 12.18 12.19 set_bit(_DOMF_idle_domain, &idle->domain_flags); 12.20
13.1 --- a/xen/common/dom0_ops.c Thu Jan 05 12:19:12 2006 +0100 13.2 +++ b/xen/common/dom0_ops.c Fri Jan 06 12:25:47 2006 +0100 13.3 @@ -319,22 +319,14 @@ long do_dom0_op(dom0_op_t *u_dom0_op) 13.4 break; 13.5 } 13.6 13.7 - v->cpumap = op->u.pincpudomain.cpumap; 13.8 + memcpy(cpus_addr(v->cpu_affinity), 13.9 + &op->u.pincpudomain.cpumap, 13.10 + min((int)BITS_TO_LONGS(NR_CPUS), 13.11 + (int)sizeof(op->u.pincpudomain.cpumap))); 13.12 13.13 - if ( v->cpumap == CPUMAP_RUNANYWHERE ) 13.14 - { 13.15 - clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags); 13.16 - } 13.17 - else 13.18 - { 13.19 - /* pick a new cpu from the usable map */ 13.20 - int new_cpu; 13.21 - new_cpu = (int)find_first_set_bit(v->cpumap) % num_online_cpus(); 13.22 - vcpu_pause(v); 13.23 - vcpu_migrate_cpu(v, new_cpu); 13.24 - set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags); 13.25 - vcpu_unpause(v); 13.26 - } 13.27 + vcpu_pause(v); 13.28 + vcpu_migrate_cpu(v, first_cpu(v->cpu_affinity)); 13.29 + vcpu_unpause(v); 13.30 13.31 put_domain(d); 13.32 } 13.33 @@ -506,7 +498,11 @@ long do_dom0_op(dom0_op_t *u_dom0_op) 13.34 op->u.getvcpuinfo.running = test_bit(_VCPUF_running, &v->vcpu_flags); 13.35 op->u.getvcpuinfo.cpu_time = v->cpu_time; 13.36 op->u.getvcpuinfo.cpu = v->processor; 13.37 - op->u.getvcpuinfo.cpumap = v->cpumap; 13.38 + op->u.getvcpuinfo.cpumap = 0; 13.39 + memcpy(&op->u.getvcpuinfo.cpumap, 13.40 + cpus_addr(v->cpu_affinity), 13.41 + min((int)BITS_TO_LONGS(NR_CPUS), 13.42 + (int)sizeof(op->u.getvcpuinfo.cpumap))); 13.43 ret = 0; 13.44 13.45 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
14.1 --- a/xen/common/domain.c Thu Jan 05 12:19:12 2006 +0100 14.2 +++ b/xen/common/domain.c Fri Jan 06 12:25:47 2006 +0100 14.3 @@ -51,7 +51,7 @@ struct domain *do_createdomain(domid_t d 14.4 else 14.5 set_bit(_DOMF_ctrl_pause, &d->domain_flags); 14.6 14.7 - if ( !is_idle_task(d) && 14.8 + if ( !is_idle_domain(d) && 14.9 ((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) ) 14.10 goto fail1; 14.11 14.12 @@ -68,7 +68,7 @@ struct domain *do_createdomain(domid_t d 14.13 (arch_do_createdomain(v) != 0) ) 14.14 goto fail3; 14.15 14.16 - if ( !is_idle_task(d) ) 14.17 + if ( !is_idle_domain(d) ) 14.18 { 14.19 write_lock(&domlist_lock); 14.20 pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
15.1 --- a/xen/common/sched_bvt.c Thu Jan 05 12:19:12 2006 +0100 15.2 +++ b/xen/common/sched_bvt.c Fri Jan 06 12:25:47 2006 +0100 15.3 @@ -219,7 +219,7 @@ static void bvt_add_task(struct vcpu *v) 15.4 15.5 einf->vcpu = v; 15.6 15.7 - if ( is_idle_task(v->domain) ) 15.8 + if ( is_idle_domain(v->domain) ) 15.9 { 15.10 einf->avt = einf->evt = ~0U; 15.11 BUG_ON(__task_on_runqueue(v)); 15.12 @@ -265,7 +265,7 @@ static void bvt_wake(struct vcpu *v) 15.13 ((einf->evt - curr_evt) / BVT_INFO(curr->domain)->mcu_advance) + 15.14 ctx_allow; 15.15 15.16 - if ( is_idle_task(curr->domain) || (einf->evt <= curr_evt) ) 15.17 + if ( is_idle_domain(curr->domain) || (einf->evt <= curr_evt) ) 15.18 cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ); 15.19 else if ( schedule_data[cpu].s_timer.expires > r_time ) 15.20 set_ac_timer(&schedule_data[cpu].s_timer, r_time); 15.21 @@ -380,7 +380,7 @@ static struct task_slice bvt_do_schedule 15.22 ASSERT(prev_einf != NULL); 15.23 ASSERT(__task_on_runqueue(prev)); 15.24 15.25 - if ( likely(!is_idle_task(prev->domain)) ) 15.26 + if ( likely(!is_idle_domain(prev->domain)) ) 15.27 { 15.28 prev_einf->avt = calc_avt(prev, now); 15.29 prev_einf->evt = calc_evt(prev, prev_einf->avt); 15.30 @@ -471,13 +471,13 @@ static struct task_slice bvt_do_schedule 15.31 } 15.32 15.33 /* work out time for next run through scheduler */ 15.34 - if ( is_idle_task(next->domain) ) 15.35 + if ( is_idle_domain(next->domain) ) 15.36 { 15.37 r_time = ctx_allow; 15.38 goto sched_done; 15.39 } 15.40 15.41 - if ( (next_prime == NULL) || is_idle_task(next_prime->domain) ) 15.42 + if ( (next_prime == NULL) || is_idle_domain(next_prime->domain) ) 15.43 { 15.44 /* We have only one runnable task besides the idle task. */ 15.45 r_time = 10 * ctx_allow; /* RN: random constant */
16.1 --- a/xen/common/sched_sedf.c Thu Jan 05 12:19:12 2006 +0100 16.2 +++ b/xen/common/sched_sedf.c Fri Jan 06 12:25:47 2006 +0100 16.3 @@ -384,7 +384,7 @@ static void sedf_add_task(struct vcpu *d 16.4 INIT_LIST_HEAD(&(inf->extralist[EXTRA_PEN_Q])); 16.5 INIT_LIST_HEAD(&(inf->extralist[EXTRA_UTIL_Q])); 16.6 16.7 - if (!is_idle_task(d->domain)) { 16.8 + if (!is_idle_domain(d->domain)) { 16.9 extraq_check(d); 16.10 } else { 16.11 EDOM_INFO(d)->deadl_abs = 0; 16.12 @@ -711,7 +711,7 @@ static struct task_slice sedf_do_schedul 16.13 struct task_slice ret; 16.14 16.15 /*idle tasks don't need any of the following stuf*/ 16.16 - if (is_idle_task(current->domain)) 16.17 + if (is_idle_domain(current->domain)) 16.18 goto check_waitq; 16.19 16.20 /* create local state of the status of the domain, in order to avoid 16.21 @@ -797,7 +797,7 @@ static struct task_slice sedf_do_schedul 16.22 static void sedf_sleep(struct vcpu *d) { 16.23 PRINT(2,"sedf_sleep was called, domain-id %i.%i\n",d->domain->domain_id, d->vcpu_id); 16.24 16.25 - if (is_idle_task(d->domain)) 16.26 + if (is_idle_domain(d->domain)) 16.27 return; 16.28 16.29 EDOM_INFO(d)->status |= SEDF_ASLEEP; 16.30 @@ -1068,7 +1068,7 @@ static inline void unblock_long_burst(st 16.31 #define DOMAIN_IDLE 4 16.32 static inline int get_run_type(struct vcpu* d) { 16.33 struct sedf_vcpu_info* inf = EDOM_INFO(d); 16.34 - if (is_idle_task(d->domain)) 16.35 + if (is_idle_domain(d->domain)) 16.36 return DOMAIN_IDLE; 16.37 if (inf->status & EXTRA_RUN_PEN) 16.38 return DOMAIN_EXTRA_PEN; 16.39 @@ -1126,7 +1126,7 @@ void sedf_wake(struct vcpu *d) { 16.40 PRINT(3, "sedf_wake was called, domain-id %i.%i\n",d->domain->domain_id, 16.41 d->vcpu_id); 16.42 16.43 - if (unlikely(is_idle_task(d->domain))) 16.44 + if (unlikely(is_idle_domain(d->domain))) 16.45 return; 16.46 16.47 if ( unlikely(__task_on_queue(d)) ) {
17.1 --- a/xen/common/schedule.c Thu Jan 05 12:19:12 2006 +0100 17.2 +++ b/xen/common/schedule.c Fri Jan 06 12:25:47 2006 +0100 17.3 @@ -100,7 +100,9 @@ struct vcpu *alloc_vcpu( 17.4 v->vcpu_id = vcpu_id; 17.5 v->processor = cpu_id; 17.6 atomic_set(&v->pausecnt, 0); 17.7 - v->cpumap = CPUMAP_RUNANYWHERE; 17.8 + 17.9 + v->cpu_affinity = is_idle_domain(d) ? 17.10 + cpumask_of_cpu(cpu_id) : CPU_MASK_ALL; 17.11 17.12 d->vcpu[vcpu_id] = v; 17.13 17.14 @@ -143,7 +145,7 @@ void sched_add_domain(struct vcpu *v) 17.15 /* Initialise the per-domain timer. */ 17.16 init_ac_timer(&v->timer, dom_timer_fn, v, v->processor); 17.17 17.18 - if ( is_idle_task(d) ) 17.19 + if ( is_idle_domain(d) ) 17.20 { 17.21 schedule_data[v->processor].curr = v; 17.22 schedule_data[v->processor].idle = v; 17.23 @@ -428,7 +430,7 @@ static void __enter_scheduler(void) 17.24 prev->wokenup = NOW(); 17.25 17.26 #if defined(WAKE_HISTO) 17.27 - if ( !is_idle_task(next->domain) && next->wokenup ) 17.28 + if ( !is_idle_domain(next->domain) && next->wokenup ) 17.29 { 17.30 ulong diff = (ulong)(now - next->wokenup); 17.31 diff /= (ulong)MILLISECS(1); 17.32 @@ -438,7 +440,7 @@ static void __enter_scheduler(void) 17.33 next->wokenup = (s_time_t)0; 17.34 #elif defined(BLOCKTIME_HISTO) 17.35 prev->lastdeschd = now; 17.36 - if ( !is_idle_task(next->domain) ) 17.37 + if ( !is_idle_domain(next->domain) ) 17.38 { 17.39 ulong diff = (ulong)((now - next->lastdeschd) / MILLISECS(10)); 17.40 if (diff <= BUCKETS-2) schedule_data[cpu].hist[diff]++; 17.41 @@ -449,7 +451,7 @@ static void __enter_scheduler(void) 17.42 prev->sleep_tick = schedule_data[cpu].tick; 17.43 17.44 /* Ensure that the domain has an up-to-date time base. 
*/ 17.45 - if ( !is_idle_task(next->domain) ) 17.46 + if ( !is_idle_domain(next->domain) ) 17.47 { 17.48 update_dom_time(next); 17.49 if ( next->sleep_tick != schedule_data[cpu].tick ) 17.50 @@ -471,7 +473,7 @@ static void __enter_scheduler(void) 17.51 int idle_cpu(int cpu) 17.52 { 17.53 struct vcpu *p = schedule_data[cpu].curr; 17.54 - return p == idle_task[cpu]; 17.55 + return p == idle_domain[cpu]; 17.56 } 17.57 17.58 17.59 @@ -497,7 +499,7 @@ static void t_timer_fn(void *unused) 17.60 17.61 schedule_data[cpu].tick++; 17.62 17.63 - if ( !is_idle_task(v->domain) ) 17.64 + if ( !is_idle_domain(v->domain) ) 17.65 { 17.66 update_dom_time(v); 17.67 send_guest_virq(v, VIRQ_TIMER); 17.68 @@ -531,8 +533,8 @@ void __init scheduler_init(void) 17.69 init_ac_timer(&t_timer[i], t_timer_fn, NULL, i); 17.70 } 17.71 17.72 - schedule_data[0].curr = idle_task[0]; 17.73 - schedule_data[0].idle = idle_task[0]; 17.74 + schedule_data[0].curr = idle_domain[0]; 17.75 + schedule_data[0].idle = idle_domain[0]; 17.76 17.77 for ( i = 0; schedulers[i] != NULL; i++ ) 17.78 { 17.79 @@ -546,10 +548,10 @@ void __init scheduler_init(void) 17.80 17.81 printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name); 17.82 17.83 - rc = SCHED_OP(alloc_task, idle_task[0]); 17.84 + rc = SCHED_OP(alloc_task, idle_domain[0]); 17.85 BUG_ON(rc < 0); 17.86 17.87 - sched_add_domain(idle_task[0]); 17.88 + sched_add_domain(idle_domain[0]); 17.89 } 17.90 17.91 /*
18.1 --- a/xen/include/xen/sched.h Thu Jan 05 12:19:12 2006 +0100 18.2 +++ b/xen/include/xen/sched.h Fri Jan 06 12:25:47 2006 +0100 18.3 @@ -51,8 +51,6 @@ struct evtchn 18.4 int evtchn_init(struct domain *d); 18.5 void evtchn_destroy(struct domain *d); 18.6 18.7 -#define CPUMAP_RUNANYWHERE 0xFFFFFFFF 18.8 - 18.9 struct vcpu 18.10 { 18.11 int vcpu_id; 18.12 @@ -80,7 +78,7 @@ struct vcpu 18.13 18.14 atomic_t pausecnt; 18.15 18.16 - cpumap_t cpumap; /* which cpus this domain can run on */ 18.17 + cpumask_t cpu_affinity; 18.18 18.19 struct arch_vcpu arch; 18.20 }; 18.21 @@ -173,9 +171,9 @@ struct domain_setup_info 18.22 extern struct domain idle0_domain; 18.23 extern struct vcpu idle0_vcpu; 18.24 18.25 -extern struct vcpu *idle_task[NR_CPUS]; 18.26 +extern struct vcpu *idle_domain[NR_CPUS]; 18.27 #define IDLE_DOMAIN_ID (0x7FFFU) 18.28 -#define is_idle_task(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags)) 18.29 +#define is_idle_domain(_d) (test_bit(_DOMF_idle_domain, &(_d)->domain_flags)) 18.30 18.31 struct vcpu *alloc_vcpu( 18.32 struct domain *d, unsigned int vcpu_id, unsigned int cpu_id); 18.33 @@ -364,17 +362,14 @@ extern struct domain *domain_list; 18.34 /* Currently running on a CPU? */ 18.35 #define _VCPUF_running 3 18.36 #define VCPUF_running (1UL<<_VCPUF_running) 18.37 - /* Disables auto-migration between CPUs. */ 18.38 -#define _VCPUF_cpu_pinned 4 18.39 -#define VCPUF_cpu_pinned (1UL<<_VCPUF_cpu_pinned) 18.40 /* Domain migrated between CPUs. */ 18.41 -#define _VCPUF_cpu_migrated 5 18.42 +#define _VCPUF_cpu_migrated 4 18.43 #define VCPUF_cpu_migrated (1UL<<_VCPUF_cpu_migrated) 18.44 /* Initialization completed. */ 18.45 -#define _VCPUF_initialised 6 18.46 +#define _VCPUF_initialised 5 18.47 #define VCPUF_initialised (1UL<<_VCPUF_initialised) 18.48 /* VCPU is not-runnable */ 18.49 -#define _VCPUF_down 7 18.50 +#define _VCPUF_down 6 18.51 #define VCPUF_down (1UL<<_VCPUF_down) 18.52 18.53 /*