direct-io.hg
changeset 8509:0aff653824db
Reduce locked critical region in __enter_scheduler(),
changing the context switch interface yet again.
domain_runnable() renamed to vcpu_runnable().
Fix stupid bug resulting in bogus value for
vcpu_dirty_cpumask, which caused vcpu_sync_execstate() to
fail sometimes.
Signed-off-by: Keir Fraser <keir@xensource.com>
author | kaf24@firebug.cl.cam.ac.uk |
---|---|
date | Sat Jan 07 16:53:25 2006 +0100 (2006-01-07) |
parents | d92a68e6faa9 |
children | 83eeb056f7c2 |
files | xen/arch/ia64/xen/process.c xen/arch/ia64/xen/xenmisc.c xen/arch/x86/domain.c xen/common/sched_bvt.c xen/common/sched_sedf.c xen/common/schedule.c xen/include/xen/sched-if.h xen/include/xen/sched.h |
line diff
--- a/xen/arch/ia64/xen/process.c Sat Jan 07 01:31:04 2006 +0000
+++ b/xen/arch/ia64/xen/process.c Sat Jan 07 16:53:25 2006 +0100
@@ -65,24 +65,16 @@ long do_iopl(domid_t domain, unsigned in
 
 extern struct schedule_data schedule_data[NR_CPUS];
 
-void schedule_tail(struct vcpu *next)
+void schedule_tail(struct vcpu *prev)
 {
-    unsigned long rr7;
-    //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
-    //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
+    context_saved(prev);
 
-    // This is necessary because when a new domain is started, our
-    // implementation of context_switch() does not return (switch_to() has
-    // special and peculiar behaviour in this case).
-    context_switch_done();
-
-    /* rr7 will be postponed to last point when resuming back to guest */
-    if(VMX_DOMAIN(current)){
-        vmx_load_all_rr(current);
-    }else{
-        load_region_regs(current);
-        vcpu_load_kernel_regs(current);
-    }
+    if (VMX_DOMAIN(current)) {
+        vmx_load_all_rr(current);
+    } else {
+        load_region_regs(current);
+        vcpu_load_kernel_regs(current);
+    }
 }
 
 void tdpfoo(void) { }
--- a/xen/arch/ia64/xen/xenmisc.c Sat Jan 07 01:31:04 2006 +0000
+++ b/xen/arch/ia64/xen/xenmisc.c Sat Jan 07 16:53:25 2006 +0100
@@ -327,6 +327,8 @@ if (!i--) { printk("+",id); i = 1000000;
     }
     if (vcpu_timer_expired(current)) vcpu_pend_timer(current);
     }
+
+    context_saved(prev);
 }
 
 void continue_running(struct vcpu *same)
--- a/xen/arch/x86/domain.c Sat Jan 07 01:31:04 2006 +0000
+++ b/xen/arch/x86/domain.c Sat Jan 07 16:53:25 2006 +0100
@@ -739,7 +739,7 @@ static void __context_switch(void)
 
     if ( p->domain != n->domain )
         cpu_clear(cpu, p->domain->domain_dirty_cpumask);
-    cpu_clear(cpu, n->vcpu_dirty_cpumask);
+    cpu_clear(cpu, p->vcpu_dirty_cpumask);
 
     percpu_ctxt[cpu].curr_vcpu = n;
 }
@@ -749,17 +749,14 @@ void context_switch(struct vcpu *prev, s
 {
     unsigned int cpu = smp_processor_id();
 
-    ASSERT(!local_irq_is_enabled());
-
     set_current(next);
 
     if ( (percpu_ctxt[cpu].curr_vcpu != next) &&
          !is_idle_domain(next->domain) )
     {
+        local_irq_disable();
         __context_switch();
-
-        context_switch_done();
-        ASSERT(local_irq_is_enabled());
+        local_irq_enable();
 
         if ( VMX_DOMAIN(next) )
         {
@@ -772,10 +769,8 @@ void context_switch(struct vcpu *prev, s
             vmx_load_msrs(next);
         }
     }
-    else
-    {
-        context_switch_done();
-    }
+
+    context_saved(prev);
 
     schedule_tail(next);
     BUG();
--- a/xen/common/sched_bvt.c Sat Jan 07 01:31:04 2006 +0000
+++ b/xen/common/sched_bvt.c Sat Jan 07 16:53:25 2006 +0100
@@ -277,7 +277,7 @@ static void bvt_wake(struct vcpu *v)
 
 static void bvt_sleep(struct vcpu *v)
 {
-    if ( test_bit(_VCPUF_running, &v->vcpu_flags) )
+    if ( schedule_data[v->processor].curr == v )
         cpu_raise_softirq(v->processor, SCHEDULE_SOFTIRQ);
     else if ( __task_on_runqueue(v) )
         __del_from_runqueue(v);
@@ -409,7 +409,7 @@ static struct task_slice bvt_do_schedule
 
         __del_from_runqueue(prev);
 
-        if ( domain_runnable(prev) )
+        if ( vcpu_runnable(prev) )
             __add_to_runqueue_tail(prev);
 
--- a/xen/common/sched_sedf.c Sat Jan 07 01:31:04 2006 +0000
+++ b/xen/common/sched_sedf.c Sat Jan 07 16:53:25 2006 +0100
@@ -782,8 +782,8 @@ static struct task_slice sedf_do_schedul
 
     /* create local state of the status of the domain, in order to avoid
        inconsistent state during scheduling decisions, because data for
-       domain_runnable is not protected by the scheduling lock!*/
-    if ( !domain_runnable(current) )
+       vcpu_runnable is not protected by the scheduling lock!*/
+    if ( !vcpu_runnable(current) )
         inf->status |= SEDF_ASLEEP;
 
     if ( inf->status & SEDF_ASLEEP )
@@ -879,7 +879,7 @@ static void sedf_sleep(struct vcpu *d)
 
     EDOM_INFO(d)->status |= SEDF_ASLEEP;
 
-    if ( test_bit(_VCPUF_running, &d->vcpu_flags) )
+    if ( schedule_data[d->processor].curr == d )
     {
         cpu_raise_softirq(d->processor, SCHEDULE_SOFTIRQ);
     }
--- a/xen/common/schedule.c Sat Jan 07 01:31:04 2006 +0000
+++ b/xen/common/schedule.c Sat Jan 07 16:53:25 2006 +0100
@@ -168,7 +168,7 @@ void vcpu_sleep_nosync(struct vcpu *v)
     unsigned long flags;
 
     spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
-    if ( likely(!domain_runnable(v)) )
+    if ( likely(!vcpu_runnable(v)) )
         SCHED_OP(sleep, v);
     spin_unlock_irqrestore(&schedule_data[v->processor].schedule_lock, flags);
 
@@ -184,7 +184,7 @@ void vcpu_sleep_sync(struct vcpu *v)
      * flag is cleared and the scheduler lock is released. We also check that
      * the domain continues to be unrunnable, in case someone else wakes it.
      */
-    while ( !domain_runnable(v) &&
+    while ( !vcpu_runnable(v) &&
             (test_bit(_VCPUF_running, &v->vcpu_flags) ||
              spin_is_locked(&schedule_data[v->processor].schedule_lock)) )
         cpu_relax();
 
@@ -197,7 +197,7 @@ void vcpu_wake(struct vcpu *v)
     unsigned long flags;
 
     spin_lock_irqsave(&schedule_data[v->processor].schedule_lock, flags);
-    if ( likely(domain_runnable(v)) )
+    if ( likely(vcpu_runnable(v)) )
     {
         SCHED_OP(wake, v);
         v->wokenup = NOW();
@@ -387,20 +387,18 @@ static void __enter_scheduler(void)
 {
     struct vcpu *prev = current, *next = NULL;
     int cpu = smp_processor_id();
-    s_time_t now;
+    s_time_t now = NOW();
     struct task_slice next_slice;
     s32 r_time;     /* time for new dom to run */
 
+    ASSERT(!in_irq());
+
     perfc_incrc(sched_run);
-
+
     spin_lock_irq(&schedule_data[cpu].schedule_lock);
 
-    now = NOW();
-
     rem_ac_timer(&schedule_data[cpu].s_timer);
 
-    ASSERT(!in_irq());
-
     prev->cpu_time += now - prev->lastschd;
 
     /* get policy-specific decision on scheduling... */
@@ -408,7 +406,7 @@ static void __enter_scheduler(void)
 
     r_time = next_slice.time;
     next = next_slice.task;
-
+
     schedule_data[cpu].curr = next;
 
     next->lastschd = now;
@@ -426,11 +424,6 @@ static void __enter_scheduler(void)
     TRACE_3D(TRC_SCHED_SWITCH_INFNEXT,
              next->domain->domain_id, now - next->wokenup, r_time);
 
-    clear_bit(_VCPUF_running, &prev->vcpu_flags);
-    set_bit(_VCPUF_running, &next->vcpu_flags);
-
-    perfc_incrc(sched_ctx);
-
     /*
      * Logic of wokenup field in domain struct:
      * Used to calculate "waiting time", which is the time that a domain
@@ -439,7 +432,7 @@ static void __enter_scheduler(void)
      * also set here then a preempted runnable domain will get a screwed up
      * "waiting time" value next time it is scheduled.
      */
-    prev->wokenup = NOW();
+    prev->wokenup = now;
 
 #if defined(WAKE_HISTO)
     if ( !is_idle_domain(next->domain) && next->wokenup )
@@ -460,6 +453,12 @@ static void __enter_scheduler(void)
     }
 #endif
 
+    set_bit(_VCPUF_running, &next->vcpu_flags);
+
+    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
+
+    perfc_incrc(sched_ctx);
+
     prev->sleep_tick = schedule_data[cpu].tick;
 
     /* Ensure that the domain has an up-to-date time base. */
@@ -474,25 +473,7 @@ static void __enter_scheduler(void)
              prev->domain->domain_id, prev->vcpu_id,
              next->domain->domain_id, next->vcpu_id);
 
-    schedule_data[cpu].context_switch_in_progress = 1;
     context_switch(prev, next);
-    if ( schedule_data[cpu].context_switch_in_progress )
-        context_switch_done();
-}
-
-void context_switch_done(void)
-{
-    unsigned int cpu = smp_processor_id();
-    ASSERT(schedule_data[cpu].context_switch_in_progress);
-    spin_unlock_irq(&schedule_data[cpu].schedule_lock);
-    schedule_data[cpu].context_switch_in_progress = 0;
-}
-
-/* No locking needed -- pointer comparison is safe :-) */
-int idle_cpu(int cpu)
-{
-    struct vcpu *p = schedule_data[cpu].curr;
-    return p == idle_domain[cpu];
 }
 
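Taken together, the __enter_scheduler() changes shrink the region covered by the per-CPU schedule_lock to the scheduling decision itself: next is marked running and the lock is dropped before context_switch() runs, while prev's running flag is now cleared later by context_saved(). The following is a condensed sketch of the resulting flow, not the literal function body: the prev == next short-circuit, timer, tracing and accounting code are elided, and the policy callback is paraphrased as SCHED_OP(do_schedule, now).

    static void __enter_scheduler(void)
    {
        struct vcpu *prev = current, *next = NULL;
        int cpu = smp_processor_id();
        s_time_t now = NOW();
        struct task_slice next_slice;

        ASSERT(!in_irq());

        spin_lock_irq(&schedule_data[cpu].schedule_lock);

        /* Policy-specific decision, made under the per-CPU scheduler lock. */
        next_slice = SCHED_OP(do_schedule, now);
        next = next_slice.task;
        schedule_data[cpu].curr = next;

        /* Mark next as running before the lock is released... */
        set_bit(_VCPUF_running, &next->vcpu_flags);

        spin_unlock_irq(&schedule_data[cpu].schedule_lock);

        /*
         * ...but prev's flag is cleared only by context_saved(prev), which the
         * architecture's context_switch() invokes once prev's state has been
         * committed to memory.
         */
        context_switch(prev, next);
    }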
--- a/xen/include/xen/sched-if.h Sat Jan 07 01:31:04 2006 +0000
+++ b/xen/include/xen/sched-if.h Sat Jan 07 16:53:25 2006 +0100
@@ -18,7 +18,6 @@ struct schedule_data {
     void               *sched_priv;
     struct ac_timer     s_timer;        /* scheduling timer                */
     unsigned long       tick;           /* current periodic 'tick'         */
-    int                 context_switch_in_progress;
 #ifdef BUCKETS
     u32                 hist[BUCKETS];  /* for scheduler latency histogram */
 #endif
--- a/xen/include/xen/sched.h Sat Jan 07 01:31:04 2006 +0000
+++ b/xen/include/xen/sched.h Sat Jan 07 16:53:25 2006 +0100
@@ -271,41 +271,28 @@ void vcpu_sleep_sync(struct vcpu *d);
 extern void sync_vcpu_execstate(struct vcpu *v);
 
 /*
- * Called by the scheduler to switch to another VCPU. On entry, although
- * VCPUF_running is no longer asserted for @prev, its context is still running
- * on the local CPU and is not committed to memory. The local scheduler lock
- * is therefore still held, and interrupts are disabled, because the local CPU
- * is in an inconsistent state.
- *
- * The callee must ensure that the local CPU is no longer running in @prev's
- * context, and that the context is saved to memory, before returning.
- * Alternatively, if implementing lazy context switching, it suffices to ensure
- * that invoking sync_vcpu_execstate() will switch and commit @prev's state.
+ * Called by the scheduler to switch to another VCPU. This function must
+ * call context_saved(@prev) when the local CPU is no longer running in
+ * @prev's context, and that context is saved to memory. Alternatively, if
+ * implementing lazy context switching, it suffices to ensure that invoking
+ * sync_vcpu_execstate() will switch and commit @prev's state.
 */
 extern void context_switch(
     struct vcpu *prev,
     struct vcpu *next);
 
 /*
- * If context_switch() does not return to the caller, or you need to perform
- * some aspects of state restoration with interrupts enabled, then you must
- * call context_switch_done() at a suitable safe point.
- *
- * As when returning from context_switch(), the caller must ensure that the
- * local CPU is no longer running in the previous VCPU's context, and that the
- * context is saved to memory. Alternatively, if implementing lazy context
- * switching, ensure that invoking sync_vcpu_execstate() will switch and
- * commit the previous VCPU's state.
+ * As described above, context_switch() must call this function when the
+ * local CPU is no longer running in @prev's context, and @prev's context is
+ * saved to memory. Alternatively, if implementing lazy context switching,
+ * ensure that invoking sync_vcpu_execstate() will switch and commit @prev.
 */
-extern void context_switch_done(void);
+#define context_saved(prev) (clear_bit(_VCPUF_running, &(prev)->vcpu_flags))
 
 /* Called by the scheduler to continue running the current VCPU. */
 extern void continue_running(
     struct vcpu *same);
 
-/* Is CPU 'cpu' idle right now? */
-int idle_cpu(int cpu);
-
 void startup_cpu_idle_loop(void);
 
 unsigned long __hypercall_create_continuation(
@@ -400,7 +387,7 @@ extern struct domain *domain_list;
 #define DOMF_debugging (1UL<<_DOMF_debugging)
 
 
-static inline int domain_runnable(struct vcpu *v)
+static inline int vcpu_runnable(struct vcpu *v)
 {
     return ( (atomic_read(&v->pausecnt) == 0) &&
              !(v->vcpu_flags & (VCPUF_blocked|VCPUF_down)) &&
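On the arch side, the contract documented above reduces to the shape below. This is a condensed sketch based on the x86 context_switch() in this changeset; the lazy-switch check against percpu_ctxt[cpu].curr_vcpu and the VMX/segment restore paths are elided, so it is illustrative rather than a drop-in implementation.

    void context_switch(struct vcpu *prev, struct vcpu *next)
    {
        set_current(next);

        /* Commit prev's register state to memory with interrupts disabled. */
        local_irq_disable();
        __context_switch();
        local_irq_enable();

        /*
         * Only now is it safe to clear _VCPUF_running for prev: its state has
         * been saved, which is exactly what vcpu_sleep_sync() waits for.
         */
        context_saved(prev);

        schedule_tail(next);  /* does not return to this stack */
        BUG();
    }

The ia64 hunks follow the same pattern, calling context_saved(prev) from its context_switch()/schedule_tail() paths once the previous context is out of the way.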