direct-io.hg

changeset 6453:3bbc9384be3f

Refactor sync_lazy_execstate_cpu() into the more sensible
sync_vcpu_execstate().

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Aug 26 17:57:09 2005 +0000 (2005-08-26)
parents 37e9c9cd6c14
children f3e63b95c07a dfaf788ab18c
files xen/arch/ia64/xenmisc.c xen/arch/x86/domain.c xen/common/schedule.c xen/include/xen/sched.h
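At the interface level the change replaces the CPU-indexed call with a VCPU-indexed one. The sketch below restates the two prototypes from the xen/include/xen/sched.h hunk for quick reference; the comments paraphrase the diff and are not part of the committed header.

    /* Old: the caller names a physical CPU and must first check that the
     * domain actually has state loaded there (see the check removed from
     * vcpu_sleep_sync() in the schedule.c hunk). */
    extern void sync_lazy_execstate_cpu(unsigned int cpu);

    /* New: the caller passes the VCPU itself; the cpumask check moves
     * into the callee. */
    extern void sync_vcpu_execstate(struct vcpu *v);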
line diff
     1.1 --- a/xen/arch/ia64/xenmisc.c	Fri Aug 26 17:42:34 2005 +0000
     1.2 +++ b/xen/arch/ia64/xenmisc.c	Fri Aug 26 17:57:09 2005 +0000
     1.3 @@ -58,7 +58,7 @@ platform_is_hp_ski(void)
     1.4  
     1.5  /* calls in xen/common code that are unused on ia64 */
     1.6  
     1.7 -void sync_lazy_execstate_cpu(unsigned int cpu) {}
     1.8 +void sync_vcpu_execstate(struct vcpu *v) {}
     1.9  
    1.10  #ifdef CONFIG_VTI
    1.11  int grant_table_create(struct domain *d) { return 0; }
     2.1 --- a/xen/arch/x86/domain.c	Fri Aug 26 17:42:34 2005 +0000
     2.2 +++ b/xen/arch/x86/domain.c	Fri Aug 26 17:57:09 2005 +0000
     2.3 @@ -885,8 +885,13 @@ int __sync_lazy_execstate(void)
     2.4      return switch_required;
     2.5  }
     2.6  
     2.7 -void sync_lazy_execstate_cpu(unsigned int cpu)
     2.8 +void sync_vcpu_execstate(struct vcpu *v)
     2.9  {
    2.10 +    unsigned int cpu = v->processor;
    2.11 +
    2.12 +    if ( !cpu_isset(cpu, v->domain->cpumask) )
    2.13 +        return;
    2.14 +
    2.15      if ( cpu == smp_processor_id() )
    2.16      {
    2.17          (void)__sync_lazy_execstate();
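The hunk above stops inside the function body. As a minimal sketch, the whole of the x86 sync_vcpu_execstate() plausibly reads as below after this change: the new cpumask guard and the local-CPU branch are taken from the hunk, while the remote-CPU branch is pre-existing code outside the hunk's context and is only assumed here (shown as a comment, not as real code) to force the remote CPU through __sync_lazy_execstate(), e.g. via an IPI.

    void sync_vcpu_execstate(struct vcpu *v)
    {
        unsigned int cpu = v->processor;

        /* Guard folded in from the old call site: nothing to do if the
         * domain has no state loaded on v's CPU. */
        if ( !cpu_isset(cpu, v->domain->cpumask) )
            return;

        if ( cpu == smp_processor_id() )
        {
            /* Local case: flush our own lazy execstate directly. */
            (void)__sync_lazy_execstate();
        }
        else
        {
            /* Remote case: unchanged code truncated by the hunk; assumed
             * to make the remote CPU run __sync_lazy_execstate(). */
        }
    }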
     3.1 --- a/xen/common/schedule.c	Fri Aug 26 17:42:34 2005 +0000
     3.2 +++ b/xen/common/schedule.c	Fri Aug 26 17:57:09 2005 +0000
     3.3 @@ -218,9 +218,7 @@ void vcpu_sleep_sync(struct vcpu *v)
     3.4              && spin_is_locked(&schedule_data[v->processor].schedule_lock) )
     3.5          cpu_relax();
     3.6  
     3.7 -    /* Counteract lazy context switching. */
     3.8 -    if ( cpu_isset(v->processor, v->domain->cpumask) )
     3.9 -        sync_lazy_execstate_cpu(v->processor);
    3.10 +    sync_vcpu_execstate(v);
    3.11  }
    3.12  
    3.13  void vcpu_wake(struct vcpu *v)
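For the call site, the effect of the schedule.c hunk is that the cpumask check disappears from vcpu_sleep_sync(); restated side by side:

    /* Before: the caller counteracted lazy context switching itself. */
    if ( cpu_isset(v->processor, v->domain->cpumask) )
        sync_lazy_execstate_cpu(v->processor);

    /* After: the check lives in the callee, so the call site is one line. */
    sync_vcpu_execstate(v);

Folding the check into the callee means any future caller gets the guard for free instead of having to repeat it at every call site.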
     4.1 --- a/xen/include/xen/sched.h	Fri Aug 26 17:42:34 2005 +0000
     4.2 +++ b/xen/include/xen/sched.h	Fri Aug 26 17:57:09 2005 +0000
     4.3 @@ -250,10 +250,11 @@ void vcpu_sleep_nosync(struct vcpu *d);
     4.4  void vcpu_sleep_sync(struct vcpu *d);
     4.5  
     4.6  /*
     4.7 - * Force loading of currently-executing domain state on the specified CPU.
     4.8 - * This is used to counteract lazy state switching where required.
     4.9 + * Force synchronisation of given VCPU's state. If it is currently descheduled,
    4.10 + * this call will ensure that all its state is committed to memory and that
    4.11 + * no CPU is using critical state (e.g., page tables) belonging to the VCPU.
    4.12   */
    4.13 -extern void sync_lazy_execstate_cpu(unsigned int cpu);
    4.14 +extern void sync_vcpu_execstate(struct vcpu *v);
    4.15  
    4.16  /*
    4.17   * Called by the scheduler to switch to another VCPU. On entry, although
    4.18 @@ -265,7 +266,7 @@ extern void sync_lazy_execstate_cpu(unsi
    4.19   * The callee must ensure that the local CPU is no longer running in @prev's
    4.20   * context, and that the context is saved to memory, before returning.
    4.21   * Alternatively, if implementing lazy context switching, it suffices to ensure
    4.22 - * that invoking sync_lazy_execstate() will switch and commit @prev's state.
    4.23 + * that invoking sync_vcpu_execstate() will switch and commit @prev's state.
    4.24   */
    4.25  extern void context_switch(
    4.26      struct vcpu *prev,
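The new comment in sched.h states the contract: once the VCPU is descheduled and sync_vcpu_execstate() has run, no CPU still holds its execution state or critical state such as page tables. A hedged usage sketch follows; the helper name is hypothetical and not part of this changeset.

    /* Hypothetical caller, for illustration only. */
    void quiesce_vcpu(struct vcpu *v)
    {
        /* Deschedule v; as of this changeset vcpu_sleep_sync() itself
         * calls sync_vcpu_execstate(v). */
        vcpu_sleep_sync(v);

        /* From here on, no CPU is running in v's context or using its
         * page tables, so per-VCPU state can be inspected or torn down
         * safely. */
    }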