ia64/xen-unstable

changeset 9333:760f9149dbaa

Abstract some details of context switching into ctxt_switch_from/to
function hooks. This allows a neater separation between the paravirtual
and HVM code paths.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sun Mar 19 17:10:20 2006 +0100 (2006-03-19)
parents 768936b2800a
children 56a775219c88
files xen/arch/x86/domain.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmx.c xen/include/asm-x86/domain.h xen/include/asm-x86/hvm/hvm.h
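
A minimal, self-contained C sketch of the hook pattern this changeset
introduces, placed here for orientation before the line diff. The field
and hook-function names are taken from the hunks below; the trimmed
struct layout, the stub bodies, and the helper names setup_paravirt_vcpu()
and context_switch_sketch() are illustrative assumptions, not the actual
Xen definitions (the real wiring happens in alloc_vcpu_struct(),
svm_final_setup_guest() and vmx_final_setup_guest()).

    /* Per-vcpu context-switch hooks, as added to struct arch_vcpu
     * (see the xen/include/asm-x86/domain.h hunk below). */
    struct vcpu;

    struct arch_vcpu {
        void (*schedule_tail)(struct vcpu *);
        void (*ctxt_switch_from)(struct vcpu *);
        void (*ctxt_switch_to)(struct vcpu *);
    };

    struct vcpu {
        struct arch_vcpu arch;
    };

    /* Paravirtual guests save their segment registers on the way out and
     * reload the int80 trap gate and kernel stack on the way in; the real
     * bodies call save_segments(), set_int80_direct_trap() and
     * switch_kernel_stack(), elided here. */
    static void paravirt_ctxt_switch_from(struct vcpu *v) { (void)v; }
    static void paravirt_ctxt_switch_to(struct vcpu *v)   { (void)v; }

    /* HVM back ends supply their own pair: VMX saves the shadow-GS base
     * and swaps MSR state; SVM installs empty stubs for now. */
    static void vmx_ctxt_switch_from(struct vcpu *v) { (void)v; }
    static void vmx_ctxt_switch_to(struct vcpu *v)   { (void)v; }

    /* Each vcpu is wired up once at setup time ... */
    static void setup_paravirt_vcpu(struct vcpu *v)
    {
        v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
        v->arch.ctxt_switch_to   = paravirt_ctxt_switch_to;
    }

    /* ... so the scheduler's inner switch no longer branches on
     * hvm_guest().  (The real __context_switch() takes no arguments and
     * finds prev/next itself; they are passed explicitly here only to
     * keep the sketch self-contained.) */
    static void context_switch_sketch(struct vcpu *prev, struct vcpu *next)
    {
        prev->arch.ctxt_switch_from(prev);
        /* ... page tables, debug registers, FPU, etc. ... */
        next->arch.ctxt_switch_to(next);
    }

The payoff is visible in the domain.c and hvm.h hunks: the hvm_guest()
branches in __context_switch() and the #ifdef __x86_64__ hvm_funcs
wrappers (hvm_save_segments, hvm_load_msrs, hvm_restore_msrs) both
disappear in favour of one indirect call on each side of the switch.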
line diff
     1.1 --- a/xen/arch/x86/domain.c	Sun Mar 19 15:17:50 2006 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Sun Mar 19 17:10:20 2006 +0100
     1.3 @@ -51,6 +51,9 @@ struct percpu_ctxt {
     1.4  } __cacheline_aligned;
     1.5  static struct percpu_ctxt percpu_ctxt[NR_CPUS];
     1.6  
     1.7 +static void paravirt_ctxt_switch_from(struct vcpu *v);
     1.8 +static void paravirt_ctxt_switch_to(struct vcpu *v);
     1.9 +
    1.10  static void continue_idle_domain(struct vcpu *v)
    1.11  {
    1.12      reset_stack_and_jump(idle_loop);
    1.13 @@ -226,6 +229,9 @@ struct vcpu *alloc_vcpu_struct(struct do
    1.14          v->arch.schedule_tail = continue_nonidle_domain;
    1.15      }
    1.16  
    1.17 +    v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
    1.18 +    v->arch.ctxt_switch_to   = paravirt_ctxt_switch_to;
    1.19 +
    1.20      v->arch.perdomain_ptes =
    1.21          d->arch.mm_perdomain_pt + (vcpu_id << GDT_LDT_VCPU_SHIFT);
    1.22  
    1.23 @@ -685,21 +691,32 @@ static void save_segments(struct vcpu *v
    1.24      percpu_ctxt[smp_processor_id()].dirty_segment_mask = dirty_segment_mask;
    1.25  }
    1.26  
    1.27 -#define switch_kernel_stack(_n,_c) ((void)0)
    1.28 +#define switch_kernel_stack(v) ((void)0)
    1.29  
    1.30  #elif defined(__i386__)
    1.31  
    1.32  #define load_segments(n) ((void)0)
    1.33  #define save_segments(p) ((void)0)
    1.34  
    1.35 -static inline void switch_kernel_stack(struct vcpu *n, unsigned int cpu)
    1.36 +static inline void switch_kernel_stack(struct vcpu *v)
    1.37  {
    1.38 -    struct tss_struct *tss = &init_tss[cpu];
    1.39 -    tss->esp1 = n->arch.guest_context.kernel_sp;
    1.40 -    tss->ss1  = n->arch.guest_context.kernel_ss;
    1.41 +    struct tss_struct *tss = &init_tss[smp_processor_id()];
    1.42 +    tss->esp1 = v->arch.guest_context.kernel_sp;
    1.43 +    tss->ss1  = v->arch.guest_context.kernel_ss;
    1.44  }
    1.45  
    1.46 -#endif
    1.47 +#endif /* __i386__ */
    1.48 +
    1.49 +static void paravirt_ctxt_switch_from(struct vcpu *v)
    1.50 +{
    1.51 +    save_segments(v);
    1.52 +}
    1.53 +
    1.54 +static void paravirt_ctxt_switch_to(struct vcpu *v)
    1.55 +{
    1.56 +    set_int80_direct_trap(v);
    1.57 +    switch_kernel_stack(v);
    1.58 +}
    1.59  
    1.60  #define loaddebug(_v,_reg) \
    1.61      __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))
    1.62 @@ -720,15 +737,7 @@ static void __context_switch(void)
    1.63                 stack_regs,
    1.64                 CTXT_SWITCH_STACK_BYTES);
    1.65          unlazy_fpu(p);
    1.66 -        if ( !hvm_guest(p) )
    1.67 -        {
    1.68 -            save_segments(p);
    1.69 -        }
    1.70 -        else
    1.71 -        {
    1.72 -            hvm_save_segments(p);
    1.73 -            hvm_load_msrs();
    1.74 -        }
    1.75 +        p->arch.ctxt_switch_from(p);
    1.76      }
    1.77  
    1.78      if ( !is_idle_vcpu(n) )
    1.79 @@ -749,15 +758,7 @@ static void __context_switch(void)
    1.80              loaddebug(&n->arch.guest_context, 7);
    1.81          }
    1.82  
    1.83 -        if ( !hvm_guest(n) )
    1.84 -        {
    1.85 -            set_int80_direct_trap(n);
    1.86 -            switch_kernel_stack(n, cpu);
    1.87 -        }
    1.88 -        else
    1.89 -        {
    1.90 -            hvm_restore_msrs(n);
    1.91 -        }
    1.92 +        n->arch.ctxt_switch_to(n);
    1.93      }
    1.94  
    1.95      if ( p->domain != n->domain )
     2.1 --- a/xen/arch/x86/hvm/svm/svm.c	Sun Mar 19 15:17:50 2006 +0100
     2.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Sun Mar 19 17:10:20 2006 +0100
     2.3 @@ -200,7 +200,8 @@ int svm_initialize_guest_resources(struc
     2.4      return 1;
     2.5  }
     2.6  
     2.7 -void svm_store_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
     2.8 +static void svm_store_cpu_guest_regs(
     2.9 +    struct vcpu *v, struct cpu_user_regs *regs)
    2.10  {
    2.11      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    2.12  
    2.13 @@ -227,24 +228,12 @@ void svm_store_cpu_guest_regs(struct vcp
    2.14  #endif
    2.15  }
    2.16  
    2.17 -void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
    2.18 +static void svm_load_cpu_guest_regs(
    2.19 +    struct vcpu *v, struct cpu_user_regs *regs)
    2.20  {
    2.21      svm_load_cpu_user_regs(v, regs);
    2.22  }
    2.23  
    2.24 -#ifdef __x86_64__
    2.25 -
    2.26 -void svm_save_segments(struct vcpu *v)
    2.27 -{
    2.28 -}
    2.29 -void svm_load_msrs(void)
    2.30 -{
    2.31 -}
    2.32 -void svm_restore_msrs(struct vcpu *v)
    2.33 -{
    2.34 -}
    2.35 -#endif
    2.36 -
    2.37  #define IS_CANO_ADDRESS(add) 1
    2.38  
    2.39  static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
    2.40 @@ -459,12 +448,6 @@ int start_svm(void)
    2.41      hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
    2.42      hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
    2.43  
    2.44 -#ifdef __x86_64__
    2.45 -    hvm_funcs.save_segments = svm_save_segments;
    2.46 -    hvm_funcs.load_msrs = svm_load_msrs;
    2.47 -    hvm_funcs.restore_msrs = svm_restore_msrs;
    2.48 -#endif
    2.49 -
    2.50      hvm_funcs.store_cpu_guest_ctrl_regs = svm_store_cpu_guest_ctrl_regs;
    2.51      hvm_funcs.modify_guest_state = svm_modify_guest_state;
    2.52  
    2.53 @@ -687,9 +670,19 @@ static void arch_svm_do_launch(struct vc
    2.54      reset_stack_and_jump(svm_asm_do_launch);
    2.55  }
    2.56  
    2.57 +static void svm_ctxt_switch_from(struct vcpu *v)
    2.58 +{
    2.59 +}
    2.60 +
    2.61 +static void svm_ctxt_switch_to(struct vcpu *v)
    2.62 +{
    2.63 +}
    2.64 +
    2.65  void svm_final_setup_guest(struct vcpu *v)
    2.66  {
    2.67 -    v->arch.schedule_tail = arch_svm_do_launch;
    2.68 +    v->arch.schedule_tail    = arch_svm_do_launch;
    2.69 +    v->arch.ctxt_switch_from = svm_ctxt_switch_from;
    2.70 +    v->arch.ctxt_switch_to   = svm_ctxt_switch_to;
    2.71  
    2.72      if (v == v->domain->vcpu[0]) 
    2.73      {
     3.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Sun Mar 19 15:17:50 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Sun Mar 19 17:10:20 2006 +0100
     3.3 @@ -50,9 +50,14 @@
     3.4  static unsigned long trace_values[NR_CPUS][4];
     3.5  #define TRACE_VMEXIT(index,value) trace_values[smp_processor_id()][index]=value
     3.6  
     3.7 +static void vmx_ctxt_switch_from(struct vcpu *v);
     3.8 +static void vmx_ctxt_switch_to(struct vcpu *v);
     3.9 +
    3.10  void vmx_final_setup_guest(struct vcpu *v)
    3.11  {
    3.12 -    v->arch.schedule_tail = arch_vmx_do_launch;
    3.13 +    v->arch.schedule_tail    = arch_vmx_do_launch;
    3.14 +    v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
    3.15 +    v->arch.ctxt_switch_to   = vmx_ctxt_switch_to;
    3.16  
    3.17      if ( v->vcpu_id == 0 )
    3.18      {
    3.19 @@ -105,6 +110,7 @@ static void vmx_relinquish_guest_resourc
    3.20  }
    3.21  
    3.22  #ifdef __x86_64__
    3.23 +
    3.24  static struct vmx_msr_state percpu_msr[NR_CPUS];
    3.25  
    3.26  static u32 msr_data_index[VMX_MSR_COUNT] =
    3.27 @@ -113,7 +119,7 @@ static u32 msr_data_index[VMX_MSR_COUNT]
    3.28      MSR_SYSCALL_MASK, MSR_EFER,
    3.29  };
    3.30  
    3.31 -void vmx_save_segments(struct vcpu *v)
    3.32 +static void vmx_save_segments(struct vcpu *v)
    3.33  {
    3.34      rdmsrl(MSR_SHADOW_GS_BASE, v->arch.hvm_vmx.msr_content.shadow_gs);
    3.35  }
    3.36 @@ -124,7 +130,7 @@ void vmx_save_segments(struct vcpu *v)
    3.37   * are not modified once set for generic domains, we don't save them,
    3.38   * but simply reset them to the values set at percpu_traps_init().
    3.39   */
    3.40 -void vmx_load_msrs(void)
    3.41 +static void vmx_load_msrs(void)
    3.42  {
    3.43      struct vmx_msr_state *host_state = &percpu_msr[smp_processor_id()];
    3.44      int i;
    3.45 @@ -302,8 +308,7 @@ static inline int long_mode_do_msr_write
    3.46      return 1;
    3.47  }
    3.48  
    3.49 -void
    3.50 -vmx_restore_msrs(struct vcpu *v)
    3.51 +static void vmx_restore_msrs(struct vcpu *v)
    3.52  {
    3.53      int i = 0;
    3.54      struct vmx_msr_state *guest_state;
    3.55 @@ -323,22 +328,42 @@ vmx_restore_msrs(struct vcpu *v)
    3.56  
    3.57          HVM_DBG_LOG(DBG_LEVEL_2,
    3.58                      "restore guest's index %d msr %lx with %lx\n",
    3.59 -                    i, (unsigned long) msr_data_index[i], (unsigned long) guest_state->msr_items[i]);
    3.60 +                    i, (unsigned long)msr_data_index[i],
    3.61 +                    (unsigned long)guest_state->msr_items[i]);
    3.62          set_bit(i, &host_state->flags);
    3.63          wrmsrl(msr_data_index[i], guest_state->msr_items[i]);
    3.64          clear_bit(i, &guest_flags);
    3.65      }
    3.66  }
    3.67  #else  /* __i386__ */
    3.68 -#define  vmx_save_init_msrs()   ((void)0)
    3.69  
    3.70 -static inline int  long_mode_do_msr_read(struct cpu_user_regs *regs){
    3.71 +#define vmx_save_segments(v)      ((void)0)
    3.72 +#define vmx_load_msrs()           ((void)0)
    3.73 +#define vmx_restore_msrs(v)       ((void)0)
    3.74 +#define vmx_save_init_msrs()      ((void)0)
    3.75 +
    3.76 +static inline int long_mode_do_msr_read(struct cpu_user_regs *regs)
    3.77 +{
    3.78      return 0;
    3.79  }
    3.80 -static inline int  long_mode_do_msr_write(struct cpu_user_regs *regs){
    3.81 +
    3.82 +static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
    3.83 +{
    3.84      return 0;
    3.85  }
    3.86 -#endif
    3.87 +
    3.88 +#endif /* __i386__ */
    3.89 +
    3.90 +static void vmx_ctxt_switch_from(struct vcpu *v)
    3.91 +{
    3.92 +    vmx_save_segments(v);
    3.93 +    vmx_load_msrs();
    3.94 +}
    3.95 +
    3.96 +static void vmx_ctxt_switch_to(struct vcpu *v)
    3.97 +{
    3.98 +    vmx_restore_msrs(v);
    3.99 +}
   3.100  
   3.101  void stop_vmx(void)
   3.102  {
   3.103 @@ -580,12 +605,6 @@ int start_vmx(void)
   3.104      hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
   3.105      hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
   3.106  
   3.107 -#ifdef __x86_64__
   3.108 -    hvm_funcs.save_segments = vmx_save_segments;
   3.109 -    hvm_funcs.load_msrs = vmx_load_msrs;
   3.110 -    hvm_funcs.restore_msrs = vmx_restore_msrs;
   3.111 -#endif
   3.112 -
   3.113      hvm_funcs.store_cpu_guest_ctrl_regs = vmx_store_cpu_guest_ctrl_regs;
   3.114      hvm_funcs.modify_guest_state = vmx_modify_guest_state;
   3.115  
     4.1 --- a/xen/include/asm-x86/domain.h	Sun Mar 19 15:17:50 2006 +0100
     4.2 +++ b/xen/include/asm-x86/domain.h	Sun Mar 19 17:10:20 2006 +0100
     4.3 @@ -124,6 +124,9 @@ struct arch_vcpu
     4.4  
     4.5      void (*schedule_tail) (struct vcpu *);
     4.6  
     4.7 +    void (*ctxt_switch_from) (struct vcpu *);
     4.8 +    void (*ctxt_switch_to) (struct vcpu *);
     4.9 +
    4.10      /* Bounce information for propagating an exception to guest OS. */
    4.11      struct trap_bounce trap_bounce;
    4.12  
     5.1 --- a/xen/include/asm-x86/hvm/hvm.h	Sun Mar 19 15:17:50 2006 +0100
     5.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Sun Mar 19 17:10:20 2006 +0100
     5.3 @@ -41,18 +41,11 @@ struct hvm_function_table {
     5.4      /*
     5.5       * Store and load guest state:
     5.6       * 1) load/store guest register state,
     5.7 -     * 2) load/store segment state (x86_64 only),
     5.8 -     * 3) load/store msr register state (x86_64 only),
     5.9 -     * 4) store guest control register state (used for panic dumps),
    5.10 -     * 5) modify guest state (e.g., set debug flags).
    5.11 +     * 2) store guest control register state (used for panic dumps),
    5.12 +     * 3) modify guest state (e.g., set debug flags).
    5.13       */
    5.14      void (*store_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
    5.15      void (*load_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
    5.16 -#ifdef __x86_64__
    5.17 -    void (*save_segments)(struct vcpu *v);
    5.18 -    void (*load_msrs)(void);
    5.19 -    void (*restore_msrs)(struct vcpu *v);
    5.20 -#endif
    5.21      void (*store_cpu_guest_ctrl_regs)(struct vcpu *v, unsigned long crs[8]);
    5.22      void (*modify_guest_state)(struct vcpu *v);
    5.23  
    5.24 @@ -111,33 +104,6 @@ hvm_load_cpu_guest_regs(struct vcpu *v, 
    5.25      hvm_funcs.load_cpu_guest_regs(v, r);
    5.26  }
    5.27  
    5.28 -#ifdef __x86_64__
    5.29 -static inline void
    5.30 -hvm_save_segments(struct vcpu *v)
    5.31 -{
    5.32 -    if (hvm_funcs.save_segments)
    5.33 -        hvm_funcs.save_segments(v);
    5.34 -}
    5.35 -
    5.36 -static inline void
    5.37 -hvm_load_msrs(void)
    5.38 -{
    5.39 -    if (hvm_funcs.load_msrs)
    5.40 -        hvm_funcs.load_msrs();
    5.41 -}
    5.42 -
    5.43 -static inline void
    5.44 -hvm_restore_msrs(struct vcpu *v)
    5.45 -{
    5.46 -    if (hvm_funcs.restore_msrs)
    5.47 -        hvm_funcs.restore_msrs(v);
    5.48 -}
    5.49 -#else
    5.50 -#define hvm_save_segments(v)    ((void)0)
    5.51 -#define hvm_load_msrs(v)        ((void)0)
    5.52 -#define hvm_restore_msrs(v)     ((void)0)
    5.53 -#endif /* __x86_64__ */
    5.54 -
    5.55  static inline void
    5.56  hvm_store_cpu_guest_ctrl_regs(struct vcpu *v, unsigned long crs[8])
    5.57  {