ia64/xen-unstable

changeset 16790:bba0419a05f1

[IA64] domheap: Clean up of context switch code

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 0c2dc9424a68
children 4fbde3a39909
files xen/arch/ia64/vmx/vmx_init.c xen/arch/ia64/xen/domain.c
line diff
     1.1 --- a/xen/arch/ia64/vmx/vmx_init.c	Thu Jan 17 12:05:43 2008 -0700
     1.2 +++ b/xen/arch/ia64/vmx/vmx_init.c	Thu Jan 17 12:05:43 2008 -0700
     1.3 @@ -335,6 +335,10 @@ vmx_load_state(struct vcpu *v)
     1.4  	u64 status;
     1.5  
     1.6  	BUG_ON(v != current);
     1.7 +
     1.8 +	vmx_load_all_rr(v);
     1.9 +
     1.10 +	/* vmx_load_all_rr() pins down v->arch.privregs with both dtr/itr */
    1.11  	status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
    1.12  	if (status != PAL_STATUS_SUCCESS){
    1.13  		panic_domain(vcpu_regs(v),"Restore vp status failed\n");
    1.14 @@ -350,6 +354,8 @@ vmx_load_state(struct vcpu *v)
    1.15  	ia64_set_kr(7, v->arch.arch_vmx.vkr[7]);
    1.16  	/* Guest vTLB is not required to be switched explicitly, since
    1.17  	 * anchored in vcpu */
    1.18 +
    1.19 +	migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
    1.20  }
    1.21  
    1.22  static int
    1.23 @@ -602,9 +608,7 @@ void vmx_do_resume(struct vcpu *v)
    1.24  {
    1.25  	ioreq_t *p;
    1.26  
    1.27 -	vmx_load_all_rr(v);
    1.28  	vmx_load_state(v);
    1.29 -	migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
    1.30  
    1.31  	/* stolen from hvm_do_resume() in arch/x86/hvm/hvm.c */
    1.32  	/* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
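
For orientation, here is a condensed sketch of vmx_load_state() as it reads with the hunks above applied. It is illustrative only, not the verbatim source: the kr0-kr7 restores and surrounding declarations are elided, and the identifiers are the ones visible in the diff.

	void vmx_load_state(struct vcpu *v)	/* sketch, not verbatim */
	{
		u64 status;

		BUG_ON(v != current);

		/* Must run first: it pins v->arch.privregs with both dtr and
		 * itr, which ia64_pal_vp_restore() depends on. */
		vmx_load_all_rr(v);

		status = ia64_pal_vp_restore((u64 *)v->arch.privregs, 0);
		if (status != PAL_STATUS_SUCCESS)
			panic_domain(vcpu_regs(v), "Restore vp status failed\n");

		/* ... ia64_set_kr(0..7, v->arch.arch_vmx.vkr[0..7]) elided ... */

		/* The vtm timer now follows the vcpu from here, so
		 * vmx_do_resume() and context_switch() no longer migrate it
		 * themselves. */
		migrate_timer(&v->arch.arch_vmx.vtm.vtm_timer, v->processor);
	}

With this, vmx_do_resume() reduces to a single vmx_load_state(v) call ahead of its ioreq handling, as the last hunk of this file shows.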
     2.1 --- a/xen/arch/ia64/xen/domain.c	Thu Jan 17 12:05:43 2008 -0700
     2.2 +++ b/xen/arch/ia64/xen/domain.c	Thu Jan 17 12:05:43 2008 -0700
     2.3 @@ -78,16 +78,6 @@ DEFINE_PER_CPU(struct vcpu *, fp_owner);
     2.4  
     2.5  #include <xen/sched-if.h>
     2.6  
     2.7 -static void
     2.8 -ia64_disable_vhpt_walker(void)
     2.9 -{
    2.10 -	// disable VHPT. ia64_new_rr7() might cause VHPT
    2.11 -	// fault without this because it flushes dtr[IA64_TR_VHPT]
    2.12 -	// (VHPT_SIZE_LOG2 << 2) is just for avoid
    2.13 -	// Reserved Register/Field fault.
    2.14 -	ia64_set_pta(VHPT_SIZE_LOG2 << 2);
    2.15 -}
    2.16 -
    2.17  static void flush_vtlb_for_context_switch(struct vcpu* prev, struct vcpu* next)
    2.18  {
    2.19  	int cpu = smp_processor_id();
    2.20 @@ -165,6 +155,21 @@ static void flush_cache_for_context_swit
    2.21  	}
    2.22  }
    2.23  
    2.24 +static void set_current_psr_i_addr(struct vcpu* v)
    2.25 +{
    2.26 +	__ia64_per_cpu_var(current_psr_i_addr) =
    2.27 +		(uint8_t*)(v->domain->arch.shared_info_va +
    2.28 +			   INT_ENABLE_OFFSET(v));
    2.29 +	__ia64_per_cpu_var(current_psr_ic_addr) = (int *)
    2.30 +		(v->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
    2.31 +}
    2.32 +
    2.33 +static void clear_current_psr_i_addr(void)
    2.34 +{
    2.35 +	__ia64_per_cpu_var(current_psr_i_addr) = NULL;
    2.36 +	__ia64_per_cpu_var(current_psr_ic_addr) = NULL;
    2.37 +}
    2.38 +
    2.39  static void lazy_fp_switch(struct vcpu *prev, struct vcpu *next)
    2.40  {
    2.41  	/*
    2.42 @@ -196,26 +201,28 @@ static void lazy_fp_switch(struct vcpu *
    2.43  	}
    2.44  }
    2.45  
    2.46 +static void load_state(struct vcpu *v)
    2.47 +{
    2.48 +	load_region_regs(v);
    2.49 +	ia64_set_pta(vcpu_pta(v));
    2.50 +	vcpu_load_kernel_regs(v);
    2.51 +	if (vcpu_pkr_in_use(v))
    2.52 +		vcpu_pkr_load_regs(v);
    2.53 +	set_current_psr_i_addr(v);
    2.54 +}
    2.55 +
    2.56  void schedule_tail(struct vcpu *prev)
    2.57  {
    2.58  	extern char ia64_ivt;
    2.59  
    2.60  	context_saved(prev);
    2.61 -	ia64_disable_vhpt_walker();
    2.62  
    2.63  	if (VMX_DOMAIN(current))
    2.64  		vmx_do_resume(current);
    2.65  	else {
    2.66  		if (VMX_DOMAIN(prev))
    2.67  			ia64_set_iva(&ia64_ivt);
    2.68 -		load_region_regs(current);
    2.69 -		ia64_set_pta(vcpu_pta(current));
    2.70 -		vcpu_load_kernel_regs(current);
    2.71 -		__ia64_per_cpu_var(current_psr_i_addr) =
    2.72 -			(uint8_t*)(current->domain->arch.shared_info_va +
    2.73 -				   INT_ENABLE_OFFSET(current));
    2.74 -		__ia64_per_cpu_var(current_psr_ic_addr) = (int *)
    2.75 -		  (current->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
    2.76 +		load_state(current);
    2.77  		migrate_timer(&current->arch.hlt_timer, current->processor);
    2.78  	}
    2.79  	flush_vtlb_for_context_switch(prev, current);
    2.80 @@ -242,7 +249,6 @@ void context_switch(struct vcpu *prev, s
    2.81          }
    2.82      }
    2.83  
    2.84 -    ia64_disable_vhpt_walker();
    2.85      lazy_fp_switch(prev, current);
    2.86  
    2.87      if (prev->arch.dbg_used || next->arch.dbg_used) {
    2.88 @@ -253,37 +259,31 @@ void context_switch(struct vcpu *prev, s
    2.89          ia64_load_debug_regs(next->arch.dbr);
    2.90      }
    2.91      
    2.92 +    /*
    2.93 +     * disable VHPT walker.
    2.94 +     * ia64_switch_to() might cause VHPT fault because it flushes
    2.95 +     * dtr[IA64_TR_VHPT] and reinsert the mapping with dtr[IA64_TR_STACK].
    2.96 +     * (VHPT_SIZE_LOG2 << 2) is just for avoiding
    2.97 +     * Reserved Register/Field fault.
    2.98 +     */
    2.99 +    ia64_set_pta(VHPT_SIZE_LOG2 << 2);
   2.100      prev = ia64_switch_to(next);
   2.101  
   2.102      /* Note: ia64_switch_to does not return here at vcpu initialization.  */
   2.103  
   2.104      if (VMX_DOMAIN(current)) {
   2.105 -        vmx_load_all_rr(current);
   2.106          vmx_load_state(current);
   2.107 -        migrate_timer(&current->arch.arch_vmx.vtm.vtm_timer,
   2.108 -                      current->processor);
   2.109      } else {
   2.110 -        struct domain *nd;
   2.111          extern char ia64_ivt;
   2.112  
   2.113          if (VMX_DOMAIN(prev))
   2.114              ia64_set_iva(&ia64_ivt);
   2.115  
   2.116 -        nd = current->domain;
   2.117 -        if (!is_idle_domain(nd)) {
   2.118 -            load_region_regs(current);
   2.119 -            ia64_set_pta(vcpu_pta(current));
   2.120 -            vcpu_load_kernel_regs(current);
   2.121 -            if (vcpu_pkr_in_use(current))
   2.122 -                vcpu_pkr_load_regs(current);
   2.123 +        if (!is_idle_vcpu(current)) {
   2.124 +            load_state(current);
   2.125              vcpu_set_next_timer(current);
   2.126              if (vcpu_timer_expired(current))
   2.127                  vcpu_pend_timer(current);
   2.128 -	    __ia64_per_cpu_var(current_psr_i_addr) =
   2.129 -		    (uint8_t*)(nd->arch.shared_info_va +
   2.130 -			       INT_ENABLE_OFFSET(current));
   2.131 -            __ia64_per_cpu_var(current_psr_ic_addr) =
   2.132 -                (int *)(nd->arch.shared_info_va + XSI_PSR_IC_OFS);
   2.133              /* steal time accounting */
   2.134              if (!guest_handle_is_null(runstate_guest(current)))
   2.135                  __copy_to_guest(runstate_guest(current), &current->runstate, 1);
   2.136 @@ -292,8 +292,7 @@ void context_switch(struct vcpu *prev, s
   2.137               * walker. Then all accesses happen within idle context will
   2.138               * be handled by TR mapping and identity mapping.
   2.139               */
   2.140 -            __ia64_per_cpu_var(current_psr_i_addr) = NULL;
   2.141 -            __ia64_per_cpu_var(current_psr_ic_addr) = NULL;
   2.142 +	     clear_current_psr_i_addr();
   2.143          }
   2.144      }
   2.145      local_irq_restore(spsr);
   2.146 @@ -1710,12 +1709,10 @@ domain_set_shared_info_va (unsigned long
   2.147  
   2.148  	VCPU(v, interrupt_mask_addr) = (unsigned char *)va +
   2.149  	                               INT_ENABLE_OFFSET(v);
   2.150 -
   2.151 -	__ia64_per_cpu_var(current_psr_i_addr) =
   2.152 -		(uint8_t*)(va + INT_ENABLE_OFFSET(current));
   2.153 -	__ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS);
   2.154 +	set_current_psr_i_addr(v);
   2.155  
   2.156  	/* Remap the shared pages.  */
   2.157 +	BUG_ON(VMX_DOMAIN(v));
   2.158  	rc = !set_one_rr(7UL << 61, PSCB(v,rrs[7]));
   2.159  	BUG_ON(rc);
   2.160
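
For reference, a condensed sketch of the post-ia64_switch_to() half of context_switch() with this patch applied, showing where the new load_state()/clear_current_psr_i_addr() helpers and the open-coded VHPT-walker disable slot in. Illustrative only: locking, lazy fp/debug handling, ia64_set_iva(), timer and steal-time accounting are elided.

	    /* Disable the VHPT walker before the stack switch: ia64_switch_to()
	     * flushes dtr[IA64_TR_VHPT], so a walker access in between would
	     * fault.  (VHPT_SIZE_LOG2 << 2) merely avoids a Reserved
	     * Register/Field fault. */
	    ia64_set_pta(VHPT_SIZE_LOG2 << 2);
	    prev = ia64_switch_to(next);

	    if (VMX_DOMAIN(current)) {
	        /* Region registers and the vtm timer are handled inside now. */
	        vmx_load_state(current);
	    } else if (!is_idle_vcpu(current)) {
	        /* rr, pta, kernel regs, pkr, plus the per-cpu psr_i/psr_ic
	         * addresses via set_current_psr_i_addr(). */
	        load_state(current);
	    } else {
	        /* Idle vcpu: keep the walker disabled and clear the per-cpu
	         * psr_i/psr_ic pointers. */
	        clear_current_psr_i_addr();
	    }

schedule_tail() reuses the same load_state() helper on its non-VMX path, and the old ia64_disable_vhpt_walker() wrapper is gone entirely.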