ia64/xen-unstable

changeset 15147:b4cc3fbcdf25

[IA64] Reimplement vcpu_get_psr.

vcpu_get_psr() now builds and returns the full virtualized psr; callers that
need the old masked result use the new vcpu_get_psr_masked() instead.

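For reference, a minimal standalone sketch of the masking now done by
vcpu_get_psr_masked() (not Xen code; the MASK(pos, len) definition below is
an assumed stand-in for the Xen macro): only psr bits 0-31 plus bits 35-36
(psr.mc and psr.it) of the virtualized psr are handed back to the caller.

    #include <stdio.h>
    #include <inttypes.h>

    /* Assumed helper: a mask of 'len' bits starting at bit 'pos'. */
    #define MASK(pos, len)  (((~UINT64_C(0)) >> (64 - (len))) << (pos))

    int main(void)
    {
        uint64_t vpsr = ~UINT64_C(0);   /* pretend every psr bit is set */
        uint64_t masked = vpsr & (MASK(0, 32) | MASK(35, 2));

        /* Prints 00000018ffffffff: cpl, is and everything above psr.it
         * are stripped before the value reaches the caller. */
        printf("%016" PRIx64 "\n", masked);
        return 0;
    }
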
Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Thu May 31 10:48:48 2007 -0600 (2007-05-31)
parents 919d72f6dc45
children 409f9849fa68
files xen/arch/ia64/xen/privop.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/vcpu.h
line diff
     1.1 --- a/xen/arch/ia64/xen/privop.c	Thu May 31 09:45:46 2007 -0600
     1.2 +++ b/xen/arch/ia64/xen/privop.c	Thu May 31 10:48:48 2007 -0600
     1.3 @@ -524,7 +524,7 @@ static IA64FAULT priv_mov_from_psr(VCPU 
     1.4  	u64 val;
     1.5  	IA64FAULT fault;
     1.6  
     1.7 -	fault = vcpu_get_psr(vcpu, &val);
     1.8 +	fault = vcpu_get_psr_masked(vcpu, &val);
     1.9  	if (fault == IA64_NO_FAULT)
    1.10  		return vcpu_set_gr(vcpu, tgt, val, 0);
    1.11  	else
    1.12 @@ -883,7 +883,7 @@ int ia64_hyperprivop(unsigned long iim, 
    1.13  		vcpu_reset_psr_sm(v, IA64_PSR_BE);
    1.14  		return 1;
    1.15  	case HYPERPRIVOP_GET_PSR:
    1.16 -		vcpu_get_psr(v, &val);
    1.17 +		vcpu_get_psr_masked(v, &val);
    1.18  		regs->r8 = val;
    1.19  		return 1;
    1.20  	}
     2.1 --- a/xen/arch/ia64/xen/vcpu.c	Thu May 31 09:45:46 2007 -0600
     2.2 +++ b/xen/arch/ia64/xen/vcpu.c	Thu May 31 10:48:48 2007 -0600
     2.3 @@ -448,43 +448,47 @@ IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u6
     2.4  	return IA64_NO_FAULT;
     2.5  }
     2.6  
     2.7 -IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval)
     2.8 +u64 vcpu_get_psr(VCPU * vcpu)
     2.9  {
    2.10 -	REGS *regs = vcpu_regs(vcpu);
    2.11 -	struct ia64_psr newpsr;
     2.12 +	REGS *regs = vcpu_regs(vcpu);
    2.13 +	PSR newpsr;
    2.14 +	PSR ipsr;
    2.15 +
    2.16 +	ipsr.i64 = regs->cr_ipsr;
    2.17 +
    2.18 +	/* Copy non-virtualized bits.  */
    2.19 +	newpsr.i64 = ipsr.i64 & (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC |
     2.20 +				 IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_PK |
     2.21 +				 IA64_PSR_DFL | IA64_PSR_SP | IA64_PSR_DB |
    2.22 +				 IA64_PSR_LP | IA64_PSR_TB | IA64_PSR_ID |
    2.23 +				 IA64_PSR_DA | IA64_PSR_DD | IA64_PSR_SS |
    2.24 +				 IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA);
    2.25  
    2.26 -	newpsr = *(struct ia64_psr *)&regs->cr_ipsr;
    2.27 -	if (!vcpu->vcpu_info->evtchn_upcall_mask)
    2.28 -		newpsr.i = 1;
    2.29 -	else
    2.30 -		newpsr.i = 0;
    2.31 -	if (PSCB(vcpu, interrupt_collection_enabled))
    2.32 -		newpsr.ic = 1;
    2.33 -	else
    2.34 -		newpsr.ic = 0;
    2.35 -	if (PSCB(vcpu, metaphysical_mode))
    2.36 -		newpsr.dt = 0;
    2.37 -	else
    2.38 -		newpsr.dt = 1;
    2.39 -	if (PSCB(vcpu, vpsr_pp))
    2.40 -		newpsr.pp = 1;
    2.41 -	else
    2.42 -		newpsr.pp = 0;
    2.43 -	newpsr.dfh = PSCB(vcpu, vpsr_dfh);
    2.44 +	/* Bits forced to 1 (psr.si and psr.is are forced to 0)  */
    2.45 +	newpsr.i64 |= IA64_PSR_DI;
    2.46 +
    2.47 +	/* System mask.  */
    2.48 +	newpsr.ia64_psr.ic = PSCB(vcpu, interrupt_collection_enabled);
    2.49 +	newpsr.ia64_psr.i = !vcpu->vcpu_info->evtchn_upcall_mask;
    2.50  
    2.51 -	*pval = *(unsigned long *)&newpsr;
    2.52 -	*pval &= (MASK(0, 32) | MASK(35, 2));
    2.53 -	return IA64_NO_FAULT;
    2.54 +	if (!PSCB(vcpu, metaphysical_mode))
    2.55 +		newpsr.i64 |= IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT;
    2.56 +	newpsr.ia64_psr.dfh = PSCB(vcpu, vpsr_dfh);
    2.57 +	newpsr.ia64_psr.pp = PSCB(vcpu, vpsr_pp);
    2.58 +
     2.59 +	/* Hide the real cpl: a guest running below ring 3 sees cpl 0.  */
    2.60 +	if (ipsr.ia64_psr.cpl < 3)
    2.61 +		newpsr.ia64_psr.cpl = 0;
    2.62 +	newpsr.ia64_psr.bn = PSCB(vcpu, banknum);
     2.63 +
    2.64 +	return newpsr.i64;
    2.65  }
    2.66  
    2.67 -BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
    2.68 +IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval)
    2.69  {
    2.70 -	return !!PSCB(vcpu, interrupt_collection_enabled);
    2.71 -}
    2.72 -
    2.73 -BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
    2.74 -{
    2.75 -	return !vcpu->vcpu_info->evtchn_upcall_mask;
     2.76 +	u64 psr = vcpu_get_psr(vcpu);
    2.77 +	*pval = psr & (MASK(0, 32) | MASK(35, 2));
    2.78 +	return IA64_NO_FAULT;
    2.79  }
    2.80  
    2.81  u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr)
    2.82 @@ -511,6 +515,16 @@ u64 vcpu_get_ipsr_int_state(VCPU * vcpu,
    2.83  	return psr.i64;
    2.84  }
    2.85  
    2.86 +BOOLEAN vcpu_get_psr_ic(VCPU * vcpu)
    2.87 +{
    2.88 +	return !!PSCB(vcpu, interrupt_collection_enabled);
    2.89 +}
    2.90 +
    2.91 +BOOLEAN vcpu_get_psr_i(VCPU * vcpu)
    2.92 +{
    2.93 +	return !vcpu->vcpu_info->evtchn_upcall_mask;
    2.94 +}
    2.95 +
    2.96  /**************************************************************************
    2.97   VCPU control register access routines
    2.98  **************************************************************************/
     3.1 --- a/xen/include/asm-ia64/vcpu.h	Thu May 31 09:45:46 2007 -0600
     3.2 +++ b/xen/include/asm-ia64/vcpu.h	Thu May 31 10:48:48 2007 -0600
     3.3 @@ -42,7 +42,8 @@ extern IA64FAULT vcpu_get_ar(VCPU * vcpu
     3.4  /* psr */
     3.5  extern BOOLEAN vcpu_get_psr_ic(VCPU * vcpu);
     3.6  extern u64 vcpu_get_ipsr_int_state(VCPU * vcpu, u64 prevpsr);
     3.7 -extern IA64FAULT vcpu_get_psr(VCPU * vcpu, u64 * pval);
     3.8 +extern u64 vcpu_get_psr(VCPU * vcpu);
     3.9 +extern IA64FAULT vcpu_get_psr_masked(VCPU * vcpu, u64 * pval);
    3.10  extern IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm);
    3.11  extern IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm);
    3.12  extern IA64FAULT vcpu_set_psr_l(VCPU * vcpu, u64 val);