ia64/xen-unstable

changeset 17700:9c0a654157cb

[IA64] cleanup: vcpu_set_psr_sm.

It is pointless to set the machine psr.

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Wed May 28 18:10:44 2008 +0900 (2008-05-28)
parents 74d0f17f3fa5
children d2a239224cb2
files xen/arch/ia64/xen/vcpu.c
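The two functions touched below emulate the guest's ssm/rsm instructions. Before this changeset, each emulated user-mask bit was mirrored into the live machine PSR as well as into the interrupted context's image in regs->cr_ipsr, and the live copy was written back with a serializing mov psr.l. Since it is the cr_ipsr image that takes effect when control returns to the guest, the read-modify-write of the machine psr presumably had no lasting effect, which is what the cleanup removes. A minimal sketch of the pattern, using the psr.up bit as the example (all names taken from the diff below):

	/* Old shape (sketch): mirror the bit into both PSR copies,
	 * then write the live PSR back and serialize. */
	struct ia64_psr psr, imm, *ipsr;
	__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
	if (imm.up) {
		ipsr->up = 1;	/* interrupted-context PSR: survives the return */
		psr.up = 1;	/* live PSR: replaced from cr_ipsr on return anyway */
	}
	__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");

	/* New shape: only the virtualized copy is touched. */
	if (imm.up)
		ipsr->up = 1;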
--- a/xen/arch/ia64/xen/vcpu.c	Thu May 22 19:42:51 2008 +0900
+++ b/xen/arch/ia64/xen/vcpu.c	Wed May 28 18:10:44 2008 +0900
@@ -301,13 +301,12 @@ IA64FAULT vcpu_reset_psr_dt(VCPU * vcpu)
 
 IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu, u64 imm24)
 {
-	struct ia64_psr psr, imm, *ipsr;
+	struct ia64_psr imm, *ipsr;
 	REGS *regs = vcpu_regs(vcpu);
 
 	//PRIVOP_COUNT_ADDR(regs,_RSM);
 	// TODO: All of these bits need to be virtualized
 	// TODO: Only allowed for current vcpu
-	__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
 	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
 	imm = *(struct ia64_psr *)&imm24;
 	// interrupt flag
@@ -336,14 +335,10 @@ IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu,
 		// ipsr->pp = 1;
 		PSCB(vcpu, vpsr_pp) = 0; // but fool the domain if it gets psr
 	}
-	if (imm.up) {
+	if (imm.up)
 		ipsr->up = 0;
-		psr.up = 0;
-	}
-	if (imm.sp) {
+	if (imm.sp)
 		ipsr->sp = 0;
-		psr.sp = 0;
-	}
 	if (imm.be)
 		ipsr->be = 0;
 	if (imm.dt)
@@ -352,7 +347,6 @@ IA64FAULT vcpu_reset_psr_sm(VCPU * vcpu,
 		ipsr->pk = 0;
 		vcpu_pkr_use_unset(vcpu);
 	}
-	__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
 	return IA64_NO_FAULT;
 }
 
@@ -371,13 +365,12 @@ IA64FAULT vcpu_set_psr_i(VCPU * vcpu)
 
 IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u64 imm24)
 {
-	struct ia64_psr psr, imm, *ipsr;
+	struct ia64_psr imm, *ipsr;
 	REGS *regs = vcpu_regs(vcpu);
 	u64 mask, enabling_interrupts = 0;
 
 	//PRIVOP_COUNT_ADDR(regs,_SSM);
 	// TODO: All of these bits need to be virtualized
-	__asm__ __volatile("mov %0=psr;;":"=r"(psr)::"memory");
 	imm = *(struct ia64_psr *)&imm24;
 	ipsr = (struct ia64_psr *)&regs->cr_ipsr;
 	// just handle psr.sp,pp and psr.i,ic (and user mask) for now
@@ -401,10 +394,8 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
 		// ipsr->pp = 1;
 		PSCB(vcpu, vpsr_pp) = 1;
 	}
-	if (imm.sp) {
+	if (imm.sp)
 		ipsr->sp = 1;
-		psr.sp = 1;
-	}
 	if (imm.i) {
 		if (vcpu->vcpu_info->evtchn_upcall_mask) {
 //printk("vcpu_set_psr_sm: psr.ic 0->1\n");
@@ -415,22 +406,14 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
 	if (imm.ic)
 		PSCB(vcpu, interrupt_collection_enabled) = 1;
 	// TODO: do this faster
-	if (imm.mfl) {
+	if (imm.mfl)
 		ipsr->mfl = 1;
-		psr.mfl = 1;
-	}
-	if (imm.mfh) {
+	if (imm.mfh)
 		ipsr->mfh = 1;
-		psr.mfh = 1;
-	}
-	if (imm.ac) {
+	if (imm.ac)
 		ipsr->ac = 1;
-		psr.ac = 1;
-	}
-	if (imm.up) {
+	if (imm.up)
 		ipsr->up = 1;
-		psr.up = 1;
-	}
 	if (imm.be)
 		ipsr->be = 1;
 	if (imm.dt)
@@ -439,7 +422,6 @@ IA64FAULT vcpu_set_psr_sm(VCPU * vcpu, u
 		vcpu_pkr_set_psr_handling(vcpu);
 		ipsr->pk = 1;
 	}
-	__asm__ __volatile(";; mov psr.l=%0;; srlz.d"::"r"(psr):"memory");
 	if (enabling_interrupts &&
 	    vcpu_check_pending_interrupts(vcpu) != SPURIOUS_VECTOR)
 		PSCB(vcpu, pending_interruption) = 1;