ia64/xen-unstable

changeset 10141:2f2f500c26da

[IA64] Add event injection logic

Add event injection logic. Because up to this point there is no place
to register a callback, this patch doesn't break the existing working flow.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Tue May 23 08:34:48 2006 -0600 (2006-05-23)
parents d8659e39ff3c
children 5be25952b50d
files xen/arch/ia64/xen/process.c xen/arch/ia64/xen/vcpu.c xen/include/asm-ia64/event.h
line diff
     1.1 --- a/xen/arch/ia64/xen/process.c	Tue May 23 08:24:09 2006 -0600
     1.2 +++ b/xen/arch/ia64/xen/process.c	Tue May 23 08:34:48 2006 -0600
     1.3 @@ -246,6 +246,40 @@ printf("*#*#*#* about to deliver early t
     1.4  	reflect_interruption(isr,regs,IA64_EXTINT_VECTOR);
     1.5  }
     1.6  
     1.7 +void reflect_event(struct pt_regs *regs)
     1.8 +{
     1.9 +	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
    1.10 +	struct vcpu *v = current;
    1.11 +
    1.12 +	/* Sanity check */
    1.13 +	if (is_idle_vcpu(v) || !user_mode(regs)) {
    1.14 +		//printk("WARN: invocation to reflect_event in nested xen\n");
    1.15 +		return;
    1.16 +	}
    1.17 +
    1.18 +	if (!event_pending(v))
    1.19 +		return;
    1.20 +
    1.21 +	if (!PSCB(v,interrupt_collection_enabled))
    1.22 +		printf("psr.ic off, delivering event, ipsr=%lx,iip=%lx,isr=%lx,viip=0x%lx\n",
    1.23 +		       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
    1.24 +	PSCB(v,unat) = regs->ar_unat;  // not sure if this is really needed?
    1.25 +	PSCB(v,precover_ifs) = regs->cr_ifs;
    1.26 +	vcpu_bsw0(v);
    1.27 +	PSCB(v,ipsr) = vcpu_get_ipsr_int_state(v,regs->cr_ipsr);
    1.28 +	PSCB(v,isr) = isr;
    1.29 +	PSCB(v,iip) = regs->cr_iip;
    1.30 +	PSCB(v,ifs) = 0;
    1.31 +	PSCB(v,incomplete_regframe) = 0;
    1.32 +
    1.33 +	regs->cr_iip = v->arch.event_callback_ip;
    1.34 +	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
    1.35 +	regs->r31 = XSI_IPSR;
    1.36 +
    1.37 +	v->vcpu_info->evtchn_upcall_mask = 1;
    1.38 +	PSCB(v,interrupt_collection_enabled) = 0;
    1.39 +}
    1.40 +
    1.41  // ONLY gets called from ia64_leave_kernel
    1.42  // ONLY call with interrupts disabled?? (else might miss one?)
    1.43  // NEVER successful if already reflecting a trap/fault because psr.i==0
    1.44 @@ -255,7 +289,6 @@ void deliver_pending_interrupt(struct pt
    1.45  	struct vcpu *v = current;
    1.46  	// FIXME: Will this work properly if doing an RFI???
    1.47  	if (!is_idle_domain(d) && user_mode(regs)) {
    1.48 -		//vcpu_poke_timer(v);
    1.49  		if (vcpu_deliverable_interrupts(v))
    1.50  			reflect_extint(regs);
    1.51  		else if (PSCB(v,pending_interruption))
     2.1 --- a/xen/arch/ia64/xen/vcpu.c	Tue May 23 08:24:09 2006 -0600
     2.2 +++ b/xen/arch/ia64/xen/vcpu.c	Tue May 23 08:34:48 2006 -0600
     2.3 @@ -649,16 +649,18 @@ void vcpu_pend_interrupt(VCPU *vcpu, UIN
     2.4  		printf("vcpu_pend_interrupt: bad vector\n");
     2.5  		return;
     2.6  	}
     2.7 -    if ( VMX_DOMAIN(vcpu) ) {
     2.8 -	    set_bit(vector,VCPU(vcpu,irr));
     2.9 -    } else
    2.10 -    {
    2.11 -	if (test_bit(vector,PSCBX(vcpu,irr))) {
    2.12 -//printf("vcpu_pend_interrupt: overrun\n");
    2.13 +
    2.14 +	if (vcpu->arch.event_callback_ip) {
    2.15 +		printf("Deprecated interface. Move to new event based solution\n");
    2.16 +		return;
    2.17  	}
    2.18 -	set_bit(vector,PSCBX(vcpu,irr));
    2.19 -	PSCB(vcpu,pending_interruption) = 1;
    2.20 -    }
    2.21 +		
    2.22 +	if ( VMX_DOMAIN(vcpu) ) {
    2.23 +		set_bit(vector,VCPU(vcpu,irr));
    2.24 +	} else {
    2.25 +		set_bit(vector,PSCBX(vcpu,irr));
    2.26 +		PSCB(vcpu,pending_interruption) = 1;
    2.27 +	}
    2.28  }
    2.29  
    2.30  #define	IA64_TPR_MMI	0x10000
    2.31 @@ -674,6 +676,9 @@ UINT64 vcpu_check_pending_interrupts(VCP
    2.32  {
    2.33  	UINT64 *p, *r, bits, bitnum, mask, i, vector;
    2.34  
    2.35 +	if (vcpu->arch.event_callback_ip)
    2.36 +		return SPURIOUS_VECTOR;
    2.37 +
    2.38  	/* Always check pending event, since guest may just ack the
    2.39  	 * event injection without handle. Later guest may throw out
    2.40  	 * the event itself.
    2.41 @@ -1151,7 +1156,16 @@ void vcpu_pend_timer(VCPU *vcpu)
    2.42  		// don't deliver another
    2.43  		return;
    2.44  	}
    2.45 -	vcpu_pend_interrupt(vcpu, itv);
    2.46 +	if (vcpu->arch.event_callback_ip) {
    2.47 +		/* A small window may occur when injecting vIRQ while related
    2.48 +		 * handler has not been registered. Don't fire in such case.
    2.49 +		 */
    2.50 +		if (vcpu->virq_to_evtchn[VIRQ_ITC]) {
    2.51 +			send_guest_vcpu_virq(vcpu, VIRQ_ITC);
    2.52 +			PSCBX(vcpu, domain_itm_last) = PSCBX(vcpu, domain_itm);
    2.53 +		}
    2.54 +	} else
    2.55 +		vcpu_pend_interrupt(vcpu, itv);
    2.56  }
    2.57  
    2.58  // returns true if ready to deliver a timer interrupt too early
     3.1 --- a/xen/include/asm-ia64/event.h	Tue May 23 08:24:09 2006 -0600
     3.2 +++ b/xen/include/asm-ia64/event.h	Tue May 23 08:34:48 2006 -0600
     3.3 @@ -28,8 +28,8 @@ static inline void evtchn_notify(struct 
     3.4      if ( running )
     3.5          smp_send_event_check_cpu(v->processor);
     3.6  
     3.7 -    if(!VMX_DOMAIN(v))
     3.8 -	vcpu_pend_interrupt(v, v->domain->shared_info->arch.evtchn_vector);
     3.9 +    if(!VMX_DOMAIN(v) && !v->arch.event_callback_ip)
    3.10 +        vcpu_pend_interrupt(v, v->domain->shared_info->arch.evtchn_vector);
    3.11  }
    3.12  
    3.13  /* Note: Bitwise operations result in fast code with no branches. */