         ret = -EINVAL;
         if ( eoi.irq >= v->domain->nr_pirqs )
             break;
-        spin_lock(&v->domain->event_lock);
         if ( v->domain->arch.pirq_eoi_map )
             evtchn_unmask(v->domain->pirq_to_evtchn[eoi.irq]);
         if ( !is_hvm_domain(v->domain) ||
              domain_pirq_to_emuirq(v->domain, eoi.irq) == IRQ_PT )
             ret = pirq_guest_eoi(v->domain, eoi.irq);
         else
             ret = 0;
-        if ( is_hvm_domain(v->domain) &&
-             domain_pirq_to_emuirq(v->domain, eoi.irq) > 0 )
-        {
-            struct hvm_irq *hvm_irq = &v->domain->arch.hvm_domain.irq;
-            int gsi = domain_pirq_to_emuirq(v->domain, eoi.irq);
-
-            /* if this is a level irq and count > 0, send another
-             * notification */
-            if ( gsi >= NR_ISAIRQS /* ISA irqs are edge triggered */
-                 && hvm_irq->gsi_assert_count[gsi] )
-                send_guest_pirq(v->domain, eoi.irq);
-        }
-        spin_unlock(&v->domain->event_lock);
         break;
     }
@@ ... @@
             break;
         irq_status_query.flags = 0;
         if ( is_hvm_domain(v->domain) &&
-             domain_pirq_to_irq(v->domain, irq) <= 0 &&
-             domain_pirq_to_emuirq(v->domain, irq) == IRQ_UNBOUND )
+             domain_pirq_to_irq(v->domain, irq) <= 0 )
         {
-            ret = -EINVAL;
+            ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
             break;
         }
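
The hunk removed above implements the re-notification rule for level-triggered lines: when the guest EOIs a GSI that some device still holds asserted (gsi_assert_count[gsi] != 0), another event has to be injected immediately, whereas edge-triggered ISA lines (gsi < NR_ISAIRQS) only fire again on a fresh edge. Below is a minimal standalone sketch of just that semantics; all names in it (guest_eoi, notify_guest, assert_count, NR_ISA_IRQS, NR_GSIS) are hypothetical and it is not Xen code.

#include <stdio.h>

#define NR_ISA_IRQS 16          /* lines 0-15: edge triggered (ISA) */
#define NR_GSIS     48

/* Number of devices currently asserting each line; stands in for
 * hvm_irq->gsi_assert_count[] in the removed hunk. */
static unsigned int assert_count[NR_GSIS];

/* Stands in for send_guest_pirq(): deliver one notification. */
static void notify_guest(int gsi)
{
    printf("notify guest: gsi %d\n", gsi);
}

/* Guest acknowledged (EOI'd) this gsi. */
static void guest_eoi(int gsi)
{
    /* Level semantics: a line still asserted after EOI must fire
     * again at once; edge lines wait for the next edge. */
    if (gsi >= NR_ISA_IRQS && assert_count[gsi] > 0)
        notify_guest(gsi);
}

int main(void)
{
    assert_count[33] = 1;   /* a device holds level line 33 asserted */
    notify_guest(33);       /* initial delivery */
    guest_eoi(33);          /* still asserted -> notified again */

    assert_count[33] = 0;   /* device deasserts the line */
    guest_eoi(33);          /* nothing further is delivered */

    guest_eoi(4);           /* ISA/edge line: EOI never re-notifies */
    return 0;
}

In the hypercall itself the equivalent check reads hvm_irq->gsi_assert_count[gsi] under the domain's event_lock and re-sends via send_guest_pirq(); the sketch mirrors only the control flow, not the locking.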