action = (irq_guest_action_t *)desc->action;
+ /*
+ * Is another instance of this timer already running? Skip everything
+ * to avoid forcing an EOI early.
+ */
+ if ( timer_is_active(&action->eoi_timer) )
+ goto out;
+
if ( action->ack_type != ACKTYPE_NONE )
{
unsigned int i;
return;
}
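The guard added above makes the callback tolerate being overtaken. The interleaving it defends against is roughly the following (a hypothetical schedule inferred from the new comment, reusing the names from the hunk; the callback is assumed to run under desc->lock):

/*
 * CPU0: eoi_timer expires; the callback starts and blocks waiting
 *       for desc->lock.
 * CPU1: holding desc->lock, re-arms eoi_timer with a fresh timeout.
 * CPU0: finally acquires desc->lock; the timer is active again, so
 *       this (now stale) callback instance must skip everything
 *       rather than force the EOI ahead of the new deadline.
 */
if ( timer_is_active(&action->eoi_timer) )
    goto out;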
+ /*
+ * Stop the timer as soon as we're certain we'll set it again further down,
+ * to prevent the current timeout (if any) from needlessly expiring.
+ */
+ if ( action->ack_type != ACKTYPE_NONE )
+ stop_timer(&action->eoi_timer);
+
if ( action->ack_type == ACKTYPE_EOI )
{
sp = pending_eoi_sp(peoi);
if ( action->ack_type != ACKTYPE_NONE )
{
- stop_timer(&action->eoi_timer);
migrate_timer(&action->eoi_timer, smp_processor_id());
set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
}
return;
}
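Taken together, the two hunks above turn the former back-to-back stop/migrate/set sequence into the following shape (a condensed sketch of the resulting flow; the guest-notification code in between is elided):

if ( action->ack_type != ACKTYPE_NONE )
    stop_timer(&action->eoi_timer);  /* quiesce any pending timeout early */

/* ... deliver the interrupt to the guest(s) ... */

if ( action->ack_type != ACKTYPE_NONE )
{
    migrate_timer(&action->eoi_timer, smp_processor_id());
    set_timer(&action->eoi_timer, NOW() + MILLISECS(1));
}

Stopping first means a timeout armed on an earlier pass can no longer fire in the window before set_timer() re-arms it, which is exactly what the new comment calls out.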
+ stop_timer(&action->eoi_timer);
+
if ( action->ack_type == ACKTYPE_UNMASK )
{
ASSERT(cpumask_empty(action->cpu_eoi_map));
    }
-static bool_t active_timer(struct timer *timer)
+static bool active_timer(const struct timer *timer)
{
ASSERT(timer->status >= TIMER_STATUS_inactive);
- ASSERT(timer->status <= TIMER_STATUS_in_list);
- return (timer->status >= TIMER_STATUS_in_heap);
+ return timer_is_active(timer);
}
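The simplification of active_timer() works because the new helper carries half of the checking: active_timer() keeps the lower-bound assertion, while timer_is_active() (added below) supplies the upper bound and the actual test. The combined effect is equivalent to the original body:

ASSERT(timer->status >= TIMER_STATUS_inactive);  /* kept in active_timer()     */
ASSERT(timer->status <= TIMER_STATUS_in_list);   /* moved to timer_is_active() */
return timer->status >= TIMER_STATUS_in_heap;    /* moved to timer_is_active() */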
#define timer_is_expired(t) timer_expires_before(t, NOW())
+/*
+ * True if a timer is active.
+ *
+ * Unlike for timer_expires_before(), it is the caller's responsibility to
+ * use suitable locking such that the returned value isn't stale by the time
+ * it gets acted upon.
+ */
+static inline bool timer_is_active(const struct timer *timer)
+{
+ ASSERT(timer->status <= TIMER_STATUS_in_list);
+ return timer->status >= TIMER_STATUS_in_heap;
+}
+
/* Migrate a timer to a different CPU. The timer may be currently active. */
void migrate_timer(struct timer *timer, unsigned int new_cpu);
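As the comment on timer_is_active() notes, the helper does no locking of its own. A hypothetical caller in the style of the irq.c hunks above (the desc/action names and the choice of desc->lock are illustrative, not part of this patch) would serialize like so:

spin_lock_irq(&desc->lock);

/* desc->lock also guards whoever (re)arms eoi_timer, so the result of
 * the check cannot go stale before we act on it. */
if ( !timer_is_active(&action->eoi_timer) )
    set_timer(&action->eoi_timer, NOW() + MILLISECS(1));

spin_unlock_irq(&desc->lock);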