ia64/xen-unstable

view xen/include/asm-x86/event.h @ 18321:29c242c06ac2

x86: Simplify arch_virq_is_global().
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Aug 13 13:39:40 2008 +0100 (2008-08-13)
parents a49673cd23d2
children 4941c5a14598
line source
1 /******************************************************************************
2 * event.h
3 *
4 * A nice interface for passing asynchronous events to guest OSes.
5 * (architecture-dependent part)
6 *
7 */
9 #ifndef __ASM_EVENT_H__
10 #define __ASM_EVENT_H__
12 #include <xen/shared.h>
/*
 * Notify vcpu 'v' of a pending event: unblock it and, if it was running
 * at the time we looked, prod its CPU so it re-checks for pending events.
 */
static inline void vcpu_kick(struct vcpu *v)
{
    /*
     * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
     * pending flag. These values may fluctuate (after all, we hold no
     * locks) but the key insight is that each change will cause
     * evtchn_upcall_pending to be polled.
     *
     * NB2. We save the running flag across the unblock to avoid a needless
     * IPI for domains that we IPI'd to unblock.
     */
    /* Sample is_running *before* vcpu_unblock(): see NB2 above. */
    int running = v->is_running;
    vcpu_unblock(v);
    if ( running )
        smp_send_event_check_cpu(v->processor);
}
31 static inline void vcpu_mark_events_pending(struct vcpu *v)
32 {
33 int already_pending = test_and_set_bit(
34 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
36 if ( already_pending )
37 return;
39 if ( is_hvm_vcpu(v) )
40 hvm_assert_evtchn_irq(v);
41 else
42 vcpu_kick(v);
43 }
45 int hvm_local_events_need_delivery(struct vcpu *v);
46 static inline int local_events_need_delivery(void)
47 {
48 struct vcpu *v = current;
49 return (is_hvm_vcpu(v) ? hvm_local_events_need_delivery(v) :
50 (vcpu_info(v, evtchn_upcall_pending) &&
51 !vcpu_info(v, evtchn_upcall_mask)));
52 }
54 static inline int local_event_delivery_is_enabled(void)
55 {
56 return !vcpu_info(current, evtchn_upcall_mask);
57 }
/* Mask event upcalls to the calling vcpu. */
static inline void local_event_delivery_disable(void)
{
    vcpu_info(current, evtchn_upcall_mask) = 1;
}
/*
 * Unmask event upcalls to the calling vcpu.
 * NOTE(review): this only clears the mask; presumably any already-pending
 * event is picked up by a subsequent local_events_need_delivery() poll —
 * confirm against callers.
 */
static inline void local_event_delivery_enable(void)
{
    vcpu_info(current, evtchn_upcall_mask) = 0;
}
/* No arch specific virq definition now. Default to global. */
/* 'virq' is deliberately unused: x86 currently treats every virq as global. */
static inline int arch_virq_is_global(int virq)
{
    return 1;
}
75 #endif