ia64/xen-unstable

view xen/include/asm-ia64/event.h @ 12450:6384ff711405

[IA64] MCA support - Define MCA interrupt vector

Signed-off-by: Yutaka Ezaki <yutaka.ezaki@jp.fujitsu.com>
Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
Signed-off-by: Kazuhiro Suzuki <kaz@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Sun Oct 29 09:27:15 2006 -0700 (2006-10-29)
parents 04e5e80be909
children b7ae31726aa6
line source
1 /******************************************************************************
2 * event.h
3 *
4 * A nice interface for passing asynchronous events to guest OSes.
5 * (architecture-dependent part)
6 *
7 */
9 #ifndef __ASM_EVENT_H__
10 #define __ASM_EVENT_H__
12 #include <public/xen.h>
13 #include <asm/vcpu.h>
/* Wake vcpu 'v' and make sure it notices a newly pending event:
 * unblock it, and IPI its CPU if it was already running. */
static inline void vcpu_kick(struct vcpu *v)
{
    /*
     * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
     * pending flag. These values may fluctuate (after all, we hold no
     * locks) but the key insight is that each change will cause
     * evtchn_upcall_pending to be polled.
     *
     * NB2. We save VCPUF_running across the unblock to avoid a needless
     * IPI for domains that we IPI'd to unblock.
     */
    int running = test_bit(_VCPUF_running, &v->vcpu_flags);
    vcpu_unblock(v);
    if ( running )
        smp_send_event_check_cpu(v->processor);

    /* Non-VMX domains without an event callback IP get the event-channel
     * vector injected directly as a pending interrupt. */
    if(!VMX_DOMAIN(v) && !v->arch.event_callback_ip)
        vcpu_pend_interrupt(v, v->domain->shared_info->arch.evtchn_vector);
}
35 static inline void vcpu_mark_events_pending(struct vcpu *v)
36 {
37 if ( !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
38 vcpu_kick(v);
39 }
/* Note: Bitwise operations result in fast code with no branches. */
/* Non-zero iff an upcall is pending AND upcalls are not masked for 'v'.
 * NOTE(review): 'v' is evaluated twice -- avoid arguments with side
 * effects; the bitwise '&' (not '&&') is the deliberate branch-free form. */
#define event_pending(v) \
(!!(v)->vcpu_info->evtchn_upcall_pending & \
!(v)->vcpu_info->evtchn_upcall_mask)
46 static inline int local_events_need_delivery(void)
47 {
48 return event_pending(current);
49 }
51 static inline int local_event_delivery_is_enabled(void)
52 {
53 return !current->vcpu_info->evtchn_upcall_mask;
54 }
56 static inline void local_event_delivery_disable(void)
57 {
58 current->vcpu_info->evtchn_upcall_mask = 1;
59 }
61 static inline void local_event_delivery_enable(void)
62 {
63 current->vcpu_info->evtchn_upcall_mask = 0;
64 }
66 static inline int arch_virq_is_global(int virq)
67 {
68 int rc;
70 switch ( virq )
71 {
72 case VIRQ_ITC:
73 case VIRQ_MCA_CMC:
74 case VIRQ_MCA_CPE:
75 rc = 0;
76 break;
77 default:
78 rc = 1;
79 break;
80 }
82 return rc;
83 }
85 #endif