ia64/xen-unstable

view xen/include/asm-x86/event.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 50358c4b37f4
children 9865d5e82802
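
To illustrate the one-to-one 4GB mapping the description refers to, here is a minimal standalone sketch (not the changeset's actual code; the names and flag choices are illustrative): a 32-bit page directory of 1024 4MB superpage entries, where entry i maps virtual address i*4MB to guest frame base i*4MB. Under this changeset it is the shadow code, shadowing such a table, that translates those gfns to mfns via the p2m.

/* Minimal sketch: a one-to-one (identity) mapping of the low 4GB using
 * a single 32-bit page directory of 4MB (PSE) superpages. Entry i maps
 * virtual i*4MB to guest-physical i*4MB; the shadow code, not this
 * table, performs the gfn-to-mfn translation through the p2m.
 * Illustrative only -- not the changeset's implementation. */
#include <stdint.h>
#include <stdio.h>

#define PDE_COUNT   1024u              /* 1024 entries x 4MB = 4GB */
#define PDE_PRESENT (1u << 0)
#define PDE_RW      (1u << 1)
#define PDE_USER    (1u << 2)
#define PDE_PSE     (1u << 7)          /* entry is a 4MB superpage */

static uint32_t identity_pd[PDE_COUNT];

static void build_identity_pd(void)
{
    for ( unsigned int i = 0; i < PDE_COUNT; i++ )
        identity_pd[i] = (i << 22) |   /* gfn base in bits 31:22 */
                         PDE_PRESENT | PDE_RW | PDE_USER | PDE_PSE;
}

int main(void)
{
    build_identity_pd();
    /* Entry 1 covers virtual 0x00400000-0x007fffff, mapped one-to-one. */
    printf("PDE[1] = 0x%08x\n", identity_pd[1]);
    return 0;
}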
line source
/******************************************************************************
 * event.h
 *
 * A nice interface for passing asynchronous events to guest OSes.
 * (architecture-dependent part)
 *
 */

#ifndef __ASM_EVENT_H__
#define __ASM_EVENT_H__

#include <xen/shared.h>

static inline void vcpu_kick(struct vcpu *v)
{
    /*
     * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
     * pending flag. These values may fluctuate (after all, we hold no
     * locks) but the key insight is that each change will cause
     * evtchn_upcall_pending to be polled.
     *
     * NB2. We save the running flag across the unblock to avoid a needless
     * IPI for domains that we IPI'd to unblock.
     */
    int running = v->is_running;
    vcpu_unblock(v);
    if ( running )
        smp_send_event_check_cpu(v->processor);
}

static inline void vcpu_mark_events_pending(struct vcpu *v)
{
    if ( !test_and_set_bit(0, &vcpu_info(v, evtchn_upcall_pending)) )
        vcpu_kick(v);
}

int hvm_local_events_need_delivery(struct vcpu *v);
static inline int local_events_need_delivery(void)
{
    struct vcpu *v = current;
    return (is_hvm_vcpu(v) ? hvm_local_events_need_delivery(v) :
            (vcpu_info(v, evtchn_upcall_pending) &&
             !vcpu_info(v, evtchn_upcall_mask)));
}

static inline int local_event_delivery_is_enabled(void)
{
    return !vcpu_info(current, evtchn_upcall_mask);
}

static inline void local_event_delivery_disable(void)
{
    vcpu_info(current, evtchn_upcall_mask) = 1;
}

static inline void local_event_delivery_enable(void)
{
    vcpu_info(current, evtchn_upcall_mask) = 0;
}

/* No arch specific virq definition now. Default to global. */
static inline int arch_virq_is_global(int virq)
{
    return 1;
}

#endif
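
The ordering that the NB1 comment documents in vcpu_kick() has a matching consumer side in the scheduler's block path. Below is a sketch of that pattern, modeled loosely on Xen's do_block(); it assumes hypervisor context (struct vcpu, set_bit(), pause_flags, SCHEDULE_SOFTIRQ) and is illustrative rather than this changeset's code.

/*
 * Sketch of the consumer side of the lost-wakeup protocol, modeled
 * loosely on Xen's do_block(). Illustrative only.
 */
static long sketch_do_block(void)
{
    struct vcpu *v = current;

    /* Re-enable upcall delivery, then advertise that we are blocking. */
    local_event_delivery_enable();
    set_bit(_VPF_blocked, &v->pause_flags);

    /*
     * Check for events /after/ setting the blocked flag. A concurrent
     * vcpu_mark_events_pending() sets the pending bit first and then
     * inspects our state via vcpu_kick()/vcpu_unblock(): either it
     * observes us blocked and unblocks us, or we observe its pending
     * bit here and abort the block. Either way no wakeup is lost.
     */
    if ( local_events_need_delivery() )
        clear_bit(_VPF_blocked, &v->pause_flags);
    else
        raise_softirq(SCHEDULE_SOFTIRQ); /* actually yield the CPU */

    return 0;
}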