if ( v->vcpu_info == &dummy_vcpu_info )
{
memset(new_info, 0, sizeof(*new_info));
+#ifdef XEN_HAVE_PV_UPCALL_MASK
__vcpu_info(v, new_info, evtchn_upcall_mask) = 1;
+#endif
}
else
{
v->vcpu_id, v->processor,
v->is_running ? 'T':'F', v->poll_evtchn,
vcpu_info(v, evtchn_upcall_pending),
- vcpu_info(v, evtchn_upcall_mask));
+ !vcpu_event_delivery_is_enabled(v));
cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
printk("dirty_cpus=%s ", tmpstr);
cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
void vcpu_kick(struct vcpu *v);
void vcpu_mark_events_pending(struct vcpu *v);
+/*
+ * Returns non-zero iff event (upcall) delivery is enabled for @v.
+ *
+ * On this architecture events are injected as an interrupt, so delivery
+ * is enabled exactly when the guest has IRQs unmasked in its saved CPSR.
+ * NOTE(review): this reads v's saved register state -- presumably only
+ * meaningful when @v is not currently running; confirm against callers.
+ */
+static inline int vcpu_event_delivery_is_enabled(struct vcpu *v)
+{
+ struct cpu_user_regs *regs = &v->arch.cpu_info->guest_cpu_user_regs;
+ return !(regs->cpsr & PSR_IRQ_MASK);
+}
+
static inline int local_events_need_delivery_nomask(void)
{
struct pending_irq *p = irq_to_pending(current, VGIC_IRQ_EVTCHN_CALLBACK);
static inline int local_events_need_delivery(void)
{
- struct cpu_user_regs *regs = guest_cpu_user_regs();
-
- /* guest IRQs are masked */
- if ( (regs->cpsr & PSR_IRQ_MASK) )
+ /* Nothing is deliverable while the current vcpu masks event delivery. */
+ if ( !vcpu_event_delivery_is_enabled(current) )
return 0;
return local_events_need_delivery_nomask();
}
-int local_event_delivery_is_enabled(void);
-
static inline void local_event_delivery_enable(void)
{
struct cpu_user_regs *regs = guest_cpu_user_regs();
#define COMPAT_LEGACY_MAX_VCPUS XEN_LEGACY_MAX_VCPUS
#define COMPAT_HAVE_PV_GUEST_ENTRY XEN_HAVE_PV_GUEST_ENTRY
+#define COMPAT_HAVE_PV_UPCALL_MASK XEN_HAVE_PV_UPCALL_MASK
#endif
void vcpu_kick(struct vcpu *v);
void vcpu_mark_events_pending(struct vcpu *v);
+/*
+ * Returns non-zero iff event (upcall) delivery is enabled for @v,
+ * i.e. iff the vcpu's evtchn_upcall_mask in its shared vcpu_info is
+ * clear.  Counterpart of the register-based check used where events
+ * are delivered via the interrupt path.
+ */
+static inline int vcpu_event_delivery_is_enabled(struct vcpu *v)
+{
+ return !vcpu_info(v, evtchn_upcall_mask);
+}
+
int hvm_local_events_need_delivery(struct vcpu *v);
static inline int local_events_need_delivery(void)
{
!vcpu_info(v, evtchn_upcall_mask)));
}
-static inline int local_event_delivery_is_enabled(void)
-{
- return !vcpu_info(current, evtchn_upcall_mask);
-}
-
static inline void local_event_delivery_disable(void)
{
vcpu_info(current, evtchn_upcall_mask) = 1;
#define XEN_HAVE_PV_GUEST_ENTRY 1
+#define XEN_HAVE_PV_UPCALL_MASK 1
+
/*
* `incontents 200 segdesc Segment Descriptor Tables
*/
* to block: this avoids wakeup-waiting races.
*/
uint8_t evtchn_upcall_pending;
+#ifdef XEN_HAVE_PV_UPCALL_MASK
uint8_t evtchn_upcall_mask;
+#else /* XEN_HAVE_PV_UPCALL_MASK */
+ uint8_t pad0;
+#endif /* XEN_HAVE_PV_UPCALL_MASK */
xen_ulong_t evtchn_pending_sel;
struct arch_vcpu_info arch;
struct vcpu_time_info time;