direct-io.hg

view extras/mini-os/events.c @ 10734:9b7e1ea4c4d2

[HVM] Sync p2m table across all vcpus on x86_32p xen.
We found that VGA acceleration cannot work on SMP VMX guests on x86_32p
Xen. This is caused by the way we construct the p2m table today: only the
first L2 page table slot that maps p2m table pages is copied to the
monitor page tables of non-vcpu0 vcpus when the VMX guest is created.
But VGA acceleration creates p2m table entries beyond the first L2 slot
after the HVM guest is created, so only vcpu0 sees these entries and the
other vcpus cannot do VGA acceleration.

Signed-off-by: Xin Li <xin.b.li@intel.com>
author kfraser@localhost.localdomain
date Wed Jul 26 11:34:12 2006 +0100 (2006-07-26)
parents 43474e663b3d
children a10d02d20b31
line source
/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
 ****************************************************************************
 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
 ****************************************************************************
 *
 *        File: events.c
 *      Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
 *     Changes: Grzegorz Milos (gm281@cam.ac.uk)
 *
 *        Date: Jul 2003, changes Jun 2005
 *
 * Environment: Xen Minimal OS
 * Description: Deals with events received on event channels
 *
 ****************************************************************************
 */
#include <os.h>
#include <mm.h>
#include <hypervisor.h>
#include <events.h>
#include <lib.h>

#define NR_EVS 1024
/* This represents an event handler. Chaining or sharing is not allowed. */
typedef struct _ev_action_t {
    void (*handler)(int, struct pt_regs *, void *);
    void *data;
    u32 count;
} ev_action_t;

static ev_action_t ev_actions[NR_EVS];
void default_handler(int port, struct pt_regs *regs, void *data);
/*
 * Demux events to different handlers.
 */
int do_event(u32 port, struct pt_regs *regs)
{
    ev_action_t *action;

    if (port >= NR_EVS) {
        printk("Port number too large: %d\n", port);
        goto out;
    }

    action = &ev_actions[port];
    action->count++;

    /* call the handler */
    action->handler(port, regs, action->data);

 out:
    clear_evtchn(port);

    return 1;
}
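/*
 * Illustrative note (added commentary, not part of the original file):
 * do_event() is the demultiplexing entry point; it is reached from the
 * architecture's hypervisor event callback, which scans the pending-event
 * bitmaps in the shared info page. The disabled sketch below only shows the
 * shape of such a caller; example_next_pending_port() is a hypothetical
 * helper, not a Mini-OS function.
 */
#if 0
static void example_event_demux(struct pt_regs *regs)
{
    u32 port;

    /* Repeatedly pick a pending port and hand it to do_event(). */
    while ((port = example_next_pending_port()) != (u32)-1)
        do_event(port, regs);
}
#endif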
int bind_evtchn( u32 port, void (*handler)(int, struct pt_regs *, void *),
                 void *data )
{
    if (ev_actions[port].handler != default_handler)
        printk("WARN: Handler for port %d already registered, replacing\n",
               port);

    ev_actions[port].data = data;
    wmb();
    ev_actions[port].handler = handler;

    /* Finally unmask the port */
    unmask_evtchn(port);

    return port;
}
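/*
 * Usage sketch (added for illustration, not in the original file): a driver
 * that already knows an event-channel port number, e.g. one communicated by
 * its backend, attaches a handler like this. my_port and my_handler are
 * hypothetical names.
 */
#if 0
static void my_handler(int port, struct pt_regs *regs, void *data)
{
    printk("event on port %d\n", port);
}

static void example_driver_init(u32 my_port)
{
    bind_evtchn(my_port, my_handler, NULL);
}
#endif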
void unbind_evtchn( u32 port )
{
    if (ev_actions[port].handler == default_handler)
        printk("WARN: No handler for port %d when unbinding\n", port);
    ev_actions[port].handler = default_handler;
    wmb();
    ev_actions[port].data = NULL;
}
int bind_virq( u32 virq, void (*handler)(int, struct pt_regs *, void *data),
               void *data )
{
    evtchn_op_t op;

    /* Try to bind the virq to a port */
    op.cmd = EVTCHNOP_bind_virq;
    op.u.bind_virq.virq = virq;
    op.u.bind_virq.vcpu = smp_processor_id();

    if ( HYPERVISOR_event_channel_op(&op) != 0 )
    {
        printk("Failed to bind virtual IRQ %d\n", virq);
        return 1;
    }
    bind_evtchn(op.u.bind_virq.port, handler, data);
    return 0;
}
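/*
 * Usage sketch (added for illustration, not in the original file): binding a
 * virtual IRQ such as VIRQ_TIMER routes it through an event channel on the
 * current vcpu and registers the handler via bind_evtchn() above.
 * timer_handler and example_timer_init are hypothetical names; VIRQ_TIMER
 * comes from the Xen public headers.
 */
#if 0
static void timer_handler(int port, struct pt_regs *regs, void *data)
{
    printk("timer tick on port %d\n", port);
}

static void example_timer_init(void)
{
    /* bind_virq() returns 0 on success, 1 on failure. */
    if (bind_virq(VIRQ_TIMER, timer_handler, NULL) != 0)
        printk("could not bind VIRQ_TIMER\n");
}
#endif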
void unbind_virq( u32 port )
{
    unbind_evtchn(port);
}
#if defined(__x86_64__)
/* Allocate 4 pages for the irqstack */
#define STACK_PAGES 4
char irqstack[1024 * 4 * STACK_PAGES];

static struct pda
{
    int irqcount;       /* offset 0 (used in x86_64.S) */
    char *irqstackptr;  /*        8 */
} cpu0_pda;
#endif
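/*
 * Added commentary: on x86_64 the PDA above is reached via %gs-relative
 * addressing from the low-level interrupt entry code (hence the offset
 * annotations referring to x86_64.S). init_events() below loads its address
 * into MSR_GS_BASE so that irqcount and irqstackptr can be found there.
 */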
/*
 * Initially all events are without a handler and disabled
 */
void init_events(void)
{
    int i;
#if defined(__x86_64__)
    asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
    wrmsrl(0xc0000101, &cpu0_pda);    /* 0xc0000101 is MSR_GS_BASE */
    cpu0_pda.irqcount = -1;
    cpu0_pda.irqstackptr = irqstack + 1024 * 4 * STACK_PAGES;
#endif
    /* initialise event handlers */
    for ( i = 0; i < NR_EVS; i++ )
    {
        ev_actions[i].handler = default_handler;
        mask_evtchn(i);
    }
}
void default_handler(int port, struct pt_regs *regs, void *ignore)
{
    printk("[Port %d] - event received\n", port);
}
/* Unfortunate confusion of terminology: the port is unbound as far
   as Xen is concerned, but we automatically bind a handler to it
   from inside mini-os. */
int evtchn_alloc_unbound(void (*handler)(int, struct pt_regs *regs,
                                         void *data),
                         void *data)
{
    u32 port;
    evtchn_op_t op;
    int err;

    op.cmd = EVTCHNOP_alloc_unbound;
    op.u.alloc_unbound.dom = DOMID_SELF;
    op.u.alloc_unbound.remote_dom = 0;

    err = HYPERVISOR_event_channel_op(&op);
    if (err) {
        printk("Failed to alloc unbound evtchn: %d.\n", err);
        return -1;
    }
    port = op.u.alloc_unbound.port;
    bind_evtchn(port, handler, data);
    return port;
}
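/*
 * Usage sketch (added for illustration, not in the original file): allocating
 * an unbound port for dom0 to connect to. The returned port number would
 * typically be advertised to the peer, for example via xenstore.
 * my_frontend_handler and example_setup are hypothetical names.
 */
#if 0
static void my_frontend_handler(int port, struct pt_regs *regs, void *data)
{
    printk("notification on port %d\n", port);
}

static int example_setup(void)
{
    int port = evtchn_alloc_unbound(my_frontend_handler, NULL);
    if (port < 0)
        return -1;
    /* advertise 'port' to the remote domain, e.g. through xenstore */
    return 0;
}
#endif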