ia64/xen-unstable

view extras/mini-os/events.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

don't use MAX_VCPUS, and use vcpu::max_vcpus.
The changeset of 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 08916e5135b3
children
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4 -*-
2 ****************************************************************************
3 * (C) 2003 - Rolf Neugebauer - Intel Research Cambridge
4 * (C) 2005 - Grzegorz Milos - Intel Research Cambridge
5 ****************************************************************************
6 *
7 * File: events.c
8 * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
9 * Changes: Grzegorz Milos (gm281@cam.ac.uk)
10 *
11 * Date: Jul 2003, changes Jun 2005
12 *
13 * Environment: Xen Minimal OS
14 * Description: Deals with events received on event channels
15 *
16 ****************************************************************************
17 */
19 #include <os.h>
20 #include <mm.h>
21 #include <hypervisor.h>
22 #include <events.h>
23 #include <lib.h>
25 #define NR_EVS 1024
27 /* this represents a event handler. Chaining or sharing is not allowed */
28 typedef struct _ev_action_t {
29 evtchn_handler_t handler; /* callback invoked by do_event() for this port */
30 void *data; /* opaque argument handed back to the handler */
31 u32 count; /* number of events delivered on this port */
32 } ev_action_t;
/* One action slot per event-channel port; no chaining or sharing. */
34 static ev_action_t ev_actions[NR_EVS];
/* Fallback handler installed on every port by init_events(). */
35 void default_handler(evtchn_port_t port, struct pt_regs *regs, void *data);
/* Bitmap of currently-bound ports, maintained by bind_evtchn()/unbind_evtchn(). */
37 static unsigned long bound_ports[NR_EVS/(8*sizeof(unsigned long))];
39 void unbind_all_ports(void)
40 {
41 int i;
42 int cpu = 0;
43 shared_info_t *s = HYPERVISOR_shared_info;
44 vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
45 int rc;
47 for ( i = 0; i < NR_EVS; i++ )
48 {
49 if ( i == start_info.console.domU.evtchn ||
50 i == start_info.store_evtchn)
51 continue;
53 if ( test_and_clear_bit(i, bound_ports) )
54 {
55 struct evtchn_close close;
56 printk("port %d still bound!\n", i);
57 mask_evtchn(i);
58 close.port = i;
59 rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
60 if ( rc )
61 printk("WARN: close_port %s failed rc=%d. ignored\n", i, rc);
62 clear_evtchn(i);
63 }
64 }
65 vcpu_info->evtchn_upcall_pending = 0;
66 vcpu_info->evtchn_pending_sel = 0;
67 }
69 /*
70 * Demux events to different handlers.
71 */
72 int do_event(evtchn_port_t port, struct pt_regs *regs)
73 {
74 ev_action_t *action;
76 clear_evtchn(port);
78 if ( port >= NR_EVS )
79 {
80 printk("WARN: do_event(): Port number too large: %d\n", port);
81 return 1;
82 }
84 action = &ev_actions[port];
85 action->count++;
87 /* call the handler */
88 action->handler(port, regs, action->data);
90 return 1;
92 }
94 evtchn_port_t bind_evtchn(evtchn_port_t port, evtchn_handler_t handler,
95 void *data)
96 {
97 if ( ev_actions[port].handler != default_handler )
98 printk("WARN: Handler for port %d already registered, replacing\n",
99 port);
101 ev_actions[port].data = data;
102 wmb();
103 ev_actions[port].handler = handler;
104 set_bit(port, bound_ports);
106 return port;
107 }
109 void unbind_evtchn(evtchn_port_t port )
110 {
111 struct evtchn_close close;
112 int rc;
114 if ( ev_actions[port].handler == default_handler )
115 printk("WARN: No handler for port %d when unbinding\n", port);
116 mask_evtchn(port);
117 clear_evtchn(port);
119 ev_actions[port].handler = default_handler;
120 wmb();
121 ev_actions[port].data = NULL;
122 clear_bit(port, bound_ports);
124 close.port = port;
125 rc = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
126 if ( rc )
127 printk("WARN: close_port %s failed rc=%d. ignored\n", port, rc);
129 }
131 evtchn_port_t bind_virq(uint32_t virq, evtchn_handler_t handler, void *data)
132 {
133 evtchn_bind_virq_t op;
134 int rc;
136 /* Try to bind the virq to a port */
137 op.virq = virq;
138 op.vcpu = smp_processor_id();
140 if ( (rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &op)) != 0 )
141 {
142 printk("Failed to bind virtual IRQ %d with rc=%d\n", virq, rc);
143 return -1;
144 }
145 bind_evtchn(op.port, handler, data);
146 return op.port;
147 }
149 evtchn_port_t bind_pirq(uint32_t pirq, int will_share,
150 evtchn_handler_t handler, void *data)
151 {
152 evtchn_bind_pirq_t op;
153 int rc;
155 /* Try to bind the pirq to a port */
156 op.pirq = pirq;
157 op.flags = will_share ? BIND_PIRQ__WILL_SHARE : 0;
159 if ( (rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &op)) != 0 )
160 {
161 printk("Failed to bind physical IRQ %d with rc=%d\n", pirq, rc);
162 return -1;
163 }
164 bind_evtchn(op.port, handler, data);
165 return op.port;
166 }
168 #if defined(__x86_64__)
/* Dedicated interrupt stack; init_events() aligns its top to STACK_SIZE. */
169 char irqstack[2 * STACK_SIZE];
/* Minimal per-cpu data area; init_events() installs its address in
   MSR_GS_BASE.  Field offsets are relied on by assembly — do not reorder. */
171 static struct pda
172 {
173 int irqcount; /* offset 0 (used in x86_64.S) */
174 char *irqstackptr; /* 8 */
175 } cpu0_pda;
176 #endif
178 /*
179 * Initially all events are without a handler and disabled
180 */
181 void init_events(void)
182 {
183 int i;
184 #if defined(__x86_64__)
185 asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
186 wrmsrl(0xc0000101, &cpu0_pda); /* 0xc0000101 is MSR_GS_BASE */
187 cpu0_pda.irqcount = -1;
188 cpu0_pda.irqstackptr = (void*) (((unsigned long)irqstack + 2 * STACK_SIZE)
189 & ~(STACK_SIZE - 1));
190 #endif
191 /* initialize event handler */
192 for ( i = 0; i < NR_EVS; i++ )
193 {
194 ev_actions[i].handler = default_handler;
195 mask_evtchn(i);
196 }
197 }
/* Tear down event handling: close all remaining bound ports and, on
   x86_64, drop the per-cpu data pointer installed by init_events(). */
199 void fini_events(void)
200 {
201 /* Dealloc all events */
202 unbind_all_ports();
203 #if defined(__x86_64__)
204 wrmsrl(0xc0000101, NULL); /* 0xc0000101 is MSR_GS_BASE */
205 #endif
206 }
/* Fallback handler installed on every port by init_events(): just logs
   that an event arrived.  The data argument is unused. */
208 void default_handler(evtchn_port_t port, struct pt_regs *regs, void *ignore)
209 {
210 printk("[Port %d] - event received\n", port);
211 }
213 /* Create a port available to the pal for exchanging notifications.
214 Returns the result of the hypervisor call. */
216 /* Unfortunate confusion of terminology: the port is unbound as far
217 as Xen is concerned, but we automatically bind a handler to it
218 from inside mini-os. */
220 int evtchn_alloc_unbound(domid_t pal, evtchn_handler_t handler,
221 void *data, evtchn_port_t *port)
222 {
223 int rc;
225 evtchn_alloc_unbound_t op;
226 op.dom = DOMID_SELF;
227 op.remote_dom = pal;
228 rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &op);
229 if ( rc )
230 {
231 printk("ERROR: alloc_unbound failed with rc=%d", rc);
232 return rc;
233 }
234 *port = bind_evtchn(op.port, handler, data);
235 return rc;
236 }
238 /* Connect to a port so as to allow the exchange of notifications with
239 the pal. Returns the result of the hypervisor call. */
241 int evtchn_bind_interdomain(domid_t pal, evtchn_port_t remote_port,
242 evtchn_handler_t handler, void *data,
243 evtchn_port_t *local_port)
244 {
245 int rc;
246 evtchn_port_t port;
247 evtchn_bind_interdomain_t op;
248 op.remote_dom = pal;
249 op.remote_port = remote_port;
250 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &op);
251 if ( rc )
252 {
253 printk("ERROR: bind_interdomain failed with rc=%d", rc);
254 return rc;
255 }
256 port = op.local_port;
257 *local_port = bind_evtchn(port, handler, data);
258 return rc;
259 }
261 /*
262 * Local variables:
263 * mode: C
264 * c-set-style: "BSD"
265 * c-basic-offset: 4
266 * tab-width: 4
267 * indent-tabs-mode: nil
268 * End:
269 */