ia64/xen-unstable

view xen/common/event_channel.c @ 1129:6baeead2cccd

bitkeeper revision 1.749.1.2 (403e097cnc0BYoVqLwFH7-TpqyBF_w)

xc_evtchn.c:
new file
event_channel.h, event_channel.c, Xc.c, xc_private.h, xc.h:
Plumb event channels through to Python wrapper.
author kaf24@scramble.cl.cam.ac.uk
date Thu Feb 26 14:58:04 2004 +0000 (2004-02-26)
parents 9ed81ff882d4
children acd0f2cab313
line source
1 /******************************************************************************
2 * event_channel.c
3 *
4 * Event channels between domains.
5 *
6 * Copyright (c) 2003-2004, K A Fraser.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
18 #include <xeno/config.h>
19 #include <xeno/init.h>
20 #include <xeno/lib.h>
21 #include <xeno/errno.h>
22 #include <xeno/sched.h>
23 #include <xeno/event.h>
25 #include <hypervisor-ifs/hypervisor-if.h>
26 #include <hypervisor-ifs/event_channel.h>
28 #define MAX_EVENT_CHANNELS 1024
30 static int get_free_port(struct task_struct *p)
31 {
32 int max, port;
33 event_channel_t *chn;
35 max = p->max_event_channel;
36 chn = p->event_channel;
38 for ( port = 0; port < max; port++ )
39 if ( chn[port].state == ECS_FREE )
40 break;
42 if ( port == max )
43 {
44 if ( max == MAX_EVENT_CHANNELS )
45 return -ENOSPC;
47 max = (max == 0) ? 4 : (max * 2);
49 chn = kmalloc(max * sizeof(event_channel_t), GFP_KERNEL);
50 if ( unlikely(chn == NULL) )
51 return -ENOMEM;
53 memset(chn, 0, max * sizeof(event_channel_t));
55 if ( p->event_channel != NULL )
56 {
57 memcpy(chn, p->event_channel, (max/2) * sizeof(event_channel_t));
58 kfree(p->event_channel);
59 }
61 p->event_channel = chn;
62 p->max_event_channel = max;
63 }
65 return port;
66 }
68 static inline unsigned long set_event_pending(struct task_struct *p, int port)
69 {
70 if ( !test_and_set_bit(port, &p->shared_info->event_channel_pend[0]) &&
71 !test_and_set_bit(port>>5, &p->shared_info->event_channel_pend_sel) )
72 return mark_guest_event(p, _EVENT_EVTCHN);
73 return 0;
74 }
76 static inline unsigned long set_event_disc(struct task_struct *p, int port)
77 {
78 if ( !test_and_set_bit(port, &p->shared_info->event_channel_disc[0]) &&
79 !test_and_set_bit(port>>5, &p->shared_info->event_channel_disc_sel) )
80 return mark_guest_event(p, _EVENT_EVTCHN);
81 return 0;
82 }
/*
 * event_channel_open:
 * Create an event channel between open->dom1 and open->dom2 by
 * allocating one free port in each domain and cross-linking the two
 * ends, then flagging an initial pending event on both ports.
 * Privileged callers only. 'dom1' may be DOMID_SELF; 'dom2' may not,
 * and the two domains must be distinct.
 * On return the allocated ports are written to open->port1/port2.
 * Returns 0 on success or a -ve errno value.
 */
static long event_channel_open(evtchn_open_t *open)
{
    struct task_struct *p1, *p2;
    int port1 = 0, port2 = 0;
    unsigned long cpu_mask;
    domid_t dom1 = open->dom1, dom2 = open->dom2;
    long rc = 0;

    if ( !IS_PRIV(current) )
        return -EPERM;

    /* 'dom1' may be DOMID_SELF. 'dom2' cannot be. */
    if ( dom1 == DOMID_SELF )
        dom1 = current->domain;
    if ( dom2 == DOMID_SELF )
        return -EINVAL;

    /* Event channel must connect distinct domains. */
    if ( dom1 == dom2 )
        return -EINVAL;

    /*
     * Take a reference on each domain. Short-circuit evaluation means
     * p2 is only looked up if p1 was found; drop p1's reference if the
     * p2 lookup then fails.
     */
    if ( ((p1 = find_domain_by_id(dom1)) == NULL) ||
         ((p2 = find_domain_by_id(dom2)) == NULL) )
    {
        if ( p1 != NULL )
            put_task_struct(p1);
        return -ESRCH;
    }

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    if ( dom1 < dom2 )
    {
        spin_lock(&p1->event_channel_lock);
        spin_lock(&p2->event_channel_lock);
    }
    else
    {
        spin_lock(&p2->event_channel_lock);
        spin_lock(&p1->event_channel_lock);
    }

    /*
     * Allocate a port at each end. On failure the already-allocated
     * port (if any) is still ECS_FREE, so nothing needs undoing.
     */
    if ( (port1 = get_free_port(p1)) < 0 )
    {
        rc = port1;
        goto out;
    }

    if ( (port2 = get_free_port(p2)) < 0 )
    {
        rc = port2;
        goto out;
    }

    /* Cross-link the two ends of the channel. */
    p1->event_channel[port1].remote_dom  = p2;
    p1->event_channel[port1].remote_port = (u16)port2;
    p1->event_channel[port1].state       = ECS_CONNECTED;

    p2->event_channel[port2].remote_dom  = p1;
    p2->event_channel[port2].remote_port = (u16)port1;
    p2->event_channel[port2].state       = ECS_CONNECTED;

    /* Announce the new channel to both domains. */
    cpu_mask  = set_event_pending(p1, port1);
    cpu_mask |= set_event_pending(p2, port2);
    guest_event_notify(cpu_mask);

 out:
    /* Unlock order is irrelevant; only acquisition order matters. */
    spin_unlock(&p1->event_channel_lock);
    spin_unlock(&p2->event_channel_lock);

    put_task_struct(p1);
    put_task_struct(p2);

    open->port1 = port1;
    open->port2 = port2;

    return rc;
}
163 static long __event_channel_close(struct task_struct *p1, int port1)
164 {
165 struct task_struct *p2 = NULL;
166 event_channel_t *chn1, *chn2;
167 int port2;
168 unsigned long cpu_mask;
169 long rc = 0;
171 again:
172 spin_lock(&p1->event_channel_lock);
174 chn1 = p1->event_channel;
176 if ( (port1 < 0) || (port1 >= p1->max_event_channel) ||
177 (chn1[port1].state == ECS_FREE) )
178 {
179 rc = -EINVAL;
180 goto out;
181 }
183 if ( chn1[port1].state == ECS_CONNECTED )
184 {
185 if ( p2 == NULL )
186 {
187 p2 = chn1[port1].remote_dom;
188 get_task_struct(p2);
190 if ( p1->domain < p2->domain )
191 {
192 spin_lock(&p2->event_channel_lock);
193 }
194 else
195 {
196 spin_unlock(&p1->event_channel_lock);
197 spin_lock(&p2->event_channel_lock);
198 goto again;
199 }
200 }
201 else if ( p2 != chn1[port1].remote_dom )
202 {
203 rc = -EINVAL;
204 goto out;
205 }
207 chn2 = p2->event_channel;
208 port2 = chn1[port1].remote_port;
210 if ( port2 >= p2->max_event_channel )
211 BUG();
212 if ( chn2[port2].state != ECS_CONNECTED )
213 BUG();
214 if ( chn2[port2].remote_dom != p1 )
215 BUG();
217 chn2[port2].state = ECS_ZOMBIE;
218 chn2[port2].remote_dom = NULL;
219 chn2[port2].remote_port = 0xFFFF;
221 cpu_mask = set_event_disc(p1, port1);
222 cpu_mask |= set_event_disc(p2, port2);
223 guest_event_notify(cpu_mask);
224 }
226 chn1[port1].state = ECS_FREE;
227 chn1[port1].remote_dom = NULL;
228 chn1[port1].remote_port = 0xFFFF;
230 out:
231 spin_unlock(&p1->event_channel_lock);
232 put_task_struct(p1);
234 if ( p2 != NULL )
235 {
236 spin_unlock(&p2->event_channel_lock);
237 put_task_struct(p2);
238 }
240 return rc;
241 }
244 static long event_channel_close(evtchn_close_t *close)
245 {
246 struct task_struct *p;
247 long rc;
248 domid_t dom = close->dom;
250 if ( dom == DOMID_SELF )
251 dom = current->domain;
252 else if ( !IS_PRIV(current) )
253 return -EPERM;
255 if ( (p = find_domain_by_id(dom)) == NULL )
256 return -ESRCH;
258 rc = __event_channel_close(p, close->port);
260 put_task_struct(p);
261 return rc;
262 }
265 static long event_channel_send(int lport)
266 {
267 struct task_struct *lp = current, *rp;
268 int rport;
269 unsigned long cpu_mask;
271 spin_lock(&lp->event_channel_lock);
273 if ( unlikely(lport < 0) ||
274 unlikely(lport >= lp->max_event_channel) ||
275 unlikely(lp->event_channel[lport].state != ECS_CONNECTED) )
276 {
277 spin_unlock(&lp->event_channel_lock);
278 return -EINVAL;
279 }
281 rp = lp->event_channel[lport].remote_dom;
282 rport = lp->event_channel[lport].remote_port;
284 get_task_struct(rp);
286 spin_unlock(&lp->event_channel_lock);
288 cpu_mask = set_event_pending(rp, rport);
289 guest_event_notify(cpu_mask);
291 put_task_struct(rp);
293 return 0;
294 }
297 static long event_channel_status(evtchn_status_t *status)
298 {
299 struct task_struct *p;
300 domid_t dom = status->dom1;
301 int port = status->port1;
302 event_channel_t *chn;
304 if ( dom == DOMID_SELF )
305 dom = current->domain;
306 else if ( !IS_PRIV(current) )
307 return -EPERM;
309 if ( (p = find_domain_by_id(dom)) == NULL )
310 return -ESRCH;
312 spin_lock(&p->event_channel_lock);
314 chn = p->event_channel;
316 if ( (port < 0) || (port >= p->max_event_channel) )
317 {
318 spin_unlock(&p->event_channel_lock);
319 return -EINVAL;
320 }
322 switch ( chn[port].state )
323 {
324 case ECS_FREE:
325 status->status = EVTCHNSTAT_closed;
326 break;
327 case ECS_ZOMBIE:
328 status->status = EVTCHNSTAT_disconnected;
329 break;
330 case ECS_CONNECTED:
331 status->status = EVTCHNSTAT_connected;
332 status->dom2 = chn[port].remote_dom->domain;
333 status->port2 = chn[port].remote_port;
334 break;
335 default:
336 BUG();
337 }
339 spin_unlock(&p->event_channel_lock);
340 return 0;
341 }
344 long do_event_channel_op(evtchn_op_t *uop)
345 {
346 long rc;
347 evtchn_op_t op;
349 if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
350 return -EFAULT;
352 switch ( op.cmd )
353 {
354 case EVTCHNOP_open:
355 rc = event_channel_open(&op.u.open);
356 if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
357 rc = -EFAULT; /* Cleaning up here would be a mess! */
358 break;
360 case EVTCHNOP_close:
361 rc = event_channel_close(&op.u.close);
362 break;
364 case EVTCHNOP_send:
365 rc = event_channel_send(op.u.send.local_port);
366 break;
368 case EVTCHNOP_status:
369 rc = event_channel_status(&op.u.status);
370 if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
371 rc = -EFAULT;
372 break;
374 default:
375 rc = -ENOSYS;
376 break;
377 }
379 return rc;
380 }
383 void destroy_event_channels(struct task_struct *p)
384 {
385 int i;
386 if ( p->event_channel != NULL )
387 {
388 for ( i = 0; i < p->max_event_channel; i++ )
389 (void)__event_channel_close(p, i);
390 kfree(p->event_channel);
391 }
392 }