ia64/xen-unstable: xen/common/event_channel.c @ 1127:9ed81ff882d4

bitkeeper revision 1.749.1.1 (403cd19e9cL26IazEdGTvx0tHxbYqw)

event_channel.h:
    new file
sched.h, hypervisor-if.h, event_channel.c, domain.c, apic.c:
    Rewritten event-channel code.

author          kaf24@scramble.cl.cam.ac.uk
date            Wed Feb 25 16:47:26 2004 +0000
parents         45296ed1d50d
children        74915dc117d5 6baeead2cccd
/******************************************************************************
 * event_channel.c
 *
 * Event channels between domains.
 *
 * Copyright (c) 2003-2004, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <xeno/config.h>
#include <xeno/init.h>
#include <xeno/lib.h>
#include <xeno/errno.h>
#include <xeno/sched.h>
#include <xeno/event.h>

#include <hypervisor-ifs/hypervisor-if.h>
#include <hypervisor-ifs/event_channel.h>
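
/*
 * The request structures below (evtchn_open_t, evtchn_close_t, and so on)
 * are defined in the new <hypervisor-ifs/event_channel.h>. The shapes shown
 * here are a sketch inferred from how this file uses them, not the
 * authoritative header:
 *
 *   typedef struct evtchn_open {
 *       domid_t local_dom, remote_dom;    // IN: DOMID_SELF ok for local only
 *       int     local_port, remote_port;  // OUT: the allocated port pair
 *   } evtchn_open_t;
 *
 *   typedef struct evtchn_op {
 *       u32 cmd;                          // EVTCHNOP_open/close/send/status
 *       union {
 *           evtchn_open_t   open;
 *           evtchn_close_t  close;
 *           evtchn_send_t   send;
 *           evtchn_status_t status;
 *       } u;
 *   } evtchn_op_t;
 */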

#define MAX_EVENT_CHANNELS 1024

/*
 * Return the index of a free port in domain 'p', growing the port table
 * (by doubling) if every existing entry is in use. The caller must hold
 * p->event_channel_lock; the returned slot is not marked allocated here.
 */
static int get_free_port(struct task_struct *p)
{
    int max, port;
    event_channel_t *chn;

    max = p->max_event_channel;
    chn = p->event_channel;

    /* Lowest free port wins. */
    for ( port = 0; port < max; port++ )
        if ( chn[port].state == ECS_FREE )
            break;

    /* No free entry: grow the table. */
    if ( port == max )
    {
        if ( max == MAX_EVENT_CHANNELS )
            return -ENOSPC;

        max = (max == 0) ? 4 : (max * 2);

        chn = kmalloc(max * sizeof(event_channel_t), GFP_KERNEL);
        if ( unlikely(chn == NULL) )
            return -ENOMEM;

        memset(chn, 0, max * sizeof(event_channel_t));

        if ( p->event_channel != NULL )
        {
            /* 'max/2' is the old table size. */
            memcpy(chn, p->event_channel, (max/2) * sizeof(event_channel_t));
            kfree(p->event_channel);
        }

        p->event_channel     = chn;
        p->max_event_channel = max;
    }

    return port;
}
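
/*
 * Worked example of the allocation policy above: a domain's port table
 * starts empty, grows to 4 entries on first use, then doubles (8, 16,
 * 32, ...) until it reaches MAX_EVENT_CHANNELS (1024), after which
 * allocation fails with -ENOSPC. Ports are reused lowest-free-first, so
 * closing port 0 makes it the next port handed out.
 */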

/*
 * Mark 'port' pending for domain 'p'. Returns a CPU mask to pass to
 * guest_event_notify(), or zero if a notification is already outstanding.
 */
static inline unsigned long set_event_pending(struct task_struct *p, int port)
{
    if ( !test_and_set_bit(port,    &p->shared_info->event_channel_pend[0]) &&
         !test_and_set_bit(port>>5, &p->shared_info->event_channel_pend_sel) )
        return mark_guest_event(p, _EVENT_EVTCHN);
    return 0;
}

/* As above, but flag 'port' as disconnected rather than pending. */
static inline unsigned long set_event_disc(struct task_struct *p, int port)
{
    if ( !test_and_set_bit(port,    &p->shared_info->event_channel_disc[0]) &&
         !test_and_set_bit(port>>5, &p->shared_info->event_channel_disc_sel) )
        return mark_guest_event(p, _EVENT_EVTCHN);
    return 0;
}
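
/*
 * Example of the two-level scheme above for port 70: set_event_pending()
 * sets bit 70 in event_channel_pend[] (bit 6 of word 2) and bit 2
 * (70>>5) in the 32-bit selector word event_channel_pend_sel. The guest
 * scans the selector to find which 32-port word to inspect. The call to
 * mark_guest_event() is skipped when either bit was already set, since
 * the guest then already has an _EVENT_EVTCHN notification pending.
 */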

static long event_channel_open(evtchn_open_t *open)
{
    struct task_struct *lp, *rp;
    int                 lport = 0, rport = 0;
    unsigned long       cpu_mask;
    domid_t             ldom = open->local_dom, rdom = open->remote_dom;
    long                rc = 0;

    if ( !IS_PRIV(current) )
        return -EPERM;

    /* 'local_dom' may be DOMID_SELF. 'remote_dom' cannot be. */
    if ( ldom == DOMID_SELF )
        ldom = current->domain;

    /* Event channel must connect distinct domains. */
    if ( ldom == rdom )
        return -EINVAL;

    if ( ((lp = find_domain_by_id(ldom)) == NULL) ||
         ((rp = find_domain_by_id(rdom)) == NULL) )
    {
        if ( lp != NULL )
            put_task_struct(lp);
        return -ESRCH;
    }

    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
    if ( ldom < rdom )
    {
        spin_lock(&lp->event_channel_lock);
        spin_lock(&rp->event_channel_lock);
    }
    else
    {
        spin_lock(&rp->event_channel_lock);
        spin_lock(&lp->event_channel_lock);
    }

    if ( (lport = get_free_port(lp)) < 0 )
    {
        rc = lport;
        goto out;
    }

    if ( (rport = get_free_port(rp)) < 0 )
    {
        rc = rport;
        goto out;
    }

    lp->event_channel[lport].remote_dom  = rp;
    lp->event_channel[lport].remote_port = (u16)rport;
    lp->event_channel[lport].state       = ECS_CONNECTED;

    rp->event_channel[rport].remote_dom  = lp;
    rp->event_channel[rport].remote_port = (u16)lport;
    rp->event_channel[rport].state       = ECS_CONNECTED;

    /* Notify both ends that the channel is now connected. */
    cpu_mask  = set_event_pending(lp, lport);
    cpu_mask |= set_event_pending(rp, rport);
    guest_event_notify(cpu_mask);

 out:
    spin_unlock(&lp->event_channel_lock);
    spin_unlock(&rp->event_channel_lock);

    put_task_struct(lp);
    put_task_struct(rp);

    open->local_port  = lport;
    open->remote_port = rport;

    return rc;
}
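
/*
 * Hypothetical control-tool usage of EVTCHNOP_open, assuming a guest-side
 * wrapper HYPERVISOR_event_channel_op() that traps into
 * do_event_channel_op() below (the wrapper name is an assumption, not
 * taken from this changeset):
 *
 *   evtchn_op_t op;
 *   op.cmd               = EVTCHNOP_open;
 *   op.u.open.local_dom  = DOMID_SELF;   // caller must be privileged
 *   op.u.open.remote_dom = rdom;         // must name a distinct domain
 *   if ( HYPERVISOR_event_channel_op(&op) == 0 )
 *       printk("ports: local=%d remote=%d\n",
 *              op.u.open.local_port, op.u.open.remote_port);
 */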

/*
 * Close port 'lport' in domain 'lp'. If the channel is connected, the
 * remote end is moved to ECS_ZOMBIE and sent a 'disconnect' event; the
 * local entry returns to ECS_FREE. The caller retains its reference to
 * 'lp'; only the reference taken here on the remote domain is dropped.
 */
static long __event_channel_close(struct task_struct *lp, int lport)
{
    struct task_struct *rp = NULL;
    event_channel_t    *lchn, *rchn;
    int                 rport;
    unsigned long       cpu_mask;
    long                rc = 0;

 again:
    spin_lock(&lp->event_channel_lock);

    lchn = lp->event_channel;

    if ( (lport < 0) || (lport >= lp->max_event_channel) ||
         (lchn[lport].state == ECS_FREE) )
    {
        rc = -EINVAL;
        goto out;
    }

    if ( lchn[lport].state == ECS_CONNECTED )
    {
        if ( rp == NULL )
        {
            rp = lchn[lport].remote_dom;
            get_task_struct(rp);

            /* Locks must be acquired in order of increasing domain id. */
            if ( lp->domain < rp->domain )
            {
                spin_lock(&rp->event_channel_lock);
            }
            else
            {
                /* Drop our lock, take the remote's first, then retry. */
                spin_unlock(&lp->event_channel_lock);
                spin_lock(&rp->event_channel_lock);
                goto again;
            }
        }
        else if ( rp != lchn[lport].remote_dom )
        {
            /* The channel was closed and reopened while we retried. */
            rc = -EINVAL;
            goto out;
        }

        rchn  = rp->event_channel;
        rport = lchn[lport].remote_port;

        if ( rport >= rp->max_event_channel )
            BUG();
        if ( rchn[rport].state != ECS_CONNECTED )
            BUG();
        if ( rchn[rport].remote_dom != lp )
            BUG();

        rchn[rport].state       = ECS_ZOMBIE;
        rchn[rport].remote_dom  = NULL;
        rchn[rport].remote_port = 0xFFFF;

        cpu_mask  = set_event_disc(lp, lport);
        cpu_mask |= set_event_disc(rp, rport);
        guest_event_notify(cpu_mask);
    }

    lchn[lport].state       = ECS_FREE;
    lchn[lport].remote_dom  = NULL;
    lchn[lport].remote_port = 0xFFFF;

 out:
    spin_unlock(&lp->event_channel_lock);
    /*
     * NB. No put_task_struct(lp) here: the callers below own that
     * reference, and destroy_event_channels() calls us once per port.
     */

    if ( rp != NULL )
    {
        spin_unlock(&rp->event_channel_lock);
        put_task_struct(rp);
    }

    return rc;
}

static long event_channel_close(evtchn_close_t *close)
{
    struct task_struct *lp;
    int                 lport = close->local_port;
    long                rc;
    domid_t             ldom = close->local_dom;

    if ( ldom == DOMID_SELF )
        ldom = current->domain;
    else if ( !IS_PRIV(current) )
        return -EPERM;

    if ( (lp = find_domain_by_id(ldom)) == NULL )
        return -ESRCH;

    rc = __event_channel_close(lp, lport);

    put_task_struct(lp);
    return rc;
}

static long event_channel_send(int lport)
{
    struct task_struct *lp = current, *rp;
    int                 rport;
    unsigned long       cpu_mask;

    spin_lock(&lp->event_channel_lock);

    if ( unlikely(lport < 0) ||
         unlikely(lport >= lp->max_event_channel) ||
         unlikely(lp->event_channel[lport].state != ECS_CONNECTED) )
    {
        spin_unlock(&lp->event_channel_lock);
        return -EINVAL;
    }

    rp    = lp->event_channel[lport].remote_dom;
    rport = lp->event_channel[lport].remote_port;

    /* Pin the remote domain before dropping our lock. */
    get_task_struct(rp);

    spin_unlock(&lp->event_channel_lock);

    cpu_mask = set_event_pending(rp, rport);
    guest_event_notify(cpu_mask);

    put_task_struct(rp);

    return 0;
}
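
/*
 * Sketch of the fast path a guest would take to signal its peer, again
 * assuming the hypothetical HYPERVISOR_event_channel_op() wrapper:
 *
 *   evtchn_op_t op;
 *   op.cmd               = EVTCHNOP_send;
 *   op.u.send.local_port = lport;   // a port in ECS_CONNECTED state
 *   (void)HYPERVISOR_event_channel_op(&op);
 *
 * Only the sender's own table is consulted, so no domain id is passed:
 * event_channel_send() always acts on 'current'.
 */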

static long event_channel_status(evtchn_status_t *status)
{
    struct task_struct *lp;
    domid_t             ldom  = status->local_dom;
    int                 lport = status->local_port;
    event_channel_t    *lchn;

    if ( ldom == DOMID_SELF )
        ldom = current->domain;
    else if ( !IS_PRIV(current) )
        return -EPERM;

    if ( (lp = find_domain_by_id(ldom)) == NULL )
        return -ESRCH;

    spin_lock(&lp->event_channel_lock);

    lchn = lp->event_channel;

    if ( (lport < 0) || (lport >= lp->max_event_channel) )
    {
        spin_unlock(&lp->event_channel_lock);
        put_task_struct(lp);
        return -EINVAL;
    }

    switch ( lchn[lport].state )
    {
    case ECS_FREE:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_ZOMBIE:
        status->status = EVTCHNSTAT_disconnected;
        break;
    case ECS_CONNECTED:
        status->status      = EVTCHNSTAT_connected;
        status->remote_dom  = lchn[lport].remote_dom->domain;
        status->remote_port = lchn[lport].remote_port;
        break;
    default:
        BUG();
    }

    spin_unlock(&lp->event_channel_lock);
    /* Release the reference taken by find_domain_by_id(). */
    put_task_struct(lp);
    return 0;
}

long do_event_channel_op(evtchn_op_t *uop)
{
    long rc;
    evtchn_op_t op;

    if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
        return -EFAULT;

    switch ( op.cmd )
    {
    case EVTCHNOP_open:
        rc = event_channel_open(&op.u.open);
        if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;

    case EVTCHNOP_close:
        rc = event_channel_close(&op.u.close);
        break;

    case EVTCHNOP_send:
        rc = event_channel_send(op.u.send.local_port);
        break;

    case EVTCHNOP_status:
        rc = event_channel_status(&op.u.status);
        if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
            rc = -EFAULT;
        break;

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

/*
 * Tear down all of a dying domain's event channels: connected peers are
 * moved to ECS_ZOMBIE and notified, exactly as for an explicit close.
 * The (void) cast discards the -EINVAL returned for already-free ports.
 */
void destroy_event_channels(struct task_struct *p)
{
    int i;
    if ( p->event_channel != NULL )
    {
        for ( i = 0; i < p->max_event_channel; i++ )
            (void)__event_channel_close(p, i);
        kfree(p->event_channel);
    }
}