direct-io.hg

view linux-2.6-xen-sparse/drivers/xen/evtchn/evtchn.c @ 11672:7e79259c2c17

[LINUX] Make evtchn device use a dynamic minor number.

Also update the code in tools to create the device node if udev fails.
The tools now read sysfs to find the minor number they need.

Original patch from Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Fri Sep 29 14:20:52 2006 +0100 (2006-09-29)
parents cede50ca1704
children e47738923a05
line source
1 /******************************************************************************
2 * evtchn.c
3 *
4 * Driver for receiving and demuxing event-channel signals.
5 *
6 * Copyright (c) 2004-2005, K A Fraser
7 * Multi-process extensions Copyright (c) 2004, Steven Smith
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
21 *
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/string.h>
40 #include <linux/errno.h>
41 #include <linux/fs.h>
42 #include <linux/errno.h>
43 #include <linux/miscdevice.h>
44 #include <linux/major.h>
45 #include <linux/proc_fs.h>
46 #include <linux/stat.h>
47 #include <linux/poll.h>
48 #include <linux/irq.h>
49 #include <linux/init.h>
50 #include <linux/gfp.h>
51 #include <xen/evtchn.h>
52 #include <xen/public/evtchn.h>
/* Per-open-file state for /dev/xen/evtchn. */
struct per_user_data {
	/* Notification ring, accessed via /dev/xen/evtchn. */
/* One page of ports; size must be a power of two for the mask below. */
#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
	evtchn_port_t *ring;
	/* Free-running producer/consumer counters; overflow flag is sticky
	 * until IOCTL_EVTCHN_RESET. */
	unsigned int ring_cons, ring_prod, ring_overflow;

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
};

/* Who's bound to each port? */
static struct per_user_data *port_user[NR_EVENT_CHANNELS];
/* Protects port_user[] and each user's notification ring. */
static spinlock_t port_user_lock;
/*
 * Event-channel upcall for ports owned by this device.  Masks and clears
 * the port, then queues it on the owning process's notification ring (if
 * any) and wakes any readers.  Called from event-delivery context.
 */
void evtchn_device_upcall(int port)
{
	struct per_user_data *u;

	spin_lock(&port_user_lock);

	/* Mask so no further upcalls arrive for this port until userspace
	 * re-enables it via write(); clear the pending bit now. */
	mask_evtchn(port);
	clear_evtchn(port);

	if ((u = port_user[port]) != NULL) {
		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
			/* Wake readers only on the empty->non-empty edge. */
			if (u->ring_cons == u->ring_prod++) {
				wake_up_interruptible(&u->evtchn_wait);
				kill_fasync(&u->evtchn_async_queue,
					    SIGIO, POLL_IN);
			}
		} else {
			/* Ring full: readers see -EFBIG until a RESET ioctl. */
			u->ring_overflow = 1;
		}
	}

	spin_unlock(&port_user_lock);
}
/*
 * read(): return a batch of pending ports as an array of evtchn_port_t.
 * Blocks while the ring is empty unless O_NONBLOCK is set.  Returns the
 * number of bytes copied, 0 if the buffer holds less than one port,
 * -EFBIG after a ring overflow (sticky until IOCTL_EVTCHN_RESET), or
 * -EAGAIN / -EFAULT / -ERESTARTSYS.
 */
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	/* The ring itself is only one page; never copy more than that. */
	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	/* Wait for data (or report overflow).  Snapshot cons/prod into
	 * c/p so the chunk arithmetic below works on stable values. */
	for (;;) {
		if (u->ring_overflow)
			return -EFBIG;

		if ((c = u->ring_cons) != (p = u->ring_prod))
			break;

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(
			u->evtchn_wait, u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		/* Producer has wrapped past the consumer: tail then head. */
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
			sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		return -EFAULT;

	/* Retire the copied entries; counters are free-running. */
	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);

	return bytes1 + bytes2;
}
155 static ssize_t evtchn_write(struct file *file, const char __user *buf,
156 size_t count, loff_t *ppos)
157 {
158 int rc, i;
159 evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
160 struct per_user_data *u = file->private_data;
162 if (kbuf == NULL)
163 return -ENOMEM;
165 /* Whole number of ports. */
166 count &= ~(sizeof(evtchn_port_t)-1);
168 if (count == 0) {
169 rc = 0;
170 goto out;
171 }
173 if (count > PAGE_SIZE)
174 count = PAGE_SIZE;
176 if (copy_from_user(kbuf, buf, count) != 0) {
177 rc = -EFAULT;
178 goto out;
179 }
181 spin_lock_irq(&port_user_lock);
182 for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
183 if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
184 unmask_evtchn(kbuf[i]);
185 spin_unlock_irq(&port_user_lock);
187 rc = count;
189 out:
190 free_page((unsigned long)kbuf);
191 return rc;
192 }
/*
 * Record ownership of a freshly bound port and unmask it so upcalls can
 * be delivered.  The port must not already be bound to any user (the
 * hypervisor just allocated it, so a duplicate is a kernel bug).
 */
static void evtchn_bind_to_user(struct per_user_data *u, int port)
{
	spin_lock_irq(&port_user_lock);
	BUG_ON(port_user[port] != NULL);
	port_user[port] = u;
	unmask_evtchn(port);
	spin_unlock_irq(&port_user_lock);
}
/*
 * ioctl() multiplexer for binding, unbinding and notifying event-channel
 * ports.  The BIND_* commands return the new local port number (>= 0) on
 * success; every command returns a negative errno on failure.
 */
static int evtchn_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		/* Bind a virtual IRQ and take ownership of the new port. */
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		/* VIRQs are always bound on VCPU0 by this driver. */
		bind_virq.vcpu = 0;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = bind_virq.port;
		evtchn_bind_to_user(u, rc);
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		/* Connect to a port offered by a remote domain. */
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_interdomain.remote_dom = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = bind_interdomain.local_port;
		evtchn_bind_to_user(u, rc);
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		/* Allocate a fresh port that a remote domain may bind to. */
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = alloc_unbound.port;
		evtchn_bind_to_user(u, rc);
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		/* Drop ownership and mask under the lock, then close the
		 * port with Xen once no upcall can touch it. */
		struct ioctl_evtchn_unbind unbind;
		struct evtchn_close close;
		int ret;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= NR_EVENT_CHANNELS)
			break;

		spin_lock_irq(&port_user_lock);

		rc = -ENOTCONN;
		if (port_user[unbind.port] != u) {
			spin_unlock_irq(&port_user_lock);
			break;
		}

		port_user[unbind.port] = NULL;
		mask_evtchn(unbind.port);

		spin_unlock_irq(&port_user_lock);

		close.port = unbind.port;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
		BUG_ON(ret);

		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		/* Send an event on a port owned by this file. */
		struct ioctl_evtchn_notify notify;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		/* NOTE(review): port_user[] is read here without taking
		 * port_user_lock, unlike every other path — a concurrent
		 * UNBIND/release could race with the check; confirm
		 * whether this is intentional. */
		if (notify.port >= NR_EVENT_CHANNELS) {
			rc = -EINVAL;
		} else if (port_user[notify.port] != u) {
			rc = -ENOTCONN;
		} else {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		spin_lock_irq(&port_user_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&port_user_lock);
		rc = 0;
		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}

	return rc;
}
340 static unsigned int evtchn_poll(struct file *file, poll_table *wait)
341 {
342 unsigned int mask = POLLOUT | POLLWRNORM;
343 struct per_user_data *u = file->private_data;
345 poll_wait(file, &u->evtchn_wait, wait);
346 if (u->ring_cons != u->ring_prod)
347 mask |= POLLIN | POLLRDNORM;
348 if (u->ring_overflow)
349 mask = POLLERR;
350 return mask;
351 }
353 static int evtchn_fasync(int fd, struct file *filp, int on)
354 {
355 struct per_user_data *u = filp->private_data;
356 return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
357 }
359 static int evtchn_open(struct inode *inode, struct file *filp)
360 {
361 struct per_user_data *u;
363 if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
364 return -ENOMEM;
366 memset(u, 0, sizeof(*u));
367 init_waitqueue_head(&u->evtchn_wait);
369 u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
370 if (u->ring == NULL) {
371 kfree(u);
372 return -ENOMEM;
373 }
375 filp->private_data = u;
377 return 0;
378 }
/*
 * release(): tear down every port still bound to this file.  The lock is
 * held for the whole sweep so the upcall handler can never observe a port
 * that maps to half-destroyed state.
 */
static int evtchn_release(struct inode *inode, struct file *filp)
{
	int i;
	struct per_user_data *u = filp->private_data;
	struct evtchn_close close;

	spin_lock_irq(&port_user_lock);

	/* Freeing the ring under the lock is safe: any port still mapped
	 * to u is unhooked below before the lock is dropped. */
	free_page((unsigned long)u->ring);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		int ret;
		if (port_user[i] != u)
			continue;

		port_user[i] = NULL;
		mask_evtchn(i);

		/* NOTE(review): hypercall issued with the spinlock held and
		 * IRQs off — presumably EVTCHNOP_close never sleeps; confirm. */
		close.port = i;
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
		BUG_ON(ret);
	}

	spin_unlock_irq(&port_user_lock);

	kfree(u);

	return 0;
}
/* File operations exposed by the event-channel device node. */
static struct file_operations evtchn_fops = {
	.owner = THIS_MODULE,
	.read = evtchn_read,
	.write = evtchn_write,
	.ioctl = evtchn_ioctl,
	.poll = evtchn_poll,
	.fasync = evtchn_fasync,
	.open = evtchn_open,
	.release = evtchn_release,
};

/* Registered as a misc device so the minor number is assigned
 * dynamically; userspace discovers it through sysfs. */
static struct miscdevice evtchn_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "evtchn",
	.fops = &evtchn_fops,
};
427 static int __init evtchn_init(void)
428 {
429 int err;
431 if (!is_running_on_xen())
432 return -ENODEV;
434 spin_lock_init(&port_user_lock);
435 memset(port_user, 0, sizeof(port_user));
437 /* Create '/dev/misc/evtchn'. */
438 err = misc_register(&evtchn_miscdev);
439 if (err != 0) {
440 printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
441 return err;
442 }
444 printk("Event-channel device installed.\n");
446 return 0;
447 }
/* Module unload: remove the misc device.  All open files have been
 * released by this point, so no per-user state remains. */
static void evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("Dual BSD/GPL");