ia64/linux-2.6.18-xen.hg

view fs/fuse/inode.c @ 524:7f8b544237bf

netfront: Allow netfront in domain 0.

This is useful if your physical network device is in a utility domain.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Apr 15 15:18:58 2008 +0100 (2008-04-15)
parents 3e8752eb6d9c
children
line source
1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
9 #include "fuse_i.h"
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/file.h>
14 #include <linux/seq_file.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/parser.h>
18 #include <linux/statfs.h>
19 #include <linux/random.h>
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");

/* Slab cache for struct fuse_inode objects (created in fuse_fs_init()) */
static kmem_cache_t *fuse_inode_cachep;
/* List of all active connections; protected by fuse_mutex */
struct list_head fuse_conn_list;
/* Guards fuse_conn_list and the device-file -> connection binding */
DEFINE_MUTEX(fuse_mutex);

/* "FUSE" in little-endian ASCII */
#define FUSE_SUPER_MAGIC 0x65735546
/*
 * Options parsed from the mount data string by parse_fuse_opt().
 * The *_present bits record which of the four mandatory options
 * (fd, rootmode, user_id, group_id) were actually seen.
 */
struct fuse_mount_data {
	int fd;			/* fd of an opened /dev/fuse file */
	unsigned rootmode;	/* mode of the root inode (octal option) */
	unsigned user_id;	/* uid owning the mount */
	unsigned group_id;	/* gid owning the mount */
	unsigned fd_present : 1;
	unsigned rootmode_present : 1;
	unsigned user_id_present : 1;
	unsigned group_id_present : 1;
	unsigned flags;		/* FUSE_DEFAULT_PERMISSIONS / FUSE_ALLOW_OTHER */
	unsigned max_read;	/* max read size; ~0 means unlimited */
};
44 static struct inode *fuse_alloc_inode(struct super_block *sb)
45 {
46 struct inode *inode;
47 struct fuse_inode *fi;
49 inode = kmem_cache_alloc(fuse_inode_cachep, SLAB_KERNEL);
50 if (!inode)
51 return NULL;
53 fi = get_fuse_inode(inode);
54 fi->i_time = 0;
55 fi->nodeid = 0;
56 fi->nlookup = 0;
57 fi->forget_req = fuse_request_alloc();
58 if (!fi->forget_req) {
59 kmem_cache_free(fuse_inode_cachep, inode);
60 return NULL;
61 }
63 return inode;
64 }
66 static void fuse_destroy_inode(struct inode *inode)
67 {
68 struct fuse_inode *fi = get_fuse_inode(inode);
69 if (fi->forget_req)
70 fuse_request_free(fi->forget_req);
71 kmem_cache_free(fuse_inode_cachep, inode);
72 }
/*
 * ->read_inode is never used for real work: FUSE instantiates inodes
 * exclusively through fuse_iget(), so this hook is intentionally empty.
 */
static void fuse_read_inode(struct inode *inode)
{
	/* No op */
}
79 void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
80 unsigned long nodeid, u64 nlookup)
81 {
82 struct fuse_forget_in *inarg = &req->misc.forget_in;
83 inarg->nlookup = nlookup;
84 req->in.h.opcode = FUSE_FORGET;
85 req->in.h.nodeid = nodeid;
86 req->in.numargs = 1;
87 req->in.args[0].size = sizeof(struct fuse_forget_in);
88 req->in.args[0].value = inarg;
89 request_send_noreply(fc, req);
90 }
92 static void fuse_clear_inode(struct inode *inode)
93 {
94 if (inode->i_sb->s_flags & MS_ACTIVE) {
95 struct fuse_conn *fc = get_fuse_conn(inode);
96 struct fuse_inode *fi = get_fuse_inode(inode);
97 fuse_send_forget(fc, fi->forget_req, fi->nodeid, fi->nlookup);
98 fi->forget_req = NULL;
99 }
100 }
102 static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
103 {
104 if (*flags & MS_MANDLOCK)
105 return -EINVAL;
107 return 0;
108 }
/*
 * Refresh the VFS inode from a fuse_attr received from the daemon
 * (lookup/getattr/... replies all funnel through here).
 */
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* If the size changed behind our back, the cached pages are stale;
	 * drop them before the new size becomes visible */
	if (S_ISREG(inode->i_mode) && i_size_read(inode) != attr->size)
		invalidate_inode_pages(inode->i_mapping);

	inode->i_ino = attr->ino;
	/* Keep the file-type bits; only permission bits may change */
	inode->i_mode = (inode->i_mode & S_IFMT) + (attr->mode & 07777);
	inode->i_nlink = attr->nlink;
	inode->i_uid = attr->uid;
	inode->i_gid = attr->gid;
	/* i_size updates are serialized under fc->lock */
	spin_lock(&fc->lock);
	i_size_write(inode, attr->size);
	spin_unlock(&fc->lock);
	inode->i_blksize = PAGE_CACHE_SIZE;
	inode->i_blocks = attr->blocks;
	inode->i_atime.tv_sec = attr->atime;
	inode->i_atime.tv_nsec = attr->atimensec;
	inode->i_mtime.tv_sec = attr->mtime;
	inode->i_mtime.tv_nsec = attr->mtimensec;
	inode->i_ctime.tv_sec = attr->ctime;
	inode->i_ctime.tv_nsec = attr->ctimensec;
}
134 static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
135 {
136 inode->i_mode = attr->mode & S_IFMT;
137 inode->i_size = attr->size;
138 if (S_ISREG(inode->i_mode)) {
139 fuse_init_common(inode);
140 fuse_init_file_inode(inode);
141 } else if (S_ISDIR(inode->i_mode))
142 fuse_init_dir(inode);
143 else if (S_ISLNK(inode->i_mode))
144 fuse_init_symlink(inode);
145 else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
146 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
147 fuse_init_common(inode);
148 init_special_inode(inode, inode->i_mode,
149 new_decode_dev(attr->rdev));
150 } else
151 BUG();
152 }
/* iget5_locked() comparison callback: match on the FUSE node id. */
static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
{
	unsigned long nodeid = *(unsigned long *) _nodeidp;

	return get_node_id(inode) == nodeid;
}
163 static int fuse_inode_set(struct inode *inode, void *_nodeidp)
164 {
165 unsigned long nodeid = *(unsigned long *) _nodeidp;
166 get_fuse_inode(inode)->nodeid = nodeid;
167 return 0;
168 }
/*
 * Look up or create the inode for @nodeid.  Every successful call
 * increments fi->nlookup; the count is handed back to the daemon via
 * FUSE_FORGET when the inode is cleared (see fuse_clear_inode()).
 * Returns NULL on allocation failure.
 */
struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
			int generation, struct fuse_attr *attr)
{
	struct inode *inode;
	struct fuse_inode *fi;
	struct fuse_conn *fc = get_fuse_conn_super(sb);
	int retried = 0;

 retry:
	inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
	if (!inode)
		return NULL;

	if ((inode->i_state & I_NEW)) {
		/* Freshly allocated inode: finish setup before unlocking */
		inode->i_flags |= S_NOATIME|S_NOCMTIME;
		inode->i_generation = generation;
		inode->i_data.backing_dev_info = &fc->bdi;
		fuse_init_inode(inode, attr);
		unlock_new_inode(inode);
	} else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
		/* The retry below allocates a fresh inode, so a second
		 * type mismatch is impossible */
		BUG_ON(retried);
		/* Inode has changed type, any I/O on the old should fail */
		make_bad_inode(inode);
		iput(inode);
		retried = 1;
		goto retry;
	}

	fi = get_fuse_inode(inode);
	fi->nlookup ++;
	fuse_change_attributes(inode, attr);
	return inode;
}
204 static void fuse_umount_begin(struct vfsmount *vfsmnt, int flags)
205 {
206 if (flags & MNT_FORCE)
207 fuse_abort_conn(get_fuse_conn_super(vfsmnt->mnt_sb));
208 }
/*
 * ->put_super: called at unmount.  Marks the connection dead, wakes
 * every sleeper that could be waiting on it, then unlinks the
 * connection from the global list and the fusectl filesystem.
 */
static void fuse_put_super(struct super_block *sb)
{
	struct fuse_conn *fc = get_fuse_conn_super(sb);

	spin_lock(&fc->lock);
	fc->connected = 0;
	fc->blocked = 0;
	spin_unlock(&fc->lock);
	/* Flush all readers on this fs */
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	wake_up_all(&fc->waitq);
	wake_up_all(&fc->blocked_waitq);
	mutex_lock(&fuse_mutex);
	list_del(&fc->entry);
	fuse_ctl_remove_conn(fc);
	mutex_unlock(&fuse_mutex);
	fuse_conn_put(fc);	/* drop the superblock's reference */
}
229 static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
230 {
231 stbuf->f_type = FUSE_SUPER_MAGIC;
232 stbuf->f_bsize = attr->bsize;
233 stbuf->f_frsize = attr->frsize;
234 stbuf->f_blocks = attr->blocks;
235 stbuf->f_bfree = attr->bfree;
236 stbuf->f_bavail = attr->bavail;
237 stbuf->f_files = attr->files;
238 stbuf->f_ffree = attr->ffree;
239 stbuf->f_namelen = attr->namelen;
240 /* fsid is left zero */
241 }
243 static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
244 {
245 struct super_block *sb = dentry->d_sb;
246 struct fuse_conn *fc = get_fuse_conn_super(sb);
247 struct fuse_req *req;
248 struct fuse_statfs_out outarg;
249 int err;
251 req = fuse_get_req(fc);
252 if (IS_ERR(req))
253 return PTR_ERR(req);
255 memset(&outarg, 0, sizeof(outarg));
256 req->in.numargs = 0;
257 req->in.h.opcode = FUSE_STATFS;
258 req->out.numargs = 1;
259 req->out.args[0].size =
260 fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
261 req->out.args[0].value = &outarg;
262 request_send(fc, req);
263 err = req->out.h.error;
264 if (!err)
265 convert_fuse_statfs(buf, &outarg.st);
266 fuse_put_request(fc, req);
267 return err;
268 }
/* Token ids for the mount option parser; paired with the match_table_t
 * patterns defined right below. */
enum {
	OPT_FD,
	OPT_ROOTMODE,
	OPT_USER_ID,
	OPT_GROUP_ID,
	OPT_DEFAULT_PERMISSIONS,
	OPT_ALLOW_OTHER,
	OPT_MAX_READ,
	OPT_ERR
};
/* match_token() patterns for the recognized mount options */
static match_table_t tokens = {
	{OPT_FD, "fd=%u"},
	{OPT_ROOTMODE, "rootmode=%o"},
	{OPT_USER_ID, "user_id=%u"},
	{OPT_GROUP_ID, "group_id=%u"},
	{OPT_DEFAULT_PERMISSIONS, "default_permissions"},
	{OPT_ALLOW_OTHER, "allow_other"},
	{OPT_MAX_READ, "max_read=%u"},
	{OPT_ERR, NULL}
};
292 static int parse_fuse_opt(char *opt, struct fuse_mount_data *d)
293 {
294 char *p;
295 memset(d, 0, sizeof(struct fuse_mount_data));
296 d->max_read = ~0;
298 while ((p = strsep(&opt, ",")) != NULL) {
299 int token;
300 int value;
301 substring_t args[MAX_OPT_ARGS];
302 if (!*p)
303 continue;
305 token = match_token(p, tokens, args);
306 switch (token) {
307 case OPT_FD:
308 if (match_int(&args[0], &value))
309 return 0;
310 d->fd = value;
311 d->fd_present = 1;
312 break;
314 case OPT_ROOTMODE:
315 if (match_octal(&args[0], &value))
316 return 0;
317 d->rootmode = value;
318 d->rootmode_present = 1;
319 break;
321 case OPT_USER_ID:
322 if (match_int(&args[0], &value))
323 return 0;
324 d->user_id = value;
325 d->user_id_present = 1;
326 break;
328 case OPT_GROUP_ID:
329 if (match_int(&args[0], &value))
330 return 0;
331 d->group_id = value;
332 d->group_id_present = 1;
333 break;
335 case OPT_DEFAULT_PERMISSIONS:
336 d->flags |= FUSE_DEFAULT_PERMISSIONS;
337 break;
339 case OPT_ALLOW_OTHER:
340 d->flags |= FUSE_ALLOW_OTHER;
341 break;
343 case OPT_MAX_READ:
344 if (match_int(&args[0], &value))
345 return 0;
346 d->max_read = value;
347 break;
349 default:
350 return 0;
351 }
352 }
354 if (!d->fd_present || !d->rootmode_present ||
355 !d->user_id_present || !d->group_id_present)
356 return 0;
358 return 1;
359 }
361 static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
362 {
363 struct fuse_conn *fc = get_fuse_conn_super(mnt->mnt_sb);
365 seq_printf(m, ",user_id=%u", fc->user_id);
366 seq_printf(m, ",group_id=%u", fc->group_id);
367 if (fc->flags & FUSE_DEFAULT_PERMISSIONS)
368 seq_puts(m, ",default_permissions");
369 if (fc->flags & FUSE_ALLOW_OTHER)
370 seq_puts(m, ",allow_other");
371 if (fc->max_read != ~0)
372 seq_printf(m, ",max_read=%u", fc->max_read);
373 return 0;
374 }
376 static struct fuse_conn *new_conn(void)
377 {
378 struct fuse_conn *fc;
380 fc = kzalloc(sizeof(*fc), GFP_KERNEL);
381 if (fc) {
382 spin_lock_init(&fc->lock);
383 atomic_set(&fc->count, 1);
384 init_waitqueue_head(&fc->waitq);
385 init_waitqueue_head(&fc->blocked_waitq);
386 INIT_LIST_HEAD(&fc->pending);
387 INIT_LIST_HEAD(&fc->processing);
388 INIT_LIST_HEAD(&fc->io);
389 INIT_LIST_HEAD(&fc->interrupts);
390 atomic_set(&fc->num_waiting, 0);
391 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
392 fc->bdi.unplug_io_fn = default_unplug_io_fn;
393 fc->reqctr = 0;
394 fc->blocked = 1;
395 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
396 }
397 return fc;
398 }
400 void fuse_conn_put(struct fuse_conn *fc)
401 {
402 if (atomic_dec_and_test(&fc->count))
403 kfree(fc);
404 }
406 struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
407 {
408 atomic_inc(&fc->count);
409 return fc;
410 }
412 static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
413 {
414 struct fuse_attr attr;
415 memset(&attr, 0, sizeof(attr));
417 attr.mode = mode;
418 attr.ino = FUSE_ROOT_ID;
419 return fuse_iget(sb, 1, 0, &attr);
420 }
/* Superblock operations.  ->read_inode is a no-op because inodes are
 * always instantiated through fuse_iget(). */
static struct super_operations fuse_super_operations = {
	.alloc_inode	= fuse_alloc_inode,
	.destroy_inode	= fuse_destroy_inode,
	.read_inode	= fuse_read_inode,
	.clear_inode	= fuse_clear_inode,
	.remount_fs	= fuse_remount_fs,
	.put_super	= fuse_put_super,
	.umount_begin	= fuse_umount_begin,
	.statfs		= fuse_statfs,
	.show_options	= fuse_show_options,
};
/*
 * Completion callback for the FUSE_INIT request, run when the daemon
 * replies.  Negotiates protocol features, then unblocks all requests
 * that were waiting for initialization to finish.
 */
static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_init_out *arg = &req->misc.init_out;

	/* A failed INIT or a major version mismatch makes the connection
	 * permanently unusable */
	if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
		fc->conn_error = 1;
	else {
		unsigned long ra_pages;

		if (arg->minor >= 6) {
			/* Protocol >= 7.6: daemon supplies readahead limit
			 * and feature flags */
			ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
			if (arg->flags & FUSE_ASYNC_READ)
				fc->async_read = 1;
			if (!(arg->flags & FUSE_POSIX_LOCKS))
				fc->no_lock = 1;
		} else {
			/* Older daemons: derive readahead from max_read and
			 * assume no POSIX lock support */
			ra_pages = fc->max_read / PAGE_CACHE_SIZE;
			fc->no_lock = 1;
		}

		fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
		fc->minor = arg->minor;
		/* Protocol < 7.5 had a fixed 4k maximum write size */
		fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
	}
	fuse_put_request(fc, req);
	fc->blocked = 0;
	wake_up_all(&fc->blocked_waitq);
}
/*
 * Queue the FUSE_INIT handshake in the background right after mount;
 * the reply is handled asynchronously by process_init_reply().
 */
static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_init_in *arg = &req->misc.init_in;

	arg->major = FUSE_KERNEL_VERSION;
	arg->minor = FUSE_KERNEL_MINOR_VERSION;
	arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS;
	req->in.h.opcode = FUSE_INIT;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
	req->out.numargs = 1;
	/* Variable length argument used for backward compatibility
	   with interface version < 7.5. Rest of init_out is zeroed
	   by do_get_request(), so a short reply is not a problem */
	req->out.argvar = 1;
	req->out.args[0].size = sizeof(struct fuse_init_out);
	req->out.args[0].value = &req->misc.init_out;
	req->end = process_init_reply;
	request_send_background(fc, req);
}
/*
 * Hand out the next connection id.  The plain (unlocked) static counter
 * is safe only because the sole caller, fuse_fill_super(), invokes this
 * while holding fuse_mutex.
 */
static u64 conn_id(void)
{
	static u64 ctr = 1;
	return ctr++;
}
492 static int fuse_fill_super(struct super_block *sb, void *data, int silent)
493 {
494 struct fuse_conn *fc;
495 struct inode *root;
496 struct fuse_mount_data d;
497 struct file *file;
498 struct dentry *root_dentry;
499 struct fuse_req *init_req;
500 int err;
502 if (sb->s_flags & MS_MANDLOCK)
503 return -EINVAL;
505 if (!parse_fuse_opt((char *) data, &d))
506 return -EINVAL;
508 sb->s_blocksize = PAGE_CACHE_SIZE;
509 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
510 sb->s_magic = FUSE_SUPER_MAGIC;
511 sb->s_op = &fuse_super_operations;
512 sb->s_maxbytes = MAX_LFS_FILESIZE;
514 file = fget(d.fd);
515 if (!file)
516 return -EINVAL;
518 if (file->f_op != &fuse_dev_operations)
519 return -EINVAL;
521 fc = new_conn();
522 if (!fc)
523 return -ENOMEM;
525 fc->flags = d.flags;
526 fc->user_id = d.user_id;
527 fc->group_id = d.group_id;
528 fc->max_read = d.max_read;
530 /* Used by get_root_inode() */
531 sb->s_fs_info = fc;
533 err = -ENOMEM;
534 root = get_root_inode(sb, d.rootmode);
535 if (!root)
536 goto err;
538 root_dentry = d_alloc_root(root);
539 if (!root_dentry) {
540 iput(root);
541 goto err;
542 }
544 init_req = fuse_request_alloc();
545 if (!init_req)
546 goto err_put_root;
548 mutex_lock(&fuse_mutex);
549 err = -EINVAL;
550 if (file->private_data)
551 goto err_unlock;
553 fc->id = conn_id();
554 err = fuse_ctl_add_conn(fc);
555 if (err)
556 goto err_unlock;
558 list_add_tail(&fc->entry, &fuse_conn_list);
559 sb->s_root = root_dentry;
560 fc->connected = 1;
561 file->private_data = fuse_conn_get(fc);
562 mutex_unlock(&fuse_mutex);
563 /*
564 * atomic_dec_and_test() in fput() provides the necessary
565 * memory barrier for file->private_data to be visible on all
566 * CPUs after this
567 */
568 fput(file);
570 fuse_send_init(fc, init_req);
572 return 0;
574 err_unlock:
575 mutex_unlock(&fuse_mutex);
576 fuse_request_free(init_req);
577 err_put_root:
578 dput(root_dentry);
579 err:
580 fput(file);
581 fuse_conn_put(fc);
582 return err;
583 }
/* ->get_sb: fuse mounts have no backing block device, so delegate to
 * get_sb_nodev() with fuse_fill_super() doing the real work. */
static int fuse_get_sb(struct file_system_type *fs_type,
		       int flags, const char *dev_name,
		       void *raw_data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
}
/* The "fuse" filesystem type, registered in fuse_fs_init() */
static struct file_system_type fuse_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "fuse",
	.get_sb		= fuse_get_sb,
	.kill_sb	= kill_anon_super,
};
/* sysfs subsystems (fuse and fuse/connections), registered and nested
 * under fs_subsys by fuse_sysfs_init() */
static decl_subsys(fuse, NULL, NULL);
static decl_subsys(connections, NULL, NULL);
602 static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep,
603 unsigned long flags)
604 {
605 struct inode * inode = foo;
607 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
608 SLAB_CTOR_CONSTRUCTOR)
609 inode_init_once(inode);
610 }
612 static int __init fuse_fs_init(void)
613 {
614 int err;
616 err = register_filesystem(&fuse_fs_type);
617 if (err)
618 printk("fuse: failed to register filesystem\n");
619 else {
620 fuse_inode_cachep = kmem_cache_create("fuse_inode",
621 sizeof(struct fuse_inode),
622 0, SLAB_HWCACHE_ALIGN,
623 fuse_inode_init_once, NULL);
624 if (!fuse_inode_cachep) {
625 unregister_filesystem(&fuse_fs_type);
626 err = -ENOMEM;
627 }
628 }
630 return err;
631 }
/* Undo fuse_fs_init(): drop the filesystem type, then the inode cache. */
static void fuse_fs_cleanup(void)
{
	unregister_filesystem(&fuse_fs_type);
	kmem_cache_destroy(fuse_inode_cachep);
}
639 static int fuse_sysfs_init(void)
640 {
641 int err;
643 kset_set_kset_s(&fuse_subsys, fs_subsys);
644 err = subsystem_register(&fuse_subsys);
645 if (err)
646 goto out_err;
648 kset_set_kset_s(&connections_subsys, fuse_subsys);
649 err = subsystem_register(&connections_subsys);
650 if (err)
651 goto out_fuse_unregister;
653 return 0;
655 out_fuse_unregister:
656 subsystem_unregister(&fuse_subsys);
657 out_err:
658 return err;
659 }
/* Unregister the sysfs subsystems in reverse registration order. */
static void fuse_sysfs_cleanup(void)
{
	subsystem_unregister(&connections_subsys);
	subsystem_unregister(&fuse_subsys);
}
/*
 * Module init: bring up the subsystems in dependency order (filesystem
 * type + inode cache, /dev/fuse device, sysfs entries, fusectl control
 * filesystem), unwinding everything already done on any failure.
 */
static int __init fuse_init(void)
{
	int res;

	printk("fuse init (API version %i.%i)\n",
	       FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

	INIT_LIST_HEAD(&fuse_conn_list);
	res = fuse_fs_init();
	if (res)
		goto err;

	res = fuse_dev_init();
	if (res)
		goto err_fs_cleanup;

	res = fuse_sysfs_init();
	if (res)
		goto err_dev_cleanup;

	res = fuse_ctl_init();
	if (res)
		goto err_sysfs_cleanup;

	return 0;

 err_sysfs_cleanup:
	fuse_sysfs_cleanup();
 err_dev_cleanup:
	fuse_dev_cleanup();
 err_fs_cleanup:
	fuse_fs_cleanup();
 err:
	return res;
}
/* Module exit: tear everything down in reverse order of fuse_init(). */
static void __exit fuse_exit(void)
{
	printk(KERN_DEBUG "fuse exit\n");

	fuse_ctl_cleanup();
	fuse_sysfs_cleanup();
	fuse_fs_cleanup();
	fuse_dev_cleanup();
}
/* Module entry and exit points */
module_init(fuse_init);
module_exit(fuse_exit);