ia64/xen-unstable: linux-2.4.28-xen-sparse/drivers/char/mem.c @ 3251:a169836882cb

bitkeeper revision 1.1159.170.59 (41b4c2fdJ2gj_BWy27Vj3ptayZp_yg)

sync w/ head.
author    cl349@arcadians.cl.cam.ac.uk
date      Mon Dec 06 20:37:17 2004 +0000
parents   f65b65977b19
children  b2e2e9db7739
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 *
 *  MODIFIED FOR XEN by Keir Fraser, 10th July 2003.
 *  Linux running on Xen has strange semantics for /dev/mem and /dev/kmem!!
 *   1. mmap will not work on /dev/kmem
 *   2. mmap on /dev/mem interprets the 'file offset' as a machine address
 *      rather than a physical address.
 *  I don't believe anyone sane mmaps /dev/kmem, but /dev/mem is mmapped
 *  to get at memory-mapped I/O spaces (e.g. the VESA X server does this).
 *  For this to work at all we need to expect machine addresses.
 *  Reading/writing of /dev/kmem expects kernel virtual addresses, as usual.
 *  Reading/writing of /dev/mem expects 'physical addresses' as usual -- this
 *  is because /dev/mem can only read/write existing kernel mappings, which
 *  will be normal RAM, and we should present a pseudo-physical layout for
 *  all except I/O (which is the sticky case that mmap is hacked to deal with).
 */
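/*
 * Illustrative sketch (hypothetical user-space code, never built as part of
 * this driver; map_mmio is an invented helper name): how a client such as an
 * X server might mmap /dev/mem to reach a memory-mapped I/O region.  Per the
 * note above, under Xen the mmap 'file offset' is interpreted as a *machine*
 * address, which must be page-aligned.
 */
#if 0
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

static volatile unsigned char *map_mmio(unsigned long machine_addr,
                                        size_t len)
{
        /* O_SYNC requests an uncached mapping (see mmap_mem below). */
        int fd = open("/dev/mem", O_RDWR | O_SYNC);
        void *p;

        if (fd < 0)
                return 0;
        /* The file offset carries the (page-aligned) machine address. */
        p = mmap(0, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                 fd, (off_t)machine_addr);
        close(fd);      /* the mapping outlives the file descriptor */
        return (p == MAP_FAILED) ? 0 : (volatile unsigned char *)p;
}
#endif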
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/tpqic02.h>
#include <linux/ftape.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_I2C
extern int i2c_init_all(void);
#endif
#ifdef CONFIG_FB
extern void fbmem_init(void);
#endif
#ifdef CONFIG_PROM_CONSOLE
extern void prom_con_init(void);
#endif
#ifdef CONFIG_MDA_CONSOLE
extern void mda_console_init(void);
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif

static ssize_t do_write_mem(struct file *file, void *p, unsigned long realp,
                            const char *buf, size_t count, loff_t *ppos)
{
        ssize_t written;

        written = 0;
#if defined(__sparc__) || defined(__mc68000__)
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif
        if (copy_from_user(p, buf, count))
                return -EFAULT;
        written += count;
        *ppos = realp + written;
        return written;
}
/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file *file, char *buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        unsigned long end_mem;
        ssize_t read;

        end_mem = __pa(high_memory);
        if (p >= end_mem)
                return 0;
        if (count > end_mem - p)
                count = end_mem - p;
        read = 0;
#if defined(__sparc__) || defined(__mc68000__)
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif
        if (copy_to_user(buf, __va(p), count))
                return -EFAULT;
        read += count;
        *ppos = p + read;
        return read;
}
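/*
 * Illustrative sketch (hypothetical user-space code; peek_phys is an
 * invented helper name): reading physical memory through read_mem() above.
 * The file position *is* the physical address, so access is simply
 * lseek() + read() (see memory_lseek below).
 */
#if 0
#include <unistd.h>
#include <fcntl.h>

static int peek_phys(unsigned long phys_addr, void *out, size_t len)
{
        int fd = open("/dev/mem", O_RDONLY);

        if (fd < 0)
                return -1;
        /* Position the file at the physical address, then read. */
        if (lseek(fd, (off_t)phys_addr, SEEK_SET) == (off_t)-1 ||
            read(fd, out, len) != (ssize_t)len) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}
#endif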
static ssize_t write_mem(struct file *file, const char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        unsigned long end_mem;

        end_mem = __pa(high_memory);
        if (p >= end_mem)
                return 0;
        if (count > end_mem - p)
                count = end_mem - p;
        return do_write_mem(file, __va(p), p, buf, count, ppos);
}

#ifndef pgprot_noncached

/*
 * This should probably be per-architecture in <asm/pgtable.h>
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

#if defined(__i386__) || defined(__x86_64__)
        /* On PPro and successors, PCD alone doesn't always mean
           uncached because of interactions with the MTRRs. PCD | PWT
           means definitely uncached. */
        if (boot_cpu_data.x86 > 3)
                prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
        prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
#ifdef SUN3_PAGE_NOCACHE
        if (MMU_IS_SUN3)
                prot |= SUN3_PAGE_NOCACHE;
        else
#endif
        if (MMU_IS_851 || MMU_IS_030)
                prot |= _PAGE_NOCACHE030;
        /* Use no-cache mode, serialized */
        else if (MMU_IS_040 || MMU_IS_060)
                prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
#endif

        return __pgprot(prot);
}

#endif /* !pgprot_noncached */

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int noncached_address(unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        return !( test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability) )
              && addr >= __pa(high_memory);
#else
        return addr >= __pa(high_memory);
#endif
}
#if !defined(CONFIG_XEN)
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be done
         * non-cached.
         */
        if (noncached_address(offset) || (file->f_flags & O_SYNC))
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Don't try to swap out physical pages.. */
        vma->vm_flags |= VM_RESERVED;

        /*
         * Don't dump addresses that are not real memory to a core file.
         */
        if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
                vma->vm_flags |= VM_IO;

        if (remap_page_range(vma->vm_start, offset, vma->vm_end - vma->vm_start,
                             vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#elif !defined(CONFIG_XEN_PRIVILEGED_GUEST)
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        return -ENXIO;
}
#else
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

        if (!(xen_start_info.flags & SIF_PRIVILEGED))
                return -ENXIO;

        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset,
                                    vma->vm_end - vma->vm_start,
                                    vma->vm_page_prot, DOMID_IO))
                return -EAGAIN;
        return 0;
}
#endif /* CONFIG_XEN */
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                read = count;
                if (count > (unsigned long) high_memory - p)
                        read = (unsigned long) high_memory - p;

#if defined(__sparc__) || defined(__mc68000__)
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && read > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > read)
                                tmp = read;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read -= tmp;
                        count -= tmp;
                }
#endif
                if (copy_to_user(buf, (char *)p, read))
                        return -EFAULT;
                p += read;
                buf += read;
                count -= read;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return virtr + read;
}
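/*
 * Illustrative sketch (hypothetical user-space code; peek_kernel is an
 * invented helper name): reading a kernel virtual address through
 * read_kmem() above, e.g. a symbol address taken from System.map.
 * Addresses below high_memory are copied out directly; vmalloc addresses
 * go through the bounce page and vread().
 */
#if 0
#include <unistd.h>
#include <fcntl.h>

static int peek_kernel(unsigned long kernel_vaddr, void *out, size_t len)
{
        int fd = open("/dev/kmem", O_RDONLY);

        if (fd < 0)
                return -1;
        /* The file position is the kernel virtual address. */
        if (lseek(fd, (off_t)kernel_vaddr, SEEK_SET) == (off_t)-1 ||
            read(fd, out, len) != (ssize_t)len) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}
#endif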
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                wrote = do_write_mem(file, (void *)p, p, buf, wrote, ppos);

                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len && copy_from_user(kbuf, buf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}
#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char *tmp = buf;

        if (verify_area(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}

static ssize_t write_port(struct file *file, const char *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char *tmp = buf;

        if (verify_area(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp))
                        return -EFAULT;
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}
#endif
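/*
 * Illustrative sketch (hypothetical user-space code; inb_via_dev_port is an
 * invented helper name): byte-wide port I/O via read_port()/write_port()
 * above.  The file position selects the port number, and each byte read or
 * written corresponds to one inb()/outb().
 */
#if 0
#include <unistd.h>
#include <fcntl.h>

static int inb_via_dev_port(unsigned short port, unsigned char *val)
{
        int fd = open("/dev/port", O_RDONLY);   /* needs CAP_SYS_RAWIO */

        if (fd < 0)
                return -1;
        /* Seek to the port number, then read one byte (one inb()). */
        if (lseek(fd, (off_t)port, SEEK_SET) == (off_t)-1 ||
            read(fd, val, 1) != 1) {
                close(fd);
                return -1;
        }
        close(fd);
        return 0;
}
#endif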
static ssize_t read_null(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char *buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & VM_SHARED)
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(mm, addr, count);
                zeromap_page_range(addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                if (current->need_resched)
                        schedule();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}
static ssize_t read_zero(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start,
                               vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
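/*
 * Illustrative sketch (hypothetical user-space code; zalloc_pages is an
 * invented helper name): the classic use of mmap_zero() above -- mapping
 * /dev/zero MAP_PRIVATE as a portable way to obtain zero-filled anonymous
 * memory.  MAP_SHARED mappings go through shmem_zero_setup() instead.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static void *zalloc_pages(size_t len)
{
        int fd = open("/dev/zero", O_RDWR);
        void *p;

        if (fd < 0)
                return 0;
        /* Private mapping: pages appear zeroed and are copied on write. */
        p = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        close(fd);
        return (p == MAP_FAILED) ? 0 : p;
}
#endif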
static ssize_t write_full(struct file *file, const char *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        switch (orig) {
        case 0:
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        case 1:
                file->f_pos += offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}
static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address,
                            int write)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long kaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *ptep, pte;
        struct page *page = NULL;

        /* address is user VA; convert to kernel VA of desired page */
        kaddr = (address - vma->vm_start) + offset;
        kaddr = VMALLOC_VMADDR(kaddr);

        spin_lock(&init_mm.page_table_lock);

        /* Lookup page structure for kernel VA */
        pgd = pgd_offset(&init_mm, kaddr);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                goto out;
        pmd = pmd_offset(pgd, kaddr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                goto out;
        ptep = pte_offset(pmd, kaddr);
        if (!ptep)
                goto out;
        pte = *ptep;
        if (!pte_present(pte))
                goto out;
        if (write && !pte_write(pte))
                goto out;
        page = pte_page(pte);
        if (!VALID_PAGE(page)) {
                page = NULL;
                goto out;
        }

        /* Increment reference count on page */
        get_page(page);

out:
        spin_unlock(&init_mm.page_table_lock);

        return page;
}

struct vm_operations_struct kmem_vm_ops = {
        nopage: kmem_vm_nopage,
};
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;

        /*
         * If the user is not attempting to mmap a high memory address then
         * the standard mmap_mem mechanism will work. High memory addresses
         * need special handling, as remap_page_range expects a physically-
         * contiguous range of kernel addresses (such as obtained in kmalloc).
         */
        if ((offset + size) < (unsigned long) high_memory)
                return mmap_mem(file, vma);

        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be done
         * non-cached.
         */
        if (noncached_address(offset) || (file->f_flags & O_SYNC))
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Don't do anything here; "nopage" will fill the holes */
        vma->vm_ops = &kmem_vm_ops;

        /* Don't try to swap out physical pages.. */
        vma->vm_flags |= VM_RESERVED;

        /*
         * Don't dump addresses that are not real memory to a core file.
         */
        vma->vm_flags |= VM_IO;

        return 0;
}
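/*
 * Illustrative sketch (hypothetical user-space code; map_vmalloc_region is
 * an invented helper name): mmap'ing a vmalloc address through mmap_kmem()
 * above -- non-Xen builds only, since kmem_fops omits mmap under CONFIG_XEN.
 * Offsets above high_memory are not physically contiguous, so each page is
 * resolved at fault time by kmem_vm_nopage() rather than remap_page_range().
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static void *map_vmalloc_region(unsigned long vmalloc_addr, size_t len)
{
        int fd = open("/dev/kmem", O_RDWR);
        void *p;

        if (fd < 0)
                return 0;
        /* The page offset encodes the (page-aligned) kernel VA. */
        p = mmap(0, len, PROT_READ, MAP_SHARED, fd, (off_t)vmalloc_addr);
        close(fd);
        return (p == MAP_FAILED) ? 0 : p;
}
#endif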
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem

static struct file_operations mem_fops = {
        llseek: memory_lseek,
        read:   read_mem,
        write:  write_mem,
        mmap:   mmap_mem,
        open:   open_mem,
};

static struct file_operations kmem_fops = {
        llseek: memory_lseek,
        read:   read_kmem,
        write:  write_kmem,
#if !defined(CONFIG_XEN)
        mmap:   mmap_kmem,
#endif
        open:   open_kmem,
};

static struct file_operations null_fops = {
        llseek: null_lseek,
        read:   read_null,
        write:  write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
        llseek: memory_lseek,
        read:   read_port,
        write:  write_port,
        open:   open_port,
};
#endif

static struct file_operations zero_fops = {
        llseek: zero_lseek,
        read:   read_zero,
        write:  write_zero,
        mmap:   mmap_zero,
};

static struct file_operations full_fops = {
        llseek: full_lseek,
        read:   read_full,
        write:  write_full,
};
static int memory_open(struct inode *inode, struct file *filp)
{
        switch (MINOR(inode->i_rdev)) {
        case 1:
                filp->f_op = &mem_fops;
                break;
        case 2:
                filp->f_op = &kmem_fops;
                break;
        case 3:
                filp->f_op = &null_fops;
                break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
        case 4:
                filp->f_op = &port_fops;
                break;
#endif
        case 5:
                filp->f_op = &zero_fops;
                break;
        case 7:
                filp->f_op = &full_fops;
                break;
        case 8:
                filp->f_op = &random_fops;
                break;
        case 9:
                filp->f_op = &urandom_fops;
                break;
        default:
                return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}

void __init memory_devfs_register(void)
{
        /* These are never unregistered */
        static const struct {
                unsigned short minor;
                char *name;
                umode_t mode;
                struct file_operations *fops;
        } list[] = { /* list of minor devices */
                {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
                {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
                {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
                {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
                {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
                {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
                {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops}
        };
        int i;

        for (i = 0; i < (sizeof(list)/sizeof(*list)); i++)
                devfs_register(NULL, list[i].name, DEVFS_FL_NONE,
                               MEM_MAJOR, list[i].minor,
                               list[i].mode | S_IFCHR,
                               list[i].fops, NULL);
}
static struct file_operations memory_fops = {
        open:   memory_open,    /* just a selector for the real open */
};

int __init chr_dev_init(void)
{
        if (devfs_register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);
        memory_devfs_register();
        rand_initialize();
#ifdef CONFIG_I2C
        i2c_init_all();
#endif
#if defined (CONFIG_FB)
        fbmem_init();
#endif
#if defined (CONFIG_PROM_CONSOLE)
        prom_con_init();
#endif
#if defined (CONFIG_MDA_CONSOLE)
        mda_console_init();
#endif
        tty_init();
#ifdef CONFIG_M68K_PRINTER
        lp_m68k_init();
#endif
        misc_init();
#if CONFIG_QIC02_TAPE
        qic02_tape_init();
#endif
#ifdef CONFIG_FTAPE
        ftape_init();
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
        tapechar_init();
#endif
        return 0;
}

__initcall(chr_dev_init);