ia64/xen-unstable: linux-2.4.26-xen-sparse/drivers/char/mem.c @ 1921:24ecc060e9d7

bitkeeper revision 1.1108.21.1 (41062740xHG36OEbpVAmVX5N9WCaNw)
changeset:   make vmlinuz really stripped
author:      cl349@freefall.cl.cam.ac.uk
date:        Tue Jul 27 09:58:24 2004 +0000 (2004-07-27)
parents:     f3123052268f
children:    7eb5302d64b8 8be25c10fa1e f8ccc0daf252 b1347b2eb538
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 *
 *  MODIFIED FOR XEN by Keir Fraser, 10th July 2003.
 *  Linux running on Xen has strange semantics for /dev/mem and /dev/kmem!!
 *   1. mmap will not work on /dev/kmem
 *   2. mmap on /dev/mem interprets the 'file offset' as a machine address
 *      rather than a physical address.
 *  I don't believe anyone sane mmaps /dev/kmem, but /dev/mem is mmapped
 *  to get at memory-mapped I/O spaces (e.g. the VESA X server does this).
 *  For this to work at all we need to expect machine addresses.
 *  Reading/writing of /dev/kmem expects kernel virtual addresses, as usual.
 *  Reading/writing of /dev/mem expects 'physical addresses' as usual -- this
 *  is because /dev/mem can only read/write existing kernel mappings, which
 *  will be normal RAM, and we should present the pseudo-physical layout for
 *  all except I/O (which is the sticky case that mmap is hacked to deal with).
 */
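/*
 * Illustrative sketch (not part of this driver) of what the Xen mmap
 * semantics above mean for a user-space client.  All names and values
 * here are hypothetical; 'machine_addr' must be a page-aligned *machine*
 * address (e.g. of a framebuffer), not a pseudo-physical one:
 *
 *     int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *     volatile void *io = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                              MAP_SHARED, fd, machine_addr);
 *
 * By contrast, read()/write() on the same fd take pseudo-physical
 * addresses, as the comment above explains.
 */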
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/tpqic02.h>
#include <linux/ftape.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_I2C
extern int i2c_init_all(void);
#endif
#ifdef CONFIG_FB
extern void fbmem_init(void);
#endif
#ifdef CONFIG_PROM_CONSOLE
extern void prom_con_init(void);
#endif
#ifdef CONFIG_MDA_CONSOLE
extern void mda_console_init(void);
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif

static ssize_t do_write_mem(struct file *file, void *p, unsigned long realp,
                            const char *buf, size_t count, loff_t *ppos)
{
        ssize_t written;

        written = 0;
#if defined(__sparc__) || defined(__mc68000__)
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif
        if (copy_from_user(p, buf, count))
                return -EFAULT;
        written += count;
        *ppos += written;
        return written;
}

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char *buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        unsigned long end_mem;
        ssize_t read;

        end_mem = __pa(high_memory);
        if (p >= end_mem)
                return 0;
        if (count > end_mem - p)
                count = end_mem - p;
        read = 0;
#if defined(__sparc__) || defined(__mc68000__)
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif
        if (copy_to_user(buf, __va(p), count))
                return -EFAULT;
        read += count;
        *ppos += read;
        return read;
}
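/*
 * Usage sketch (hypothetical, user-space): read_mem() treats f_pos as a
 * (pseudo-)physical address, so a reader positions the offset first.
 * 'phys_addr' is an assumed page-aligned address below high_memory;
 * reads at or beyond __pa(high_memory) return 0 (EOF):
 *
 *     int fd = open("/dev/mem", O_RDONLY);
 *     unsigned char page[4096];
 *     ssize_t n = pread(fd, page, sizeof(page), phys_addr);
 */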
static ssize_t write_mem(struct file *file, const char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        unsigned long end_mem;

        end_mem = __pa(high_memory);
        if (p >= end_mem)
                return 0;
        if (count > end_mem - p)
                count = end_mem - p;
        return do_write_mem(file, __va(p), p, buf, count, ppos);
}

#ifndef pgprot_noncached

/*
 * This should probably be per-architecture in <asm/pgtable.h>
 */
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

#if defined(__i386__) || defined(__x86_64__)
        /* On PPro and successors, PCD alone doesn't always mean
           uncached because of interactions with the MTRRs. PCD | PWT
           means definitely uncached. */
        if (boot_cpu_data.x86 > 3)
                prot |= _PAGE_PCD | _PAGE_PWT;
#elif defined(__powerpc__)
        prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#elif defined(__mc68000__)
#ifdef SUN3_PAGE_NOCACHE
        if (MMU_IS_SUN3)
                prot |= SUN3_PAGE_NOCACHE;
        else
#endif
        if (MMU_IS_851 || MMU_IS_030)
                prot |= _PAGE_NOCACHE030;
        /* Use no-cache mode, serialized */
        else if (MMU_IS_040 || MMU_IS_060)
                prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
#endif

        return __pgprot(prot);
}

#endif /* !pgprot_noncached */

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int noncached_address(unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        return !(test_bit(X86_FEATURE_MTRR, &boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_K6_MTRR, &boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_CYRIX_ARR, &boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_CENTAUR_MCR, &boot_cpu_data.x86_capability))
                && addr >= __pa(high_memory);
#else
        return addr >= __pa(high_memory);
#endif
}

#if !defined(CONFIG_XEN)
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

        /*
         * Accessing memory above the top of what the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be
         * done non-cached.
         */
        if (noncached_address(offset) || (file->f_flags & O_SYNC))
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Don't try to swap out physical pages.. */
        vma->vm_flags |= VM_RESERVED;

        /*
         * Don't dump addresses that are not real memory to a core file.
         */
        if (offset >= __pa(high_memory) || (file->f_flags & O_SYNC))
                vma->vm_flags |= VM_IO;

        if (remap_page_range(vma->vm_start, offset, vma->vm_end - vma->vm_start,
                             vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#elif !defined(CONFIG_XEN_PRIVILEGED_GUEST)
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        return -ENXIO;
}
#else
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

        if (!(start_info.flags & SIF_PRIVILEGED))
                return -ENXIO;

        /* DONTCOPY is essential for Xen as copy_page_range is broken. */
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        if (direct_remap_area_pages(vma->vm_mm, vma->vm_start, offset,
                                    vma->vm_end - vma->vm_start, vma->vm_page_prot,
                                    (domid_t)file->private_data))
                return -EAGAIN;
        return 0;
}

static int ioctl_mem(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case _IO('M', 1):
                file->private_data = (void *)arg;
                break;
        default:
                return -ENOSYS;
        }
        return 0;
}
#endif /* CONFIG_XEN */
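/*
 * Usage sketch for the Xen-privileged path above (assumed, user-space):
 * a domain-0 tool first selects the target domain with the _IO('M', 1)
 * ioctl (the domid is stashed in file->private_data), then mmaps the
 * foreign machine frames.  'domid' and 'machine_addr' are hypothetical:
 *
 *     int fd = open("/dev/mem", O_RDWR);
 *     ioctl(fd, _IO('M', 1), (unsigned long)domid);
 *     void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, machine_addr);
 */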
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                read = count;
                if (count > (unsigned long) high_memory - p)
                        read = (unsigned long) high_memory - p;

#if defined(__sparc__) || defined(__mc68000__)
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && read > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > read)
                                tmp = read;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read -= tmp;
                        count -= tmp;
                }
#endif
                if (copy_to_user(buf, (char *)p, read))
                        return -EFAULT;
                p += read;
                buf += read;
                count -= read;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return virtr + read;
}

extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                wrote = do_write_mem(file, (void *)p, p, buf, wrote, ppos);

                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len && copy_from_user(kbuf, buf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char *tmp = buf;

        if (verify_area(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}

static ssize_t write_port(struct file *file, const char *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char *tmp = buf;

        if (verify_area(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp))
                        return -EFAULT;
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}
#endif
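/*
 * Usage sketch (hypothetical): /dev/port exposes the 64K x86 I/O-port
 * space one byte per file offset, so a CAP_SYS_RAWIO process can peek
 * a port with an ordinary pread -- here port 0x61, as an example:
 *
 *     int fd = open("/dev/port", O_RDONLY);
 *     unsigned char val;
 *     pread(fd, &val, 1, 0x61);
 */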
static ssize_t read_null(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char *buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & VM_SHARED)
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(mm, addr, count);
                zeromap_page_range(addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                if (current->need_resched)
                        schedule();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}

static ssize_t read_zero(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE * 4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE - 1) & -(unsigned long)buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}
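/*
 * Usage note (sketch, assumed fd): for reads of PAGE_SIZE*4 or more into
 * a private writable mapping, read_zero() avoids copying entirely -- it
 * remaps the buffer's pages to zero pages via read_zero_pagealigned()
 * above.  So something like
 *
 *     int fd = open("/dev/zero", O_RDONLY);
 *     char *big = malloc(1 << 20);
 *     read(fd, big, 1 << 20);
 *
 * is satisfied mostly by page-table manipulation rather than by
 * memset-style clearing of every byte.
 */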
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}

static ssize_t write_full(struct file *file, const char *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}
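/*
 * Example of what null_lseek() enables (assumed usage): fopen() with
 * mode "a" seeks to the end of the file, which used to fail on these
 * devices; null_lseek() simply pins f_pos at 0, so this now succeeds:
 *
 *     FILE *f = fopen("/dev/null", "a");
 */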
/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        switch (orig) {
        case 0:
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        case 1:
                file->f_pos += offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

struct page *kmem_vm_nopage(struct vm_area_struct *vma, unsigned long address, int write)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long kaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *ptep, pte;
        struct page *page = NULL;

        /* address is user VA; convert to kernel VA of desired page */
        kaddr = (address - vma->vm_start) + offset;
        kaddr = VMALLOC_VMADDR(kaddr);

        spin_lock(&init_mm.page_table_lock);

        /* Lookup page structure for kernel VA */
        pgd = pgd_offset(&init_mm, kaddr);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                goto out;
        pmd = pmd_offset(pgd, kaddr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                goto out;
        ptep = pte_offset(pmd, kaddr);
        if (!ptep)
                goto out;
        pte = *ptep;
        if (!pte_present(pte))
                goto out;
        if (write && !pte_write(pte))
                goto out;
        page = pte_page(pte);
        if (!VALID_PAGE(page)) {
                page = NULL;
                goto out;
        }

        /* Increment reference count on page */
        get_page(page);

out:
        spin_unlock(&init_mm.page_table_lock);

        return page;
}

struct vm_operations_struct kmem_vm_ops = {
        nopage: kmem_vm_nopage,
};

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;

        /*
         * If the user is not attempting to mmap a high memory address then
         * the standard mmap_mem mechanism will work. High memory addresses
         * need special handling, as remap_page_range expects a physically-
         * contiguous range of kernel addresses (such as obtained in kmalloc).
         */
        if ((offset + size) < (unsigned long) high_memory)
                return mmap_mem(file, vma);

        /*
         * Accessing memory above the top of what the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be
         * done non-cached.
         */
        if (noncached_address(offset) || (file->f_flags & O_SYNC))
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        /* Don't do anything here; "nopage" will fill the holes */
        vma->vm_ops = &kmem_vm_ops;

        /* Don't try to swap out physical pages.. */
        vma->vm_flags |= VM_RESERVED;

        /*
         * Don't dump addresses that are not real memory to a core file.
         */
        vma->vm_flags |= VM_IO;

        return 0;
}
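/*
 * Usage sketch (hypothetical, non-Xen builds only -- mmap_kmem is
 * compiled out under CONFIG_XEN): mapping a vmalloc'd kernel buffer
 * into user space through /dev/kmem.  The mmap offset is the kernel
 * virtual address ('kvaddr', an assumed vmalloc address above
 * high_memory); kmem_vm_nopage() then resolves each page at fault time
 * through init_mm's page tables:
 *
 *     int fd = open("/dev/kmem", O_RDWR);
 *     void *va = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, kvaddr);
 */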
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem

static struct file_operations mem_fops = {
        llseek: memory_lseek,
        read:   read_mem,
        write:  write_mem,
        mmap:   mmap_mem,
        open:   open_mem,
#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
        ioctl:  ioctl_mem,
#endif
};

static struct file_operations kmem_fops = {
        llseek: memory_lseek,
        read:   read_kmem,
        write:  write_kmem,
#if !defined(CONFIG_XEN)
        mmap:   mmap_kmem,
#endif
        open:   open_kmem,
};

static struct file_operations null_fops = {
        llseek: null_lseek,
        read:   read_null,
        write:  write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
        llseek: memory_lseek,
        read:   read_port,
        write:  write_port,
        open:   open_port,
};
#endif

static struct file_operations zero_fops = {
        llseek: zero_lseek,
        read:   read_zero,
        write:  write_zero,
        mmap:   mmap_zero,
};

static struct file_operations full_fops = {
        llseek: full_lseek,
        read:   read_full,
        write:  write_full,
};

static int memory_open(struct inode *inode, struct file *filp)
{
        switch (MINOR(inode->i_rdev)) {
        case 1:
                filp->f_op = &mem_fops;
                break;
        case 2:
                filp->f_op = &kmem_fops;
                break;
        case 3:
                filp->f_op = &null_fops;
                break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
        case 4:
                filp->f_op = &port_fops;
                break;
#endif
        case 5:
                filp->f_op = &zero_fops;
                break;
        case 7:
                filp->f_op = &full_fops;
                break;
        case 8:
                filp->f_op = &random_fops;
                break;
        case 9:
                filp->f_op = &urandom_fops;
                break;
        default:
                return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}

void __init memory_devfs_register(void)
{
        /* These are never unregistered */
        static const struct {
                unsigned short minor;
                char *name;
                umode_t mode;
                struct file_operations *fops;
        } list[] = { /* list of minor devices */
                {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
                {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
                {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
                {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
                {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
                {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
                {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops}
        };
        int i;

        for (i = 0; i < (sizeof(list)/sizeof(*list)); i++)
                devfs_register(NULL, list[i].name, DEVFS_FL_NONE,
                               MEM_MAJOR, list[i].minor,
                               list[i].mode | S_IFCHR,
                               list[i].fops, NULL);
}

static struct file_operations memory_fops = {
        open:   memory_open, /* just a selector for the real open */
};

int __init chr_dev_init(void)
{
        if (devfs_register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);
        memory_devfs_register();
        rand_initialize();
#ifdef CONFIG_I2C
        i2c_init_all();
#endif
#if defined(CONFIG_FB)
        fbmem_init();
#endif
#if defined(CONFIG_PROM_CONSOLE)
        prom_con_init();
#endif
#if defined(CONFIG_MDA_CONSOLE)
        mda_console_init();
#endif
        tty_init();
#ifdef CONFIG_M68K_PRINTER
        lp_m68k_init();
#endif
        misc_init();
#if CONFIG_QIC02_TAPE
        qic02_tape_init();
#endif
#ifdef CONFIG_FTAPE
        ftape_init();
#endif
#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
        tapechar_init();
#endif
        return 0;
}

__initcall(chr_dev_init);