ia64/xen-unstable

changeset 1535:0e1219bec03c

bitkeeper revision 1.994.1.3 (40d7ff06C_ErgacoeY2JZXnEr4soPQ)

Remove PAGE_IO/pte_io hack from Linux.
author kaf24@scramble.cl.cam.ac.uk
date Tue Jun 22 09:42:30 2004 +0000 (2004-06-22)
parents 1cfa6e590f83
children 7ce227b8b740
files .rootkeys linux-2.4.26-xen-sparse/arch/xen/drivers/dom0/core.c linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h linux-2.4.26-xen-sparse/mm/memory.c linux-2.4.26-xen-sparse/mm/vmalloc.c
line diff
     1.1 --- a/.rootkeys	Tue Jun 22 08:32:33 2004 +0000
     1.2 +++ b/.rootkeys	Tue Jun 22 09:42:30 2004 +0000
     1.3 @@ -151,7 +151,6 @@ 3f108af5VxPkLv13tXpXgoRKALQtXQ linux-2.4
     1.4  3e5a4e681xMPdF9xCMwpyfuYMySU5g linux-2.4.26-xen-sparse/mm/mremap.c
     1.5  409ba2e7akOFqQUg6Qyg2s28xcXiMg linux-2.4.26-xen-sparse/mm/page_alloc.c
     1.6  3e5a4e683HKVU-sxtagrDasRB8eBVw linux-2.4.26-xen-sparse/mm/swapfile.c
     1.7 -3f108af81Thhb242EmKjGCYkjx-GJA linux-2.4.26-xen-sparse/mm/vmalloc.c
     1.8  3f776bd1Hy9rn69ntXBhPReUFw9IEA tools/Makefile
     1.9  3e6377b24eQqYMsDi9XrFkIgTzZ47A tools/balloon/Makefile
    1.10  3e6377d6eiFjF1hHIS6JEIOFk62xSA tools/balloon/README
     2.1 --- a/linux-2.4.26-xen-sparse/arch/xen/drivers/dom0/core.c	Tue Jun 22 08:32:33 2004 +0000
     2.2 +++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/dom0/core.c	Tue Jun 22 09:42:30 2004 +0000
     2.3 @@ -157,10 +157,10 @@ static int privcmd_ioctl(struct inode *i
     2.4          addr = m.addr;
     2.5          for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
     2.6          {
     2.7 -            if ( get_user(mfn, p) ) return -EFAULT;
     2.8 +            if ( get_user(mfn, p) )
     2.9 +                return -EFAULT;
    2.10  
    2.11 -            v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot) |
    2.12 -                _PAGE_IO;
    2.13 +            v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
    2.14  
    2.15              __direct_remap_area_pages(vma->vm_mm,
    2.16                                        addr, 
     3.1 --- a/linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c	Tue Jun 22 08:32:33 2004 +0000
     3.2 +++ b/linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c	Tue Jun 22 09:42:30 2004 +0000
     3.3 @@ -150,7 +150,7 @@ int direct_remap_area_pages(struct mm_st
     3.4           * Fill in the machine address: PTE ptr is done later by
     3.5           * __direct_remap_area_pages(). 
     3.6           */
     3.7 -        v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot) | _PAGE_IO;
     3.8 +        v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot);
     3.9  
    3.10          machine_addr += PAGE_SIZE;
    3.11          address += PAGE_SIZE; 
    3.12 @@ -262,8 +262,7 @@ void __init *bt_ioremap(unsigned long ma
    3.13           */
    3.14          idx = FIX_BTMAP_BEGIN;
    3.15          while (nrpages > 0) {
    3.16 -                __set_fixmap(idx, machine_addr, 
    3.17 -                             __pgprot(__PAGE_KERNEL|_PAGE_IO));
    3.18 +                __set_fixmap(idx, machine_addr, PAGE_KERNEL);
    3.19                  machine_addr += PAGE_SIZE;
    3.20                  --idx;
    3.21                  --nrpages;
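
Both this hunk and the core.c hunk above now queue the machine-address PTE value with no tag bit: the update carries only the frame address and the ordinary protection bits. A minimal sketch of the queued update, assuming the ptr/val layout of mmu_update_t from the Xen 2.x public interface (illustrative, not lifted from this tree):

    /* Sketch: one queued PTE update for a direct machine mapping.
     * val carries frame address + protection only; the ptr (PTE
     * address) field is filled in later by
     * __direct_remap_area_pages(), per the comment in the hunk. */
    mmu_update_t u;
    u.val = (machine_addr & PAGE_MASK) | pgprot_val(prot); /* no _PAGE_IO */
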
     4.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h	Tue Jun 22 08:32:33 2004 +0000
     4.2 +++ b/linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h	Tue Jun 22 09:42:30 2004 +0000
     4.3 @@ -48,7 +48,26 @@ static inline pmd_t * pmd_offset(pgd_t *
     4.4  }
     4.5  
     4.6  #define pte_same(a, b)		((a).pte_low == (b).pte_low)
     4.7 -#define pte_page(x)		(mem_map+((unsigned long)((pte_val(x) >> PAGE_SHIFT))))
     4.8 +
     4.9 +/*
    4.10 + * We detect special mappings in one of two ways:
    4.11 + *  1. If the MFN is an I/O page then Xen will set the m2p entry
    4.12 + *     to be outside our maximum possible pseudophys range.
    4.13 + *  2. If the MFN belongs to a different domain then we will certainly
    4.14 + *     not have MFN in our p2m table. Conversely, if the page is ours,
    4.15 + *     then we'll have p2m(m2p(MFN))==MFN.
    4.16 + * If we detect a special mapping then it doesn't have a 'struct page'.
    4.17 + * We force !VALID_PAGE() by returning an out-of-range pointer.
    4.18 + */
    4.19 +#define pte_page(_pte)                                        \
    4.20 +({                                                            \
    4.21 +    unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT;         \
    4.22 +    unsigned long pfn = mfn_to_pfn(mfn);                      \
    4.23 +    if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) )     \
    4.24 +        pfn = max_mapnr; /* special: force !VALID_PAGE() */   \
    4.25 +    &mem_map[pfn];                                            \
    4.26 +})
    4.27 +
    4.28  #define pte_none(x)		(!(x).pte_low)
    4.29  #define __mk_pte(page_nr,pgprot) __pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
    4.30  
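
The detection logic in the new pte_page() reads more easily as a standalone predicate. A minimal sketch, reusing the mfn_to_pfn()/pfn_to_mfn() names from the hunk above; the helper name itself is hypothetical:

    /* Hypothetical helper restating the new pte_page() test: a mapping
     * is special (I/O page or foreign frame) iff the m2p/p2m round
     * trip fails. Such a PTE has no 'struct page' of its own. */
    static inline int pte_is_special(pte_t pte)
    {
        unsigned long mfn = pte.pte_low >> PAGE_SHIFT;
        unsigned long pfn = mfn_to_pfn(mfn);       /* m2p lookup */

        /* Case 1: I/O page -- Xen points the m2p entry out of range.
         * Case 2: foreign frame -- our p2m does not map back to mfn. */
        return (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn);
    }
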
     5.1 --- a/linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h	Tue Jun 22 08:32:33 2004 +0000
     5.2 +++ b/linux-2.4.26-xen-sparse/include/asm-xen/pgtable.h	Tue Jun 22 09:42:30 2004 +0000
     5.3 @@ -116,7 +116,6 @@ extern void * high_memory;
     5.4  #define _PAGE_BIT_DIRTY		6
     5.5  #define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
     5.6  #define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
     5.7 -#define _PAGE_BIT_IO            9
     5.8  
     5.9  #define _PAGE_PRESENT	0x001
    5.10  #define _PAGE_RW	0x002
    5.11 @@ -127,7 +126,6 @@ extern void * high_memory;
    5.12  #define _PAGE_DIRTY	0x040
    5.13  #define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
    5.14  #define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
    5.15 -#define _PAGE_IO        0x200
    5.16  
    5.17  #define _PAGE_PROTNONE	0x080	/* If not present */
    5.18  
    5.19 @@ -200,7 +198,6 @@ static inline int pte_exec(pte_t pte)		{
    5.20  static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
    5.21  static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
    5.22  static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
    5.23 -static inline int pte_io(pte_t pte)		{ return (pte).pte_low & _PAGE_IO; }
    5.24  
    5.25  static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
    5.26  static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
    5.27 @@ -212,7 +209,6 @@ static inline pte_t pte_mkexec(pte_t pte
    5.28  static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
    5.29  static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
    5.30  static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
    5.31 -static inline pte_t pte_mkio(pte_t pte)		{ (pte).pte_low |= _PAGE_IO; return pte; }
    5.32  
    5.33  static inline int ptep_test_and_clear_dirty(pte_t *ptep)
    5.34  {
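
For callers, removing pte_io()/pte_mkio() means the tag-bit test is replaced by the generic page-validity test that zap_pte_range() below already performs. A sketch of the before/after idiom (the old pattern is taken from code deleted by this changeset):

    /* Old (removed): tag-bit test on the PTE itself. */
    if (pte_io(pte))
        continue;

    /* New: a special mapping surfaces as an out-of-range struct page,
     * so 2.4's VALID_PAGE() check screens it out naturally. */
    struct page *page = pte_page(pte);
    if (!VALID_PAGE(page))
        continue;
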
     6.1 --- a/linux-2.4.26-xen-sparse/mm/memory.c	Tue Jun 22 08:32:33 2004 +0000
     6.2 +++ b/linux-2.4.26-xen-sparse/mm/memory.c	Tue Jun 22 09:42:30 2004 +0000
     6.3 @@ -318,12 +318,6 @@ static inline int zap_pte_range(mmu_gath
     6.4  			continue;
     6.5  		if (pte_present(pte)) {
     6.6  			struct page *page = pte_page(pte);
     6.7 -#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
     6.8 -			if (pte_io(pte)) {
     6.9 -				queue_l1_entry_update(ptep, 0);
    6.10 -				continue;
    6.11 -			}
    6.12 -#endif
    6.13  			if (VALID_PAGE(page) && !PageReserved(page))
    6.14  				freed ++;
    6.15  			/* This will eventually call __free_pte on the pte. */
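
With the #ifdef block gone, zap_pte_range() needs no Xen-specific case: pte_page() returns &mem_map[max_mapnr] for a special mapping, so the existing VALID_PAGE() guard skips the freed-page accounting and the eventual __free_page() for it. For reference, the i386 2.4 definition (non-discontigmem; quoted from memory, treat as illustrative):

    #define VALID_PAGE(page)   ((page - mem_map) < max_mapnr)
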
     7.1 --- a/linux-2.4.26-xen-sparse/mm/vmalloc.c	Tue Jun 22 08:32:33 2004 +0000
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,388 +0,0 @@
     7.4 -/*
     7.5 - *  linux/mm/vmalloc.c
     7.6 - *
     7.7 - *  Copyright (C) 1993  Linus Torvalds
     7.8 - *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
     7.9 - *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
    7.10 - */
    7.11 -
    7.12 -#include <linux/config.h>
    7.13 -#include <linux/slab.h>
    7.14 -#include <linux/vmalloc.h>
    7.15 -#include <linux/spinlock.h>
    7.16 -#include <linux/highmem.h>
    7.17 -#include <linux/smp_lock.h>
    7.18 -
    7.19 -#include <asm/uaccess.h>
    7.20 -#include <asm/pgalloc.h>
    7.21 -
    7.22 -rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
    7.23 -struct vm_struct * vmlist;
    7.24 -
    7.25 -static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
    7.26 -{
    7.27 -	pte_t * pte;
    7.28 -	unsigned long end;
    7.29 -
    7.30 -	if (pmd_none(*pmd))
    7.31 -		return;
    7.32 -	if (pmd_bad(*pmd)) {
    7.33 -		pmd_ERROR(*pmd);
    7.34 -		pmd_clear(pmd);
    7.35 -		return;
    7.36 -	}
    7.37 -	pte = pte_offset(pmd, address);
    7.38 -	address &= ~PMD_MASK;
    7.39 -	end = address + size;
    7.40 -	if (end > PMD_SIZE)
    7.41 -		end = PMD_SIZE;
    7.42 -	do {
    7.43 -		pte_t page;
    7.44 -		page = ptep_get_and_clear(pte);
    7.45 -		address += PAGE_SIZE;
    7.46 -		pte++;
    7.47 -		if (pte_none(page))
    7.48 -			continue;
    7.49 -		if (pte_present(page)) {
    7.50 -			struct page *ptpage = pte_page(page);
    7.51 -#if defined(CONFIG_XEN_PRIVILEGED_GUEST)
    7.52 -			if (pte_io(page))
    7.53 -				continue;
    7.54 -#endif
    7.55 -			if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
    7.56 -				__free_page(ptpage);
    7.57 -			continue;
    7.58 -		}
    7.59 -		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
    7.60 -	} while (address < end);
    7.61 -}
    7.62 -
    7.63 -static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
    7.64 -{
    7.65 -	pmd_t * pmd;
    7.66 -	unsigned long end;
    7.67 -
    7.68 -	if (pgd_none(*dir))
    7.69 -		return;
    7.70 -	if (pgd_bad(*dir)) {
    7.71 -		pgd_ERROR(*dir);
    7.72 -		pgd_clear(dir);
    7.73 -		return;
    7.74 -	}
    7.75 -	pmd = pmd_offset(dir, address);
    7.76 -	address &= ~PGDIR_MASK;
    7.77 -	end = address + size;
    7.78 -	if (end > PGDIR_SIZE)
    7.79 -		end = PGDIR_SIZE;
    7.80 -	do {
    7.81 -		free_area_pte(pmd, address, end - address);
    7.82 -		address = (address + PMD_SIZE) & PMD_MASK;
    7.83 -		pmd++;
    7.84 -	} while (address < end);
    7.85 -}
    7.86 -
    7.87 -void vmfree_area_pages(unsigned long address, unsigned long size)
    7.88 -{
    7.89 -	pgd_t * dir;
    7.90 -	unsigned long end = address + size;
    7.91 -
    7.92 -	dir = pgd_offset_k(address);
    7.93 -	flush_cache_all();
    7.94 -	do {
    7.95 -		free_area_pmd(dir, address, end - address);
    7.96 -		address = (address + PGDIR_SIZE) & PGDIR_MASK;
    7.97 -		dir++;
    7.98 -	} while (address && (address < end));
    7.99 -	flush_tlb_all();
   7.100 -}
   7.101 -
   7.102 -static inline int alloc_area_pte (pte_t * pte, unsigned long address,
   7.103 -			unsigned long size, int gfp_mask,
   7.104 -			pgprot_t prot, struct page ***pages)
   7.105 -{
   7.106 -	unsigned long end;
   7.107 -
   7.108 -	address &= ~PMD_MASK;
   7.109 -	end = address + size;
   7.110 -	if (end > PMD_SIZE)
   7.111 -		end = PMD_SIZE;
   7.112 -	do {
   7.113 -		struct page * page;
   7.114 -
   7.115 -		if (!pages) {
   7.116 -			spin_unlock(&init_mm.page_table_lock);
   7.117 -			page = alloc_page(gfp_mask);
   7.118 -			spin_lock(&init_mm.page_table_lock);
   7.119 -		} else {
   7.120 -			page = (**pages);
   7.121 -			(*pages)++;
   7.122 -
   7.123 -			/* Add a reference to the page so we can free later */
   7.124 -			if (page)
   7.125 -				atomic_inc(&page->count);
   7.126 -
   7.127 -		}
   7.128 -		if (!pte_none(*pte))
   7.129 -			printk(KERN_ERR "alloc_area_pte: page already exists\n");
   7.130 -		if (!page)
   7.131 -			return -ENOMEM;
   7.132 -		set_pte(pte, mk_pte(page, prot));
   7.133 -		address += PAGE_SIZE;
   7.134 -		pte++;
   7.135 -	} while (address < end);
   7.136 -	return 0;
   7.137 -}
   7.138 -
   7.139 -static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address,
   7.140 -			unsigned long size, int gfp_mask,
   7.141 -			pgprot_t prot, struct page ***pages)
   7.142 -{
   7.143 -	unsigned long end;
   7.144 -
   7.145 -	address &= ~PGDIR_MASK;
   7.146 -	end = address + size;
   7.147 -	if (end > PGDIR_SIZE)
   7.148 -		end = PGDIR_SIZE;
   7.149 -	do {
   7.150 -		pte_t * pte = pte_alloc(&init_mm, pmd, address);
   7.151 -		if (!pte)
   7.152 -			return -ENOMEM;
   7.153 -		if (alloc_area_pte(pte, address, end - address,
   7.154 -					gfp_mask, prot, pages))
   7.155 -			return -ENOMEM;
   7.156 -		address = (address + PMD_SIZE) & PMD_MASK;
   7.157 -		pmd++;
   7.158 -	} while (address < end);
   7.159 -	return 0;
   7.160 -}
   7.161 -
   7.162 -static inline int __vmalloc_area_pages (unsigned long address,
   7.163 -					unsigned long size,
   7.164 -					int gfp_mask,
   7.165 -					pgprot_t prot,
   7.166 -					struct page ***pages)
   7.167 -{
   7.168 -	pgd_t * dir;
   7.169 -	unsigned long start = address;
   7.170 -	unsigned long end = address + size;
   7.171 -
   7.172 -	dir = pgd_offset_k(address);
   7.173 -	spin_lock(&init_mm.page_table_lock);
   7.174 -	do {
   7.175 -		pmd_t *pmd;
   7.176 -		
   7.177 -		pmd = pmd_alloc(&init_mm, dir, address);
   7.178 -		if (!pmd)
   7.179 -			goto err;
   7.180 -
   7.181 -		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
   7.182 -			goto err;	// The kernel NEVER reclaims pmds, so no need to undo pmd_alloc() here
   7.183 -
   7.184 -		address = (address + PGDIR_SIZE) & PGDIR_MASK;
   7.185 -		dir++;
   7.186 -	} while (address && (address < end));
   7.187 -	spin_unlock(&init_mm.page_table_lock);
   7.188 -	flush_cache_all();
   7.189 -	return 0;
   7.190 -err:
   7.191 -	spin_unlock(&init_mm.page_table_lock);
   7.192 -	flush_cache_all();
   7.193 -	if (address > start)
   7.194 -		vmfree_area_pages(start, address - start);
   7.195 -	return -ENOMEM;
   7.196 -}
   7.197 -
   7.198 -int vmalloc_area_pages(unsigned long address, unsigned long size,
   7.199 -		       int gfp_mask, pgprot_t prot)
   7.200 -{
   7.201 -	return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL);
   7.202 -}
   7.203 -
   7.204 -struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
   7.205 -{
   7.206 -	unsigned long addr, next;
   7.207 -	struct vm_struct **p, *tmp, *area;
   7.208 -
   7.209 -	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
   7.210 -	if (!area)
   7.211 -		return NULL;
   7.212 -
   7.213 -	size += PAGE_SIZE;
   7.214 -	if (!size) {
   7.215 -		kfree (area);
   7.216 -		return NULL;
   7.217 -	}
   7.218 -
   7.219 -	addr = VMALLOC_START;
   7.220 -	write_lock(&vmlist_lock);
   7.221 -	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
   7.222 -		if ((size + addr) < addr)
   7.223 -			goto out;
   7.224 -		if (size + addr <= (unsigned long) tmp->addr)
   7.225 -			break;
   7.226 -		next = tmp->size + (unsigned long) tmp->addr;
   7.227 -		if (next > addr) 
   7.228 -			addr = next;
   7.229 -		if (addr > VMALLOC_END-size)
   7.230 -			goto out;
   7.231 -	}
   7.232 -	area->flags = flags;
   7.233 -	area->addr = (void *)addr;
   7.234 -	area->size = size;
   7.235 -	area->next = *p;
   7.236 -	*p = area;
   7.237 -	write_unlock(&vmlist_lock);
   7.238 -	return area;
   7.239 -
   7.240 -out:
   7.241 -	write_unlock(&vmlist_lock);
   7.242 -	kfree(area);
   7.243 -	return NULL;
   7.244 -}
   7.245 -
   7.246 -void __vfree(void * addr, int free_area_pages)
   7.247 -{
   7.248 -	struct vm_struct **p, *tmp;
   7.249 -
   7.250 -	if (!addr)
   7.251 -		return;
   7.252 -	if ((PAGE_SIZE-1) & (unsigned long) addr) {
   7.253 -		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
   7.254 -		return;
   7.255 -	}
   7.256 -	write_lock(&vmlist_lock);
   7.257 -	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
   7.258 -		if (tmp->addr == addr) {
   7.259 -			*p = tmp->next;
   7.260 -			if (free_area_pages)
   7.261 -			    vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
   7.262 -			write_unlock(&vmlist_lock);
   7.263 -			kfree(tmp);
   7.264 -			return;
   7.265 -		}
   7.266 -	}
   7.267 -	write_unlock(&vmlist_lock);
   7.268 -	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
   7.269 -}
   7.270 -
   7.271 -void vfree(void * addr)
   7.272 -{
   7.273 -	__vfree(addr,1);
   7.274 -}
   7.275 -
   7.276 -void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
   7.277 -{
   7.278 -	void * addr;
   7.279 -	struct vm_struct *area;
   7.280 -
   7.281 -	size = PAGE_ALIGN(size);
   7.282 -	if (!size || (size >> PAGE_SHIFT) > num_physpages)
   7.283 -		return NULL;
   7.284 -	area = get_vm_area(size, VM_ALLOC);
   7.285 -	if (!area)
   7.286 -		return NULL;
   7.287 -	addr = area->addr;
   7.288 -	if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask,
   7.289 -				 prot, NULL)) {
   7.290 -		__vfree(addr, 0);
   7.291 -		return NULL;
   7.292 -	}
   7.293 -	return addr;
   7.294 -}
   7.295 -
   7.296 -void * vmap(struct page **pages, int count,
   7.297 -	    unsigned long flags, pgprot_t prot)
   7.298 -{
   7.299 -	void * addr;
   7.300 -	struct vm_struct *area;
   7.301 -	unsigned long size = count << PAGE_SHIFT;
   7.302 -
   7.303 -	if (!size || size > (max_mapnr << PAGE_SHIFT))
   7.304 -		return NULL;
   7.305 -	area = get_vm_area(size, flags);
   7.306 -	if (!area) {
   7.307 -		return NULL;
   7.308 -	}
   7.309 -	addr = area->addr;
   7.310 -	if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0,
   7.311 -				 prot, &pages)) {
   7.312 -		__vfree(addr, 0);
   7.313 -		return NULL;
   7.314 -	}
   7.315 -	return addr;
   7.316 -}
   7.317 -
   7.318 -long vread(char *buf, char *addr, unsigned long count)
   7.319 -{
   7.320 -	struct vm_struct *tmp;
   7.321 -	char *vaddr, *buf_start = buf;
   7.322 -	unsigned long n;
   7.323 -
   7.324 -	/* Don't allow overflow */
   7.325 -	if ((unsigned long) addr + count < count)
   7.326 -		count = -(unsigned long) addr;
   7.327 -
   7.328 -	read_lock(&vmlist_lock);
   7.329 -	for (tmp = vmlist; tmp; tmp = tmp->next) {
   7.330 -		vaddr = (char *) tmp->addr;
   7.331 -		if (addr >= vaddr + tmp->size - PAGE_SIZE)
   7.332 -			continue;
   7.333 -		while (addr < vaddr) {
   7.334 -			if (count == 0)
   7.335 -				goto finished;
   7.336 -			*buf = '\0';
   7.337 -			buf++;
   7.338 -			addr++;
   7.339 -			count--;
   7.340 -		}
   7.341 -		n = vaddr + tmp->size - PAGE_SIZE - addr;
   7.342 -		do {
   7.343 -			if (count == 0)
   7.344 -				goto finished;
   7.345 -			*buf = *addr;
   7.346 -			buf++;
   7.347 -			addr++;
   7.348 -			count--;
   7.349 -		} while (--n > 0);
   7.350 -	}
   7.351 -finished:
   7.352 -	read_unlock(&vmlist_lock);
   7.353 -	return buf - buf_start;
   7.354 -}
   7.355 -
   7.356 -long vwrite(char *buf, char *addr, unsigned long count)
   7.357 -{
   7.358 -	struct vm_struct *tmp;
   7.359 -	char *vaddr, *buf_start = buf;
   7.360 -	unsigned long n;
   7.361 -
   7.362 -	/* Don't allow overflow */
   7.363 -	if ((unsigned long) addr + count < count)
   7.364 -		count = -(unsigned long) addr;
   7.365 -
   7.366 -	read_lock(&vmlist_lock);
   7.367 -	for (tmp = vmlist; tmp; tmp = tmp->next) {
   7.368 -		vaddr = (char *) tmp->addr;
   7.369 -		if (addr >= vaddr + tmp->size - PAGE_SIZE)
   7.370 -			continue;
   7.371 -		while (addr < vaddr) {
   7.372 -			if (count == 0)
   7.373 -				goto finished;
   7.374 -			buf++;
   7.375 -			addr++;
   7.376 -			count--;
   7.377 -		}
   7.378 -		n = vaddr + tmp->size - PAGE_SIZE - addr;
   7.379 -		do {
   7.380 -			if (count == 0)
   7.381 -				goto finished;
   7.382 -			*addr = *buf;
   7.383 -			buf++;
   7.384 -			addr++;
   7.385 -			count--;
   7.386 -		} while (--n > 0);
   7.387 -	}
   7.388 -finished:
   7.389 -	read_unlock(&vmlist_lock);
   7.390 -	return buf - buf_start;
   7.391 -}
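
With pte_io() gone, the #ifdef CONFIG_XEN_PRIVILEGED_GUEST block in free_area_pte() was the only Xen-specific delta left in this file, so the sparse override is deleted and the stock 2.4.26 mm/vmalloc.c takes over. The surviving loop body is simply the deleted code minus the ifdef:

    if (pte_present(page)) {
        struct page *ptpage = pte_page(page);
        /* Special mappings now fail VALID_PAGE(), so no tag-bit
         * test is needed before freeing. */
        if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
            __free_page(ptpage);
        continue;
    }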