ia64/xen-unstable

changeset 2208:0a0e66e9edc2

bitkeeper revision 1.1159.25.2 (411b9db3L6VLu1hZxOjZz_zsDqan8w)

Xen-specific get_unmapped_area checks.
author kaf24@scramble.cl.cam.ac.uk
date Thu Aug 12 16:41:23 2004 +0000 (2004-08-12)
parents 1ee88bcf9566
children b8884dc7fd28
files .rootkeys linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile linux-2.6.7-xen-sparse/arch/xen/i386/mm/mmap.c linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h linux-2.6.7-xen-sparse/mm/mmap.c
line diff
     1.1 --- a/.rootkeys	Thu Aug 12 15:55:07 2004 +0000
     1.2 +++ b/.rootkeys	Thu Aug 12 16:41:23 2004 +0000
     1.3 @@ -159,6 +159,7 @@ 4118cc35CbY8rfGVspF5O-7EkXBEAA linux-2.6
     1.4  40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c
     1.5  40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.7-xen-sparse/arch/xen/i386/mm/init.c
     1.6  41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.7-xen-sparse/arch/xen/i386/mm/ioremap.c
     1.7 +411b9db3oFpYQc4C-_mO2lRTcSz8UQ linux-2.6.7-xen-sparse/arch/xen/i386/mm/mmap.c
     1.8  40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.7-xen-sparse/arch/xen/i386/mm/pgtable.c
     1.9  4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.7-xen-sparse/arch/xen/i386/pci/Makefile
    1.10  4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.7-xen-sparse/arch/xen/i386/pci/direct.c
    1.11 @@ -241,6 +242,7 @@ 3f108af1ylCIm82H052FVTfXACBHrw linux-2.6
    1.12  3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.7-xen-sparse/include/asm-xen/suspend.h
    1.13  3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.7-xen-sparse/include/asm-xen/xen_proc.h
    1.14  40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.7-xen-sparse/mkbuildtree
    1.15 +411b9db3dpQAK-pcP8WwcRHZGn2eKg linux-2.6.7-xen-sparse/mm/mmap.c
    1.16  410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.7-xen-sparse/mm/page_alloc.c
    1.17  40e1b09db5mN69Ijj0X_Eol-S7dXiw tools/Make.defs
    1.18  3f776bd1Hy9rn69ntXBhPReUFw9IEA tools/Makefile
     2.1 --- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile	Thu Aug 12 15:55:07 2004 +0000
     2.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/Makefile	Thu Aug 12 16:41:23 2004 +0000
     2.3 @@ -6,7 +6,7 @@ XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
     2.4  
     2.5  CFLAGS	+= -Iarch/$(XENARCH)/mm
     2.6  
     2.7 -obj-y	:= init.o fault.o ioremap.o pgtable.o hypervisor.o
     2.8 +obj-y	:= init.o fault.o ioremap.o pgtable.o hypervisor.o mmap.o
     2.9  c-obj-y	:= extable.o pageattr.o 
    2.10  
    2.11  c-obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/mmap.c	Thu Aug 12 16:41:23 2004 +0000
     3.3 @@ -0,0 +1,60 @@
     3.4 +
     3.5 +#include <linux/slab.h>
     3.6 +#include <linux/mman.h>
     3.7 +#include <linux/init.h>
     3.8 +#include <asm/pgalloc.h>
     3.9 +
    3.10 +unsigned long
    3.11 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
    3.12 +		unsigned long len, unsigned long pgoff, unsigned long flags)
    3.13 +{
    3.14 +	struct mm_struct *mm = current->mm;
    3.15 +	struct vm_area_struct *vma;
    3.16 +	unsigned long start_addr;
    3.17 +
    3.18 +	if (len > TASK_SIZE)
    3.19 +		return -ENOMEM;
    3.20 +
    3.21 +	if (addr) {
    3.22 +		addr = PAGE_ALIGN(addr);
    3.23 +		vma = find_vma(mm, addr);
    3.24 +		if (((TASK_SIZE - len) >= addr) &&
    3.25 +		    (addr >= (FIRST_USER_PGD_NR<<PGDIR_SHIFT)) &&
    3.26 +		    (!vma || ((addr + len) <= vma->vm_start)))
    3.27 +			return addr;
    3.28 +	}
    3.29 +	start_addr = addr = mm->free_area_cache;
    3.30 +
    3.31 +full_search:
    3.32 +	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
    3.33 +		/* At this point:  (!vma || addr < vma->vm_end). */
    3.34 +		if (TASK_SIZE - len < addr) {
    3.35 +			/*
    3.36 +			 * Start a new search - just in case we missed
    3.37 +			 * some holes.
    3.38 +			 */
    3.39 +			if (start_addr != TASK_UNMAPPED_BASE) {
    3.40 +				start_addr = addr = TASK_UNMAPPED_BASE;
    3.41 +				goto full_search;
    3.42 +			}
    3.43 +			return -ENOMEM;
    3.44 +		}
    3.45 +		if (!vma || addr + len <= vma->vm_start) {
    3.46 +			/*
    3.47 +			 * Remember the place where we stopped the search:
    3.48 +			 */
    3.49 +			mm->free_area_cache = addr + len;
    3.50 +			return addr;
    3.51 +		}
    3.52 +		addr = vma->vm_end;
    3.53 +	}
    3.54 +}
    3.55 +
    3.56 +unsigned long
    3.57 +arch_check_fixed_mapping(struct file *filp, unsigned long addr,
    3.58 +		unsigned long len, unsigned long pgoff, unsigned long flags)
    3.59 +{
    3.60 +	if ( addr < (FIRST_USER_PGD_NR<<PGDIR_SHIFT) )
    3.61 +		return -EINVAL;
    3.62 +	return 0;
    3.63 +}
     4.1 --- a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Thu Aug 12 15:55:07 2004 +0000
     4.2 +++ b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable.h	Thu Aug 12 16:41:23 2004 +0000
     4.3 @@ -457,4 +457,7 @@ static inline unsigned long arbitrary_vi
     4.4  #define __HAVE_ARCH_PTE_SAME
     4.5  #include <asm-generic/pgtable.h>
     4.6  
     4.7 +#define HAVE_ARCH_UNMAPPED_AREA
     4.8 +#define HAVE_ARCH_CHECK_FIXED_MAPPING
     4.9 +
    4.10  #endif /* _I386_PGTABLE_H */
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/linux-2.6.7-xen-sparse/mm/mmap.c	Thu Aug 12 16:41:23 2004 +0000
     5.3 @@ -0,0 +1,1816 @@
     5.4 +/*
     5.5 + * mm/mmap.c
     5.6 + *
     5.7 + * Written by obz.
     5.8 + *
     5.9 + * Address space accounting code	<alan@redhat.com>
    5.10 + */
    5.11 +
    5.12 +#include <linux/slab.h>
    5.13 +#include <linux/shm.h>
    5.14 +#include <linux/mman.h>
    5.15 +#include <linux/pagemap.h>
    5.16 +#include <linux/swap.h>
    5.17 +#include <linux/syscalls.h>
    5.18 +#include <linux/init.h>
    5.19 +#include <linux/file.h>
    5.20 +#include <linux/fs.h>
    5.21 +#include <linux/personality.h>
    5.22 +#include <linux/security.h>
    5.23 +#include <linux/hugetlb.h>
    5.24 +#include <linux/profile.h>
    5.25 +#include <linux/module.h>
    5.26 +#include <linux/mount.h>
    5.27 +#include <linux/mempolicy.h>
    5.28 +#include <linux/rmap.h>
    5.29 +
    5.30 +#include <asm/uaccess.h>
    5.31 +#include <asm/pgalloc.h>
    5.32 +#include <asm/cacheflush.h>
    5.33 +#include <asm/tlb.h>
    5.34 +
    5.35 +/*
    5.36 + * WARNING: the debugging will use recursive algorithms so never enable this
    5.37 + * unless you know what you are doing.
    5.38 + */
    5.39 +#undef DEBUG_MM_RB
    5.40 +
    5.41 +/* description of effects of mapping type and prot in current implementation.
    5.42 + * this is due to the limited x86 page protection hardware.  The expected
    5.43 + * behavior is in parens:
    5.44 + *
    5.45 + * map_type	prot
    5.46 + *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
    5.47 + * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
    5.48 + *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
    5.49 + *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
    5.50 + *		
    5.51 + * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
    5.52 + *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
    5.53 + *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
    5.54 + *
    5.55 + */
    5.56 +pgprot_t protection_map[16] = {
    5.57 +	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
    5.58 +	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
    5.59 +};
    5.60 +
    5.61 +int sysctl_overcommit_memory = 0;	/* default is heuristic overcommit */
    5.62 +int sysctl_overcommit_ratio = 50;	/* default is 50% */
    5.63 +int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
    5.64 +atomic_t vm_committed_space = ATOMIC_INIT(0);
    5.65 +
    5.66 +EXPORT_SYMBOL(sysctl_overcommit_memory);
    5.67 +EXPORT_SYMBOL(sysctl_overcommit_ratio);
    5.68 +EXPORT_SYMBOL(sysctl_max_map_count);
    5.69 +EXPORT_SYMBOL(vm_committed_space);
    5.70 +
    5.71 +/*
    5.72 + * Requires inode->i_mapping->i_mmap_lock
    5.73 + */
    5.74 +static void __remove_shared_vm_struct(struct vm_area_struct *vma,
    5.75 +		struct file *file, struct address_space *mapping)
    5.76 +{
    5.77 +	if (vma->vm_flags & VM_DENYWRITE)
    5.78 +		atomic_inc(&file->f_dentry->d_inode->i_writecount);
    5.79 +	if (vma->vm_flags & VM_SHARED)
    5.80 +		mapping->i_mmap_writable--;
    5.81 +
    5.82 +	flush_dcache_mmap_lock(mapping);
    5.83 +	if (unlikely(vma->vm_flags & VM_NONLINEAR))
    5.84 +		list_del_init(&vma->shared.vm_set.list);
    5.85 +	else
    5.86 +		vma_prio_tree_remove(vma, &mapping->i_mmap);
    5.87 +	flush_dcache_mmap_unlock(mapping);
    5.88 +}
    5.89 +
    5.90 +/*
    5.91 + * Remove one vm structure and free it.
    5.92 + */
    5.93 +static void remove_vm_struct(struct vm_area_struct *vma)
    5.94 +{
    5.95 +	struct file *file = vma->vm_file;
    5.96 +
    5.97 +	if (file) {
    5.98 +		struct address_space *mapping = file->f_mapping;
    5.99 +		spin_lock(&mapping->i_mmap_lock);
   5.100 +		__remove_shared_vm_struct(vma, file, mapping);
   5.101 +		spin_unlock(&mapping->i_mmap_lock);
   5.102 +	}
   5.103 +	if (vma->vm_ops && vma->vm_ops->close)
   5.104 +		vma->vm_ops->close(vma);
   5.105 +	if (file)
   5.106 +		fput(file);
   5.107 +	anon_vma_unlink(vma);
   5.108 +	mpol_free(vma_policy(vma));
   5.109 +	kmem_cache_free(vm_area_cachep, vma);
   5.110 +}
   5.111 +
   5.112 +/*
   5.113 + *  sys_brk() for the most part doesn't need the global kernel
   5.114 + *  lock, except when an application is doing something nasty
   5.115 + *  like trying to un-brk an area that has already been mapped
   5.116 + *  to a regular file.  in this case, the unmapping will need
   5.117 + *  to invoke file system routines that need the global lock.
   5.118 + */
   5.119 +asmlinkage unsigned long sys_brk(unsigned long brk)
   5.120 +{
   5.121 +	unsigned long rlim, retval;
   5.122 +	unsigned long newbrk, oldbrk;
   5.123 +	struct mm_struct *mm = current->mm;
   5.124 +
   5.125 +	down_write(&mm->mmap_sem);
   5.126 +
   5.127 +	if (brk < mm->end_code)
   5.128 +		goto out;
   5.129 +	newbrk = PAGE_ALIGN(brk);
   5.130 +	oldbrk = PAGE_ALIGN(mm->brk);
   5.131 +	if (oldbrk == newbrk)
   5.132 +		goto set_brk;
   5.133 +
   5.134 +	/* Always allow shrinking brk. */
   5.135 +	if (brk <= mm->brk) {
   5.136 +		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
   5.137 +			goto set_brk;
   5.138 +		goto out;
   5.139 +	}
   5.140 +
   5.141 +	/* Check against rlimit.. */
   5.142 +	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
   5.143 +	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
   5.144 +		goto out;
   5.145 +
   5.146 +	/* Check against existing mmap mappings. */
   5.147 +	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
   5.148 +		goto out;
   5.149 +
   5.150 +	/* Ok, looks good - let it rip. */
   5.151 +	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
   5.152 +		goto out;
   5.153 +set_brk:
   5.154 +	mm->brk = brk;
   5.155 +out:
   5.156 +	retval = mm->brk;
   5.157 +	up_write(&mm->mmap_sem);
   5.158 +	return retval;
   5.159 +}
   5.160 +
   5.161 +#ifdef DEBUG_MM_RB
   5.162 +static int browse_rb(struct rb_root *root)
   5.163 +{
   5.164 +	int i = 0, j;
   5.165 +	struct rb_node *nd, *pn = NULL;
   5.166 +	unsigned long prev = 0, pend = 0;
   5.167 +
   5.168 +	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
   5.169 +		struct vm_area_struct *vma;
   5.170 +		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
   5.171 +		if (vma->vm_start < prev)
   5.172 +			printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
   5.173 +		if (vma->vm_start < pend)
   5.174 +			printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
   5.175 +		if (vma->vm_start > vma->vm_end)
   5.176 +			printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
   5.177 +		i++;
   5.178 +		pn = nd;
   5.179 +	}
   5.180 +	j = 0;
   5.181 +	for (nd = pn; nd; nd = rb_prev(nd)) {
   5.182 +		j++;
   5.183 +	}
   5.184 +	if (i != j)
   5.185 +		printk("backwards %d, forwards %d\n", j, i), i = 0;
   5.186 +	return i;
   5.187 +}
   5.188 +
   5.189 +void validate_mm(struct mm_struct *mm)
   5.190 +{
   5.191 +	int bug = 0;
   5.192 +	int i = 0;
   5.193 +	struct vm_area_struct *tmp = mm->mmap;
   5.194 +	while (tmp) {
   5.195 +		tmp = tmp->vm_next;
   5.196 +		i++;
   5.197 +	}
   5.198 +	if (i != mm->map_count)
   5.199 +		printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
   5.200 +	i = browse_rb(&mm->mm_rb);
   5.201 +	if (i != mm->map_count)
   5.202 +		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
   5.203 +	if (bug)
   5.204 +		BUG();
   5.205 +}
   5.206 +#else
   5.207 +#define validate_mm(mm) do { } while (0)
   5.208 +#endif
   5.209 +
   5.210 +static struct vm_area_struct *
   5.211 +find_vma_prepare(struct mm_struct *mm, unsigned long addr,
   5.212 +		struct vm_area_struct **pprev, struct rb_node ***rb_link,
   5.213 +		struct rb_node ** rb_parent)
   5.214 +{
   5.215 +	struct vm_area_struct * vma;
   5.216 +	struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
   5.217 +
   5.218 +	__rb_link = &mm->mm_rb.rb_node;
   5.219 +	rb_prev = __rb_parent = NULL;
   5.220 +	vma = NULL;
   5.221 +
   5.222 +	while (*__rb_link) {
   5.223 +		struct vm_area_struct *vma_tmp;
   5.224 +
   5.225 +		__rb_parent = *__rb_link;
   5.226 +		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
   5.227 +
   5.228 +		if (vma_tmp->vm_end > addr) {
   5.229 +			vma = vma_tmp;
   5.230 +			if (vma_tmp->vm_start <= addr)
   5.231 +				return vma;
   5.232 +			__rb_link = &__rb_parent->rb_left;
   5.233 +		} else {
   5.234 +			rb_prev = __rb_parent;
   5.235 +			__rb_link = &__rb_parent->rb_right;
   5.236 +		}
   5.237 +	}
   5.238 +
   5.239 +	*pprev = NULL;
   5.240 +	if (rb_prev)
   5.241 +		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
   5.242 +	*rb_link = __rb_link;
   5.243 +	*rb_parent = __rb_parent;
   5.244 +	return vma;
   5.245 +}
   5.246 +
   5.247 +static inline void
   5.248 +__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
   5.249 +		struct vm_area_struct *prev, struct rb_node *rb_parent)
   5.250 +{
   5.251 +	if (prev) {
   5.252 +		vma->vm_next = prev->vm_next;
   5.253 +		prev->vm_next = vma;
   5.254 +	} else {
   5.255 +		mm->mmap = vma;
   5.256 +		if (rb_parent)
   5.257 +			vma->vm_next = rb_entry(rb_parent,
   5.258 +					struct vm_area_struct, vm_rb);
   5.259 +		else
   5.260 +			vma->vm_next = NULL;
   5.261 +	}
   5.262 +}
   5.263 +
   5.264 +void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
   5.265 +		struct rb_node **rb_link, struct rb_node *rb_parent)
   5.266 +{
   5.267 +	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
   5.268 +	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
   5.269 +}
   5.270 +
   5.271 +static inline void __vma_link_file(struct vm_area_struct *vma)
   5.272 +{
   5.273 +	struct file * file;
   5.274 +
   5.275 +	file = vma->vm_file;
   5.276 +	if (file) {
   5.277 +		struct address_space *mapping = file->f_mapping;
   5.278 +
   5.279 +		if (vma->vm_flags & VM_DENYWRITE)
   5.280 +			atomic_dec(&file->f_dentry->d_inode->i_writecount);
   5.281 +		if (vma->vm_flags & VM_SHARED)
   5.282 +			mapping->i_mmap_writable++;
   5.283 +
   5.284 +		flush_dcache_mmap_lock(mapping);
   5.285 +		if (unlikely(vma->vm_flags & VM_NONLINEAR))
   5.286 +			list_add_tail(&vma->shared.vm_set.list,
   5.287 +					&mapping->i_mmap_nonlinear);
   5.288 +		else
   5.289 +			vma_prio_tree_insert(vma, &mapping->i_mmap);
   5.290 +		flush_dcache_mmap_unlock(mapping);
   5.291 +	}
   5.292 +}
   5.293 +
   5.294 +static void
   5.295 +__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
   5.296 +	struct vm_area_struct *prev, struct rb_node **rb_link,
   5.297 +	struct rb_node *rb_parent)
   5.298 +{
   5.299 +	__vma_link_list(mm, vma, prev, rb_parent);
   5.300 +	__vma_link_rb(mm, vma, rb_link, rb_parent);
   5.301 +	__anon_vma_link(vma);
   5.302 +}
   5.303 +
   5.304 +static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
   5.305 +			struct vm_area_struct *prev, struct rb_node **rb_link,
   5.306 +			struct rb_node *rb_parent)
   5.307 +{
   5.308 +	struct address_space *mapping = NULL;
   5.309 +
   5.310 +	if (vma->vm_file)
   5.311 +		mapping = vma->vm_file->f_mapping;
   5.312 +
   5.313 +	if (mapping)
   5.314 +		spin_lock(&mapping->i_mmap_lock);
   5.315 +	anon_vma_lock(vma);
   5.316 +
   5.317 +	__vma_link(mm, vma, prev, rb_link, rb_parent);
   5.318 +	__vma_link_file(vma);
   5.319 +
   5.320 +	anon_vma_unlock(vma);
   5.321 +	if (mapping)
   5.322 +		spin_unlock(&mapping->i_mmap_lock);
   5.323 +
   5.324 +	mark_mm_hugetlb(mm, vma);
   5.325 +	mm->map_count++;
   5.326 +	validate_mm(mm);
   5.327 +}
   5.328 +
   5.329 +/*
   5.330 + * Helper for vma_adjust in the split_vma insert case:
   5.331 + * insert vm structure into list and rbtree and anon_vma,
   5.332 + * but it has already been inserted into prio_tree earlier.
   5.333 + */
   5.334 +static void
   5.335 +__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
   5.336 +{
   5.337 +	struct vm_area_struct * __vma, * prev;
   5.338 +	struct rb_node ** rb_link, * rb_parent;
   5.339 +
   5.340 +	__vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
   5.341 +	if (__vma && __vma->vm_start < vma->vm_end)
   5.342 +		BUG();
   5.343 +	__vma_link(mm, vma, prev, rb_link, rb_parent);
   5.344 +	mm->map_count++;
   5.345 +}
   5.346 +
   5.347 +static inline void
   5.348 +__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
   5.349 +		struct vm_area_struct *prev)
   5.350 +{
   5.351 +	prev->vm_next = vma->vm_next;
   5.352 +	rb_erase(&vma->vm_rb, &mm->mm_rb);
   5.353 +	if (mm->mmap_cache == vma)
   5.354 +		mm->mmap_cache = prev;
   5.355 +}
   5.356 +
   5.357 +/*
   5.358 + * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
   5.359 + * is already present in an i_mmap tree without adjusting the tree.
   5.360 + * The following helper function should be used when such adjustments
   5.361 + * are necessary.  The "insert" vma (if any) is to be inserted
   5.362 + * before we drop the necessary locks.
   5.363 + */
   5.364 +void vma_adjust(struct vm_area_struct *vma, unsigned long start,
   5.365 +	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
   5.366 +{
   5.367 +	struct mm_struct *mm = vma->vm_mm;
   5.368 +	struct vm_area_struct *next = vma->vm_next;
   5.369 +	struct address_space *mapping = NULL;
   5.370 +	struct prio_tree_root *root = NULL;
   5.371 +	struct file *file = vma->vm_file;
   5.372 +	struct anon_vma *anon_vma = NULL;
   5.373 +	long adjust_next = 0;
   5.374 +	int remove_next = 0;
   5.375 +
   5.376 +	if (next && !insert) {
   5.377 +		if (end >= next->vm_end) {
   5.378 +			/*
   5.379 +			 * vma expands, overlapping all the next, and
   5.380 +			 * perhaps the one after too (mprotect case 6).
   5.381 +			 */
   5.382 +again:			remove_next = 1 + (end > next->vm_end);
   5.383 +			end = next->vm_end;
   5.384 +			anon_vma = next->anon_vma;
   5.385 +		} else if (end > next->vm_start) {
   5.386 +			/*
   5.387 +			 * vma expands, overlapping part of the next:
   5.388 +			 * mprotect case 5 shifting the boundary up.
   5.389 +			 */
   5.390 +			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
   5.391 +			anon_vma = next->anon_vma;
   5.392 +		} else if (end < vma->vm_end) {
   5.393 +			/*
   5.394 +			 * vma shrinks, and !insert tells it's not
   5.395 +			 * split_vma inserting another: so it must be
   5.396 +			 * mprotect case 4 shifting the boundary down.
   5.397 +			 */
   5.398 +			adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
   5.399 +			anon_vma = next->anon_vma;
   5.400 +		}
   5.401 +	}
   5.402 +
   5.403 +	if (file) {
   5.404 +		mapping = file->f_mapping;
   5.405 +		if (!(vma->vm_flags & VM_NONLINEAR))
   5.406 +			root = &mapping->i_mmap;
   5.407 +		spin_lock(&mapping->i_mmap_lock);
   5.408 +		if (insert) {
   5.409 +			/*
   5.410 +			 * Put into prio_tree now, so instantiated pages
   5.411 +			 * are visible to arm/parisc __flush_dcache_page
   5.412 +			 * throughout; but we cannot insert into address
   5.413 +			 * space until vma start or end is updated.
   5.414 +			 */
   5.415 +			__vma_link_file(insert);
   5.416 +		}
   5.417 +	}
   5.418 +
   5.419 +	/*
   5.420 +	 * When changing only vma->vm_end, we don't really need
   5.421 +	 * anon_vma lock: but is that case worth optimizing out?
   5.422 +	 */
   5.423 +	if (vma->anon_vma)
   5.424 +		anon_vma = vma->anon_vma;
   5.425 +	if (anon_vma)
   5.426 +		spin_lock(&anon_vma->lock);
   5.427 +
   5.428 +	if (root) {
   5.429 +		flush_dcache_mmap_lock(mapping);
   5.430 +		vma_prio_tree_remove(vma, root);
   5.431 +		if (adjust_next)
   5.432 +			vma_prio_tree_remove(next, root);
   5.433 +	}
   5.434 +
   5.435 +	vma->vm_start = start;
   5.436 +	vma->vm_end = end;
   5.437 +	vma->vm_pgoff = pgoff;
   5.438 +	if (adjust_next) {
   5.439 +		next->vm_start += adjust_next << PAGE_SHIFT;
   5.440 +		next->vm_pgoff += adjust_next;
   5.441 +	}
   5.442 +
   5.443 +	if (root) {
   5.444 +		if (adjust_next) {
   5.445 +			vma_prio_tree_init(next);
   5.446 +			vma_prio_tree_insert(next, root);
   5.447 +		}
   5.448 +		vma_prio_tree_init(vma);
   5.449 +		vma_prio_tree_insert(vma, root);
   5.450 +		flush_dcache_mmap_unlock(mapping);
   5.451 +	}
   5.452 +
   5.453 +	if (remove_next) {
   5.454 +		/*
   5.455 +		 * vma_merge has merged next into vma, and needs
   5.456 +		 * us to remove next before dropping the locks.
   5.457 +		 */
   5.458 +		__vma_unlink(mm, next, vma);
   5.459 +		if (file)
   5.460 +			__remove_shared_vm_struct(next, file, mapping);
   5.461 +		if (next->anon_vma)
   5.462 +			__anon_vma_merge(vma, next);
   5.463 +	} else if (insert) {
   5.464 +		/*
   5.465 +		 * split_vma has split insert from vma, and needs
   5.466 +		 * us to insert it before dropping the locks
   5.467 +		 * (it may either follow vma or precede it).
   5.468 +		 */
   5.469 +		__insert_vm_struct(mm, insert);
   5.470 +	}
   5.471 +
   5.472 +	if (anon_vma)
   5.473 +		spin_unlock(&anon_vma->lock);
   5.474 +	if (mapping)
   5.475 +		spin_unlock(&mapping->i_mmap_lock);
   5.476 +
   5.477 +	if (remove_next) {
   5.478 +		if (file)
   5.479 +			fput(file);
   5.480 +		mm->map_count--;
   5.481 +		mpol_free(vma_policy(next));
   5.482 +		kmem_cache_free(vm_area_cachep, next);
   5.483 +		/*
   5.484 +		 * In mprotect's case 6 (see comments on vma_merge),
   5.485 +		 * we must remove another next too. It would clutter
   5.486 +		 * up the code too much to do both in one go.
   5.487 +		 */
   5.488 +		if (remove_next == 2) {
   5.489 +			next = vma->vm_next;
   5.490 +			goto again;
   5.491 +		}
   5.492 +	}
   5.493 +
   5.494 +	validate_mm(mm);
   5.495 +}
   5.496 +
   5.497 +/*
   5.498 + * If the vma has a ->close operation then the driver probably needs to release
   5.499 + * per-vma resources, so we don't attempt to merge those.
   5.500 + */
   5.501 +#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
   5.502 +
   5.503 +static inline int is_mergeable_vma(struct vm_area_struct *vma,
   5.504 +			struct file *file, unsigned long vm_flags)
   5.505 +{
   5.506 +	if (vma->vm_flags != vm_flags)
   5.507 +		return 0;
   5.508 +	if (vma->vm_file != file)
   5.509 +		return 0;
   5.510 +	if (vma->vm_ops && vma->vm_ops->close)
   5.511 +		return 0;
   5.512 +	return 1;
   5.513 +}
   5.514 +
   5.515 +static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
   5.516 +					struct anon_vma *anon_vma2)
   5.517 +{
   5.518 +	return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
   5.519 +}
   5.520 +
   5.521 +/*
   5.522 + * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
   5.523 + * in front of (at a lower virtual address and file offset than) the vma.
   5.524 + *
   5.525 + * We cannot merge two vmas if they have differently assigned (non-NULL)
   5.526 + * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
   5.527 + *
   5.528 + * We don't check here for the merged mmap wrapping around the end of pagecache
   5.529 + * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
   5.530 + * wrap, nor mmaps which cover the final page at index -1UL.
   5.531 + */
   5.532 +static int
   5.533 +can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
   5.534 +	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
   5.535 +{
   5.536 +	if (is_mergeable_vma(vma, file, vm_flags) &&
   5.537 +	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
   5.538 +		if (vma->vm_pgoff == vm_pgoff)
   5.539 +			return 1;
   5.540 +	}
   5.541 +	return 0;
   5.542 +}
   5.543 +
   5.544 +/*
   5.545 + * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
   5.546 + * beyond (at a higher virtual address and file offset than) the vma.
   5.547 + *
   5.548 + * We cannot merge two vmas if they have differently assigned (non-NULL)
   5.549 + * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
   5.550 + */
   5.551 +static int
   5.552 +can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
   5.553 +	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
   5.554 +{
   5.555 +	if (is_mergeable_vma(vma, file, vm_flags) &&
   5.556 +	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
   5.557 +		pgoff_t vm_pglen;
   5.558 +		vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
   5.559 +		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
   5.560 +			return 1;
   5.561 +	}
   5.562 +	return 0;
   5.563 +}
   5.564 +
   5.565 +/*
   5.566 + * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
   5.567 + * whether that can be merged with its predecessor or its successor.
   5.568 + * Or both (it neatly fills a hole).
   5.569 + *
   5.570 + * In most cases - when called for mmap, brk or mremap - [addr,end) is
   5.571 + * certain not to be mapped by the time vma_merge is called; but when
   5.572 + * called for mprotect, it is certain to be already mapped (either at
   5.573 + * an offset within prev, or at the start of next), and the flags of
   5.574 + * this area are about to be changed to vm_flags - and the no-change
   5.575 + * case has already been eliminated.
   5.576 + *
   5.577 + * The following mprotect cases have to be considered, where AAAA is
   5.578 + * the area passed down from mprotect_fixup, never extending beyond one
   5.579 + * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
   5.580 + *
   5.581 + *     AAAA             AAAA                AAAA          AAAA
   5.582 + *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
   5.583 + *    cannot merge    might become    might become    might become
   5.584 + *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
   5.585 + *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
   5.586 + *    mremap move:                                    PPPPNNNNNNNN 8
   5.587 + *        AAAA
   5.588 + *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
   5.589 + *    might become    case 1 below    case 2 below    case 3 below
   5.590 + *
   5.591 + * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
   5.592 + * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
   5.593 + */
   5.594 +struct vm_area_struct *vma_merge(struct mm_struct *mm,
   5.595 +			struct vm_area_struct *prev, unsigned long addr,
   5.596 +			unsigned long end, unsigned long vm_flags,
   5.597 +		     	struct anon_vma *anon_vma, struct file *file,
   5.598 +			pgoff_t pgoff, struct mempolicy *policy)
   5.599 +{
   5.600 +	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
   5.601 +	struct vm_area_struct *area, *next;
   5.602 +
   5.603 +	/*
   5.604 +	 * We later require that vma->vm_flags == vm_flags,
   5.605 +	 * so this tests vma->vm_flags & VM_SPECIAL, too.
   5.606 +	 */
   5.607 +	if (vm_flags & VM_SPECIAL)
   5.608 +		return NULL;
   5.609 +
   5.610 +	if (prev)
   5.611 +		next = prev->vm_next;
   5.612 +	else
   5.613 +		next = mm->mmap;
   5.614 +	area = next;
   5.615 +	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
   5.616 +		next = next->vm_next;
   5.617 +
   5.618 +	/*
   5.619 +	 * Can it merge with the predecessor?
   5.620 +	 */
   5.621 +	if (prev && prev->vm_end == addr &&
   5.622 +  			mpol_equal(vma_policy(prev), policy) &&
   5.623 +			can_vma_merge_after(prev, vm_flags,
   5.624 +						anon_vma, file, pgoff)) {
   5.625 +		/*
   5.626 +		 * OK, it can.  Can we now merge in the successor as well?
   5.627 +		 */
   5.628 +		if (next && end == next->vm_start &&
   5.629 +				mpol_equal(policy, vma_policy(next)) &&
   5.630 +				can_vma_merge_before(next, vm_flags,
   5.631 +					anon_vma, file, pgoff+pglen) &&
   5.632 +				is_mergeable_anon_vma(prev->anon_vma,
   5.633 +						      next->anon_vma)) {
   5.634 +							/* cases 1, 6 */
   5.635 +			vma_adjust(prev, prev->vm_start,
   5.636 +				next->vm_end, prev->vm_pgoff, NULL);
   5.637 +		} else					/* cases 2, 5, 7 */
   5.638 +			vma_adjust(prev, prev->vm_start,
   5.639 +				end, prev->vm_pgoff, NULL);
   5.640 +		return prev;
   5.641 +	}
   5.642 +
   5.643 +	/*
   5.644 +	 * Can this new request be merged in front of next?
   5.645 +	 */
   5.646 +	if (next && end == next->vm_start &&
   5.647 + 			mpol_equal(policy, vma_policy(next)) &&
   5.648 +			can_vma_merge_before(next, vm_flags,
   5.649 +					anon_vma, file, pgoff+pglen)) {
   5.650 +		if (prev && addr < prev->vm_end)	/* case 4 */
   5.651 +			vma_adjust(prev, prev->vm_start,
   5.652 +				addr, prev->vm_pgoff, NULL);
   5.653 +		else					/* cases 3, 8 */
   5.654 +			vma_adjust(area, addr, next->vm_end,
   5.655 +				next->vm_pgoff - pglen, NULL);
   5.656 +		return area;
   5.657 +	}
   5.658 +
   5.659 +	return NULL;
   5.660 +}
   5.661 +
   5.662 +/*
   5.663 + * find_mergeable_anon_vma is used by anon_vma_prepare, to check
   5.664 + * neighbouring vmas for a suitable anon_vma, before it goes off
   5.665 + * to allocate a new anon_vma.  It checks because a repetitive
   5.666 + * sequence of mprotects and faults may otherwise lead to distinct
   5.667 + * anon_vmas being allocated, preventing vma merge in subsequent
   5.668 + * mprotect.
   5.669 + */
   5.670 +struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
   5.671 +{
   5.672 +	struct vm_area_struct *near;
   5.673 +	unsigned long vm_flags;
   5.674 +
   5.675 +	near = vma->vm_next;
   5.676 +	if (!near)
   5.677 +		goto try_prev;
   5.678 +
   5.679 +	/*
   5.680 +	 * Since only mprotect tries to remerge vmas, match flags
   5.681 +	 * which might be mprotected into each other later on.
   5.682 +	 * Neither mlock nor madvise tries to remerge at present,
   5.683 +	 * so leave their flags as obstructing a merge.
   5.684 +	 */
   5.685 +	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
   5.686 +	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
   5.687 +
   5.688 +	if (near->anon_vma && vma->vm_end == near->vm_start &&
   5.689 + 			mpol_equal(vma_policy(vma), vma_policy(near)) &&
   5.690 +			can_vma_merge_before(near, vm_flags,
   5.691 +				NULL, vma->vm_file, vma->vm_pgoff +
   5.692 +				((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
   5.693 +		return near->anon_vma;
   5.694 +try_prev:
   5.695 +	/*
   5.696 +	 * It is potentially slow to have to call find_vma_prev here.
   5.697 +	 * But it's only on the first write fault on the vma, not
   5.698 +	 * every time, and we could devise a way to avoid it later
   5.699 +	 * (e.g. stash info in next's anon_vma_node when assigning
   5.700 +	 * an anon_vma, or when trying vma_merge).  Another time.
   5.701 +	 */
   5.702 +	if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
   5.703 +		BUG();
   5.704 +	if (!near)
   5.705 +		goto none;
   5.706 +
   5.707 +	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
   5.708 +	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);
   5.709 +
   5.710 +	if (near->anon_vma && near->vm_end == vma->vm_start &&
   5.711 +  			mpol_equal(vma_policy(near), vma_policy(vma)) &&
   5.712 +			can_vma_merge_after(near, vm_flags,
   5.713 +				NULL, vma->vm_file, vma->vm_pgoff))
   5.714 +		return near->anon_vma;
   5.715 +none:
   5.716 +	/*
   5.717 +	 * There's no absolute need to look only at touching neighbours:
   5.718 +	 * we could search further afield for "compatible" anon_vmas.
   5.719 +	 * But it would probably just be a waste of time searching,
   5.720 +	 * or lead to too many vmas hanging off the same anon_vma.
   5.721 +	 * We're trying to allow mprotect remerging later on,
   5.722 +	 * not trying to minimize memory used for anon_vmas.
   5.723 +	 */
   5.724 +	return NULL;
   5.725 +}
   5.726 +
/*
 * do_mmap_pgoff - core of mmap(): create a mapping of @len bytes at (or
 * near) @addr, optionally backed by @file at page offset @pgoff.
 *
 * On success returns the page-aligned start address of the new mapping;
 * on failure returns a negative errno (distinguishable because its low
 * bits are set, while a valid address is page-aligned).
 *
 * The caller must hold down_write(current->mm->mmap_sem).
 */

unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff)
{
	struct mm_struct * mm = current->mm;
	struct vm_area_struct * vma, * prev;
	struct inode *inode;
	unsigned int vm_flags;
	int correct_wcount = 0;		/* did we deny_write_access()? */
	int error;
	struct rb_node ** rb_link, * rb_parent;
	int accountable = 1;		/* charge against overcommit limits? */
	unsigned long charged = 0;	/* pages charged, for error unwind */

	if (file) {
		/* hugetlbfs does its own reservation-time accounting */
		if (is_file_hugepages(file))
			accountable = 0;

		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		if ((prot & PROT_EXEC) &&
		    (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
			return -EPERM;
	}

	/* Zero-length mmap is a successful no-op. */
	if (!len)
		return addr;

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len || len > TASK_SIZE)
		return -EINVAL;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EINVAL;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)
		return addr;

	/* Do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED) {
		if (!capable(CAP_IPC_LOCK))
			return -EPERM;
		vm_flags |= VM_LOCKED;
	}
	/* mlock MCL_FUTURE? */
	if (vm_flags & VM_LOCKED) {
		unsigned long locked = mm->locked_vm << PAGE_SHIFT;
		locked += len;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			return -EAGAIN;
	}

	inode = file ? file->f_dentry->d_inode : NULL;

	if (file) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(inode))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	error = security_file_mmap(file, prot, flags);
	if (error)
		return error;
		
	/* Clear old maps */
	error = -ENOMEM;
munmap_back:
	/* Loop: do_munmap may split vmas, so re-lookup until range is free. */
	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
	if (vma && vma->vm_start < addr + len) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/* Check against address space limit. */
	if ((mm->total_vm << PAGE_SHIFT) + len
	    > current->rlim[RLIMIT_AS].rlim_cur)
		return -ENOMEM;

	if (accountable && (!(flags & MAP_NORESERVE) ||
			sysctl_overcommit_memory > 1)) {
		if (vm_flags & VM_SHARED) {
			/* Check memory availability in shmem_file_setup? */
			vm_flags |= VM_ACCOUNT;
		} else if (vm_flags & VM_WRITE) {
			/*
			 * Private writable mapping: check memory availability
			 */
			charged = len >> PAGE_SHIFT;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			vm_flags |= VM_ACCOUNT;
		}
	}

	/*
	 * Can we just expand an old private anonymous mapping?
	 * The VM_SHARED test is necessary because shmem_zero_setup
	 * will create the file object for a shared anonymous map below.
	 */
	if (!file && !(vm_flags & VM_SHARED) &&
	    vma_merge(mm, prev, addr, addr + len, vm_flags,
					NULL, NULL, pgoff, NULL))
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper. the address has already been validated, but
	 * not unmapped, but the maps are removed from the list.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}
	memset(vma, 0, sizeof(*vma));

	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	/* protection_map is indexed by the low 4 (read/write/exec/shared) bits */
	vma->vm_page_prot = protection_map[vm_flags & 0x0f];
	vma->vm_pgoff = pgoff;

	if (file) {
		error = -EINVAL;
		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
			goto free_vma;
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
			correct_wcount = 1;
		}
		vma->vm_file = file;
		get_file(file);
		error = file->f_op->mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;
	} else if (vm_flags & VM_SHARED) {
		/* Shared anonymous map: back it with an unlinked shmem file. */
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	}

	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
	 * that memory reservation must be checked; but that reservation
	 * belongs to shared memory object, not to vma: so now clear it.
	 */
	if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
		vma->vm_flags &= ~VM_ACCOUNT;

	/* Can addr have changed??
	 *
	 * Answer: Yes, several device drivers can do it in their
	 *         f_op->mmap method. -DaveM
	 */
	addr = vma->vm_start;

	if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
		vma_link(mm, vma, prev, rb_link, rb_parent);
		if (correct_wcount)
			atomic_inc(&inode->i_writecount);
	} else {
		/* Merged into an existing vma: drop our references and the
		 * now-unneeded vma we allocated above. */
		if (file) {
			if (correct_wcount)
				atomic_inc(&inode->i_writecount);
			fput(file);
		}
		mpol_free(vma_policy(vma));
		kmem_cache_free(vm_area_cachep, vma);
	}
out:	
	mm->total_vm += len >> PAGE_SHIFT;
	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += len >> PAGE_SHIFT;
		make_pages_present(addr, addr + len);
	}
	if (flags & MAP_POPULATE) {
		/* Prefault: remap_file_pages takes mmap_sem itself, so drop it. */
		up_write(&mm->mmap_sem);
		sys_remap_file_pages(addr, len, 0,
					pgoff, flags & MAP_NONBLOCK);
		down_write(&mm->mmap_sem);
	}
	return addr;

unmap_and_free_vma:
	if (correct_wcount)
		atomic_inc(&inode->i_writecount);
	vma->vm_file = NULL;
	fput(file);

	/* Undo any partial mapping done by a device driver. */
	zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
free_vma:
	kmem_cache_free(vm_area_cachep, vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}

EXPORT_SYMBOL(do_mmap_pgoff);
   5.995 +
/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA
static inline unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* If the caller gave a hint, honour it when the hole is free. */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/* Otherwise resume scanning where the last search left off. */
	start_addr = addr = mm->free_area_cache;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Gap too small; skip past this vma and keep scanning. */
		addr = vma->vm_end;
	}
}
#else
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
			unsigned long, unsigned long);
#endif	
  5.1057 +
/*
 * arch_check_fixed_mapping is an architecture hook (added for Xen) that
 * may veto a MAP_FIXED request falling in a reserved virtual range.
 * The default accepts everything.  NOTE(review): the arch version is
 * defined elsewhere (arch/xen) — semantics assumed from usage below.
 */
#ifndef HAVE_ARCH_CHECK_FIXED_MAPPING
#define arch_check_fixed_mapping(_file,_addr,_len,_pgoff,_flags) 0
#else
extern unsigned long
arch_check_fixed_mapping(struct file *, unsigned long, unsigned long,
			unsigned long, unsigned long);
#endif

/*
 * Pick an unmapped address range for a new mapping.  For MAP_FIXED the
 * caller's address is validated (alignment, limits, arch veto, hugepage
 * constraints) and returned unchanged; otherwise the choice is delegated
 * to the file's ->get_unmapped_area or the arch default.
 * Same "low bits set means error" convention as arch_get_unmapped_area.
 */
unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	if (flags & MAP_FIXED) {
		unsigned long ret;

		if (addr > TASK_SIZE - len)
			return -ENOMEM;
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		/* Xen hook: non-zero means the range is rejected. */
		ret = arch_check_fixed_mapping(file, addr, len, pgoff, flags);
		if (ret != 0)
			return ret;
		if (file && is_file_hugepages(file))  {
			/*
			 * Check if the given range is hugepage aligned, and
			 * can be made suitable for hugepages.
			 */
			ret = prepare_hugepage_range(addr, len);
		} else {
			/*
			 * Ensure that a normal request is not falling in a
			 * reserved hugepage range.  For some archs like IA-64,
			 * there is a separate region for hugepages.
			 */
			ret = is_hugepage_only_range(addr, len);
		}
		if (ret)
			return -EINVAL;
		return addr;
	}

	if (file && file->f_op && file->f_op->get_unmapped_area)
		return file->f_op->get_unmapped_area(file, addr, len,
						pgoff, flags);

	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}

EXPORT_SYMBOL(get_unmapped_area);
  5.1107 +
  5.1108 +/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
  5.1109 +struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
  5.1110 +{
  5.1111 +	struct vm_area_struct *vma = NULL;
  5.1112 +
  5.1113 +	if (mm) {
  5.1114 +		/* Check the cache first. */
  5.1115 +		/* (Cache hit rate is typically around 35%.) */
  5.1116 +		vma = mm->mmap_cache;
  5.1117 +		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
  5.1118 +			struct rb_node * rb_node;
  5.1119 +
  5.1120 +			rb_node = mm->mm_rb.rb_node;
  5.1121 +			vma = NULL;
  5.1122 +
  5.1123 +			while (rb_node) {
  5.1124 +				struct vm_area_struct * vma_tmp;
  5.1125 +
  5.1126 +				vma_tmp = rb_entry(rb_node,
  5.1127 +						struct vm_area_struct, vm_rb);
  5.1128 +
  5.1129 +				if (vma_tmp->vm_end > addr) {
  5.1130 +					vma = vma_tmp;
  5.1131 +					if (vma_tmp->vm_start <= addr)
  5.1132 +						break;
  5.1133 +					rb_node = rb_node->rb_left;
  5.1134 +				} else
  5.1135 +					rb_node = rb_node->rb_right;
  5.1136 +			}
  5.1137 +			if (vma)
  5.1138 +				mm->mmap_cache = vma;
  5.1139 +		}
  5.1140 +	}
  5.1141 +	return vma;
  5.1142 +}
  5.1143 +
  5.1144 +EXPORT_SYMBOL(find_vma);
  5.1145 +
/* Same as find_vma, but also return a pointer to the previous VMA in *pprev.
 * *pprev is set to the last VMA wholly below addr, or NULL if addr lies
 * before (or inside) the first VMA.
 */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma = NULL, *prev = NULL;
	struct rb_node * rb_node;
	if (!mm)
		goto out;

	/* Guard against addr being lower than the first VMA */
	vma = mm->mmap;

	/* Go through the RB tree quickly. */
	rb_node = mm->mm_rb.rb_node;

	while (rb_node) {
		struct vm_area_struct *vma_tmp;
		vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);

		if (addr < vma_tmp->vm_end) {
			rb_node = rb_node->rb_left;
		} else {
			/* vma_tmp ends at or below addr: candidate for prev. */
			prev = vma_tmp;
			if (!prev->vm_next || (addr < prev->vm_next->vm_end))
				break;
			rb_node = rb_node->rb_right;
		}
	}

out:
	/* With a prev, the find_vma result is simply prev->vm_next;
	 * otherwise fall back to the list head (first VMA or NULL). */
	*pprev = prev;
	return prev ? prev->vm_next : vma;
}
  5.1180 +
#ifdef CONFIG_STACK_GROWSUP
/*
 * vma is the first one with address > vma->vm_end.  Have to extend vma.
 * Returns 0 on success, -EFAULT if vma cannot grow up, -ENOMEM on limit
 * or accounting failure.
 */
int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/*
	 * We must make sure the anon_vma is allocated
	 * so that the anon_vma locking is not a noop.
	 */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;
	anon_vma_lock(vma);

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	/* Round up past the faulting word to the next page boundary. */
	address += 4 + PAGE_SIZE - 1;
	address &= PAGE_MASK;
	grow = (address - vma->vm_end) >> PAGE_SHIFT;

	/* Overcommit.. */
	if (security_vm_enough_memory(grow)) {
		anon_vma_unlock(vma);
		return -ENOMEM;
	}
	
	if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
			current->rlim[RLIMIT_AS].rlim_cur) {
		anon_vma_unlock(vma);
		/* Undo the overcommit charge taken above. */
		vm_unacct_memory(grow);
		return -ENOMEM;
	}
	vma->vm_end = address;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	anon_vma_unlock(vma);
	return 0;
}

/*
 * Find the VMA containing addr, growing the nearest grows-up stack vma
 * to cover addr if necessary.  Returns NULL if addr cannot be covered.
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	/* expand_stack itself rejects non-VM_GROWSUP vmas with -EFAULT. */
	if (!prev || expand_stack(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED) {
		make_pages_present(addr, prev->vm_end);
	}
	return prev;
}
#else
/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma.
 * Returns 0 on success, -ENOMEM on limit or accounting failure.
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	unsigned long grow;

	/*
	 * We must make sure the anon_vma is allocated
	 * so that the anon_vma locking is not a noop.
	 */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;
	anon_vma_lock(vma);

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */
	address &= PAGE_MASK;
	grow = (vma->vm_start - address) >> PAGE_SHIFT;

	/* Overcommit.. */
	if (security_vm_enough_memory(grow)) {
		anon_vma_unlock(vma);
		return -ENOMEM;
	}
	
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
			current->rlim[RLIMIT_AS].rlim_cur) {
		anon_vma_unlock(vma);
		/* Undo the overcommit charge taken above. */
		vm_unacct_memory(grow);
		return -ENOMEM;
	}
	/* Growing down: move the start and rebias the page offset. */
	vma->vm_start = address;
	vma->vm_pgoff -= grow;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	anon_vma_unlock(vma);
	return 0;
}

/*
 * Find the VMA containing addr, growing the next grows-down stack vma
 * down to cover addr if necessary.  Returns NULL if addr cannot be covered.
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct * mm, unsigned long addr)
{
	struct vm_area_struct * vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm,addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	/* Remember the old start: the newly-grown span is [addr, start). */
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED) {
		make_pages_present(addr, start);
	}
	return vma;
}
#endif
  5.1315 +
/*
 * Try to free as many page directory entries as we can,
 * without having to work very hard at actually scanning
 * the page tables themselves.
 *
 * Right now we try to free page tables if we have a nice
 * PGDIR-aligned area that got free'd up. We could be more
 * granular if we want to, but this is fast and simple,
 * and covers the bad cases.
 *
 * "prev", if it exists, points to a vma before the one
 * we just free'd - but there's no telling how much before.
 */
static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
	unsigned long start, unsigned long end)
{
	/* Candidate span of pgd entries to clear; shrunk below so it
	 * never overlaps a still-live vma on either side. */
	unsigned long first = start & PGDIR_MASK;
	unsigned long last = end + PGDIR_SIZE - 1;
	unsigned long start_index, end_index;
	struct mm_struct *mm = tlb->mm;

	if (!prev) {
		prev = mm->mmap;
		if (!prev)
			goto no_mmaps;
		if (prev->vm_end > start) {
			/* First vma overlaps the hole: clamp and bail out. */
			if (last > prev->vm_start)
				last = prev->vm_start;
			goto no_mmaps;
		}
	}
	for (;;) {
		struct vm_area_struct *next = prev->vm_next;

		if (next) {
			if (next->vm_start < start) {
				/* next still entirely before the hole: advance. */
				prev = next;
				continue;
			}
			if (last > next->vm_start)
				last = next->vm_start;
		}
		if (prev->vm_end > first)
			first = prev->vm_end + PGDIR_SIZE - 1;
		break;
	}
no_mmaps:
	if (last < first)	/* for arches with discontiguous pgd indices */
		return;
	/*
	 * If the PGD bits are not consecutive in the virtual address, the
	 * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
	 */
	start_index = pgd_index(first);
	if (start_index < FIRST_USER_PGD_NR)
		start_index = FIRST_USER_PGD_NR;
	end_index = pgd_index(last);
	if (end_index > start_index) {
		clear_page_tables(tlb, start_index, end_index - start_index);
		flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
	}
}
  5.1378 +
  5.1379 +/* Normal function to fix up a mapping
  5.1380 + * This function is the default for when an area has no specific
  5.1381 + * function.  This may be used as part of a more specific routine.
  5.1382 + *
  5.1383 + * By the time this function is called, the area struct has been
  5.1384 + * removed from the process mapping list.
  5.1385 + */
  5.1386 +static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
  5.1387 +{
  5.1388 +	size_t len = area->vm_end - area->vm_start;
  5.1389 +
  5.1390 +	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
  5.1391 +	if (area->vm_flags & VM_LOCKED)
  5.1392 +		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
  5.1393 +	/*
  5.1394 +	 * Is this a new hole at the lowest possible address?
  5.1395 +	 */
  5.1396 +	if (area->vm_start >= TASK_UNMAPPED_BASE &&
  5.1397 +				area->vm_start < area->vm_mm->free_area_cache)
  5.1398 +	      area->vm_mm->free_area_cache = area->vm_start;
  5.1399 +
  5.1400 +	remove_vm_struct(area);
  5.1401 +}
  5.1402 +
  5.1403 +/*
  5.1404 + * Update the VMA and inode share lists.
  5.1405 + *
  5.1406 + * Ok - we have the memory areas we should free on the 'free' list,
  5.1407 + * so release them, and do the vma updates.
  5.1408 + */
  5.1409 +static void unmap_vma_list(struct mm_struct *mm,
  5.1410 +	struct vm_area_struct *mpnt)
  5.1411 +{
  5.1412 +	do {
  5.1413 +		struct vm_area_struct *next = mpnt->vm_next;
  5.1414 +		unmap_vma(mm, mpnt);
  5.1415 +		mpnt = next;
  5.1416 +	} while (mpnt != NULL);
  5.1417 +	validate_mm(mm);
  5.1418 +}
  5.1419 +
/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the page table lock held.
 */
static void unmap_region(struct mm_struct *mm,
	struct vm_area_struct *vma,
	struct vm_area_struct *prev,
	unsigned long start,
	unsigned long end)
{
	struct mmu_gather *tlb;
	unsigned long nr_accounted = 0;	/* VM_ACCOUNT pages torn down */

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
	/* Return the accounted pages to the overcommit pool. */
	vm_unacct_memory(nr_accounted);

	if (is_hugepage_only_range(start, end - start))
		hugetlb_free_pgtables(tlb, prev, start, end);
	else
		free_pgtables(tlb, prev, start, end);
	/* Flush the TLB and release the gathered pages. */
	tlb_finish_mmu(tlb, start, end);
}
  5.1445 +
  5.1446 +/*
  5.1447 + * Create a list of vma's touched by the unmap, removing them from the mm's
  5.1448 + * vma list as we go..
  5.1449 + */
  5.1450 +static void
  5.1451 +detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  5.1452 +	struct vm_area_struct *prev, unsigned long end)
  5.1453 +{
  5.1454 +	struct vm_area_struct **insertion_point;
  5.1455 +	struct vm_area_struct *tail_vma = NULL;
  5.1456 +
  5.1457 +	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
  5.1458 +	do {
  5.1459 +		rb_erase(&vma->vm_rb, &mm->mm_rb);
  5.1460 +		mm->map_count--;
  5.1461 +		tail_vma = vma;
  5.1462 +		vma = vma->vm_next;
  5.1463 +	} while (vma && vma->vm_start < end);
  5.1464 +	*insertion_point = vma;
  5.1465 +	tail_vma->vm_next = NULL;
  5.1466 +	mm->mmap_cache = NULL;		/* Kill the cache. */
  5.1467 +}
  5.1468 +
/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 * Returns 0 on success, -ENOMEM on allocation failure or map-count limit,
 * or a mempolicy-copy error.
 */
int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
	      unsigned long addr, int new_below)
{
	struct mempolicy *pol;
	struct vm_area_struct *new;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!new)
		return -ENOMEM;

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	vma_prio_tree_init(new);

	if (new_below)
		new->vm_end = addr;
	else {
		new->vm_start = addr;
		/* Tail piece starts (addr - vm_start) pages into the file. */
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	pol = mpol_copy(vma_policy(vma));
	if (IS_ERR(pol)) {
		kmem_cache_free(vm_area_cachep, new);
		return PTR_ERR(pol);
	}
	vma_set_policy(new, pol);

	/* The new vma carries its own references to the file and mapping. */
	if (new->vm_file)
		get_file(new->vm_file);

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	/* Shrink the old vma to the remaining half and insert the new one. */
	if (new_below)
		vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
			((addr - new->vm_start) >> PAGE_SHIFT), new);
	else
		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);

	return 0;
}
  5.1518 +
/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <jeremy@goop.org>
 *
 * Returns 0 on success (including "nothing mapped there"), or a
 * negative errno.  Caller must hold mm->mmap_sem for writing.
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	unsigned long end;
	struct vm_area_struct *mpnt, *prev, *last;

	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return -EINVAL;

	/* Find the first overlapping VMA */
	mpnt = find_vma_prev(mm, start, &prev);
	if (!mpnt)
		return 0;
	/* we have  start < mpnt->vm_end  */

	/* Hugepage mappings may only be unmapped on hugepage boundaries. */
	if (is_vm_hugetlb_page(mpnt)) {
		int ret = is_aligned_hugepage_range(start, len);

		if (ret)
			return ret;
	}

	/* if it doesn't overlap, we have nothing.. */
	end = start + len;
	if (mpnt->vm_start >= end)
		return 0;

	/* Something will probably happen, so notify. */
	if (mpnt->vm_file && (mpnt->vm_flags & VM_EXEC))
		profile_exec_unmap(mm);
 
	/*
	 * If we need to split any vma, do it now to save pain later.
	 *
	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
	 * unmapped vm_area_struct will remain in use: so lower split_vma
	 * places tmp vma above, and higher split_vma places tmp vma below.
	 */
	if (start > mpnt->vm_start) {
		if (split_vma(mm, mpnt, start, 0))
			return -ENOMEM;
		/* The surviving lower half is now the vma before the hole. */
		prev = mpnt;
	}

	/* Does it split the last one? */
	last = find_vma(mm, end);
	if (last && end > last->vm_start) {
		if (split_vma(mm, last, end, 1))
			return -ENOMEM;
	}
	/* First vma to actually unmap (after any splitting above). */
	mpnt = prev? prev->vm_next: mm->mmap;

	/*
	 * Remove the vma's, and unmap the actual pages
	 */
	detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
	spin_lock(&mm->page_table_lock);
	unmap_region(mm, mpnt, prev, start, end);
	spin_unlock(&mm->page_table_lock);

	/* Fix up all other VM information */
	unmap_vma_list(mm, mpnt);

	return 0;
}

EXPORT_SYMBOL(do_munmap);
  5.1593 +
  5.1594 +asmlinkage long sys_munmap(unsigned long addr, size_t len)
  5.1595 +{
  5.1596 +	int ret;
  5.1597 +	struct mm_struct *mm = current->mm;
  5.1598 +
  5.1599 +	down_write(&mm->mmap_sem);
  5.1600 +	ret = do_munmap(mm, addr, len);
  5.1601 +	up_write(&mm->mmap_sem);
  5.1602 +	return ret;
  5.1603 +}
  5.1604 +
  5.1605 +/*
  5.1606 + *  this is really a simplified "do_mmap".  it only handles
  5.1607 + *  anonymous maps.  eventually we may be able to do some
  5.1608 + *  brk-specific accounting here.
  5.1609 + */
         +/*
         + * Returns the (page-aligned) start address on success, or a negative
         + * errno (-EINVAL/-ENOMEM/-EAGAIN).  NOTE(review): presumably called
         + * with mm->mmap_sem held for writing, as with do_mmap — the lock is
         + * not taken here; confirm against callers.
         + */
  5.1610 +unsigned long do_brk(unsigned long addr, unsigned long len)
  5.1611 +{
  5.1612 +	struct mm_struct * mm = current->mm;
  5.1613 +	struct vm_area_struct * vma, * prev;
  5.1614 +	unsigned long flags;
  5.1615 +	struct rb_node ** rb_link, * rb_parent;
  5.1616 +	pgoff_t pgoff = addr >> PAGE_SHIFT;
  5.1617 +
         +	/* A zero-length request is a successful no-op. */
  5.1618 +	len = PAGE_ALIGN(len);
  5.1619 +	if (!len)
  5.1620 +		return addr;
  5.1621 +
         +	/* Reject mappings beyond the user address space and the
         +	 * addr+len wrap-around (overflow) case. */
  5.1622 +	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
  5.1623 +		return -EINVAL;
  5.1624 +
  5.1625 +	/*
  5.1626 +	 * mlock MCL_FUTURE?
  5.1627 +	 */
  5.1628 +	if (mm->def_flags & VM_LOCKED) {
  5.1629 +		unsigned long locked = mm->locked_vm << PAGE_SHIFT;
  5.1630 +		locked += len;
  5.1631 +		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
  5.1632 +			return -EAGAIN;
  5.1633 +	}
  5.1634 +
  5.1635 +	/*
  5.1636 +	 * Clear old maps.  this also does some error checking for us
  5.1637 +	 */
  5.1638 + munmap_back:
  5.1639 +	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
  5.1640 +	if (vma && vma->vm_start < addr + len) {
         +		/* Any do_munmap() failure is reported as -ENOMEM here,
         +		 * collapsing its real error code. */
  5.1641 +		if (do_munmap(mm, addr, len))
  5.1642 +			return -ENOMEM;
         +		/* Unmap may have changed the tree; redo the lookup. */
  5.1643 +		goto munmap_back;
  5.1644 +	}
  5.1645 +
  5.1646 +	/* Check against address space limits *after* clearing old maps... */
  5.1647 +	if ((mm->total_vm << PAGE_SHIFT) + len
  5.1648 +	    > current->rlim[RLIMIT_AS].rlim_cur)
  5.1649 +		return -ENOMEM;
  5.1650 +
  5.1651 +	if (mm->map_count > sysctl_max_map_count)
  5.1652 +		return -ENOMEM;
  5.1653 +
         +	/* Charge the pages to the commit accounting (VM_ACCOUNT below);
         +	 * must be undone on every later failure path. */
  5.1654 +	if (security_vm_enough_memory(len >> PAGE_SHIFT))
  5.1655 +		return -ENOMEM;
  5.1656 +
  5.1657 +	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
  5.1658 +
  5.1659 +	/* Can we just expand an old private anonymous mapping? */
  5.1660 +	if (vma_merge(mm, prev, addr, addr + len, flags,
  5.1661 +					NULL, NULL, pgoff, NULL))
  5.1662 +		goto out;
  5.1663 +
  5.1664 +	/*
  5.1665 +	 * create a vma struct for an anonymous mapping
  5.1666 +	 */
  5.1667 +	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  5.1668 +	if (!vma) {
         +		/* Undo the security_vm_enough_memory() charge. */
  5.1669 +		vm_unacct_memory(len >> PAGE_SHIFT);
  5.1670 +		return -ENOMEM;
  5.1671 +	}
  5.1672 +	memset(vma, 0, sizeof(*vma));
  5.1673 +
  5.1674 +	vma->vm_mm = mm;
  5.1675 +	vma->vm_start = addr;
  5.1676 +	vma->vm_end = addr + len;
  5.1677 +	vma->vm_pgoff = pgoff;
  5.1678 +	vma->vm_flags = flags;
         +	/* Low 4 flag bits (read/write/exec/shared) index the
         +	 * architecture's page-protection table. */
  5.1679 +	vma->vm_page_prot = protection_map[flags & 0x0f];
  5.1680 +	vma_link(mm, vma, prev, rb_link, rb_parent);
  5.1681 +out:
  5.1682 +	mm->total_vm += len >> PAGE_SHIFT;
  5.1683 +	if (flags & VM_LOCKED) {
  5.1684 +		mm->locked_vm += len >> PAGE_SHIFT;
         +		/* mlock semantics: fault the pages in immediately. */
  5.1685 +		make_pages_present(addr, addr + len);
  5.1686 +	}
  5.1687 +	return addr;
  5.1688 +}
  5.1689 +
  5.1690 +EXPORT_SYMBOL(do_brk);
  5.1691 +
  5.1692 +/* Release all mmaps. */
         +/*
         + * Tear down the entire address space of a dying mm: unmap every vma,
         + * free the user page tables, reset the mm's vma bookkeeping, then
         + * close/free the vma structures outside the locks.
         + */
  5.1693 +void exit_mmap(struct mm_struct *mm)
  5.1694 +{
  5.1695 +	struct mmu_gather *tlb;
  5.1696 +	struct vm_area_struct *vma;
  5.1697 +	unsigned long nr_accounted = 0;
  5.1698 +
  5.1699 +	profile_exit_mmap(mm);
  5.1700 + 
  5.1701 +	lru_add_drain();
  5.1702 +
  5.1703 +	spin_lock(&mm->page_table_lock);
  5.1704 +
         +	/* Second argument 1: full-mm teardown mode for the TLB gather. */
  5.1705 +	tlb = tlb_gather_mmu(mm, 1);
  5.1706 +	flush_cache_mm(mm);
  5.1707 +	/* Use ~0UL here to ensure all VMAs in the mm are unmapped */
  5.1708 +	mm->map_count -= unmap_vmas(&tlb, mm, mm->mmap, 0,
  5.1709 +					~0UL, &nr_accounted, NULL);
         +	/* Return the VM_ACCOUNT-charged pages counted by unmap_vmas(). */
  5.1710 +	vm_unacct_memory(nr_accounted);
  5.1711 +	BUG_ON(mm->map_count);	/* This is just debugging */
  5.1712 +	clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
  5.1713 +	tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
  5.1714 +
         +	/* Detach the vma list from the mm and zero all counters; the
         +	 * list itself is freed below after dropping the lock. */
  5.1715 +	vma = mm->mmap;
  5.1716 +	mm->mmap = mm->mmap_cache = NULL;
  5.1717 +	mm->mm_rb = RB_ROOT;
  5.1718 +	mm->rss = 0;
  5.1719 +	mm->total_vm = 0;
  5.1720 +	mm->locked_vm = 0;
  5.1721 +
  5.1722 +	spin_unlock(&mm->page_table_lock);
  5.1723 +
  5.1724 +	/*
  5.1725 +	 * Walk the list again, actually closing and freeing it
  5.1726 +	 * without holding any MM locks.
  5.1727 +	 */
  5.1728 +	while (vma) {
  5.1729 +		struct vm_area_struct *next = vma->vm_next;
  5.1730 +		remove_vm_struct(vma);
  5.1731 +		vma = next;
  5.1732 +	}
  5.1733 +}
  5.1734 +
  5.1735 +/* Insert vm structure into process list sorted by address
  5.1736 + * and into the inode's i_mmap tree.  If vm_file is non-NULL
  5.1737 + * then i_mmap_lock is taken here.
  5.1738 + */
         +/* NOTE(review): no mmap_sem manipulation is visible here, so the
         + * caller presumably holds mm->mmap_sem for writing — confirm. */
  5.1739 +void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
  5.1740 +{
  5.1741 +	struct vm_area_struct * __vma, * prev;
  5.1742 +	struct rb_node ** rb_link, * rb_parent;
  5.1743 +
  5.1744 +	/*
  5.1745 +	 * The vm_pgoff of a purely anonymous vma should be irrelevant
  5.1746 +	 * until its first write fault, when page's anon_vma and index
  5.1747 +	 * are set.  But now set the vm_pgoff it will almost certainly
  5.1748 +	 * end up with (unless mremap moves it elsewhere before that
  5.1749 +	 * first wfault), so /proc/pid/maps tells a consistent story.
  5.1750 +	 *
  5.1751 +	 * By setting it to reflect the virtual start address of the
  5.1752 +	 * vma, merges and splits can happen in a seamless way, just
  5.1753 +	 * using the existing file pgoff checks and manipulations.
  5.1754 +	 * Similarly in do_mmap_pgoff and in do_brk.
  5.1755 +	 */
  5.1756 +	if (!vma->vm_file) {
  5.1757 +		BUG_ON(vma->anon_vma);
  5.1758 +		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
  5.1759 +	}
  5.1760 +	__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
         +	/* Overlap with an existing vma is a caller bug: panic rather
         +	 * than corrupt the rbtree/list. */
  5.1761 +	if (__vma && __vma->vm_start < vma->vm_end)
  5.1762 +		BUG();
  5.1763 +	vma_link(mm, vma, prev, rb_link, rb_parent);
  5.1764 +}
  5.1765 +
  5.1766 +/*
  5.1767 + * Copy the vma structure to a new location in the same mm,
  5.1768 + * prior to moving page table entries, to effect an mremap move.
  5.1769 + */
         +/*
         + * Returns the vma now covering [addr, addr+len) — either a merged
         + * neighbour or a fresh copy — or NULL on allocation/policy failure.
         + * May update *vmap if the source vma itself got merged into new_vma.
         + */
  5.1770 +struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  5.1771 +	unsigned long addr, unsigned long len, pgoff_t pgoff)
  5.1772 +{
  5.1773 +	struct vm_area_struct *vma = *vmap;
  5.1774 +	unsigned long vma_start = vma->vm_start;
  5.1775 +	struct mm_struct *mm = vma->vm_mm;
  5.1776 +	struct vm_area_struct *new_vma, *prev;
  5.1777 +	struct rb_node **rb_link, *rb_parent;
  5.1778 +	struct mempolicy *pol;
  5.1779 +
  5.1780 +	/*
  5.1781 +	 * If anonymous vma has not yet been faulted, update new pgoff
  5.1782 +	 * to match new location, to increase its chance of merging.
  5.1783 +	 */
  5.1784 +	if (!vma->vm_file && !vma->anon_vma)
  5.1785 +		pgoff = addr >> PAGE_SHIFT;
  5.1786 +
         +	/* First try to merge the moved range into a neighbour at the
         +	 * destination instead of allocating a new vma. */
  5.1787 +	find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
  5.1788 +	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
  5.1789 +			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
  5.1790 +	if (new_vma) {
  5.1791 +		/*
  5.1792 +		 * Source vma may have been merged into new_vma
  5.1793 +		 */
  5.1794 +		if (vma_start >= new_vma->vm_start &&
  5.1795 +		    vma_start < new_vma->vm_end)
  5.1796 +			*vmap = new_vma;
  5.1797 +	} else {
  5.1798 +		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
  5.1799 +		if (new_vma) {
         +			/* Struct-copy the source, then fix up the fields
         +			 * that differ at the new location. */
  5.1800 +			*new_vma = *vma;
  5.1801 +			vma_prio_tree_init(new_vma);
  5.1802 +			pol = mpol_copy(vma_policy(vma));
  5.1803 +			if (IS_ERR(pol)) {
         +				/* Policy copy failed: free the vma and
         +				 * report failure as NULL (caller cannot
         +				 * distinguish the underlying errno). */
  5.1804 +				kmem_cache_free(vm_area_cachep, new_vma);
  5.1805 +				return NULL;
  5.1806 +			}
  5.1807 +			vma_set_policy(new_vma, pol);
  5.1808 +			new_vma->vm_start = addr;
  5.1809 +			new_vma->vm_end = addr + len;
  5.1810 +			new_vma->vm_pgoff = pgoff;
         +			/* The copy shares the backing file: take an extra
         +			 * reference, and give vm_ops a chance to set up. */
  5.1811 +			if (new_vma->vm_file)
  5.1812 +				get_file(new_vma->vm_file);
  5.1813 +			if (new_vma->vm_ops && new_vma->vm_ops->open)
  5.1814 +				new_vma->vm_ops->open(new_vma);
  5.1815 +			vma_link(mm, new_vma, prev, rb_link, rb_parent);
  5.1816 +		}
  5.1817 +	}
  5.1818 +	return new_vma;
  5.1819 +}