ia64/xen-unstable

changeset 6606:7c2afbad0188

Fix writing to an mmap'ed /dev/mem region mapped with PROT_WRITE
and MAP_PRIVATE. This is in fact a generic Linux bug.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Sep 02 16:51:55 2005 +0000 (2005-09-02)
parents cac138ea9284
children ec11c5cca195
files linux-2.6-xen-sparse/mm/memory.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/mm/memory.c	Fri Sep 02 14:20:12 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/mm/memory.c	Fri Sep 02 16:51:55 2005 +0000
     1.3 @@ -1367,20 +1367,15 @@ static int do_wp_page(struct mm_struct *
     1.4  	struct page *old_page, *new_page;
     1.5  	unsigned long pfn = pte_pfn(pte);
     1.6  	pte_t entry;
     1.7 +	struct page invalid_page;
     1.8  
     1.9  	if (unlikely(!pfn_valid(pfn))) {
    1.10 -		/*
    1.11 -		 * This should really halt the system so it can be debugged or
    1.12 -		 * at least the kernel stops what it's doing before it corrupts
    1.13 -		 * data, but for the moment just pretend this is OOM.
    1.14 -		 */
    1.15 -		pte_unmap(page_table);
    1.16 -		printk(KERN_ERR "do_wp_page: bogus page at address %08lx\n",
    1.17 -				address);
    1.18 -		spin_unlock(&mm->page_table_lock);
    1.19 -		return VM_FAULT_OOM;
    1.20 +		/* This can happen with /dev/mem (PROT_WRITE, MAP_PRIVATE). */
    1.21 +		invalid_page.flags = (1<<PG_reserved) | (1<<PG_locked);
    1.22 +		old_page = &invalid_page;
    1.23 +	} else {
    1.24 +		old_page = pfn_to_page(pfn);
    1.25  	}
    1.26 -	old_page = pfn_to_page(pfn);
    1.27  
    1.28  	if (!TestSetPageLocked(old_page)) {
    1.29  		int reuse = can_share_swap_page(old_page);
    1.30 @@ -1416,7 +1411,13 @@ static int do_wp_page(struct mm_struct *
    1.31  		new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
    1.32  		if (!new_page)
    1.33  			goto no_new_page;
    1.34 -		copy_user_highpage(new_page, old_page, address);
    1.35 +		if (old_page == &invalid_page) {
    1.36 +			char *vto = kmap_atomic(new_page, KM_USER1);
    1.37 +			copy_page(vto, (void *)(address & PAGE_MASK));
    1.38 +			kunmap_atomic(vto, KM_USER1);
    1.39 +		} else {
    1.40 +			copy_user_highpage(new_page, old_page, address);
    1.41 +		}
    1.42  	}
    1.43  	/*
    1.44  	 * Re-check the pte - we dropped the lock