ia64/xen-unstable

changeset 4161:fc1e5218f616

bitkeeper revision 1.1236.34.4 (42370eder_IQwERchpmusDF9eNt_8A)

No modifications needed to pageattr.c: drop the Xen-sparse copy of the file and build the native i386 version (via c-obj-y) instead.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Tue Mar 15 16:35:42 2005 +0000 (2005-03-15)
parents 7a281bd1e0f5
children 95637bf4672a
files .rootkeys linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c
line diff
     1.1 --- a/.rootkeys	Tue Mar 15 15:58:52 2005 +0000
     1.2 +++ b/.rootkeys	Tue Mar 15 16:35:42 2005 +0000
     1.3 @@ -172,7 +172,6 @@ 4118cc35CbY8rfGVspF5O-7EkXBEAA linux-2.6
     1.4  40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
     1.5  40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c
     1.6  41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c
     1.7 -413b5ab8LIowAnQrEmaOJSdmqm96jQ linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c
     1.8  40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c
     1.9  4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.11-xen-sparse/arch/xen/i386/pci/Makefile
    1.10  4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.11-xen-sparse/arch/xen/i386/pci/direct.c
     2.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile	Tue Mar 15 15:58:52 2005 +0000
     2.2 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile	Tue Mar 15 16:35:42 2005 +0000
     2.3 @@ -6,8 +6,8 @@ XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
     2.4  
     2.5  CFLAGS	+= -Iarch/$(XENARCH)/mm
     2.6  
     2.7 -obj-y	:= init.o pgtable.o fault.o ioremap.o pageattr.o hypervisor.o
     2.8 -c-obj-y	:= extable.o mmap.o
     2.9 +obj-y	:= init.o pgtable.o fault.o ioremap.o hypervisor.o
    2.10 +c-obj-y	:= extable.o mmap.o pageattr.o
    2.11  
    2.12  c-obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
    2.13  c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
     3.1 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c	Tue Mar 15 15:58:52 2005 +0000
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,226 +0,0 @@
     3.4 -/* 
     3.5 - * Copyright 2002 Andi Kleen, SuSE Labs. 
     3.6 - * Thanks to Ben LaHaise for precious feedback.
     3.7 - */ 
     3.8 -
     3.9 -#include <linux/config.h>
    3.10 -#include <linux/mm.h>
    3.11 -#include <linux/sched.h>
    3.12 -#include <linux/highmem.h>
    3.13 -#include <linux/module.h>
    3.14 -#include <linux/slab.h>
    3.15 -#include <asm/uaccess.h>
    3.16 -#include <asm/processor.h>
    3.17 -#include <asm/tlbflush.h>
    3.18 -
    3.19 -static DEFINE_SPINLOCK(cpa_lock);
    3.20 -static struct list_head df_list = LIST_HEAD_INIT(df_list);
    3.21 -
    3.22 -
    3.23 -pte_t *lookup_address(unsigned long address) 
    3.24 -{ 
    3.25 -	pgd_t *pgd = pgd_offset_k(address);
    3.26 -	pud_t *pud;
    3.27 -	pmd_t *pmd;
    3.28 -	if (pgd_none(*pgd))
    3.29 -		return NULL;
    3.30 -	pud = pud_offset(pgd, address);
    3.31 -	if (pud_none(*pud))
    3.32 -		return NULL;
    3.33 -	pmd = pmd_offset(pud, address);
    3.34 -	if (pmd_none(*pmd))
    3.35 -		return NULL;
    3.36 -	if (pmd_large(*pmd))
    3.37 -		return (pte_t *)pmd;
    3.38 -        return pte_offset_kernel(pmd, address);
    3.39 -} 
    3.40 -
    3.41 -static struct page *split_large_page(unsigned long address, pgprot_t prot)
    3.42 -{ 
    3.43 -	int i; 
    3.44 -	unsigned long addr;
    3.45 -	struct page *base;
    3.46 -	pte_t *pbase;
    3.47 -
    3.48 -	spin_unlock_irq(&cpa_lock);
    3.49 -	base = alloc_pages(GFP_KERNEL, 0);
    3.50 -	spin_lock_irq(&cpa_lock);
    3.51 -	if (!base) 
    3.52 -		return NULL;
    3.53 -
    3.54 -	address = __pa(address);
    3.55 -	addr = address & LARGE_PAGE_MASK; 
    3.56 -	pbase = (pte_t *)page_address(base);
    3.57 -	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
    3.58 -		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
    3.59 -				   addr == address ? prot : PAGE_KERNEL);
    3.60 -	}
    3.61 -	return base;
    3.62 -} 
    3.63 -
    3.64 -static void flush_kernel_map(void *dummy) 
    3.65 -{ 
    3.66 -	/* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
    3.67 -	if (boot_cpu_data.x86_model >= 4) 
    3.68 -		wbinvd();
     3.69 -	/* Flush all to work around errata in early Athlons regarding 
    3.70 -	 * large page flushing. 
    3.71 -	 */
    3.72 -	__flush_tlb_all(); 	
    3.73 -}
    3.74 -
    3.75 -static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) 
    3.76 -{ 
    3.77 -	struct page *page;
    3.78 -	unsigned long flags;
    3.79 -
    3.80 -	set_pte_atomic(kpte, pte); 	/* change init_mm */
    3.81 -	if (PTRS_PER_PMD > 1)
    3.82 -		return;
    3.83 -
    3.84 -	spin_lock_irqsave(&pgd_lock, flags);
    3.85 -	for (page = pgd_list; page; page = (struct page *)page->index) {
    3.86 -		pgd_t *pgd;
    3.87 -		pud_t *pud;
    3.88 -		pmd_t *pmd;
    3.89 -		pgd = (pgd_t *)page_address(page) + pgd_index(address);
    3.90 -		pud = pud_offset(pgd, address);
    3.91 -		pmd = pmd_offset(pud, address);
    3.92 -		set_pte_atomic((pte_t *)pmd, pte);
    3.93 -	}
    3.94 -	spin_unlock_irqrestore(&pgd_lock, flags);
    3.95 -}
    3.96 -
    3.97 -/* 
    3.98 - * No more special protections in this 2/4MB area - revert to a
    3.99 - * large page again. 
   3.100 - */
   3.101 -static inline void revert_page(struct page *kpte_page, unsigned long address)
   3.102 -{
   3.103 -	pte_t *linear = (pte_t *) 
   3.104 -		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
   3.105 -	set_pmd_pte(linear,  address,
   3.106 -		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
   3.107 -			    PAGE_KERNEL_LARGE));
   3.108 -}
   3.109 -
   3.110 -static int
   3.111 -__change_page_attr(struct page *page, pgprot_t prot)
   3.112 -{ 
   3.113 -	pte_t *kpte; 
   3.114 -	unsigned long address;
   3.115 -	struct page *kpte_page;
   3.116 -
   3.117 -	BUG_ON(PageHighMem(page));
   3.118 -	address = (unsigned long)page_address(page);
   3.119 -
   3.120 -	kpte = lookup_address(address);
   3.121 -	if (!kpte)
   3.122 -		return -EINVAL;
   3.123 -	kpte_page = virt_to_page(kpte);
   3.124 -	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
   3.125 -		if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
   3.126 -			set_pte_batched(kpte, mk_pte(page, prot)); 
   3.127 -		} else {
   3.128 -			struct page *split = split_large_page(address, prot); 
   3.129 -			if (!split)
   3.130 -				return -ENOMEM;
   3.131 -			set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
   3.132 -			kpte_page = split;
   3.133 -		}	
   3.134 -		get_page(kpte_page);
   3.135 -	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
   3.136 -		set_pte_batched(kpte, mk_pte(page, PAGE_KERNEL));
   3.137 -		__put_page(kpte_page);
   3.138 -	} else
   3.139 -		BUG();
   3.140 -
   3.141 -	/*
   3.142 -	 * If the pte was reserved, it means it was created at boot
   3.143 -	 * time (not via split_large_page) and in turn we must not
   3.144 -	 * replace it with a largepage.
   3.145 -	 */
   3.146 -	if (!PageReserved(kpte_page)) {
   3.147 -		/* memleak and potential failed 2M page regeneration */
   3.148 -		BUG_ON(!page_count(kpte_page));
   3.149 -
   3.150 -		if (cpu_has_pse && (page_count(kpte_page) == 1)) {
   3.151 -			list_add(&kpte_page->lru, &df_list);
   3.152 -			revert_page(kpte_page, address);
   3.153 -		}
   3.154 -	}
   3.155 -	return 0;
   3.156 -} 
   3.157 -
   3.158 -static inline void flush_map(void)
   3.159 -{
   3.160 -	on_each_cpu(flush_kernel_map, NULL, 1, 1);
   3.161 -}
   3.162 -
   3.163 -/*
    3.164 - * Change the page attributes of a page in the linear mapping.
   3.165 - *
   3.166 - * This should be used when a page is mapped with a different caching policy
   3.167 - * than write-back somewhere - some CPUs do not like it when mappings with
   3.168 - * different caching policies exist. This changes the page attributes of the
    3.169 - * in-kernel linear mapping too.
   3.170 - * 
   3.171 - * The caller needs to ensure that there are no conflicting mappings elsewhere.
   3.172 - * This function only deals with the kernel linear map.
   3.173 - * 
   3.174 - * Caller must call global_flush_tlb() after this.
   3.175 - */
   3.176 -int change_page_attr(struct page *page, int numpages, pgprot_t prot)
   3.177 -{
   3.178 -	int err = 0; 
   3.179 -	int i; 
   3.180 -	unsigned long flags;
   3.181 -
   3.182 -	spin_lock_irqsave(&cpa_lock, flags);
   3.183 -	for (i = 0; i < numpages; i++, page++) { 
   3.184 -		err = __change_page_attr(page, prot);
   3.185 -		if (err) 
   3.186 -			break; 
   3.187 -	} 	
   3.188 -	flush_page_update_queue();
   3.189 -	spin_unlock_irqrestore(&cpa_lock, flags);
   3.190 -	return err;
   3.191 -}
   3.192 -
   3.193 -void global_flush_tlb(void)
   3.194 -{ 
   3.195 -	LIST_HEAD(l);
   3.196 -	struct list_head* n;
   3.197 -
   3.198 -	BUG_ON(irqs_disabled());
   3.199 -
   3.200 -	spin_lock_irq(&cpa_lock);
   3.201 -	list_splice_init(&df_list, &l);
   3.202 -	spin_unlock_irq(&cpa_lock);
   3.203 -	flush_map();
   3.204 -	n = l.next;
   3.205 -	while (n != &l) {
   3.206 -		struct page *pg = list_entry(n, struct page, lru);
   3.207 -		n = n->next;
   3.208 -		__free_page(pg);
   3.209 -	}
   3.210 -} 
   3.211 -
   3.212 -#ifdef CONFIG_DEBUG_PAGEALLOC
   3.213 -void kernel_map_pages(struct page *page, int numpages, int enable)
   3.214 -{
   3.215 -	if (PageHighMem(page))
   3.216 -		return;
   3.217 -	/* the return value is ignored - the calls cannot fail,
   3.218 -	 * large pages are disabled at boot time.
   3.219 -	 */
   3.220 -	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
   3.221 -	/* we should perform an IPI and flush all tlbs,
   3.222 -	 * but that can deadlock->flush only current cpu.
   3.223 -	 */
   3.224 -	__flush_tlb_all();
   3.225 -}
   3.226 -#endif
   3.227 -
   3.228 -EXPORT_SYMBOL(change_page_attr);
   3.229 -EXPORT_SYMBOL(global_flush_tlb);
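
Usage note: the comment block above change_page_attr() states the calling contract — the caller must ensure there are no conflicting mappings and must call global_flush_tlb() afterwards. A minimal sketch of a caller of this interface (unchanged by the switch to the native pageattr.c) follows; make_pages_uncached() is a hypothetical helper invented for illustration, while change_page_attr(), global_flush_tlb() and PAGE_KERNEL_NOCACHE are the existing i386 kernel symbols, declared via asm/cacheflush.h and asm/pgtable.h.

    #include <linux/mm.h>
    #include <asm/cacheflush.h>    /* change_page_attr(), global_flush_tlb() */

    /*
     * Hypothetical helper: remap 'numpages' pages of the kernel linear
     * mapping, starting at 'page', as uncached, then perform the TLB
     * flush that change_page_attr() requires of its caller.
     */
    static int make_pages_uncached(struct page *page, int numpages)
    {
            int err = change_page_attr(page, numpages, PAGE_KERNEL_NOCACHE);

            global_flush_tlb();    /* mandatory follow-up flush */
            return err;
    }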