ia64/xen-unstable

changeset 2435:791d6dc54a24

bitkeeper revision 1.1159.69.17 (413b5ab8tE2tTd_Ht7wxKU0Uktv2_w)

Fix WBINVD uses.
author kaf24@freefall.cl.cam.ac.uk
date Sun Sep 05 18:28:08 2004 +0000 (2004-09-05)
parents 75ba907f8ccf
children 8ae0831442b8
files .rootkeys linux-2.6.8.1-patches/agpgart.patch linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/Makefile linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pageattr.c
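
The fix replaces an open-coded WBINVD in agp.h with the kernel's wbinvd() macro, presumably so the Xen-sparse headers can interpose a definition that is safe for an unprivileged guest (WBINVD is a privileged instruction and would fault in a paravirtualized domain). For reference, the native macro in 2.6.8 looks roughly like this (a sketch of include/asm-i386/system.h, not part of this changeset):

	/* Write back and invalidate all caches (privileged instruction). */
	#define wbinvd() \
		__asm__ __volatile__ ("wbinvd": : :"memory")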
line diff
     1.1 --- a/.rootkeys	Sun Sep 05 05:19:12 2004 +0000
     1.2 +++ b/.rootkeys	Sun Sep 05 18:28:08 2004 +0000
     1.3 @@ -163,6 +163,7 @@ 4118cc35CbY8rfGVspF5O-7EkXBEAA linux-2.6
     1.4  40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/hypervisor.c
     1.5  40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/init.c
     1.6  41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/ioremap.c
     1.7 +413b5ab8LIowAnQrEmaOJSdmqm96jQ linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pageattr.c
     1.8  40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pgtable.c
     1.9  4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/Makefile
    1.10  4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.8.1-xen-sparse/arch/xen/i386/pci/direct.c
     2.1 --- a/linux-2.6.8.1-patches/agpgart.patch	Sun Sep 05 05:19:12 2004 +0000
     2.2 +++ b/linux-2.6.8.1-patches/agpgart.patch	Sun Sep 05 18:28:08 2004 +0000
     2.3 @@ -326,3 +326,21 @@
     2.4   
     2.5   	for (i = 0; i < num_entries; i++) {
     2.6   		agp_bridge->gatt_table[i] =
     2.7 +--- linux-2.6.8.1/include/asm-i386/agp.h	2004-08-14 11:54:47.000000000 +0100
     2.8 ++++ linux-2.6.8.1-xen0/include/asm-i386/agp.h	2004-09-05 05:57:26.040268956 +0100
     2.9 +@@ -3,6 +3,7 @@
    2.10 + 
    2.11 + #include <asm/pgtable.h>
    2.12 + #include <asm/cacheflush.h>
    2.13 ++#include <asm/system.h>
    2.14 + 
    2.15 + /* 
    2.16 +  * Functions to keep the agpgart mappings coherent with the MMU.
    2.17 +@@ -19,6 +20,6 @@
    2.18 + /* Could use CLFLUSH here if the cpu supports it. But then it would
    2.19 +    need to be called for each cacheline of the whole page so it may not be 
    2.20 +    worth it. Would need a page for it. */
    2.21 +-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
    2.22 ++#define flush_agp_cache() wbinvd()
    2.23 + 
    2.24 + #endif
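
For illustration, the CLFLUSH alternative mentioned in the comment above would have to walk every cache line of the page, which is why the comment doubts it is worth it. A hypothetical sketch (flush_agp_page() and the hard-coded 32-byte line size are assumptions for illustration, not part of the patch):

	/* Flush a single page line-by-line with CLFLUSH instead of a global
	 * WBINVD. Illustrative only: a real version would check
	 * cpu_has_clflush and read the line size from CPUID. */
	static inline void flush_agp_page(void *addr)
	{
		unsigned long i;
		for (i = 0; i < PAGE_SIZE; i += 32)	/* assume 32-byte lines */
			asm volatile("clflush (%0)"
				     : : "r" ((char *)addr + i) : "memory");
	}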
     3.1 --- a/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/Makefile	Sun Sep 05 05:19:12 2004 +0000
     3.2 +++ b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/Makefile	Sun Sep 05 18:28:08 2004 +0000
     3.3 @@ -6,8 +6,8 @@ XENARCH	:= $(subst ",,$(CONFIG_XENARCH))
     3.4  
     3.5  CFLAGS	+= -Iarch/$(XENARCH)/mm
     3.6  
     3.7 -obj-y	:= init.o fault.o ioremap.o pgtable.o hypervisor.o
     3.8 -c-obj-y	:= extable.o pageattr.o 
     3.9 +obj-y	:= init.o fault.o ioremap.o pgtable.o hypervisor.o pageattr.o
    3.10 +c-obj-y	:= extable.o
    3.11  
    3.12  c-obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
    3.13  c-obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
     4.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.2 +++ b/linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pageattr.c	Sun Sep 05 18:28:08 2004 +0000
     4.3 @@ -0,0 +1,215 @@
     4.4 +/* 
     4.5 + * Copyright 2002 Andi Kleen, SuSE Labs. 
     4.6 + * Thanks to Ben LaHaise for precious feedback.
     4.7 + */ 
     4.8 +
     4.9 +#include <linux/config.h>
    4.10 +#include <linux/mm.h>
    4.11 +#include <linux/sched.h>
    4.12 +#include <linux/highmem.h>
    4.13 +#include <linux/module.h>
    4.14 +#include <linux/slab.h>
    4.15 +#include <asm/uaccess.h>
    4.16 +#include <asm/processor.h>
    4.17 +#include <asm/tlbflush.h>
    4.18 +
    4.19 +static spinlock_t cpa_lock = SPIN_LOCK_UNLOCKED;
    4.20 +static struct list_head df_list = LIST_HEAD_INIT(df_list);
    4.21 +
    4.22 +
    4.23 +pte_t *lookup_address(unsigned long address) 
    4.24 +{ 
    4.25 +	pgd_t *pgd = pgd_offset_k(address); 
    4.26 +	pmd_t *pmd;
    4.27 +	if (pgd_none(*pgd))
    4.28 +		return NULL;
    4.29 +	pmd = pmd_offset(pgd, address); 	       
    4.30 +	if (pmd_none(*pmd))
    4.31 +		return NULL;
    4.32 +	if (pmd_large(*pmd))
    4.33 +		return (pte_t *)pmd;
     4.34 +	return pte_offset_kernel(pmd, address);
    4.35 +} 
    4.36 +
    4.37 +static struct page *split_large_page(unsigned long address, pgprot_t prot)
    4.38 +{ 
    4.39 +	int i; 
    4.40 +	unsigned long addr;
    4.41 +	struct page *base;
    4.42 +	pte_t *pbase;
    4.43 +
    4.44 +	spin_unlock_irq(&cpa_lock);
    4.45 +	base = alloc_pages(GFP_KERNEL, 0);
    4.46 +	spin_lock_irq(&cpa_lock);
    4.47 +	if (!base) 
    4.48 +		return NULL;
    4.49 +
    4.50 +	address = __pa(address);
    4.51 +	addr = address & LARGE_PAGE_MASK; 
    4.52 +	pbase = (pte_t *)page_address(base);
    4.53 +	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
    4.54 +		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
    4.55 +				   addr == address ? prot : PAGE_KERNEL);
    4.56 +	}
    4.57 +	return base;
    4.58 +} 
    4.59 +
    4.60 +static void flush_kernel_map(void *dummy) 
    4.61 +{ 
     4.62 +	/* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
    4.63 +	if (boot_cpu_data.x86_model >= 4) 
    4.64 +		wbinvd();
     4.65 +	/* Flush all to work around errata in early Athlons regarding
     4.66 +	 * large page flushing.
     4.67 +	 */
    4.68 +	__flush_tlb_all(); 	
    4.69 +}
    4.70 +
    4.71 +static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) 
    4.72 +{ 
    4.73 +	struct page *page;
    4.74 +	unsigned long flags;
    4.75 +
    4.76 +	set_pte_atomic(kpte, pte); 	/* change init_mm */
    4.77 +	if (PTRS_PER_PMD > 1)
    4.78 +		return;
    4.79 +
    4.80 +	spin_lock_irqsave(&pgd_lock, flags);
    4.81 +	for (page = pgd_list; page; page = (struct page *)page->index) {
    4.82 +		pgd_t *pgd;
    4.83 +		pmd_t *pmd;
    4.84 +		pgd = (pgd_t *)page_address(page) + pgd_index(address);
    4.85 +		pmd = pmd_offset(pgd, address);
    4.86 +		set_pte_atomic((pte_t *)pmd, pte);
    4.87 +	}
    4.88 +	spin_unlock_irqrestore(&pgd_lock, flags);
    4.89 +}
    4.90 +
    4.91 +/* 
    4.92 + * No more special protections in this 2/4MB area - revert to a
    4.93 + * large page again. 
    4.94 + */
    4.95 +static inline void revert_page(struct page *kpte_page, unsigned long address)
    4.96 +{
    4.97 +	pte_t *linear = (pte_t *) 
    4.98 +		pmd_offset(pgd_offset(&init_mm, address), address);
    4.99 +	set_pmd_pte(linear,  address,
   4.100 +		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
   4.101 +			    PAGE_KERNEL_LARGE));
   4.102 +}
   4.103 +
   4.104 +static int
   4.105 +__change_page_attr(struct page *page, pgprot_t prot)
   4.106 +{ 
   4.107 +	pte_t *kpte; 
   4.108 +	unsigned long address;
   4.109 +	struct page *kpte_page;
   4.110 +
   4.111 +#ifdef CONFIG_HIGHMEM
   4.112 +	if (page >= highmem_start_page) 
   4.113 +		BUG(); 
   4.114 +#endif
   4.115 +	address = (unsigned long)page_address(page);
   4.116 +
   4.117 +	kpte = lookup_address(address);
   4.118 +	if (!kpte)
   4.119 +		return -EINVAL;
   4.120 +	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
   4.121 +	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
   4.122 +		if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
   4.123 +			pte_t old = *kpte;
   4.124 +			pte_t standard = mk_pte(page, PAGE_KERNEL); 
   4.125 +			set_pte_atomic(kpte, mk_pte(page, prot)); 
    4.126 +			if (pte_same(old, standard))
   4.127 +				get_page(kpte_page);
   4.128 +		} else {
   4.129 +			struct page *split = split_large_page(address, prot); 
   4.130 +			if (!split)
   4.131 +				return -ENOMEM;
   4.132 +			get_page(kpte_page);
    4.133 +			set_pmd_pte(kpte, address, mk_pte(split, PAGE_KERNEL));
   4.134 +		}	
   4.135 +	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
   4.136 +		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
   4.137 +		__put_page(kpte_page);
   4.138 +	}
   4.139 +
   4.140 +	if (cpu_has_pse && (page_count(kpte_page) == 1)) {
   4.141 +		list_add(&kpte_page->lru, &df_list);
   4.142 +		revert_page(kpte_page, address);
   4.143 +	} 
   4.144 +	return 0;
   4.145 +} 
   4.146 +
   4.147 +static inline void flush_map(void)
   4.148 +{
   4.149 +	on_each_cpu(flush_kernel_map, NULL, 1, 1);
   4.150 +}
   4.151 +
   4.152 +/*
    4.153 + * Change the page attributes of a page in the linear mapping.
   4.154 + *
   4.155 + * This should be used when a page is mapped with a different caching policy
   4.156 + * than write-back somewhere - some CPUs do not like it when mappings with
   4.157 + * different caching policies exist. This changes the page attributes of the
    4.158 + * kernel linear mapping too.
   4.159 + * 
   4.160 + * The caller needs to ensure that there are no conflicting mappings elsewhere.
   4.161 + * This function only deals with the kernel linear map.
   4.162 + * 
   4.163 + * Caller must call global_flush_tlb() after this.
   4.164 + */
   4.165 +int change_page_attr(struct page *page, int numpages, pgprot_t prot)
   4.166 +{
   4.167 +	int err = 0; 
   4.168 +	int i; 
   4.169 +	unsigned long flags;
   4.170 +
   4.171 +	spin_lock_irqsave(&cpa_lock, flags);
   4.172 +	for (i = 0; i < numpages; i++, page++) { 
   4.173 +		err = __change_page_attr(page, prot);
   4.174 +		if (err) 
   4.175 +			break; 
   4.176 +	} 	
   4.177 +	spin_unlock_irqrestore(&cpa_lock, flags);
   4.178 +	return err;
   4.179 +}
   4.180 +
   4.181 +void global_flush_tlb(void)
   4.182 +{ 
   4.183 +	LIST_HEAD(l);
   4.184 +	struct list_head* n;
   4.185 +
   4.186 +	BUG_ON(irqs_disabled());
   4.187 +
   4.188 +	spin_lock_irq(&cpa_lock);
   4.189 +	list_splice_init(&df_list, &l);
   4.190 +	spin_unlock_irq(&cpa_lock);
   4.191 +	flush_map();
   4.192 +	n = l.next;
   4.193 +	while (n != &l) {
   4.194 +		struct page *pg = list_entry(n, struct page, lru);
   4.195 +		n = n->next;
   4.196 +		__free_page(pg);
   4.197 +	}
   4.198 +} 
   4.199 +
   4.200 +#ifdef CONFIG_DEBUG_PAGEALLOC
   4.201 +void kernel_map_pages(struct page *page, int numpages, int enable)
   4.202 +{
   4.203 +	if (PageHighMem(page))
   4.204 +		return;
    4.205 +	/* The return value is ignored - the calls cannot fail, since
    4.206 +	 * large pages are disabled at boot time.
    4.207 +	 */
   4.208 +	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
    4.209 +	/* We should perform an IPI and flush all TLBs,
    4.210 +	 * but that can deadlock, so flush only the current CPU.
    4.211 +	 */
   4.212 +	__flush_tlb_all();
   4.213 +}
   4.214 +EXPORT_SYMBOL(kernel_map_pages);
   4.215 +#endif
   4.216 +
   4.217 +EXPORT_SYMBOL(change_page_attr);
   4.218 +EXPORT_SYMBOL(global_flush_tlb);
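
As the comment above change_page_attr() notes, a successful call must be followed by global_flush_tlb(). A minimal usage sketch (the caller code is illustrative, not part of this changeset; PAGE_KERNEL_NOCACHE is the standard i386 pgprot for uncached kernel mappings):

	/* Map one page uncacheable in the kernel linear map, use it, then
	 * revert it to the normal write-back policy. */
	struct page *pg = alloc_page(GFP_KERNEL);

	if (pg && change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE) == 0) {
		global_flush_tlb();		/* required after the change */

		/* ... access the page with the non-write-back policy ... */

		change_page_attr(pg, 1, PAGE_KERNEL);	/* revert */
		global_flush_tlb();
	}
	if (pg)
		__free_page(pg);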