ia64/xen-unstable

changeset 4144:288c77d96e81

bitkeeper revision 1.1236.25.23 (4236103b1rjrMEGV-jUhVaEFEZhosw)

Merge firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-2.0-testing.bk
into firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author kaf24@firebug.cl.cam.ac.uk
date Mon Mar 14 22:29:15 2005 +0000 (2005-03-14)
parents 6a7120b3405b 77df9e441078
children f2d61710e4d9 ce88c0b8cb9c
files linux-2.6.11-xen-sparse/arch/xen/i386/mm/highmem.c linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/highmem.h
line diff
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/highmem.c	Mon Mar 14 19:24:43 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/highmem.c	Mon Mar 14 22:29:15 2005 +0000
@@ -25,7 +25,7 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic(struct page *page, enum km_type type)
+static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
 {
 	enum fixed_addresses idx;
 	unsigned long vaddr;
@@ -41,33 +41,21 @@ void *kmap_atomic(struct page *page, enu
 	if (!pte_none(*(kmap_pte-idx)))
 		BUG();
 #endif
-	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+	set_pte(kmap_pte-idx, mk_pte(page, prot));
 	__flush_tlb_one(vaddr);
 
 	return (void*) vaddr;
 }
 
-/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection */
+void *kmap_atomic(struct page *page, enum km_type type)
+{
+	return __kmap_atomic(page, type, kmap_prot);
+}
+
+/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
 void *kmap_atomic_pte(struct page *page, enum km_type type)
 {
-	enum fixed_addresses idx;
-	unsigned long vaddr;
-
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
-	if (page < highmem_start_page)
-		return page_address(page);
-
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	if (!pte_none(*(kmap_pte-idx)))
-		BUG();
-#endif
-	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL_RO));
-	__flush_tlb_one(vaddr);
-
-	return (void*) vaddr;
+	return __kmap_atomic(page, type, PAGE_KERNEL_RO);
 }
 
 void kunmap_atomic(void *kvaddr, enum km_type type)
@@ -97,31 +85,6 @@ void kunmap_atomic(void *kvaddr, enum km
 	preempt_check_resched();
 }
 
-void kunmap_atomic_force(void *kvaddr, enum km_type type)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	if (vaddr < FIXADDR_START) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
-		return;
-	}
-
-	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
-		BUG();
-
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(kmap_pte-idx);
-	__flush_tlb_one(vaddr);
-
-	dec_preempt_count();
-	preempt_check_resched();
-}
-
 struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
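
Taken together, these hunks deduplicate the two atomic-mapping paths: the body of the old kmap_atomic() becomes a static __kmap_atomic() helper that takes the page protection as a parameter, and both public entry points collapse into one-line wrappers. Reconstructed from the hunks above (lines elided by the diff are filled in from the identical code deleted from kmap_atomic_pte, so treat this as a sketch rather than the verbatim result), the post-patch functions read:

    static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
    {
    	enum fixed_addresses idx;
    	unsigned long vaddr;

    	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    	inc_preempt_count();
    	if (page < highmem_start_page)
    		return page_address(page);

    	idx = type + KM_TYPE_NR*smp_processor_id();
    	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    #ifdef CONFIG_DEBUG_HIGHMEM
    	if (!pte_none(*(kmap_pte-idx)))
    		BUG();
    #endif
    	set_pte(kmap_pte-idx, mk_pte(page, prot));
    	__flush_tlb_one(vaddr);

    	return (void*) vaddr;
    }

    void *kmap_atomic(struct page *page, enum km_type type)
    {
    	return __kmap_atomic(page, type, kmap_prot);
    }

    /* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
    void *kmap_atomic_pte(struct page *page, enum km_type type)
    {
    	return __kmap_atomic(page, type, PAGE_KERNEL_RO);
    }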
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c	Mon Mar 14 19:24:43 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c	Mon Mar 14 22:29:15 2005 +0000
@@ -232,7 +232,7 @@ struct page *pte_alloc_one(struct mm_str
 	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
 	if (pte == NULL)
 		return pte;
-	if (pte >= highmem_start_page)
+	if (PageHighMem(pte))
 		return pte;
 	/* not a highmem page -- free page and grab one from the cache */
 	__free_page(pte);
@@ -247,7 +247,7 @@ void pte_free(struct page *pte)
 {
 	set_page_count(pte, 1);
 #ifdef CONFIG_HIGHPTE
-	if (pte < highmem_start_page)
+	if (!PageHighMem(pte))
 #endif
 		kmem_cache_free(pte_cache,
 				phys_to_virt(page_to_pseudophys(pte)));
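
The pgtable.c hunks replace pointer comparisons against highmem_start_page with the PageHighMem() page-flag test. The pointer idiom assumes mem_map is a single array ordered by physical address, an assumption that breaks on discontiguous-memory configurations; the per-page flag holds regardless of memory-map layout. A minimal sketch contrasting the two idioms (the helper names are hypothetical, for illustration only):

    #include <linux/mm.h>	/* struct page, PageHighMem() */

    /* Old idiom, as removed above: depends on mem_map ordering. */
    static inline int pte_is_highmem_old(struct page *page)
    {
    	return page >= highmem_start_page;
    }

    /* New idiom: tests the PG_highmem flag on the page itself. */
    static inline int pte_is_highmem_new(struct page *page)
    {
    	return PageHighMem(page);
    }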
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/highmem.h	Mon Mar 14 19:24:43 2005 +0000
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/highmem.h	Mon Mar 14 22:29:15 2005 +0000
@@ -73,7 +73,6 @@ void kunmap(struct page *page);
 void *kmap_atomic(struct page *page, enum km_type type);
 void *kmap_atomic_pte(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
-void kunmap_atomic_force(void *kvaddr, enum km_type type);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #define flush_cache_kmaps()	do { } while (0)
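
With kunmap_atomic_force() dropped from the interface, the read-only mapping variant is now paired with the ordinary kunmap_atomic(). A hypothetical call site (the pte_page and address variables and the KM_PTE0 slot are illustrative, not taken from this changeset) might look like:

    	/* Map a possibly-highmem pagetable page. Under Xen the mapping
    	 * must be read-only, since pages in use as pagetables may not be
    	 * mapped writable anywhere; kmap_atomic_pte() provides that.
    	 */
    	pte_t *ptep = (pte_t *)kmap_atomic_pte(pte_page, KM_PTE0);
    	pte_t entry = ptep[pte_index(address)];
    	kunmap_atomic(ptep, KM_PTE0);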