ia64/linux-2.6.18-xen.hg

changeset 712:03a4b2b9d85d

linux/i386: utilize hypervisor highmem handling helpers

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Oct 27 13:34:49 2008 +0000 (2008-10-27)
parents 61eafcbeaeb4
children f40f4f86d5a2
files arch/i386/mm/highmem-xen.c include/asm-i386/mach-xen/asm/highmem.h include/linux/highmem.h
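In short, on an i386 Xen guest that advertises XENFEAT_highmem_assist, the arch-level clear_highpage() and copy_highpage() now ask the hypervisor to clear or copy highmem pages directly (MMUEXT_CLEAR_PAGE / MMUEXT_COPY_PAGE) instead of mapping them into the guest with kmap_atomic(); the kmap_atomic() path is kept as the fallback when the feature is absent or the hypercall fails. Generic callers are unchanged, because the __HAVE_ARCH_* guards added to include/linux/highmem.h simply compile out the generic inlines. A minimal usage sketch follows; init_new_user_page() is hypothetical and for illustration only, clear_user_highpage() is the real helper exercised by this change:

    #include <linux/highmem.h>
    #include <linux/mm.h>

    /*
     * Hypothetical caller, for illustration only.  Generic code keeps
     * calling clear_user_highpage() as before; with this changeset the
     * i386/Xen build routes it to the clear_highpage() added in
     * arch/i386/mm/highmem-xen.c, which tries the MMUEXT_CLEAR_PAGE
     * hypercall first and falls back to kmap_atomic() + clear_page().
     */
    static void init_new_user_page(struct page *page, unsigned long vaddr)
    {
    	clear_user_highpage(page, vaddr);
    }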
line diff
--- a/arch/i386/mm/highmem-xen.c	Mon Oct 27 13:34:34 2008 +0000
+++ b/arch/i386/mm/highmem-xen.c	Mon Oct 27 13:34:49 2008 +0000
@@ -128,9 +128,56 @@ struct page *kmap_atomic_to_page(void *p
 	return pte_page(*pte);
 }
 
+void clear_highpage(struct page *page)
+{
+	void *kaddr;
+
+	if (likely(xen_feature(XENFEAT_highmem_assist))
+	    && PageHighMem(page)) {
+		struct mmuext_op meo;
+
+		meo.cmd = MMUEXT_CLEAR_PAGE;
+		meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
+		if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
+			return;
+	}
+
+	kaddr = kmap_atomic(page, KM_USER0);
+	clear_page(kaddr);
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+void copy_highpage(struct page *to, struct page *from)
+{
+	void *vfrom, *vto;
+
+	if (likely(xen_feature(XENFEAT_highmem_assist))
+	    && (PageHighMem(from) || PageHighMem(to))) {
+		unsigned long from_pfn = page_to_pfn(from);
+		unsigned long to_pfn = page_to_pfn(to);
+		struct mmuext_op meo;
+
+		meo.cmd = MMUEXT_COPY_PAGE;
+		meo.arg1.mfn = pfn_to_mfn(to_pfn);
+		meo.arg2.src_mfn = pfn_to_mfn(from_pfn);
+		if (mfn_to_pfn(meo.arg2.src_mfn) == from_pfn
+		    && mfn_to_pfn(meo.arg1.mfn) == to_pfn
+		    && HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
+			return;
+	}
+
+	vfrom = kmap_atomic(from, KM_USER0);
+	vto = kmap_atomic(to, KM_USER1);
+	copy_page(vto, vfrom);
+	kunmap_atomic(vfrom, KM_USER0);
+	kunmap_atomic(vto, KM_USER1);
+}
+
 EXPORT_SYMBOL(kmap);
 EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kmap_atomic_pte);
 EXPORT_SYMBOL(kunmap_atomic);
 EXPORT_SYMBOL(kmap_atomic_to_page);
+EXPORT_SYMBOL(clear_highpage);
+EXPORT_SYMBOL(copy_highpage);
--- a/include/asm-i386/mach-xen/asm/highmem.h	Mon Oct 27 13:34:34 2008 +0000
+++ b/include/asm-i386/mach-xen/asm/highmem.h	Mon Oct 27 13:34:49 2008 +0000
@@ -75,6 +75,23 @@ struct page *kmap_atomic_to_page(void *p
 
 #define flush_cache_kmaps()	do { } while (0)
 
+void clear_highpage(struct page *);
+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	clear_highpage(page);
+}
+#define __HAVE_ARCH_CLEAR_HIGHPAGE
+#define __HAVE_ARCH_CLEAR_USER_HIGHPAGE
+
+void copy_highpage(struct page *to, struct page *from);
+static inline void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
+{
+	copy_highpage(to, from);
+}
+#define __HAVE_ARCH_COPY_HIGHPAGE
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_HIGHMEM_H */
--- a/include/linux/highmem.h	Mon Oct 27 13:34:34 2008 +0000
+++ b/include/linux/highmem.h	Mon Oct 27 13:34:49 2008 +0000
@@ -50,6 +50,7 @@ static inline void *kmap(struct page *pa
 
 #endif /* CONFIG_HIGHMEM */
 
+#ifndef __HAVE_ARCH_CLEAR_USER_HIGHPAGE
 /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
 static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
@@ -59,6 +60,7 @@ static inline void clear_user_highpage(s
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
+#endif
 
 #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 static inline struct page *
@@ -73,12 +75,14 @@ alloc_zeroed_user_highpage(struct vm_are
 }
 #endif
 
+#ifndef __HAVE_ARCH_CLEAR_HIGHPAGE
 static inline void clear_highpage(struct page *page)
 {
 	void *kaddr = kmap_atomic(page, KM_USER0);
 	clear_page(kaddr);
 	kunmap_atomic(kaddr, KM_USER0);
 }
+#endif
 
 /*
  * Same but also flushes aliased cache contents to RAM.
@@ -95,6 +99,7 @@ static inline void memclear_highpage_flu
 	kunmap_atomic(kaddr, KM_USER0);
 }
 
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
 static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
 {
 	char *vfrom, *vto;
@@ -107,7 +112,9 @@ static inline void copy_user_highpage(st
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
+#endif
 
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE
 static inline void copy_highpage(struct page *to, struct page *from)
 {
 	char *vfrom, *vto;
@@ -118,5 +125,6 @@ static inline void copy_highpage(struct 
 	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
 }
+#endif
 
 #endif /* _LINUX_HIGHMEM_H */