xenbits.xensource.com Git - people/aperard/linux-arndale.git/commitdiff
ARM: mm: Add support for flushing HugeTLB pages.
author: Steve Capper <steve.capper@arm.com>
Thu, 19 Jul 2012 10:51:50 +0000 (11:51 +0100)
committer: Vasanth Ananthan <vasanthananthan@gmail.com>
Tue, 8 Jan 2013 10:34:47 +0000 (16:04 +0530)
On ARM we use the __flush_dcache_page function to flush the dcache of pages
when needed; usually when the PG_dcache_clean bit is unset and we are setting a
PTE.

A HugeTLB page is represented as a compound page consisting of an array of
pages. Thus to flush the dcache of a HugeTLB page, one must flush more than a
single page.

This patch modifies __flush_dcache_page such that all constituent pages of a
HugeTLB page are flushed.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
arch/arm/mm/flush.c

index 1c8f7f56417598303cac08ca3baa8353361366ff..0a69cb83a2273b06992a2fb84c9de6c921c6305a 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <linux/hugetlb.h>
 
 #include "mm.h"
 
@@ -168,17 +169,21 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
         * coherent with the kernels mapping.
         */
        if (!PageHighMem(page)) {
-               __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+               __cpuc_flush_dcache_area(page_address(page), (PAGE_SIZE << compound_order(page)));
        } else {
-               void *addr = kmap_high_get(page);
-               if (addr) {
-                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                       kunmap_high(page);
-               } else if (cache_is_vipt()) {
-                       /* unmapped pages might still be cached */
-                       addr = kmap_atomic(page);
-                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-                       kunmap_atomic(addr);
+               unsigned long i;
+               for(i = 0; i < (1 << compound_order(page)); i++) {
+                       struct page *cpage = page + i;
+                       void *addr = kmap_high_get(cpage);
+                       if (addr) {
+                               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                               kunmap_high(cpage);
+                       } else if (cache_is_vipt()) {
+                               /* unmapped pages might still be cached */
+                               addr = kmap_atomic(cpage);
+                               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                               kunmap_atomic(addr);
+                       }
                }
        }