From: Will Deacon
Date: Tue, 2 Oct 2012 10:18:52 +0000 (+0100)
Subject: mm: thp: Set the accessed flag for old pages on access fault.
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=7839a75cfaf679dffec1e4dba727555beb203ac8;p=people%2Faperard%2Flinux-arndale.git

mm: thp: Set the accessed flag for old pages on access fault.

On x86 memory accesses to pages without the ACCESSED flag set result in
the ACCESSED flag being set automatically. With the ARM architecture a
page access fault is raised instead (and it will continue to be raised
until the ACCESSED flag is set for the appropriate PTE/PMD).

For normal memory pages, handle_pte_fault will call pte_mkyoung
(effectively setting the ACCESSED flag). For transparent huge pages,
pmd_mkyoung will only be called for a write fault.

This patch ensures that faults on transparent hugepages which do not
result in a CoW update the access flags for the faulting pmd.

Cc: Chris Metcalf
Acked-by: Kirill A. Shutemov
Reviewed-by: Andrea Arcangeli
Signed-off-by: Will Deacon
Signed-off-by: Steve Capper
---

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b31cb7da03466..62a0d5aad401d 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -8,6 +8,8 @@ extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
 extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
+extern void huge_pmd_set_accessed(struct vm_area_struct *vma,
+				  unsigned long address, pmd_t *pmd, int dirty);
 extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40f17c34b4153..76bee59ded54d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -879,6 +879,14 @@ out_free_pages:
	goto out;
 }

+void huge_pmd_set_accessed(struct vm_area_struct *vma, unsigned long address,
+			   pmd_t *pmd, int dirty)
+{
+	pmd_t entry = pmd_mkyoung(*pmd);
+	if (pmdp_set_access_flags(vma, address & HPAGE_PMD_MASK, pmd, entry, dirty))
+		update_mmu_cache_pmd(vma, address, pmd);
+}
+
 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
 {
diff --git a/mm/memory.c b/mm/memory.c
index 221fc9ffcab1d..95b66187b0b5a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3537,7 +3537,8 @@ retry:
		barrier();
		if (pmd_trans_huge(orig_pmd)) {
-			if (flags & FAULT_FLAG_WRITE &&
+			unsigned int dirty = flags & FAULT_FLAG_WRITE;
+			if (dirty && !pmd_write(orig_pmd) &&
			    !pmd_trans_splitting(orig_pmd)) {
				ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
@@ -3550,7 +3551,13 @@ retry:
				if (unlikely(ret & VM_FAULT_OOM))
					goto retry;
				return ret;
+			} else if (pmd_trans_huge_lock(pmd, vma) == 1) {
+				if (likely(pmd_same(*pmd, orig_pmd)))
+					huge_pmd_set_accessed(vma, address, pmd,
+							      dirty);
+				spin_unlock(&mm->page_table_lock);
			}
+			return 0;
		}
	}