mm/memory: page_add_anon_rmap() -> folio_add_anon_rmap_pte()
author David Hildenbrand <david@redhat.com>
Wed, 20 Dec 2023 22:44:44 +0000 (23:44 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 29 Dec 2023 19:58:52 +0000 (11:58 -0800)
Let's convert restore_exclusive_pte() and do_swap_page().  While at it,
perform some folio conversion in restore_exclusive_pte().

Link: https://lkml.kernel.org/r/20231220224504.646757-21-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
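
Both hunks below follow the same caller-side pattern: look up the folio once with page_folio() and then call the folio-based rmap helper, passing the folio together with the exact subpage. A rough userspace sketch of that change, using hypothetical stand-in types and stub bodies rather than the kernel implementation:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel types involved. */
struct page  { int idx; };
struct folio { struct page pages[4]; };

static struct folio *page_folio_demo(struct page *page)
{
	/* The real page_folio() derives the containing folio from any subpage. */
	return (struct folio *)(page - page->idx);
}

/* Old style: only the page is passed; folio state is derived internally. */
static void page_add_anon_rmap_demo(struct page *page)
{
	printf("old: anon rmap for subpage %d\n", page->idx);
}

/* New style: the folio is passed explicitly alongside the exact subpage. */
static void folio_add_anon_rmap_pte_demo(struct folio *folio, struct page *page)
{
	printf("new: anon rmap for subpage %d of folio %p\n",
	       page->idx, (void *)folio);
}

int main(void)
{
	struct folio f = { .pages = { {0}, {1}, {2}, {3} } };
	struct page *page = &f.pages[1];

	page_add_anon_rmap_demo(page);                              /* before */
	folio_add_anon_rmap_pte_demo(page_folio_demo(page), page);  /* after  */
	return 0;
}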
mm/memory.c

index 0e5e069aaec401d95b8af5775e921df5948828d1..e84917c118ac71b1ac6ef70b6453d60f111902af 100644
@@ -710,6 +710,7 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
                                  struct page *page, unsigned long address,
                                  pte_t *ptep)
 {
+       struct folio *folio = page_folio(page);
        pte_t orig_pte;
        pte_t pte;
        swp_entry_t entry;
@@ -725,14 +726,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
        else if (is_writable_device_exclusive_entry(entry))
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 
-       VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page)));
+       VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) &&
+                                          PageAnonExclusive(page)), folio);
 
        /*
         * No need to take a page reference as one was already
         * created when the swap entry was made.
         */
-       if (PageAnon(page))
-               page_add_anon_rmap(page, vma, address, RMAP_NONE);
+       if (folio_test_anon(folio))
+               folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE);
        else
                /*
                 * Currently device exclusive access only supports anonymous
@@ -4076,7 +4078,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                folio_add_new_anon_rmap(folio, vma, vmf->address);
                folio_add_lru_vma(folio, vma);
        } else {
-               page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
+               folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+                                       rmap_flags);
        }
 
        VM_BUG_ON(!folio_test_anon(folio) ||
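
A note on naming for readers who have not followed the earlier patches in this series: the _pte suffix means the helper maps a single PTE, and as far as I can tell it simply forwards to the multi-PTE variant folio_add_anon_rmap_ptes() with nr_pages == 1. A small userspace sketch of that delegation, with stand-in types and a stub body rather than the rmap.h definition:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel types involved. */
struct page  { int idx; };
struct folio { struct page pages[4]; };
struct vm_area_struct { int dummy; };
typedef unsigned int rmap_t;
#define RMAP_NONE 0u

/* Multi-PTE variant: accounts nr_pages consecutive subpages of one folio. */
static void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page,
				     int nr_pages, struct vm_area_struct *vma,
				     unsigned long address, rmap_t flags)
{
	(void)vma; (void)flags;
	for (int i = 0; i < nr_pages; i++)
		printf("map subpage %d of folio %p at %#lx\n",
		       page[i].idx, (void *)folio, address + i * 4096UL);
}

/* Single-PTE helper: forwards to the multi-PTE variant with nr_pages == 1. */
#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
	folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)

int main(void)
{
	struct folio f = { .pages = { {0}, {1}, {2}, {3} } };
	struct vm_area_struct vma = { 0 };

	folio_add_anon_rmap_pte(&f, &f.pages[2], &vma, 0x7f0000002000UL, RMAP_NONE);
	return 0;
}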