mm/khugepaged: cleanup memcg uncharge for failure path
author		Peter Xu <peterx@redhat.com>
		Fri, 3 Mar 2023 15:12:18 +0000 (10:12 -0500)
committer	Andrew Morton <akpm@linux-foundation.org>
		Tue, 28 Mar 2023 23:20:11 +0000 (16:20 -0700)
Explicit memcg uncharging is not needed when the memcg accounting has the
same lifespan as the page/folio.  That is now the case for khugepaged
after Yang & Zach's recent rework, since the hpage is allocated fresh for
each collapse rather than being cached.

Clean up the explicit memcg uncharge in the khugepaged failure paths and
leave that to put_page().
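
For context: with that rework, the hpage is charged to the memcg at
allocation time, and the charge is released automatically on the page
free path, so dropping the last reference with put_page() uncharges the
folio as a side effect.  A minimal sketch of the allocate-and-charge
pattern (hypothetical helper name, simplified from khugepaged's
allocation path, not the verbatim kernel code):

/*
 * Illustrative sketch only: charge the hpage at allocation so the
 * memcg charge lives and dies with the page itself.
 */
static struct page *alloc_charged_hpage(struct mm_struct *mm, gfp_t gfp)
{
	struct page *hpage = alloc_pages(gfp, HPAGE_PMD_ORDER);

	if (!hpage)
		return NULL;

	if (mem_cgroup_charge(page_folio(hpage), mm, gfp)) {
		/* never charged, so a plain put_page() is enough */
		put_page(hpage);
		return NULL;
	}

	/* from here on, freeing the page also uncharges it */
	return hpage;
}

With that invariant, the explicit mem_cgroup_uncharge() before
put_page() in the failure paths below is redundant, which is what this
patch removes.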

Link: https://lkml.kernel.org/r/20230303151218.311015-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Suggested-by: Zach O'Keefe <zokeefe@google.com>
Reviewed-by: Zach O'Keefe <zokeefe@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: David Stevens <stevensd@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 17562c6925462a40efca70bfd81b3410039bce1c..074ea534f786d2bd821bb358325788c59a390f1b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1135,10 +1135,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 out_up_write:
        mmap_write_unlock(mm);
 out_nolock:
-       if (hpage) {
-               mem_cgroup_uncharge(page_folio(hpage));
+       if (hpage)
                put_page(hpage);
-       }
        trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
        return result;
 }
@@ -2137,10 +2135,8 @@ xa_unlocked:
                unlock_page(hpage);
 out:
        VM_BUG_ON(!list_empty(&pagelist));
-       if (hpage) {
-               mem_cgroup_uncharge(page_folio(hpage));
+       if (hpage)
                put_page(hpage);
-       }
 
        trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
        return result;