mm/rmap: abstract large mapcount operations for large folios (!hugetlb)

Let's abstract the large mapcount operations so we can extend them easily.
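
Concretely, call sites stop open-coding atomics on folio->_large_mapcount
and instead go through helpers that also receive the VMA, giving future
extensions a single place to hook into. A minimal before/after sketch
(the call site shown is illustrative):

	/* Before: open-coded atomic update of the large mapcount. */
	atomic_add(nr_pages, &folio->_large_mapcount);

	/* After: the helper takes the VMA as well, even though it is
	 * not consumed yet; extensions only need to touch rmap.h.
	 */
	folio_add_large_mapcount(folio, nr_pages, vma);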

Link: https://lkml.kernel.org/r/20250303163014.1128035-10-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zefan Li <lizefan.x@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h

@@ -173,6 +173,30 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
 
 struct anon_vma *folio_get_anon_vma(const struct folio *folio);
 
+static inline void folio_set_large_mapcount(struct folio *folio, int mapcount,
+		struct vm_area_struct *vma)
+{
+	/* Note: mapcounts start at -1. */
+	atomic_set(&folio->_large_mapcount, mapcount - 1);
+}
+
+static inline void folio_add_large_mapcount(struct folio *folio,
+		int diff, struct vm_area_struct *vma)
+{
+	atomic_add(diff, &folio->_large_mapcount);
+}
+
+static inline void folio_sub_large_mapcount(struct folio *folio,
+		int diff, struct vm_area_struct *vma)
+{
+	atomic_sub(diff, &folio->_large_mapcount);
+}
+
+#define folio_inc_large_mapcount(folio, vma) \
+	folio_add_large_mapcount(folio, 1, vma)
+#define folio_dec_large_mapcount(folio, vma) \
+	folio_sub_large_mapcount(folio, 1, vma)
+
 /* RMAP flags, currently only relevant for some anon rmap operations. */
 typedef int __bitwise rmap_t;
 
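
Note the bias in folio_set_large_mapcount(): _large_mapcount stores
"mapcount - 1" because an unmapped folio sits at -1. A small worked
example (illustrative, not part of the patch):

	/* A freshly faulted-in large folio, mapped exactly once: */
	folio_set_large_mapcount(folio, 1, vma);
	/* _large_mapcount is now 0; readers such as folio_large_mapcount()
	 * add the bias back and observe a mapcount of 1.
	 */
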
@@ -352,12 +376,12 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
 		do {
 			atomic_inc(&page->_mapcount);
 		} while (page++, --nr_pages > 0);
-		atomic_add(orig_nr_pages, &folio->_large_mapcount);
+		folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
 		break;
 	case RMAP_LEVEL_PMD:
 	case RMAP_LEVEL_PUD:
 		atomic_inc(&folio->_entire_mapcount);
-		atomic_inc(&folio->_large_mapcount);
+		folio_inc_large_mapcount(folio, dst_vma);
 		break;
 	}
 }
@@ -451,7 +475,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
 			ClearPageAnonExclusive(page);
 			atomic_inc(&page->_mapcount);
 		} while (page++, --nr_pages > 0);
-		atomic_add(orig_nr_pages, &folio->_large_mapcount);
+		folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
 		break;
 	case RMAP_LEVEL_PMD:
 	case RMAP_LEVEL_PUD:
@@ -461,7 +485,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
 			ClearPageAnonExclusive(page);
 		}
 		atomic_inc(&folio->_entire_mapcount);
-		atomic_inc(&folio->_large_mapcount);
+		folio_inc_large_mapcount(folio, dst_vma);
 		break;
 	}
 	return 0;

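Note that both duplication paths above pass dst_vma: they run when a
range of mappings is duplicated (e.g., during fork()), and the mapcount
change belongs to the VMA that gains the mappings. Shape of the call
(as in the hunks above):

	/* __folio_try_dup_anon_rmap() duplicates nr_pages mappings into
	 * dst_vma, so that is the VMA the helper gets to see.
	 */
	folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
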
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c

@@ -1266,7 +1266,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
 		    atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED)
 			nr = first;
 
-		atomic_add(orig_nr_pages, &folio->_large_mapcount);
+		folio_add_large_mapcount(folio, orig_nr_pages, vma);
 		break;
 	case RMAP_LEVEL_PMD:
 	case RMAP_LEVEL_PUD:
@@ -1290,7 +1290,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
 				nr = 0;
 			}
 		}
-		atomic_inc(&folio->_large_mapcount);
+		folio_inc_large_mapcount(folio, vma);
 		break;
 	}
 	return nr;
@@ -1556,14 +1556,12 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 				SetPageAnonExclusive(page);
 		}
 
-		/* increment count (starts at -1) */
-		atomic_set(&folio->_large_mapcount, nr - 1);
+		folio_set_large_mapcount(folio, nr, vma);
 		atomic_set(&folio->_nr_pages_mapped, nr);
 	} else {
 		/* increment count (starts at -1) */
 		atomic_set(&folio->_entire_mapcount, 0);
-		/* increment count (starts at -1) */
-		atomic_set(&folio->_large_mapcount, 0);
+		folio_set_large_mapcount(folio, 1, vma);
 		atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED);
 		if (exclusive)
 			SetPageAnonExclusive(&folio->page);
@@ -1665,7 +1663,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 			break;
 		}
 
-		atomic_sub(nr_pages, &folio->_large_mapcount);
+		folio_sub_large_mapcount(folio, nr_pages, vma);
 		do {
 			last += atomic_add_negative(-1, &page->_mapcount);
 		} while (page++, --nr_pages > 0);
@@ -1678,7 +1676,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
 		break;
 	case RMAP_LEVEL_PMD:
 	case RMAP_LEVEL_PUD:
-		atomic_dec(&folio->_large_mapcount);
+		folio_dec_large_mapcount(folio, vma);
 		last = atomic_add_negative(-1, &folio->_entire_mapcount);
 		if (last) {
 			nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped);
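
With every adjustment funneled through the new helpers, extending the
tracking only means touching the helpers themselves. A hypothetical
sketch of such an extension (mm_track_folio_mapcount() is an invented
name, purely for illustration; it is not part of this patch):

	static inline void folio_add_large_mapcount(struct folio *folio,
			int diff, struct vm_area_struct *vma)
	{
		atomic_add(diff, &folio->_large_mapcount);
		/* Hypothetical hook: per-MM accounting derived from the
		 * now-available VMA (invented helper, not in this patch).
		 */
		mm_track_folio_mapcount(vma->vm_mm, folio, diff);
	}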