mm, arch: consolidate hugetlb CMA reservation

Every architecture that supports the hugetlb_cma command line parameter
reserves CMA areas for hugetlb during setup_arch().

This obscures the ordering of hugetlb CMA initialization with respect to
the rest of the core MM initialization.

Introduce an arch_hugetlb_cma_order() callback to allow architectures to
report the desired order per bit of CMA areas, and provide a weak
implementation of arch_hugetlb_cma_order() for architectures that don't
support hugetlb with CMA.

Use this callback in hugetlb_cma_reserve() instead of passing the order as
a parameter, and call hugetlb_cma_reserve() from mm_core_init_early()
rather than having it spread over architecture-specific code.

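The weak default means an architecture that does not override the callback
simply opts out of hugetlb CMA.  As a standalone illustration of that
pattern (not kernel code: the printf() reporting, main() driver and the
decision to print rather than reserve are made up for the example; only
the function names mirror this patch), the following compiles and runs on
its own:

    #include <stdio.h>

    /* Weak default, mirroring the __weak default this patch adds:
     * architectures without gigantic-page CMA support keep it and
     * report order 0. */
    __attribute__((weak)) unsigned int arch_hugetlb_cma_order(void)
    {
            return 0;
    }

    /*
     * An architecture would supply a strong definition in its own
     * object file, e.g. returning PUD_SHIFT - PAGE_SHIFT, and the
     * linker prefers that over the weak default above.
     */

    static void hugetlb_cma_reserve(void)
    {
            unsigned int order = arch_hugetlb_cma_order();

            if (!order) {
                    printf("hugetlb CMA unsupported, skipping reservation\n");
                    return;
            }
            printf("reserving hugetlb CMA with order %u\n", order);
    }

    int main(void)
    {
            /* In the kernel this call now happens from mm_core_init_early(). */
            hugetlb_cma_reserve();
            return 0;
    }

Linking in a second object that provides a non-weak arch_hugetlb_cma_order()
makes the reservation path pick up that architecture's order, which is how
the per-architecture overrides in the diff below plug in.
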
Link: https://lkml.kernel.org/r/20260111082105.290734-28-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alex Shi <alexs@kernel.org>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Klara Modin <klarasmodin@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Magnus Lindholm <linmag7@gmail.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Pratyush Yadav <pratyush@kernel.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@ -125,7 +125,7 @@ The contiguous memory allocator (CMA) enables reservation of contiguous memory
regions on NUMA nodes during early boot. However, CMA cannot reserve memory
on NUMA nodes that are not online during early boot. ::
void __init hugetlb_cma_reserve(int order) {
void __init hugetlb_cma_reserve(void) {
if (!node_online(nid))
/* do not allow reservations */
}

@ -56,8 +56,6 @@ extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_PTEP_GET
extern pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void __init arm64_hugetlb_cma_reserve(void);
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);

@ -36,16 +36,12 @@
* huge pages could still be served from those areas.
*/
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
unsigned int arch_hugetlb_cma_order(void)
{
int order;
if (pud_sect_supported())
order = PUD_SHIFT - PAGE_SHIFT;
else
order = CONT_PMD_SHIFT - PAGE_SHIFT;
return PUD_SHIFT - PAGE_SHIFT;
hugetlb_cma_reserve(order);
return CONT_PMD_SHIFT - PAGE_SHIFT;
}
#endif /* CONFIG_CMA */

@ -311,15 +311,6 @@ void __init bootmem_init(void)
arch_numa_init();
/*
* must be done after arch_numa_init() which calls numa_init() to
* initialize node_online_map that gets used in hugetlb_cma_reserve()
* while allocating required CMA size across online nodes.
*/
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
arm64_hugetlb_cma_reserve();
#endif
kvm_hyp_reserve();
dma_limits_init();

@ -68,7 +68,6 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
void gigantic_hugetlb_cma_reserve(void) __init;
#include <asm-generic/hugetlb.h>
#else /* ! CONFIG_HUGETLB_PAGE */
@ -77,10 +76,6 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
{
}
static inline void __init gigantic_hugetlb_cma_reserve(void)
{
}
static inline void __init hugetlbpage_init_defaultsize(void)
{
}

@ -1003,7 +1003,6 @@ void __init setup_arch(char **cmdline_p)
fadump_cma_init();
kdump_cma_reserve();
kvm_cma_reserve();
gigantic_hugetlb_cma_reserve();
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);

@ -200,18 +200,15 @@ static int __init hugetlbpage_init(void)
arch_initcall(hugetlbpage_init);
void __init gigantic_hugetlb_cma_reserve(void)
unsigned int __init arch_hugetlb_cma_order(void)
{
unsigned long order = 0;
if (radix_enabled())
order = PUD_SHIFT - PAGE_SHIFT;
return PUD_SHIFT - PAGE_SHIFT;
else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
/*
* For pseries we do use ibm,expected#pages for reserving 16G pages.
*/
order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
return mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
if (order)
hugetlb_cma_reserve(order);
return 0;
}

@ -447,3 +447,11 @@ static __init int gigantic_pages_init(void)
}
arch_initcall(gigantic_pages_init);
#endif
unsigned int __init arch_hugetlb_cma_order(void)
{
if (IS_ENABLED(CONFIG_64BIT))
return PUD_SHIFT - PAGE_SHIFT;
return 0;
}

@ -311,8 +311,6 @@ static void __init setup_bootmem(void)
memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
dma_contiguous_reserve(dma32_phys_limit);
if (IS_ENABLED(CONFIG_64BIT))
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
}
#ifdef CONFIG_RELOCATABLE

@ -963,8 +963,6 @@ void __init setup_arch(char **cmdline_p)
setup_uv();
dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve();
if (cpu_has_edat2())
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP

@ -255,3 +255,11 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
else
return false;
}
unsigned int __init arch_hugetlb_cma_order(void)
{
if (cpu_has_edat2())
return PUD_SHIFT - PAGE_SHIFT;
return 0;
}

@ -1189,10 +1189,6 @@ void __init setup_arch(char **cmdline_p)
initmem_init();
dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
if (boot_cpu_has(X86_FEATURE_GBPAGES)) {
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
}
/*
* Reserve memory for crash kernel after SRAT is parsed so that it
* won't consume hotpluggable memory.

@ -42,3 +42,11 @@ static __init int gigantic_pages_init(void)
arch_initcall(gigantic_pages_init);
#endif
#endif
unsigned int __init arch_hugetlb_cma_order(void)
{
if (boot_cpu_has(X86_FEATURE_GBPAGES))
return PUD_SHIFT - PAGE_SHIFT;
return 0;
}

@ -281,6 +281,8 @@ void fixup_hugetlb_reservations(struct vm_area_struct *vma);
void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
int hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
unsigned int arch_hugetlb_cma_order(void);
#else /* !CONFIG_HUGETLB_PAGE */
static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
@ -1322,9 +1324,9 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
}
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_reserve(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
static inline __init void hugetlb_cma_reserve(void)
{
}
#endif

@ -134,12 +134,24 @@ static int __init cmdline_parse_hugetlb_cma_only(char *p)
early_param("hugetlb_cma_only", cmdline_parse_hugetlb_cma_only);
void __init hugetlb_cma_reserve(int order)
unsigned int __weak arch_hugetlb_cma_order(void)
{
unsigned long size, reserved, per_node;
return 0;
}
void __init hugetlb_cma_reserve(void)
{
unsigned long size, reserved, per_node, order;
bool node_specific_cma_alloc = false;
int nid;
if (!hugetlb_cma_size)
return;
order = arch_hugetlb_cma_order();
if (!order)
return;
/*
* HugeTLB CMA reservation is required for gigantic
* huge pages which could not be allocated via the
@ -149,9 +161,6 @@ void __init hugetlb_cma_reserve(int order)
VM_WARN_ON(order <= MAX_PAGE_ORDER);
cma_reserve_called = true;
if (!hugetlb_cma_size)
return;
hugetlb_bootmem_set_nodes();
for (nid = 0; nid < MAX_NUMNODES; nid++) {

@ -2677,6 +2677,7 @@ void __init __weak mem_init(void)
void __init mm_core_init_early(void)
{
hugetlb_cma_reserve();
hugetlb_bootmem_alloc();
free_area_init();