arm64 fixes for -rc5
commit 9d20040d71

- Fix a sporadic boot failure due to incorrect randomization of the
  linear map on systems that support it

- Fix the zapping (both clearing the entries *and* invalidating the
  TLB) of hugetlb PTEs constructed using the contiguous bit

-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmfDdBIQHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNN0GB/9gmEOX1GwMU6wFjPYqvjWlkGCFDwrldO84
uF9jEUbPaw3P4xHTOFyPCfEWidktqa+yDVbe90mB7GVOM+1eEZ81em1k1hYBEXbz
Q73Nl5VrNzxX4BjOrdxxoTSaR/TKklUh5mqWfIzy1RxEnBfpr/GuDPtUn1GViCAs
sU16Ju12UdYXn3tyHFDHpjZS9WYZskfnrvS0QvXinz0LahZrCkeaH+ptYHrTjMFx
hxyrRQwOlqLnZWvjLOegH9AC6uyRkKDinXKhXqHYvUfcfEkQsKwM7Fpc6cviUD0Q
X2npLNegnYxPniwmLpXfNXazPDnKVMzxb9lpqw1fZS3nAuh8XOde
=RqDZ
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Ryan's been hard at work finding and fixing mm bugs in the arm64
  code, so here's a small crop of fixes for -rc5.

  The main changes are to fix our zapping of non-present PTEs for
  hugetlb entries created using the contiguous bit in the page-table
  rather than a block entry at the level above. Prior to these fixes,
  we were pulling the contiguous bit back out of the PTE in order to
  determine the size of the hugetlb page, but this is clearly bogus if
  the thing isn't present and consequently both the clearing of the
  PTE(s) and the TLB invalidation were unreliable.

  Although the problem was found by code inspection, we really don't
  want this sitting around waiting to trigger and the changes are CC'd
  to stable accordingly.

  Note that the diffstat looks a lot worse than it really is;
  huge_ptep_get_and_clear() now takes a size argument from the core
  code and so all the arch implementations of that have been updated
  in a pretty mechanical fashion.

   - Fix a sporadic boot failure due to incorrect randomization of the
     linear map on systems that support it

   - Fix the zapping (both clearing the entries *and* invalidating the
     TLB) of hugetlb PTEs constructed using the contiguous bit"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: hugetlb: Fix flush_hugetlb_tlb_range() invalidation level
  arm64: hugetlb: Fix huge_ptep_get_and_clear() for non-present ptes
  mm: hugetlb: Add huge page size param to huge_ptep_get_and_clear()
  arm64/mm: Fix Boot panic on Ampere Altra
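Why the old huge_ptep_get_and_clear() contract was fragile is easiest to see in miniature. The sketch below is a self-contained userspace model, not kernel code: the bit positions are invented for illustration and the names merely echo the kernel helpers. Once an entry is non-present (a swap entry, say), the bit position that normally carries the contiguous hint belongs to a different encoding, so inferring the hugepage size from the PTE is unreliable; the interface change in this merge has the caller pass the size down instead.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only -- NOT the real arm64 descriptor layout. */
#define PTE_PRESENT (1ULL << 0)
#define PTE_CONT    (1ULL << 52) /* contiguous hint: meaningful only if present */

static bool pte_present(uint64_t pte) { return pte & PTE_PRESENT; }
static bool pte_cont(uint64_t pte)    { return pte & PTE_CONT; }

int main(void)
{
        uint64_t ptes[] = {
                PTE_PRESENT | PTE_CONT,       /* present contPTE: hint is real */
                (0xabcdULL << 40) | PTE_CONT, /* swap entry: same bit, no meaning */
        };

        for (int i = 0; i < 2; i++) {
                uint64_t pte = ptes[i];

                printf("pte %d: present=%d cont-bit=%d -> size hint is %s\n",
                       i, pte_present(pte), pte_cont(pte),
                       pte_present(pte) ? "usable" : "garbage");
        }
        return 0;
}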
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 				      unsigned long addr, pte_t *ptep,
 				      pte_t pte, int dirty);
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-				     unsigned long addr, pte_t *ptep);
+extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				     pte_t *ptep, unsigned long sz);
 #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
 extern void huge_ptep_set_wrprotect(struct mm_struct *mm,
 				    unsigned long addr, pte_t *ptep);
@@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
 {
 	unsigned long stride = huge_page_size(hstate_vma(vma));
 
-	if (stride == PMD_SIZE)
-		__flush_tlb_range(vma, start, end, stride, false, 2);
-	else if (stride == PUD_SIZE)
-		__flush_tlb_range(vma, start, end, stride, false, 1);
-	else
-		__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
+	switch (stride) {
+#ifndef __PAGETABLE_PMD_FOLDED
+	case PUD_SIZE:
+		__flush_tlb_range(vma, start, end, PUD_SIZE, false, 1);
+		break;
+#endif
+	case CONT_PMD_SIZE:
+	case PMD_SIZE:
+		__flush_tlb_range(vma, start, end, PMD_SIZE, false, 2);
+		break;
+	case CONT_PTE_SIZE:
+		__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3);
+		break;
+	default:
+		__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
+	}
 }
 
 #endif /* __ASM_HUGETLB_H */
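The new switch above is effectively a lookup from hugepage size to the (stride, translation-level hint) pair handed to __flush_tlb_range(). A runnable sketch of that table follows; the concrete sizes assume a 4 KiB granule (64K contiguous-PTE, 2M PMD, 32M contiguous-PMD, 1G PUD hugepages), which the diff itself does not spell out. The point, as we read it, is that contiguous mappings are still mapped by level-2 or level-3 entries, whereas the old code fell through to a hint of 0, a value that stopped meaning "level unknown" once TLBI_TTL_UNKNOWN was introduced.

#include <stdio.h>

/* Assumed arm64 4 KiB-granule hugepage sizes (not stated in this diff). */
struct map { const char *size; const char *stride; int level; };

int main(void)
{
        static const struct map tbl[] = {
                { "1G  (PUD)",      "PUD_SIZE",  1 },
                { "32M (cont PMD)", "PMD_SIZE",  2 },
                { "2M  (PMD)",      "PMD_SIZE",  2 },
                { "64K (cont PTE)", "PAGE_SIZE", 3 },
        };

        for (unsigned i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                printf("%-15s -> stride %-9s level %d\n",
                       tbl[i].size, tbl[i].stride, tbl[i].level);
        return 0;
}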
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
 
 static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
 {
-	int contig_ptes = 0;
+	int contig_ptes = 1;
 
 	*pgsize = size;
 
 	switch (size) {
-#ifndef __PAGETABLE_PMD_FOLDED
-	case PUD_SIZE:
-		if (pud_sect_supported())
-			contig_ptes = 1;
-		break;
-#endif
-	case PMD_SIZE:
-		contig_ptes = 1;
-		break;
 	case CONT_PMD_SIZE:
 		*pgsize = PMD_SIZE;
 		contig_ptes = CONT_PMDS;
@@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
 		*pgsize = PAGE_SIZE;
 		contig_ptes = CONT_PTES;
 		break;
+	default:
+		WARN_ON(!__hugetlb_valid_size(size));
 	}
 
 	return contig_ptes;
@@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm,
 			      unsigned long pgsize,
 			      unsigned long ncontig)
 {
-	pte_t orig_pte = __ptep_get(ptep);
-	unsigned long i;
-
-	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
-		pte_t pte = __ptep_get_and_clear(mm, addr, ptep);
-
-		/*
-		 * If HW_AFDBM is enabled, then the HW could turn on
-		 * the dirty or accessed bit for any page in the set,
-		 * so check them all.
-		 */
-		if (pte_dirty(pte))
-			orig_pte = pte_mkdirty(orig_pte);
-
-		if (pte_young(pte))
-			orig_pte = pte_mkyoung(orig_pte);
+	pte_t pte, tmp_pte;
+	bool present;
+
+	pte = __ptep_get_and_clear(mm, addr, ptep);
+	present = pte_present(pte);
+	while (--ncontig) {
+		ptep++;
+		addr += pgsize;
+		tmp_pte = __ptep_get_and_clear(mm, addr, ptep);
+		if (present) {
+			if (pte_dirty(tmp_pte))
+				pte = pte_mkdirty(pte);
+			if (pte_young(tmp_pte))
+				pte = pte_mkyoung(pte);
+		}
 	}
-	return orig_pte;
+	return pte;
 }
 
 static pte_t get_clear_contig_flush(struct mm_struct *mm,
@@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 		__pte_clear(mm, addr, ptep);
 }
 
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-			      unsigned long addr, pte_t *ptep)
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, unsigned long sz)
 {
 	int ncontig;
 	size_t pgsize;
-	pte_t orig_pte = __ptep_get(ptep);
-
-	if (!pte_cont(orig_pte))
-		return __ptep_get_and_clear(mm, addr, ptep);
-
-	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
 
+	ncontig = num_contig_ptes(sz, &pgsize);
 	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
 }
 
@@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
 
 pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
+	unsigned long psize = huge_page_size(hstate_vma(vma));
+
 	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
 		/*
 		 * Break-before-make (BBM) is required for all user space mappings
@@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
 		if (pte_user_exec(__ptep_get(ptep)))
 			return huge_ptep_clear_flush(vma, addr, ptep);
 	}
-	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
 }
 
 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
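The heart of the arm64 fix is the rewritten get_clear_contig() loop: every entry in the contiguous range is cleared unconditionally, but the HW-managed dirty/young bits from the tail entries are folded into the head PTE only when the head was present. Below is a minimal self-contained model of that control flow, with plain integers standing in for pte_t; it is illustrative only and skips everything hardware-specific.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRESENT (1u << 0)
#define DIRTY   (1u << 1)
#define YOUNG   (1u << 2)

/* Stand-in for __ptep_get_and_clear(): returns *ptep and zeroes it. */
static uint32_t get_and_clear(uint32_t *ptep)
{
        uint32_t pte = *ptep;

        *ptep = 0;
        return pte;
}

/* Mirrors the new loop: clear the whole contiguous set, accumulate
 * dirty/young into the head value only if the head was present. */
static uint32_t get_clear_contig(uint32_t *ptep, unsigned long ncontig)
{
        uint32_t pte = get_and_clear(ptep);
        bool present = pte & PRESENT;

        while (--ncontig) {
                uint32_t tmp = get_and_clear(++ptep);

                if (present) {
                        if (tmp & DIRTY)
                                pte |= DIRTY;
                        if (tmp & YOUNG)
                                pte |= YOUNG;
                }
        }
        return pte;
}

int main(void)
{
        uint32_t range[4] = { PRESENT, PRESENT | DIRTY, PRESENT, PRESENT | YOUNG };
        uint32_t head = get_clear_contig(range, 4);

        printf("head: dirty=%d young=%d (accumulated from the whole set)\n",
               !!(head & DIRTY), !!(head & YOUNG));
        printf("all cleared: %d\n",
               !(range[0] | range[1] | range[2] | range[3]));
        return 0;
}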
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -279,12 +279,7 @@ void __init arm64_memblock_init(void)
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
 		extern u16 memstart_offset_seed;
-
-		/*
-		 * Use the sanitised version of id_aa64mmfr0_el1 so that linear
-		 * map randomization can be enabled by shrinking the IPA space.
-		 */
-		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
 		int parange = cpuid_feature_extract_unsigned_field(
 					mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
 		s64 range = linear_region_size -
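The init.c hunk swaps a read of the sanitised ID_AA64MMFR0_EL1 copy for a raw read_cpuid() of the boot CPU's register. Our understanding, an inference rather than something stated in this diff, is that arm64_memblock_init() runs before the sanitised feature-register values are populated, so the sanitised read returned zero, the PARange field decoded to the smallest physical-address range, and the linear-map randomization then mis-sized its window, panicking machines such as the Ampere Altra. A toy model of that ordering hazard:

#include <stdint.h>
#include <stdio.h>

/* Toy model of the boot-ordering hazard; values are illustrative. */
static uint64_t sanitised_mmfr0;      /* populated late, during feature init */
static const uint64_t hw_mmfr0 = 0x6; /* pretend PARange says 52-bit PA */

static uint64_t read_sanitised_ftr_reg(void) { return sanitised_mmfr0; }
static uint64_t read_cpuid(void)             { return hw_mmfr0; }
static void init_cpu_features(void)          { sanitised_mmfr0 = hw_mmfr0; }

int main(void)
{
        /* Early boot: the sanitised copy has not been written yet. */
        printf("early sanitised read: parange=%llu (bogus default of 0)\n",
               (unsigned long long)(read_sanitised_ftr_reg() & 0xf));
        printf("early raw read:       parange=%llu (correct)\n",
               (unsigned long long)(read_cpuid() & 0xf));

        init_cpu_features(); /* in this model, runs after memblock init */
        printf("late sanitised read:  parange=%llu\n",
               (unsigned long long)(read_sanitised_ftr_reg() & 0xf));
        return 0;
}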
--- a/arch/loongarch/include/asm/hugetlb.h
+++ b/arch/loongarch/include/asm/hugetlb.h
@@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-					    unsigned long addr, pte_t *ptep)
+					    unsigned long addr, pte_t *ptep,
+					    unsigned long sz)
 {
 	pte_t clear;
 	pte_t pte = ptep_get(ptep);
@@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 					  unsigned long addr, pte_t *ptep)
 {
 	pte_t pte;
+	unsigned long sz = huge_page_size(hstate_vma(vma));
 
-	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
 	flush_tlb_page(vma, addr);
 	return pte;
 }
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file,
 
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-					    unsigned long addr, pte_t *ptep)
+					    unsigned long addr, pte_t *ptep,
+					    unsigned long sz)
 {
 	pte_t clear;
 	pte_t pte = *ptep;
@@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 					  unsigned long addr, pte_t *ptep)
 {
 	pte_t pte;
+	unsigned long sz = huge_page_size(hstate_vma(vma));
 
 	/*
 	 * clear the huge pte entry firstly, so that the other smp threads will
 	 * not get old pte entry after finishing flush_tlb_page and before
 	 * setting new huge pte entry
 	 */
-	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
 	flush_tlb_page(vma, addr);
 	return pte;
 }
--- a/arch/parisc/include/asm/hugetlb.h
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep);
+			      pte_t *ptep, unsigned long sz);
 
 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -126,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep)
+			      pte_t *ptep, unsigned long sz)
 {
 	pte_t entry;
 
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-					    unsigned long addr, pte_t *ptep)
+					    unsigned long addr, pte_t *ptep,
+					    unsigned long sz)
 {
 	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
 }
@@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 					  unsigned long addr, pte_t *ptep)
 {
 	pte_t pte;
+	unsigned long sz = huge_page_size(hstate_vma(vma));
 
-	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz);
 	flush_hugetlb_page(vma, addr);
 	return pte;
 }
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm,
 
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-			      unsigned long addr, pte_t *ptep);
+			      unsigned long addr, pte_t *ptep,
+			      unsigned long sz);
 
 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 			      unsigned long addr,
-			      pte_t *ptep)
+			      pte_t *ptep, unsigned long sz)
 {
 	pte_t orig_pte = ptep_get(ptep);
 	int pte_num;
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -25,8 +25,16 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 #define __HAVE_ARCH_HUGE_PTEP_GET
 pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 
+pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep);
+
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep,
+					    unsigned long sz)
+{
+	return __huge_ptep_get_and_clear(mm, addr, ptep);
+}
 
 static inline void arch_clear_hugetlb_flags(struct folio *folio)
 {
@@ -48,7 +56,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
 					  unsigned long address, pte_t *ptep)
 {
-	return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
+	return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
 }
 
 #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
@@ -59,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 	int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte);
 
 	if (changed) {
-		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+		__huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
 		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
 	}
 	return changed;
@@ -69,7 +77,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
 {
-	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+	pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep);
 
 	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
 }
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -188,8 +188,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 	return __rste_to_pte(pte_val(*ptep));
 }
 
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-			      unsigned long addr, pte_t *ptep)
+pte_t __huge_ptep_get_and_clear(struct mm_struct *mm,
+				unsigned long addr, pte_t *ptep)
 {
 	pte_t pte = huge_ptep_get(mm, addr, ptep);
 	pmd_t *pmdp = (pmd_t *) ptep;
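Note the s390 strategy, which differs from the other architectures: instead of threading sz through, the old implementation is renamed __huge_ptep_get_and_clear() and a new inline wrapper accepts (and ignores) the size argument, so the header's internal callers keep the old contract. A generic sketch of that compatibility-wrapper pattern, with invented names purely for illustration:

#include <stdio.h>

/* The original implementation keeps its old contract... */
static int __do_work(int a, int b)
{
        return a + b;
}

/* ...while the public entry point grows the new parameter and simply
 * forwards; internal callers that neither have nor need the extra
 * argument call the double-underscore helper directly. */
static inline int do_work(int a, int b, unsigned long sz)
{
        (void)sz; /* unused here, required by the cross-arch interface */
        return __do_work(a, b);
}

int main(void)
{
        printf("%d %d\n", do_work(1, 2, 4096), __do_work(1, 2));
        return 0;
}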
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep);
+			      pte_t *ptep, unsigned long sz);
 
 #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
 static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -260,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 }
 
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep)
+			      pte_t *ptep, unsigned long sz)
 {
 	unsigned int i, nptes, orig_shift, shift;
 	unsigned long size;
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -90,7 +90,7 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-		unsigned long addr, pte_t *ptep)
+		unsigned long addr, pte_t *ptep, unsigned long sz)
 {
 	return ptep_get_and_clear(mm, addr, ptep);
 }
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -1004,7 +1004,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
 static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
 						unsigned long addr, pte_t *ptep)
 {
-	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+	unsigned long psize = huge_page_size(hstate_vma(vma));
+
+	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize);
 }
 #endif
 
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5447,7 +5447,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
 	if (src_ptl != dst_ptl)
 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
 
-	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
+	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
 
 	if (need_clear_uffd_wp && pte_marker_uffd_wp(pte))
 		huge_pte_clear(mm, new_addr, dst_pte, sz);
@@ -5622,7 +5622,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
 		}
 
-		pte = huge_ptep_get_and_clear(mm, address, ptep);
+		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
 		if (huge_pte_dirty(pte))
 			set_page_dirty(page);