mm: convert wp_page_copy() to use folios
Conflicts: mm/memory.c - We don't have 7d4a8be0c4b2 ("mm/mmu_notifier: remove
	unused mmu_notifier_range_update_to_read_only export"), so call
	mmu_notifier_range_init() with both vma and mm (context line).

JIRA: https://issues.redhat.com/browse/RHEL-1848

commit 28d41a4863316321bb5aa616bd82d65c84fc0f8b
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date:   Mon Jan 16 19:18:11 2023 +0000

    mm: convert wp_page_copy() to use folios

    Use new_folio instead of new_page throughout, because we allocated it
    and know it's an order-0 folio.  Most old_page uses become old_folio,
    but use vmf->page where we need the precise page.

    Link: https://lkml.kernel.org/r/20230116191813.2145215-4-willy@infradead.org
    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
    Reviewed-by: Zi Yan <ziy@nvidia.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Signed-off-by: Chris von Recklinghausen <crecklin@redhat.com>
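As background for the diff below, the conversion is mostly a one-for-one swap
of page helpers for their folio equivalents, with &folio->page or vmf->page
passed where a helper still wants the precise struct page. The following
sketch is an illustration only, not part of the patch: demo_folio_conversion()
is a made-up name, and it assumes the usual mm/memory.c includes on a
v6.2-era tree. It collects the equivalences the patch relies on.

/* Illustration only -- not part of the backported patch. */
static void demo_folio_conversion(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *old_folio = NULL;
	struct folio *new_folio;

	/* "Most old_page uses become old_folio": map the faulting page,
	 * if any, to its folio once up front. */
	if (vmf->page)
		old_folio = page_folio(vmf->page);

	/* alloc_page_vma(gfp, vma, addr) becomes an order-0 folio
	 * allocation; the folio is exactly one page, so &new_folio->page
	 * is the precise page whenever a helper still takes struct page. */
	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
				    vmf->address, false);
	if (!new_folio)
		return;

	/* Flag tests and refcounting move to the folio forms. */
	__folio_mark_uptodate(new_folio);		/* was __SetPageUptodate() */
	if (old_folio && folio_test_anon(old_folio))	/* was PageAnon() */
		pr_debug("old page is anonymous\n");

	folio_put(new_folio);				/* was put_page() */
}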
parent ad7b024ba7
commit 9e9103fead
---
 mm/memory.c | 65 ++++++++++++++++++++++++++++---------------------------------
 1 file changed, 32 insertions(+), 33 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3052,8 +3052,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
 	struct vm_area_struct *vma = vmf->vma;
 	struct mm_struct *mm = vma->vm_mm;
-	struct page *old_page = vmf->page;
-	struct page *new_page = NULL;
+	struct folio *old_folio = NULL;
+	struct folio *new_folio = NULL;
 	pte_t entry;
 	int page_copied = 0;
 	struct mmu_notifier_range range;
@@ -3061,23 +3061,22 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 
 	delayacct_wpcopy_start();
 
+	if (vmf->page)
+		old_folio = page_folio(vmf->page);
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 
 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
-		struct folio *new_folio;
-
 		new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
 		if (!new_folio)
 			goto oom;
-		new_page = &new_folio->page;
 	} else {
-		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-				vmf->address);
-		if (!new_page)
+		new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
+				vmf->address, false);
+		if (!new_folio)
 			goto oom;
 
-		ret = __wp_page_copy_user(new_page, old_page, vmf);
+		ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
 		if (ret) {
 			/*
 			 * COW failed, if the fault was solved by other,
@@ -3086,21 +3085,21 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			 * from the second attempt.
 			 * The -EHWPOISON case will not be retried.
 			 */
-			put_page(new_page);
-			if (old_page)
-				put_page(old_page);
+			folio_put(new_folio);
+			if (old_folio)
+				folio_put(old_folio);
 
 			delayacct_wpcopy_end();
 			return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
 		}
-		kmsan_copy_page_meta(new_page, old_page);
+		kmsan_copy_page_meta(&new_folio->page, vmf->page);
 	}
 
-	if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
+	if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL))
 		goto oom_free_new;
-	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
+	cgroup_throttle_swaprate(&new_folio->page, GFP_KERNEL);
 
-	__SetPageUptodate(new_page);
+	__folio_mark_uptodate(new_folio);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
 				vmf->address & PAGE_MASK,
@@ -3112,16 +3111,16 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	 */
 	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
-		if (old_page) {
-			if (!PageAnon(old_page)) {
-				dec_mm_counter(mm, mm_counter_file(old_page));
+		if (old_folio) {
+			if (!folio_test_anon(old_folio)) {
+				dec_mm_counter(mm, mm_counter_file(&old_folio->page));
 				inc_mm_counter(mm, MM_ANONPAGES);
 			}
 		} else {
 			inc_mm_counter(mm, MM_ANONPAGES);
 		}
 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
-		entry = mk_pte(new_page, vma->vm_page_prot);
+		entry = mk_pte(&new_folio->page, vma->vm_page_prot);
 		entry = pte_sw_mkyoung(entry);
 		if (unlikely(unshare)) {
 			if (pte_soft_dirty(vmf->orig_pte))
@@ -3140,8 +3139,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		 * some TLBs while the old PTE remains in others.
 		 */
 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
-		page_add_new_anon_rmap(new_page, vma, vmf->address);
-		lru_cache_add_inactive_or_unevictable(new_page, vma);
+		folio_add_new_anon_rmap(new_folio, vma, vmf->address);
+		folio_add_lru_vma(new_folio, vma);
 		/*
 		 * We call the notify macro here because, when using secondary
 		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -3150,7 +3149,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		BUG_ON(unshare && pte_write(entry));
 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
 		update_mmu_cache(vma, vmf->address, vmf->pte);
-		if (old_page) {
+		if (old_folio) {
 			/*
 			 * Only after switching the pte to the new page may
 			 * we remove the mapcount here. Otherwise another
@@ -3173,18 +3172,18 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 			 * mapcount is visible. So transitively, TLBs to
 			 * old page will be flushed before it can be reused.
 			 */
-			page_remove_rmap(old_page, vma, false);
+			page_remove_rmap(vmf->page, vma, false);
 		}
 
 		/* Free the old page.. */
-		new_page = old_page;
+		new_folio = old_folio;
 		page_copied = 1;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
 	}
 
-	if (new_page)
-		put_page(new_page);
+	if (new_folio)
+		folio_put(new_folio);
 
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	/*
@@ -3192,19 +3191,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 	 * the above ptep_clear_flush_notify() did already call it.
 	 */
 	mmu_notifier_invalidate_range_only_end(&range);
-	if (old_page) {
+	if (old_folio) {
 		if (page_copied)
-			free_swap_cache(old_page);
-		put_page(old_page);
+			free_swap_cache(&old_folio->page);
+		folio_put(old_folio);
 	}
 
 	delayacct_wpcopy_end();
 	return (page_copied && !unshare) ? VM_FAULT_WRITE : 0;
 oom_free_new:
-	put_page(new_page);
+	folio_put(new_folio);
 oom:
-	if (old_page)
-		put_page(old_page);
+	if (old_folio)
+		folio_put(old_folio);
 
 	delayacct_wpcopy_end();
 	return VM_FAULT_OOM;