mm: userfaultfd: don't pass around both mm and vma
Conflicts: mm/userfaultfd.c - We already have
153132571f ("userfaultfd/shmem: support UFFDIO_CONTINUE for shmem")
and
73f37dbcfe17 ("mm: userfaultfd: fix UFFDIO_CONTINUE on fallocated shmem pages")
so keep the setting of ret and possible jump to out.
JIRA: https://issues.redhat.com/browse/RHEL-27741
commit 61c5004022f56c443b86800e8985d8803f3a22aa
Author: Axel Rasmussen <axelrasmussen@google.com>
Date: Tue Mar 14 15:12:48 2023 -0700
mm: userfaultfd: don't pass around both mm and vma
Quite a few userfaultfd functions took both mm and vma pointers as
arguments. Since the mm is trivially accessible via vma->vm_mm, there's
no reason to pass both; it just needlessly extends the already long
argument list.
Get rid of the mm pointer, where possible, to shorten the argument list.
Link: https://lkml.kernel.org/r/20230314221250.682452-3-axelrasmussen@google.com
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: James Houghton <jthoughton@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nadav Amit <namit@vmware.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Chris von Recklinghausen <crecklin@redhat.com>
This commit is contained in:
parent
81108c01c8
commit
75317fb06a
|
@ -1637,7 +1637,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
|
|||
|
||||
/* Reset ptes for the whole vma range if wr-protected */
|
||||
if (userfaultfd_wp(vma))
|
||||
uffd_wp_range(mm, vma, start, vma_end - start, false);
|
||||
uffd_wp_range(vma, start, vma_end - start, false);
|
||||
|
||||
new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
|
||||
prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
|
||||
|
|
|
@ -158,7 +158,7 @@ unsigned long hugetlb_total_pages(void);
|
|||
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
|
||||
unsigned long address, unsigned int flags);
|
||||
#ifdef CONFIG_USERFAULTFD
|
||||
int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
|
||||
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr,
|
||||
unsigned long src_addr,
|
||||
|
@ -393,8 +393,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
|
|||
}
|
||||
|
||||
#ifdef CONFIG_USERFAULTFD
|
||||
static inline int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm,
|
||||
pte_t *dst_pte,
|
||||
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr,
|
||||
unsigned long src_addr,
|
||||
|
|
|
@ -152,14 +152,14 @@ extern void shmem_uncharge(struct inode *inode, long pages);
|
|||
|
||||
#ifdef CONFIG_USERFAULTFD
|
||||
#ifdef CONFIG_SHMEM
|
||||
extern int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
|
||||
extern int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr,
|
||||
unsigned long src_addr,
|
||||
bool zeropage, bool wp_copy,
|
||||
struct page **pagep);
|
||||
#else /* !CONFIG_SHMEM */
|
||||
#define shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr, \
|
||||
#define shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr, \
|
||||
src_addr, zeropage, wp_copy, pagep) ({ BUG(); 0; })
|
||||
#endif /* CONFIG_SHMEM */
|
||||
#endif /* CONFIG_USERFAULTFD */
|
||||
|
|
|
@ -56,7 +56,7 @@ enum mcopy_atomic_mode {
|
|||
MCOPY_ATOMIC_CONTINUE,
|
||||
};
|
||||
|
||||
extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
|
||||
extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr, struct page *page,
|
||||
bool newly_allocated, bool wp_copy);
|
||||
|
@ -73,7 +73,7 @@ extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst
|
|||
extern int mwriteprotect_range(struct mm_struct *dst_mm,
|
||||
unsigned long start, unsigned long len,
|
||||
bool enable_wp, atomic_t *mmap_changing);
|
||||
extern long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
|
||||
extern long uffd_wp_range(struct vm_area_struct *vma,
|
||||
unsigned long start, unsigned long len, bool enable_wp);
|
||||
|
||||
/* mm helpers */
|
||||
|
|
|
@ -6168,8 +6168,7 @@ out_mutex:
|
|||
* Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
|
||||
* with modifications for hugetlb pages.
|
||||
*/
|
||||
int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm,
|
||||
pte_t *dst_pte,
|
||||
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr,
|
||||
unsigned long src_addr,
|
||||
|
@ -6177,6 +6176,7 @@ int hugetlb_mfill_atomic_pte(struct mm_struct *dst_mm,
|
|||
struct page **pagep,
|
||||
bool wp_copy)
|
||||
{
|
||||
struct mm_struct *dst_mm = dst_vma->vm_mm;
|
||||
bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE);
|
||||
struct hstate *h = hstate_vma(dst_vma);
|
||||
struct address_space *mapping = dst_vma->vm_file->f_mapping;
|
||||
|
|
|
@ -2429,8 +2429,7 @@ static struct inode *shmem_get_inode(struct user_namespace *mnt_userns, struct s
|
|||
}
|
||||
|
||||
#ifdef CONFIG_USERFAULTFD
|
||||
int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
|
||||
pmd_t *dst_pmd,
|
||||
int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr,
|
||||
unsigned long src_addr,
|
||||
|
@ -2520,11 +2519,11 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
|
|||
goto out_release;
|
||||
|
||||
ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
|
||||
gfp & GFP_RECLAIM_MASK, dst_mm);
|
||||
gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
|
||||
if (ret)
|
||||
goto out_release;
|
||||
|
||||
ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
|
||||
ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
|
||||
&folio->page, true, wp_copy);
|
||||
if (ret)
|
||||
goto out_delete_from_cache;
|
||||
|
|
|
@ -55,12 +55,13 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
|
|||
* This function handles both MCOPY_ATOMIC_NORMAL and _CONTINUE for both shmem
|
||||
* and anon, and for both shared and private VMAs.
|
||||
*/
|
||||
int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
|
||||
int mfill_atomic_install_pte(pmd_t *dst_pmd,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr, struct page *page,
|
||||
bool newly_allocated, bool wp_copy)
|
||||
{
|
||||
int ret;
|
||||
struct mm_struct *dst_mm = dst_vma->vm_mm;
|
||||
pte_t _dst_pte, *dst_pte;
|
||||
bool writable = dst_vma->vm_flags & VM_WRITE;
|
||||
bool vm_shared = dst_vma->vm_flags & VM_SHARED;
|
||||
|
@ -130,8 +131,7 @@ out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int mfill_atomic_pte_copy(struct mm_struct *dst_mm,
|
||||
pmd_t *dst_pmd,
|
||||
static int mfill_atomic_pte_copy(pmd_t *dst_pmd,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr,
|
||||
unsigned long src_addr,
|
||||
|
@ -193,10 +193,10 @@ static int mfill_atomic_pte_copy(struct mm_struct *dst_mm,
|
|||
__SetPageUptodate(page);
|
||||
|
||||
ret = -ENOMEM;
|
||||
if (mem_cgroup_charge(page_folio(page), dst_mm, GFP_KERNEL))
|
||||
if (mem_cgroup_charge(page_folio(page), dst_vma->vm_mm, GFP_KERNEL))
|
||||
goto out_release;
|
||||
|
||||
ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
|
||||
ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
|
||||
page, true, wp_copy);
|
||||
if (ret)
|
||||
goto out_release;
|
||||
|
@ -207,8 +207,7 @@ out_release:
|
|||
goto out;
|
||||
}
|
||||
|
||||
static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
|
||||
pmd_t *dst_pmd,
|
||||
static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr)
|
||||
{
|
||||
|
@ -221,7 +220,7 @@ static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
|
|||
_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
|
||||
dst_vma->vm_page_prot));
|
||||
ret = -EAGAIN;
|
||||
dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
|
||||
dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
|
||||
if (!dst_pte)
|
||||
goto out;
|
||||
if (dst_vma->vm_file) {
|
||||
|
@ -236,7 +235,7 @@ static int mfill_atomic_pte_zeropage(struct mm_struct *dst_mm,
|
|||
ret = -EEXIST;
|
||||
if (!pte_none(*dst_pte))
|
||||
goto out_unlock;
|
||||
set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
|
||||
set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
|
||||
/* No need to invalidate - it was non-present before */
|
||||
update_mmu_cache(dst_vma, dst_addr, dst_pte);
|
||||
ret = 0;
|
||||
|
@ -247,8 +246,7 @@ out:
|
|||
}
|
||||
|
||||
/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
|
||||
static int mfill_atomic_pte_continue(struct mm_struct *dst_mm,
|
||||
pmd_t *dst_pmd,
|
||||
static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr,
|
||||
bool wp_copy)
|
||||
|
@ -276,7 +274,7 @@ static int mfill_atomic_pte_continue(struct mm_struct *dst_mm,
|
|||
goto out_release;
|
||||
}
|
||||
|
||||
ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
|
||||
ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
|
||||
page, false, wp_copy);
|
||||
if (ret)
|
||||
goto out_release;
|
||||
|
@ -317,7 +315,7 @@ static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
|
|||
* mfill_atomic processing for HUGETLB vmas. Note that this routine is
|
||||
* called with mmap_lock held, it will release mmap_lock before returning.
|
||||
*/
|
||||
static __always_inline ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
|
||||
static __always_inline ssize_t mfill_atomic_hugetlb(
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_start,
|
||||
unsigned long src_start,
|
||||
|
@ -325,6 +323,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
|
|||
enum mcopy_atomic_mode mode,
|
||||
bool wp_copy)
|
||||
{
|
||||
struct mm_struct *dst_mm = dst_vma->vm_mm;
|
||||
int vm_shared = dst_vma->vm_flags & VM_SHARED;
|
||||
ssize_t err;
|
||||
pte_t *dst_pte;
|
||||
|
@ -418,7 +417,7 @@ retry:
|
|||
goto out_unlock;
|
||||
}
|
||||
|
||||
err = hugetlb_mfill_atomic_pte(dst_mm, dst_pte, dst_vma,
|
||||
err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma,
|
||||
dst_addr, src_addr, mode, &page,
|
||||
wp_copy);
|
||||
|
||||
|
@ -470,17 +469,15 @@ out:
|
|||
}
|
||||
#else /* !CONFIG_HUGETLB_PAGE */
|
||||
/* fail at build time if gcc attempts to use this */
|
||||
extern ssize_t mfill_atomic_hugetlb(struct mm_struct *dst_mm,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_start,
|
||||
unsigned long src_start,
|
||||
unsigned long len,
|
||||
enum mcopy_atomic_mode mode,
|
||||
bool wp_copy);
|
||||
extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_start,
|
||||
unsigned long src_start,
|
||||
unsigned long len,
|
||||
enum mcopy_atomic_mode mode,
|
||||
bool wp_copy);
|
||||
#endif /* CONFIG_HUGETLB_PAGE */
|
||||
|
||||
static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
|
||||
pmd_t *dst_pmd,
|
||||
static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
|
||||
struct vm_area_struct *dst_vma,
|
||||
unsigned long dst_addr,
|
||||
unsigned long src_addr,
|
||||
|
@ -491,7 +488,7 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
|
|||
ssize_t err;
|
||||
|
||||
if (mode == MCOPY_ATOMIC_CONTINUE) {
|
||||
return mfill_atomic_pte_continue(dst_mm, dst_pmd, dst_vma,
|
||||
return mfill_atomic_pte_continue(dst_pmd, dst_vma,
|
||||
dst_addr, wp_copy);
|
||||
}
|
||||
|
||||
|
@ -507,14 +504,14 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
|
|||
*/
|
||||
if (!(dst_vma->vm_flags & VM_SHARED)) {
|
||||
if (mode == MCOPY_ATOMIC_NORMAL)
|
||||
err = mfill_atomic_pte_copy(dst_mm, dst_pmd, dst_vma,
|
||||
err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
|
||||
dst_addr, src_addr, page,
|
||||
wp_copy);
|
||||
else
|
||||
err = mfill_atomic_pte_zeropage(dst_mm, dst_pmd,
|
||||
err = mfill_atomic_pte_zeropage(dst_pmd,
|
||||
dst_vma, dst_addr);
|
||||
} else {
|
||||
err = shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma,
|
||||
err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
|
||||
dst_addr, src_addr,
|
||||
mode != MCOPY_ATOMIC_NORMAL,
|
||||
wp_copy, page);
|
||||
|
@ -595,7 +592,7 @@ retry:
|
|||
* If this is a HUGETLB vma, pass off to appropriate routine
|
||||
*/
|
||||
if (is_vm_hugetlb_page(dst_vma))
|
||||
return mfill_atomic_hugetlb(dst_mm, dst_vma, dst_start,
|
||||
return mfill_atomic_hugetlb(dst_vma, dst_start,
|
||||
src_start, len, mcopy_mode,
|
||||
wp_copy);
|
||||
|
||||
|
@ -648,7 +645,7 @@ retry:
|
|||
BUG_ON(pmd_none(*dst_pmd));
|
||||
BUG_ON(pmd_trans_huge(*dst_pmd));
|
||||
|
||||
err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
|
||||
err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
|
||||
src_addr, &page, mcopy_mode, wp_copy);
|
||||
cond_resched();
|
||||
|
||||
|
@ -717,7 +714,7 @@ ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long start,
|
|||
mmap_changing, 0);
|
||||
}
|
||||
|
||||
long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
|
||||
long uffd_wp_range(struct vm_area_struct *dst_vma,
|
||||
unsigned long start, unsigned long len, bool enable_wp)
|
||||
{
|
||||
unsigned int mm_cp_flags;
|
||||
|
@ -739,7 +736,7 @@ long uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *dst_vma,
|
|||
*/
|
||||
if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
|
||||
mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
|
||||
tlb_gather_mmu(&tlb, dst_mm);
|
||||
tlb_gather_mmu(&tlb, dst_vma->vm_mm);
|
||||
ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
|
||||
tlb_finish_mmu(&tlb);
|
||||
|
||||
|
@ -795,7 +792,7 @@ int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
|
|||
_start = max(dst_vma->vm_start, start);
|
||||
_end = min(dst_vma->vm_end, end);
|
||||
|
||||
err = uffd_wp_range(dst_mm, dst_vma, _start, _end - _start, enable_wp);
|
||||
err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
|
||||
|
||||
/* Return 0 on success, <0 on failures */
|
||||
if (err < 0)
|
||||
|
|
Loading…
Reference in New Issue