mm: apply __must_check to vmap_pages_range_noflush()

JIRA: https://issues.redhat.com/browse/RHEL-27741

commit d905ae2b0f7eaf8fb37febfe4833ccf3f8c1c27a
Author: Alexander Potapenko <glider@google.com>
Date:   Thu Apr 13 15:12:23 2023 +0200

    mm: apply __must_check to vmap_pages_range_noflush()

    To prevent errors when vmap_pages_range_noflush() or
    __vmap_pages_range_noflush() silently fail (see the link below for an
    example), annotate them with __must_check so that the callers do not
    unconditionally assume the mapping succeeded.

    Link: https://lkml.kernel.org/r/20230413131223.4135168-4-glider@google.com
    Signed-off-by: Alexander Potapenko <glider@google.com>
    Reported-by: Dipanjan Das <mail.dipanjan.das@gmail.com>
      Link: https://lore.kernel.org/linux-mm/CANX2M5ZRrRA64k0hOif02TjmY9kbbO2aCBPyq79es34RXZ=cAw@mail.gmail.com/
    Reviewed-by: Marco Elver <elver@google.com>
    Cc: Christoph Hellwig <hch@infradead.org>
    Cc: Dmitry Vyukov <dvyukov@google.com>
    Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Signed-off-by: Chris von Recklinghausen <crecklin@redhat.com>
This commit is contained in:
Chris von Recklinghausen 2024-04-12 15:16:49 -04:00
parent c4677d95e9
commit 7b0c486c6c
1 changed file with 5 additions and 5 deletions

View File

@@ -896,7 +896,7 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
*/
#ifdef CONFIG_MMU
void __init vmalloc_init(void);
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline void vmalloc_init(void)
@@ -904,16 +904,16 @@ static inline void vmalloc_init(void)
}
static inline
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+int __must_check vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift)
{
return -EINVAL;
}
#endif
-int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
-pgprot_t prot, struct page **pages,
-unsigned int page_shift);
+int __must_check __vmap_pages_range_noflush(unsigned long addr,
+unsigned long end, pgprot_t prot,
+struct page **pages, unsigned int page_shift);
void vunmap_range_noflush(unsigned long start, unsigned long end);