mm, slab: check GFP_SLAB_BUG_MASK before alloc_pages in kmalloc_order
kmalloc cannot allocate memory from HIGHMEM.  Allocating large amounts
of memory currently bypasses the check and will simply leak the memory
when page_address() returns NULL.  To fix this, factor the
GFP_SLAB_BUG_MASK check out of slab & slub, and call it from
kmalloc_order() as well.  In order to make the code clear, the warning
message is put in one place.

Signed-off-by: Long Li <lonuxli.64@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: http://lkml.kernel.org/r/20200704035027.GA62481@lilong
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 444050990d
parent dabc3e291d
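Why the fix matters: before this change, kmalloc_order() passed the caller's
flags straight to the page allocator with no GFP_SLAB_BUG_MASK check.  A
simplified sketch of the pre-fix path follows (illustrative only, not the
exact kernel source; the function name is made up for the example):

/*
 * Illustrative sketch of the pre-fix kmalloc_order() path (simplified,
 * not the exact kernel source).  If a caller leaks __GFP_HIGHMEM into
 * flags, alloc_pages() may return a highmem page.  Such a page has no
 * kernel direct mapping, so page_address() returns NULL -- and the
 * page itself is never freed, leaking the whole allocation.
 */
static void *kmalloc_order_prefix_sketch(size_t size, gfp_t flags,
					 unsigned int order)
{
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_pages(flags, order);	/* may be a highmem page */
	if (unlikely(!page))
		return NULL;

	/* NULL for highmem pages: caller gets NULL, page is leaked */
	return page_address(page);
}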
 mm/slab.c        | 10 +++-------
 mm/slab.h        |  1 +
 mm/slab_common.c | 17 +++++++++++++++++
 mm/slub.c        |  9 ++-------
 4 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2589,13 +2589,9 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
-		flags &= ~GFP_SLAB_BUG_MASK;
-		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
-				invalid_mask, &invalid_mask, flags, &flags);
-		dump_stack();
-	}
+	if (unlikely(flags & GFP_SLAB_BUG_MASK))
+		flags = kmalloc_fix_flags(flags);
+
 	WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO));
 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -152,6 +152,7 @@ void create_kmalloc_caches(slab_flags_t);
 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 #endif
 
+gfp_t kmalloc_fix_flags(gfp_t flags);
 
 /* Functions provided by the slab allocators */
 int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
diff --git a/mm/slab_common.c b/mm/slab_common.c
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -26,6 +26,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/kmem.h>
 
+#include "internal.h"
+
 #include "slab.h"
 
 enum slab_state slab_state;
@@ -1332,6 +1334,18 @@ void __init create_kmalloc_caches(slab_flags_t flags)
 }
 #endif /* !CONFIG_SLOB */
 
+gfp_t kmalloc_fix_flags(gfp_t flags)
+{
+	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
+
+	flags &= ~GFP_SLAB_BUG_MASK;
+	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
+			invalid_mask, &invalid_mask, flags, &flags);
+	dump_stack();
+
+	return flags;
+}
+
 /*
  * To avoid unnecessary overhead, we pass through large allocation requests
  * directly to the page allocator. We use __GFP_COMP, because we will need to
@@ -1342,6 +1356,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	void *ret = NULL;
 	struct page *page;
 
+	if (unlikely(flags & GFP_SLAB_BUG_MASK))
+		flags = kmalloc_fix_flags(flags);
+
 	flags |= __GFP_COMP;
 	page = alloc_pages(flags, order);
 	if (likely(page)) {
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1745,13 +1745,8 @@ out:
 
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
-		flags &= ~GFP_SLAB_BUG_MASK;
-		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
-				invalid_mask, &invalid_mask, flags, &flags);
-		dump_stack();
-	}
+	if (unlikely(flags & GFP_SLAB_BUG_MASK))
+		flags = kmalloc_fix_flags(flags);
 
 	return allocate_slab(s,
 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
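After this patch, all three slab entry points (cache_grow_begin() in slab,
new_slab() in slub, and kmalloc_order()) share kmalloc_fix_flags(), which
strips the invalid bits, warns with a stack dump, and returns the sanitized
flags.  For reference, GFP_SLAB_BUG_MASK is defined in include/linux/gfp.h;
around this kernel version it reads roughly as below (quoted from memory,
verify against your tree):

/* GFP bits that slab callers must never pass: anything outside
 * __GFP_BITS_MASK, plus the zone modifiers (highmem, DMA32) that
 * slab memory cannot come from. */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)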