bpf: Ensure unit_size is matched with slab cache object size

JIRA: https://issues.redhat.com/browse/RHEL-10691

commit c930472552022bd09aab3cd946ba3f243070d5c7
Author: Hou Tao <houtao1@huawei.com>
Date:   Fri Sep 8 21:39:22 2023 +0800

    bpf: Ensure unit_size is matched with slab cache object size

    Add an extra check in bpf_mem_alloc_init() to ensure that the unit_size
    of a bpf_mem_cache matches the object_size of the underlying slab cache.
    If the two sizes do not match, print a warning once and return -EINVAL
    from bpf_mem_alloc_init(), so the mismatch is found early and the
    potential issue is prevented.

    Suggested-by: Alexei Starovoitov <ast@kernel.org>
    Signed-off-by: Hou Tao <houtao1@huawei.com>
    Link: https://lore.kernel.org/r/20230908133923.2675053-4-houtao@huaweicloud.com
    Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
commit 566203cfbe
parent 04fe473f83
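For context on why ksize() and unit_size can disagree: the slab allocator
rounds allocations up to its size classes, so kmalloc() may return an object
from a larger cache than the requested unit_size, and ksize() on a free-list
element then reports the slab object size rather than the size the
bpf_mem_cache was built for. A minimal userspace sketch of the same effect
(illustrative assumptions, not part of the patch: glibc's
malloc_usable_size() standing in for ksize(), and 96 bytes as the requested
size):

	#include <malloc.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t unit_size = 96;		/* size the cache was built for */
		void *obj = malloc(unit_size);
		/* Analogue of ksize(): the usable size of the allocation,
		 * which the allocator may have rounded up past unit_size.
		 */
		size_t obj_size = malloc_usable_size(obj);

		if (obj_size != unit_size)
			fprintf(stderr, "unexpected object size %zu, expect %zu\n",
				obj_size, unit_size);
		free(obj);
		return 0;
	}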
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c

@@ -486,6 +486,24 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
 }
 
+static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
+{
+	struct llist_node *first;
+	unsigned int obj_size;
+
+	first = c->free_llist.first;
+	if (!first)
+		return 0;
+
+	obj_size = ksize(first);
+	if (obj_size != c->unit_size) {
+		WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
+			  idx, obj_size, c->unit_size);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* When size != 0 bpf_mem_cache for each cpu.
  * This is typical bpf hash map use case when all elements have equal size.
  *
@@ -496,10 +514,10 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 {
 	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
+	int cpu, i, err, unit_size, percpu_size = 0;
 	struct bpf_mem_caches *cc, __percpu *pcc;
 	struct bpf_mem_cache *c, __percpu *pc;
 	struct obj_cgroup *objcg = NULL;
-	int cpu, i, unit_size, percpu_size = 0;
 
 	if (size) {
 		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
@@ -537,6 +555,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
 	if (!pcc)
 		return -ENOMEM;
+	err = 0;
 #ifdef CONFIG_MEMCG_KMEM
 	objcg = get_obj_cgroup_from_current();
 #endif
@@ -557,10 +576,20 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 			if (i != bpf_mem_cache_idx(c->unit_size))
 				continue;
 			prefill_mem_cache(c, cpu);
+			err = check_obj_size(c, i);
+			if (err)
+				goto out;
 		}
 	}
+
+out:
 	ma->caches = pcc;
-	return 0;
+	/* refill_work is either zeroed or initialized, so it is safe to
+	 * call irq_work_sync().
+	 */
+	if (err)
+		bpf_mem_alloc_destroy(ma);
+	return err;
 }
 
 static void drain_mem_cache(struct bpf_mem_cache *c)
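With this change, a mismatch is reported once via WARN_ONCE (with
illustrative values, the line would read "bpf_mem_cache[0]: unexpected
object size 128, expect 96") and bpf_mem_alloc_init() fails instead of
handing out wrongly-sized objects. A hedged sketch of what a caller then
sees (hypothetical caller for illustration; the real users include the bpf
hash map code):

	struct bpf_mem_alloc ma;
	int err;

	err = bpf_mem_alloc_init(&ma, 96, false);
	if (err)
		/* -EINVAL on a unit_size/ksize() mismatch; the partially
		 * initialized allocator was already torn down via
		 * bpf_mem_alloc_destroy() in the out: path above.
		 */
		return err;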