bpf: Fix percpu address space issues
JIRA: https://issues.redhat.com/browse/RHEL-63880

commit 6d641ca50d7ec7d5e4e889c3f8ea22afebc2a403
Author: Uros Bizjak <ubizjak@gmail.com>
Date:   Sun Aug 11 18:13:33 2024 +0200

    bpf: Fix percpu address space issues

    In arraymap.c:
    In bpf_array_map_seq_start() and bpf_array_map_seq_next() cast return
    values from the __percpu address space to the generic address space
    via uintptr_t [1].

    Correct the declaration of pptr pointer in __bpf_array_map_seq_show()
    to void __percpu * and cast the value from the generic address space
    to the __percpu address space via uintptr_t [1].

    In hashtab.c:
    Assign the return value from bpf_mem_cache_alloc() to void pointer
    and cast the value to void __percpu ** (void pointer to percpu void
    pointer) before dereferencing.

    In memalloc.c:
    Explicitly declare __percpu variables. Cast obj to void __percpu **.

    In helpers.c:
    Cast ptr in BPF_CALL_1 and BPF_CALL_2 from generic address space to
    __percpu address space via const uintptr_t [1].

    Found by GCC's named address space checks.

    There were no changes in the resulting object files.

    [1] https://sparse.docs.kernel.org/en/latest/annotations.html#address-space-name

    Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
    Cc: Alexei Starovoitov <ast@kernel.org>
    Cc: Daniel Borkmann <daniel@iogearbox.net>
    Cc: Andrii Nakryiko <andrii@kernel.org>
    Cc: Martin KaFai Lau <martin.lau@linux.dev>
    Cc: Eduard Zingerman <eddyz87@gmail.com>
    Cc: Song Liu <song@kernel.org>
    Cc: Yonghong Song <yonghong.song@linux.dev>
    Cc: John Fastabend <john.fastabend@gmail.com>
    Cc: KP Singh <kpsingh@kernel.org>
    Cc: Stanislav Fomichev <sdf@fomichev.me>
    Cc: Hao Luo <haoluo@google.com>
    Cc: Jiri Olsa <jolsa@kernel.org>
    Acked-by: Eduard Zingerman <eddyz87@gmail.com>
    Link: https://lore.kernel.org/r/20240811161414.56744-1-ubizjak@gmail.com
    Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
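The recurring idiom throughout the patch is to launder a pointer across address spaces through uintptr_t instead of casting pointer to pointer directly, which sparse and GCC's named address space checks would flag. A minimal sketch of the idiom, with hypothetical names (__percpu is a sparse/GCC annotation and compiles away in a normal build):

#include <linux/types.h>
#include <linux/percpu.h>

static int __percpu *pcnt;      /* lives in the __percpu address space */

/* __percpu -> generic: round-trip through uintptr_t so the checker
 * sees an integer conversion, not a cross-address-space pointer cast.
 */
static void *demo_stash(void)
{
        return (void *)(uintptr_t)pcnt;
}

/* generic -> __percpu: the same laundering in the other direction. */
static void demo_use(void *cookie)
{
        int __percpu *p = (int __percpu *)(uintptr_t)cookie;

        this_cpu_inc(*p);
}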
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -600,7 +600,7 @@ static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
 	array = container_of(map, struct bpf_array, map);
 	index = info->index & array->index_mask;
 	if (info->percpu_value_buf)
-		return array->pptrs[index];
+		return (void *)(uintptr_t)array->pptrs[index];
 	return array_map_elem_ptr(array, index);
 }
 
@@ -619,7 +619,7 @@ static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	array = container_of(map, struct bpf_array, map);
 	index = info->index & array->index_mask;
 	if (info->percpu_value_buf)
-		return array->pptrs[index];
+		return (void *)(uintptr_t)array->pptrs[index];
 	return array_map_elem_ptr(array, index);
 }
 
@@ -632,7 +632,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 	struct bpf_iter_meta meta;
 	struct bpf_prog *prog;
 	int off = 0, cpu = 0;
-	void __percpu **pptr;
+	void __percpu *pptr;
 	u32 size;
 
 	meta.seq = seq;
@@ -648,7 +648,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
 	if (!info->percpu_value_buf) {
 		ctx.value = v;
 	} else {
-		pptr = v;
+		pptr = (void __percpu *)(uintptr_t)v;
 		size = array->elem_size;
 		for_each_possible_cpu(cpu) {
 			copy_map_value_long(map, info->percpu_value_buf + off,
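For the iterator, the cookie that bpf_array_map_seq_start()/_next() hand to seq_file is a plain void *, but for a percpu array it actually carries a __percpu pointer, so both directions of the round trip need the cast. A simplified sketch of that round trip (not the real bpf_iter plumbing; names are illustrative):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct demo_array {
        void __percpu **pptrs;  /* one percpu pointer per element */
};

/* seq_start()/seq_next() side: percpu pointer out as a generic cookie. */
static void *demo_seq_elem(struct demo_array *a, u32 index)
{
        return (void *)(uintptr_t)a->pptrs[index];
}

/* seq_show() side: recover the percpu pointer, then walk the CPUs. */
static void demo_seq_show(void *v, void *buf, u32 size)
{
        void __percpu *pptr = (void __percpu *)(uintptr_t)v;
        u32 off = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                memcpy(buf + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
}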
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1048,14 +1048,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 		pptr = htab_elem_get_ptr(l_new, key_size);
 	} else {
 		/* alloc_percpu zero-fills */
-		pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
-		if (!pptr) {
+		void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
+
+		if (!ptr) {
 			bpf_mem_cache_free(&htab->ma, l_new);
 			l_new = ERR_PTR(-ENOMEM);
 			goto dec_count;
 		}
-		l_new->ptr_to_pptr = pptr;
-		pptr = *(void **)pptr;
+		l_new->ptr_to_pptr = ptr;
+		pptr = *(void __percpu **)ptr;
 	}
 
 	pcpu_init_value(htab, pptr, value, onallcpus);
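The hashtab change separates the two roles the old pptr variable was playing: ptr is the generic allocation returned by bpf_mem_cache_alloc(), and the real percpu pointer is stored inside it. A sketch of that double indirection, assuming the layout the diff implies (the first word of the returned object, from the caller's point of view, holds the __percpu pointer):

#include <linux/percpu.h>

struct demo_elem {
        void *ptr_to_pptr;      /* generic pointer to the returned object */
};

static void __percpu *demo_unbox(struct demo_elem *e, void *ptr)
{
        e->ptr_to_pptr = ptr;           /* keep it so it can be freed later */
        /* The cast tells the checker we are loading a __percpu pointer
         * out of generic memory, not converting ptr itself. */
        return *(void __percpu **)ptr;
}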
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -715,7 +715,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
 	if (cpu >= nr_cpu_ids)
 		return (unsigned long)NULL;
 
-	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
+	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
 }
 
 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
@@ -728,7 +728,7 @@ const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
 
 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
 {
-	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
+	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
 }
 
 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
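In helpers.c the pointer reaches the helper as a plain const void * because that is how BPF_CALL_x arguments are typed; the const uintptr_t hop re-attaches the __percpu qualifier before the pointer goes into per_cpu_ptr()/this_cpu_ptr(). The same conversion outside the helper macros, as a sketch:

#include <linux/types.h>
#include <linux/percpu.h>

/* 'ptr' arrives untyped (e.g. through a helper ABI) but is known to
 * be a percpu address. */
static unsigned long demo_per_cpu_addr(const void *ptr, int cpu)
{
        const void __percpu *pptr =
                (const void __percpu *)(const uintptr_t)ptr;

        return (unsigned long)per_cpu_ptr(pptr, cpu);
}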
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -140,8 +140,8 @@ static struct llist_node notrace *__llist_del_first(struct llist_head *head)
 static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
 {
 	if (c->percpu_size) {
-		void **obj = kmalloc_node(c->percpu_size, flags, node);
-		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
+		void __percpu **obj = kmalloc_node(c->percpu_size, flags, node);
+		void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
 
 		if (!obj || !pptr) {
 			free_percpu(pptr);
@@ -258,7 +258,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
 static void free_one(void *obj, bool percpu)
 {
 	if (percpu) {
-		free_percpu(((void **)obj)[1]);
+		free_percpu(((void __percpu **)obj)[1]);
 		kfree(obj);
 		return;
 	}
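__alloc() and free_one() are the two ends of the same box layout: for percpu caches the kmalloc'ed box points at a separate percpu area, and slot [1] of the box carries the __percpu pointer, which is why free_one() releases ((void __percpu **)obj)[1] with free_percpu() and the box itself with kfree(). A sketch of that pairing; the two-slot size and the use of slot [0] for allocator bookkeeping are assumptions read off the diff, not the exact kernel layout:

#include <linux/slab.h>
#include <linux/percpu.h>

static void *demo_alloc_pcpu_box(size_t unit_size, gfp_t flags)
{
        void __percpu **obj = kmalloc(2 * sizeof(void *), flags);
        void __percpu *pptr = __alloc_percpu_gfp(unit_size, 8, flags);

        if (!obj || !pptr) {
                free_percpu(pptr);      /* both tolerate NULL */
                kfree(obj);
                return NULL;
        }
        obj[1] = pptr;  /* slot [1] carries the percpu pointer */
        return obj;
}

static void demo_free_pcpu_box(void *obj)
{
        free_percpu(((void __percpu **)obj)[1]);
        kfree(obj);     /* the box itself is ordinary kernel memory */
}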
@@ -514,8 +514,8 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
  */
 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 {
-	struct bpf_mem_caches *cc, __percpu *pcc;
-	struct bpf_mem_cache *c, __percpu *pc;
+	struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
+	struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc;
 	struct obj_cgroup *objcg = NULL;
 	int cpu, i, unit_size, percpu_size = 0;
@@ -596,7 +596,7 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg
 
 int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size)
 {
-	struct bpf_mem_caches *cc, __percpu *pcc;
+	struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
 	int cpu, i, unit_size, percpu_size;
 	struct obj_cgroup *objcg;
 	struct bpf_mem_cache *c;
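Finally, the declaration changes in bpf_mem_alloc_init() and bpf_mem_alloc_percpu_unit_init() split combined declarators like "struct bpf_mem_caches *cc, __percpu *pcc;" into two statements, so the __percpu qualifier unambiguously belongs to exactly one variable under GCC's named address space checking. An illustrative before/after with a hypothetical struct:

#include <linux/errno.h>
#include <linux/percpu.h>

struct demo_cache { int val; };

static int demo_decls(void)
{
        /* Before: two declarators in one declaration, only the second
         * one marked __percpu -- the form the patch moves away from:
         *
         *      struct demo_cache *c, __percpu *pc;
         */

        /* After: one declaration per address space. */
        struct demo_cache *c;
        struct demo_cache __percpu *pc;

        pc = alloc_percpu(struct demo_cache);
        if (!pc)
                return -ENOMEM;
        c = per_cpu_ptr(pc, 0); /* generic pointer to CPU 0's copy */
        c->val = 1;
        free_percpu(pc);
        return 0;
}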