mm/percpu.c: introduce pcpu_alloc_size()

JIRA: https://issues.redhat.com/browse/RHEL-23643

commit b460bc8302f222d346f0c15bba980eb8c36d6278
Author: Hou Tao <houtao1@huawei.com>
Date:   Fri Oct 20 21:31:57 2023 +0800

    mm/percpu.c: introduce pcpu_alloc_size()
    
    Introduce pcpu_alloc_size() to get the size of the dynamic per-cpu
    area. It will be used by bpf memory allocator in the following patches.
    BPF memory allocator maintains per-cpu area caches for multiple area
    sizes and its free API only has the to-be-freed per-cpu pointer, so it
    needs the size of dynamic per-cpu area to select the corresponding cache
    when bpf program frees the dynamic per-cpu pointer.
    
    Acked-by: Dennis Zhou <dennis@kernel.org>
    Signed-off-by: Hou Tao <houtao1@huawei.com>
    Link: https://lore.kernel.org/r/20231020133202.4043247-3-houtao@huaweicloud.com
    Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Signed-off-by: Artem Savkov <asavkov@redhat.com>
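
As a minimal sketch of the intended call pattern (not part of this patch): the
bpf memory allocator's free path only has the per-cpu pointer, so it can recover
the allocation size with pcpu_alloc_size() and use it to pick a size cache.
size_to_cache_idx() and pcpu_cache_push() below are hypothetical stand-ins for
the allocator's internals.

#include <linux/log2.h>
#include <linux/percpu.h>

/* hypothetical: map an allocation size to a power-of-two cache index */
static int size_to_cache_idx(size_t size)
{
	return ilog2(roundup_pow_of_two(size));
}

/* hypothetical stand-in: a real allocator would add ptr to a per-size list */
static void pcpu_cache_push(int idx, void __percpu *ptr)
{
}

static void cached_free_percpu(void __percpu *ptr)
{
	if (!ptr)
		return;

	/* the pointer is the only input; recover the dynamic area's size */
	pcpu_cache_push(size_to_cache_idx(pcpu_alloc_size(ptr)), ptr);
}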
 2 files changed, 32 insertions(+)

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
@@ -129,6 +129,7 @@ extern void __init setup_per_cpu_areas(void);
 extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
 extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
 extern void free_percpu(void __percpu *__pdata);
+extern size_t pcpu_alloc_size(void __percpu *__pdata);
 
 DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))

diff --git a/mm/percpu.c b/mm/percpu.c
@@ -2250,6 +2250,37 @@ static void pcpu_balance_workfn(struct work_struct *work)
 	mutex_unlock(&pcpu_alloc_mutex);
 }
 
+/**
+ * pcpu_alloc_size - the size of the dynamic percpu area
+ * @ptr: pointer to the dynamic percpu area
+ *
+ * Returns the size of the @ptr allocation. This is undefined for statically
+ * defined percpu variables as there is no corresponding chunk->bound_map.
+ *
+ * RETURNS:
+ * The size of the dynamic percpu area.
+ *
+ * CONTEXT:
+ * Can be called from atomic context.
+ */
+size_t pcpu_alloc_size(void __percpu *ptr)
+{
+	struct pcpu_chunk *chunk;
+	unsigned long bit_off, end;
+	void *addr;
+
+	if (!ptr)
+		return 0;
+
+	addr = __pcpu_ptr_to_addr(ptr);
+	/* No pcpu_lock here: ptr has not been freed, so chunk is still alive */
+	chunk = pcpu_chunk_addr_search(addr);
+	bit_off = (addr - chunk->base_addr) / PCPU_MIN_ALLOC_SIZE;
+	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
+			    bit_off + 1);
+
+	return (end - bit_off) * PCPU_MIN_ALLOC_SIZE;
+}
+
 /**
  * free_percpu - free percpu area
  * @ptr: pointer to area to free
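
To see why the find_next_bit() arithmetic recovers the allocation size: the
chunk's bound_map marks allocation boundaries, so the distance from an
allocation's start bit to the next set bit is its length in
PCPU_MIN_ALLOC_SIZE units. Below is a small user-space model of that
calculation, assuming a 4-byte minimum allocation unit and a toy byte-per-bit
bitmap; nothing in it is taken from the kernel sources.

#include <stdio.h>
#include <stddef.h>

#define MIN_ALLOC_SIZE 4	/* stand-in for PCPU_MIN_ALLOC_SIZE */
#define MAP_BITS 32		/* stand-in for pcpu_chunk_map_bits() */

static unsigned char bound_map[MAP_BITS];	/* 1 = allocation boundary */

/* simplified stand-in for find_next_bit() */
static size_t next_bit(const unsigned char *map, size_t nbits, size_t start)
{
	for (size_t i = start; i < nbits; i++)
		if (map[i])
			return i;
	return nbits;
}

/* mirrors the pcpu_alloc_size() calculation above */
static size_t alloc_size(size_t bit_off)
{
	return (next_bit(bound_map, MAP_BITS, bit_off + 1) - bit_off) *
	       MIN_ALLOC_SIZE;
}

int main(void)
{
	/* a 16-byte allocation at offset 0: boundary bits 0 and 4 */
	bound_map[0] = bound_map[4] = 1;
	/* an 8-byte allocation at offset 16: boundary bits 4 and 6 */
	bound_map[6] = 1;

	printf("size at bit 0: %zu\n", alloc_size(0));	/* prints 16 */
	printf("size at bit 4: %zu\n", alloc_size(4));	/* prints 8 */
	return 0;
}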