#ifndef IOU_ALLOC_CACHE_H
#define IOU_ALLOC_CACHE_H

#include <linux/io_uring_types.h>

/*
 * Don't allow the cache to grow beyond this size.
 */
#define IO_ALLOC_CACHE_MAX	128

void io_alloc_cache_free(struct io_alloc_cache *cache,
			 void (*free)(const void *));
bool io_alloc_cache_init(struct io_alloc_cache *cache,
			 unsigned max_nr, unsigned int size,
			 unsigned int init_bytes);

void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp);
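
/*
 * Typical lifecycle, as an illustrative sketch: "struct foo" stands in
 * for whatever element type the cache holds, and the error handling
 * assumes the convention that io_alloc_cache_init() returns true on
 * allocation failure.
 *
 *	struct io_alloc_cache cache;
 *	struct foo *f;
 *
 *	if (io_alloc_cache_init(&cache, IO_ALLOC_CACHE_MAX,
 *				sizeof(struct foo), 0))
 *		return -ENOMEM;
 *
 *	f = io_cache_alloc(&cache, GFP_KERNEL);
 *	if (f) {
 *		...
 *		io_cache_free(&cache, f);
 *	}
 *
 *	io_alloc_cache_free(&cache, kfree);
 *
 * The cache itself does no locking, so callers are expected to
 * serialize access to it.
 */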
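
/*
 * Try to return an entry to the cache. Fails when the cache is already
 * at max_cached capacity, or when KASAN declines to poison the object;
 * on failure the caller still owns the entry and must free it (see
 * io_cache_free() below).
 */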
static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
				      void *entry)
{
	if (cache->nr_cached < cache->max_cached) {
		if (!kasan_mempool_poison_object(entry))
			return false;
		cache->entries[cache->nr_cached++] = entry;
		return true;
	}
	return false;
}
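
/*
 * Pop the most recently cached entry, or return NULL if the cache is
 * empty; the KASAN handling is explained in the comment inside.
 */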
static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
{
	if (cache->nr_cached) {
		void *entry = cache->entries[--cache->nr_cached];

		/*
		 * If KASAN is enabled, always clear the initial bytes that
		 * must be zeroed post alloc, in case any of them overlap
		 * with KASAN storage.
		 */
#if defined(CONFIG_KASAN)
		kasan_mempool_unpoison_object(entry, cache->elem_size);
		if (cache->init_clear)
			memset(entry, 0, cache->init_clear);
#endif
		return entry;
	}
	return NULL;
}
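
/*
 * Allocate an object, preferring a cached entry and falling back to
 * io_cache_alloc_new() only when the cache is empty.
 */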
static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp)
{
	void *obj;

	obj = io_alloc_cache_get(cache);
	if (obj)
		return obj;
	return io_cache_alloc_new(cache, gfp);
}
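
/*
 * Hand an object back: recycle it via io_alloc_cache_put() if there is
 * room, otherwise kfree() it.
 */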
static inline void io_cache_free(struct io_alloc_cache *cache, void *obj)
{
	if (!io_alloc_cache_put(cache, obj))
		kfree(obj);
}

#endif