sched_ext: idle: Make idle static keys private
Make all the static keys used by the idle CPU selection policy private
to ext_idle.c. This avoids unnecessary exposure in headers and improves
code encapsulation.

Cc: Yury Norov <yury.norov@gmail.com>
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit d73249f887
parent f09177ca5f
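The pattern applied here is the usual one for encapsulating a jump-label static key: define the key static inside a single translation unit and export only enable/disable wrappers through the header, so hot-path checks stay cheap while no other file can flip the key. Below is a minimal sketch of that pattern, using hypothetical foo_* names rather than code from this commit:

/* foo.c: the key itself is invisible outside this file. */
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(foo_enabled);

void foo_enable(void)
{
	static_branch_enable(&foo_enabled);
}

void foo_disable(void)
{
	static_branch_disable(&foo_enabled);
}

bool foo_active(void)
{
	/* Compiles to a patched jump: near-zero cost while disabled. */
	return static_branch_likely(&foo_enabled);
}

/* foo.h: callers see only the wrappers, never the key. */
void foo_enable(void);
void foo_disable(void);
bool foo_active(void);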
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -4765,7 +4765,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 	static_branch_disable(&scx_ops_enq_exiting);
 	static_branch_disable(&scx_ops_enq_migration_disabled);
 	static_branch_disable(&scx_ops_cpu_preempt);
-	static_branch_disable(&scx_builtin_idle_enabled);
+	scx_idle_disable();
 	synchronize_rcu();
 
 	if (ei->kind >= SCX_EXIT_ERROR) {
@@ -5403,12 +5403,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
 		static_branch_enable(&scx_ops_cpu_preempt);
 
-	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
-		scx_idle_reset_masks();
-		static_branch_enable(&scx_builtin_idle_enabled);
-	} else {
-		static_branch_disable(&scx_builtin_idle_enabled);
-	}
+	scx_idle_enable(ops);
 
 	/*
 	 * Lock out forks, cgroup on/offlining and moves before opening the
diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -12,7 +12,7 @@
 #include "ext_idle.h"
 
 /* Enable/disable built-in idle CPU selection policy */
-DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -22,10 +22,10 @@ DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
 #endif
 
 /* Enable/disable LLC aware optimizations */
-DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
 
 /* Enable/disable NUMA aware optimizations */
-DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
 
 static struct {
 	cpumask_var_t cpu;
@@ -441,16 +441,6 @@ cpu_found:
 	return cpu;
 }
 
-void scx_idle_reset_masks(void)
-{
-	/*
-	 * Consider all online cpus idle. Should converge to the actual state
-	 * quickly.
-	 */
-	cpumask_copy(idle_masks.cpu, cpu_online_mask);
-	cpumask_copy(idle_masks.smt, cpu_online_mask);
-}
-
 void scx_idle_init_masks(void)
 {
 	BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
@@ -532,6 +522,29 @@ void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
 }
 #endif /* CONFIG_SMP */
 
+void scx_idle_enable(struct sched_ext_ops *ops)
+{
+	if (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
+		static_branch_disable(&scx_builtin_idle_enabled);
+		return;
+	}
+	static_branch_enable(&scx_builtin_idle_enabled);
+
+#ifdef CONFIG_SMP
+	/*
+	 * Consider all online cpus idle. Should converge to the actual state
+	 * quickly.
+	 */
+	cpumask_copy(idle_masks.cpu, cpu_online_mask);
+	cpumask_copy(idle_masks.smt, cpu_online_mask);
+#endif
+}
+
+void scx_idle_disable(void)
+{
+	static_branch_disable(&scx_builtin_idle_enabled);
+}
+
 /********************************************************************************
  * Helpers that can be called from the BPF scheduler.
  */
diff --git a/kernel/sched/ext_idle.h b/kernel/sched/ext_idle.h
--- a/kernel/sched/ext_idle.h
+++ b/kernel/sched/ext_idle.h
@@ -10,20 +10,15 @@
 #ifndef _KERNEL_SCHED_EXT_IDLE_H
 #define _KERNEL_SCHED_EXT_IDLE_H
 
-extern struct static_key_false scx_builtin_idle_enabled;
+struct sched_ext_ops;
 
 #ifdef CONFIG_SMP
-extern struct static_key_false scx_selcpu_topo_llc;
-extern struct static_key_false scx_selcpu_topo_numa;
-
 void scx_idle_update_selcpu_topology(void);
-void scx_idle_reset_masks(void);
 void scx_idle_init_masks(void);
 bool scx_idle_test_and_clear_cpu(int cpu);
 s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags);
 #else /* !CONFIG_SMP */
 static inline void scx_idle_update_selcpu_topology(void) {}
-static inline void scx_idle_reset_masks(void) {}
 static inline void scx_idle_init_masks(void) {}
 static inline bool scx_idle_test_and_clear_cpu(int cpu) { return false; }
 static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
@@ -33,7 +28,8 @@ static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
 #endif /* CONFIG_SMP */
 
 s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *found);
-
-extern int scx_idle_init(void);
+void scx_idle_enable(struct sched_ext_ops *ops);
+void scx_idle_disable(void);
+int scx_idle_init(void);
 
 #endif /* _KERNEL_SCHED_EXT_IDLE_H */
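To summarize the API boundary this commit draws, here is a condensed before/after sketch of the enable path in ext.c, taken from the hunks above; the enable_idle_before()/enable_idle_after() wrapper names are hypothetical, added only for illustration:

/* Before: ext.c reached into the idle policy's static key directly. */
static void enable_idle_before(struct sched_ext_ops *ops)
{
	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
		scx_idle_reset_masks();
		static_branch_enable(&scx_builtin_idle_enabled);
	} else {
		static_branch_disable(&scx_builtin_idle_enabled);
	}
}

/* After: ext.c delegates; the key and the mask reset stay in ext_idle.c. */
static void enable_idle_after(struct sched_ext_ops *ops)
{
	scx_idle_enable(ops);
}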