sched: Provide rt_mutex specific scheduler helpers
JIRA: https://issues.redhat.com/browse/RHEL-28616

commit 6b596e62ed9f90c4a97e68ae1f7b1af5beeb3c05
Author: Peter Zijlstra <peterz@infradead.org>
Date:   Fri, 8 Sep 2023 18:22:51 +0200

    sched: Provide rt_mutex specific scheduler helpers

    With PREEMPT_RT there is a rt_mutex recursion problem where
    sched_submit_work() can use an rtlock (aka spinlock_t). More
    specifically what happens is:

      mutex_lock() /* really rt_mutex */
        ...
          __rt_mutex_slowlock_locked()
            task_blocks_on_rt_mutex()
              // enqueue current task as waiter
              // do PI chain walk
            rt_mutex_slowlock_block()
              schedule()
                sched_submit_work()
                  ...
                  spin_lock() /* really rtlock */
                    ...
                      __rt_mutex_slowlock_locked()
                        task_blocks_on_rt_mutex()
                          // enqueue current task as waiter *AGAIN*
                          // *CONFUSION*

    Fix this by making rt_mutex do the sched_submit_work() early, before
    it enqueues itself as a waiter -- before it even knows *if* it will
    wait.

    [[ basically Thomas' patch but with different naming and a few
       asserts added ]]

    Originally-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Link: https://lkml.kernel.org/r/20230908162254.999499-5-bigeasy@linutronix.de

Signed-off-by: Waiman Long <longman@redhat.com>
This commit is contained in: parent d11684a859, commit 707cdda6c1
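The fix hinges on the three helpers added by the diff below: rt_mutex_pre_schedule() does the sched_submit_work() part up front, rt_mutex_schedule() then blocks without resubmitting work, and rt_mutex_post_schedule() restores the worker bookkeeping. The actual conversion of the rt_mutex slow path to these helpers is a separate patch in this series; the sketch below is only an approximation of such a caller (the slow-path function name and signature are assumptions, not part of this patch):

/*
 * Sketch only: loosely modelled on the rt_mutex slow path; not part of
 * this patch. Shows the expected pairing of the new helpers.
 */
static int __sched example_rt_mutex_slowlock(struct rt_mutex_base *lock,
					     unsigned int state)
{
	unsigned long flags;
	int ret;

	/*
	 * Flush block requests (sched_submit_work()) *before* this task can
	 * possibly enqueue itself as a waiter -- before it even knows
	 * whether it will block at all.
	 */
	rt_mutex_pre_schedule();

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/*
	 * Any blocking inside this section uses rt_mutex_schedule(), which
	 * calls __schedule_loop() directly and therefore cannot recurse
	 * through sched_submit_work() -> spin_lock() into rt_mutex again.
	 */
	ret = __rt_mutex_slowlock_locked(lock, NULL, state);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Pairs with rt_mutex_pre_schedule(); runs sched_update_worker(). */
	rt_mutex_post_schedule();

	return ret;
}

Note that with this patch alone nothing calls the helpers yet; schedule() merely gains the lockdep_assert(!tsk->sched_rt_mutex) check and the task_is_running() test moves from sched_submit_work() into its caller.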
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -921,6 +921,9 @@ struct task_struct {
 	 * ->sched_remote_wakeup gets used, so it can be in this word.
 	 */
 	unsigned			sched_remote_wakeup:1;
+#ifdef CONFIG_RT_MUTEXES
+	unsigned			sched_rt_mutex:1;
+#endif
 
 	/* Bit to tell LSMs we're in execve(): */
 	unsigned			in_execve:1;
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
 }
 
 #ifdef CONFIG_RT_MUTEXES
+extern void rt_mutex_pre_schedule(void);
+extern void rt_mutex_schedule(void);
+extern void rt_mutex_post_schedule(void);
+
 /*
  * Must hold either p->pi_lock or task_rq(p)->lock.
  */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6763,9 +6763,6 @@ static inline void sched_submit_work(struct task_struct *tsk)
 	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
 	unsigned int task_flags;
 
-	if (task_is_running(tsk))
-		return;
-
 	/*
 	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
 	 * will use a blocking primitive -- which would lead to recursion.
@@ -6823,7 +6820,12 @@ asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
-	sched_submit_work(tsk);
+#ifdef CONFIG_RT_MUTEXES
+	lockdep_assert(!tsk->sched_rt_mutex);
+#endif
+
+	if (!task_is_running(tsk))
+		sched_submit_work(tsk);
 	__schedule_loop(SM_NONE);
 	sched_update_worker(tsk);
 }
@@ -7113,6 +7115,32 @@ static void __setscheduler_prio(struct task_struct *p, int prio)
 
 #ifdef CONFIG_RT_MUTEXES
 
+/*
+ * Would be more useful with typeof()/auto_type but they don't mix with
+ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
+ * name such that if someone were to implement this function we get to compare
+ * notes.
+ */
+#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })
+
+void rt_mutex_pre_schedule(void)
+{
+	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
+	sched_submit_work(current);
+}
+
+void rt_mutex_schedule(void)
+{
+	lockdep_assert(current->sched_rt_mutex);
+	__schedule_loop(SM_NONE);
+}
+
+void rt_mutex_post_schedule(void)
+{
+	sched_update_worker(current);
+	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
+}
+
 static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
 {
 	if (pi_task)
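A note on the assertions in rt_mutex_pre_schedule()/rt_mutex_post_schedule(): fetch_and_set() evaluates to the old value while storing the new one, so the lockdep_assert() calls verify that the sched_rt_mutex flag goes 0 -> 1 on pre_schedule and 1 -> 0 on post_schedule, catching unbalanced or nested use. A standalone illustration of that flag discipline (plain userspace C with a dummy bit-field, requires GCC/Clang statement expressions; not part of the patch):

#include <assert.h>
#include <stdio.h>

/* Same statement-expression macro as in the patch above. */
#define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; })

int main(void)
{
	/* Stand-in for current->sched_rt_mutex. */
	struct { unsigned sched_rt_mutex:1; } task = { 0 };
	int old;

	/* pre_schedule: flag must have been clear, and is now set. */
	old = fetch_and_set(task.sched_rt_mutex, 1);
	assert(old == 0 && task.sched_rt_mutex == 1);

	/* post_schedule: flag must have been set, and is now clear. */
	old = fetch_and_set(task.sched_rt_mutex, 0);
	assert(old == 1 && task.sched_rt_mutex == 0);

	printf("fetch_and_set() flag discipline holds\n");
	return 0;
}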