Merge: Support PREEMPT_DYNAMIC on aarch64
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/673
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2065226
Upstream Status: Linux
Tested: Stress tests on aarch64 after runtime changes to the preempt setting.

Support PREEMPT_DYNAMIC for arm64 to match x86_64. The RHEL configs already
enable this in common, so all that is needed is to enable it at the arch level.

Signed-off-by: Phil Auld <pauld@redhat.com>
Approved-by: Mark Salter <msalter@redhat.com>
Approved-by: Prarit Bhargava <prarit@redhat.com>
Approved-by: David Arcari <darcari@redhat.com>
Approved-by: Mark Langsdorf <mlangsdo@redhat.com>
Signed-off-by: Patrick Talbert <ptalbert@redhat.com>
commit 1529ab2e9e
arch/Kconfig
@@ -1251,12 +1251,41 @@ config HAVE_STATIC_CALL_INLINE
 config HAVE_PREEMPT_DYNAMIC
 	bool
+
+config HAVE_PREEMPT_DYNAMIC_CALL
+	bool
 	depends on HAVE_STATIC_CALL
-	depends on GENERIC_ENTRY
+	select HAVE_PREEMPT_DYNAMIC
 	help
-	  Select this if the architecture support boot time preempt setting
-	  on top of static calls. It is strongly advised to support inline
-	  static call to avoid any overhead.
+	  An architecture should select this if it can handle the preemption
+	  model being selected at boot time using static calls.
+
+	  Where an architecture selects HAVE_STATIC_CALL_INLINE, any call to a
+	  preemption function will be patched directly.
+
+	  Where an architecture does not select HAVE_STATIC_CALL_INLINE, any
+	  call to a preemption function will go through a trampoline, and the
+	  trampoline will be patched.
+
+	  It is strongly advised to support inline static call to avoid any
+	  overhead.
+
+config HAVE_PREEMPT_DYNAMIC_KEY
+	bool
+	depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
+	select HAVE_PREEMPT_DYNAMIC
+	help
+	  An architecture should select this if it can handle the preemption
+	  model being selected at boot time using static keys.
+
+	  Each preemption function will be given an early return based on a
+	  static key. This should have slightly lower overhead than non-inline
+	  static calls, as this effectively inlines each trampoline into the
+	  start of its callee. This may avoid redundant work, and may
+	  integrate better with CFI schemes.
+
+	  This will have greater overhead than using inline static calls as
+	  the call to the preemption function cannot be entirely elided.
 
 config ARCH_WANT_LD_ORPHAN_WARN
 	bool
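Both mechanisms give the kernel one patchable redirection point per preemption
function; they differ in where the patching happens. A minimal sketch of each
pattern, using hypothetical names (my_func_real, my_func, sk_my_func) rather
than anything from this series:

#include <linux/static_call.h>
#include <linux/jump_label.h>

static void my_func_real(void) { /* the real work */ }

/* CALL flavour: callers go through a patchable call site or trampoline. */
DEFINE_STATIC_CALL(my_func, my_func_real);
/* invoked as:   static_call(my_func)();
 * retargeted:   static_call_update(my_func, NULL);  -- becomes a NOP */

/* KEY flavour: the callee itself begins with a patchable early return. */
static DEFINE_STATIC_KEY_TRUE(sk_my_func);
void dynamic_my_func(void)
{
	/* one NOP/branch instruction, flipped by jump-label patching */
	if (!static_branch_unlikely(&sk_my_func))
		return;
	my_func_real();
}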
arch/arm64/Kconfig

@@ -193,6 +193,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_DYNAMIC_KEY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUTEX_CMPXCHG if FUTEX
arch/arm64/include/asm/preempt.h

@@ -2,6 +2,7 @@
 #ifndef __ASM_PREEMPT_H
 #define __ASM_PREEMPT_H
 
+#include <linux/jump_label.h>
 #include <linux/thread_info.h>
 
 #define PREEMPT_NEED_RESCHED BIT(32)

@@ -80,10 +81,24 @@ static inline bool should_resched(int preempt_offset)
 }
 
 #ifdef CONFIG_PREEMPTION
+
 void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
 void preempt_schedule_notrace(void);
-#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_preempt_schedule(void);
+#define __preempt_schedule() dynamic_preempt_schedule()
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+
+#else /* CONFIG_PREEMPT_DYNAMIC */
+
+#define __preempt_schedule() preempt_schedule()
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC */
 #endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
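The redirection above is invisible at call sites: anything that expands
__preempt_schedule() now reaches the key-guarded wrapper. Roughly how it is
reached, simplified from the generic preempt_enable() in
include/linux/preempt.h (shown for orientation, not part of this diff):

#define preempt_enable()				\
do {							\
	barrier();					\
	if (unlikely(preempt_count_dec_and_test()))	\
		__preempt_schedule();			\
} while (0)
/* On arm64 with PREEMPT_DYNAMIC, __preempt_schedule() expands to
 * dynamic_preempt_schedule(), whose static key makes it a near-NOP
 * when full preemption is disabled at run time. */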
arch/arm64/kernel/entry-common.c

@@ -121,9 +121,26 @@ static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 	exit_to_kernel_mode(regs);
 }
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+#define need_irq_preemption() \
+	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+#else
+#define need_irq_preemption() (IS_ENABLED(CONFIG_PREEMPTION))
+#endif
+
 static void __sched arm64_preempt_schedule_irq(void)
 {
 	lockdep_assert_irqs_disabled();
 
+	if (!need_irq_preemption())
+		return;
+
+	/*
+	 * Note: thread_info::preempt_count includes both thread_info::count
+	 * and thread_info::need_resched, and is not equivalent to
+	 * preempt_count().
+	 */
+	if (READ_ONCE(current_thread_info()->preempt_count) != 0)
+		return;
+
 	/*
 	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC

@@ -350,14 +367,7 @@ static void noinstr el1_interrupt(struct pt_regs *regs,
 	enter_el1_irq_or_nmi(regs);
 	do_interrupt_handler(regs, handler);
 
-	/*
-	 * Note: thread_info::preempt_count includes both thread_info::count
-	 * and thread_info::need_resched, and is not equivalent to
-	 * preempt_count().
-	 */
-	if (IS_ENABLED(CONFIG_PREEMPTION) &&
-	    READ_ONCE(current_thread_info()->preempt_count) == 0)
-		arm64_preempt_schedule_irq();
+	arm64_preempt_schedule_irq();
 
 	exit_el1_irq_or_nmi(regs);
 }
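Note the key's polarity: it is defined with DEFINE_STATIC_KEY_TRUE, so a
PREEMPT_DYNAMIC kernel starts out preempting on IRQ exit as if fully
preemptible, and sched_dynamic_update() (further down in this series)
disables the key when the chosen mode is none or voluntary.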
arch/x86/Kconfig

@@ -242,7 +242,7 @@ config X86
 	select HAVE_STACK_VALIDATION if X86_64
 	select HAVE_STATIC_CALL
 	select HAVE_STATIC_CALL_INLINE if HAVE_STACK_VALIDATION
-	select HAVE_PREEMPT_DYNAMIC
+	select HAVE_PREEMPT_DYNAMIC_CALL
 	select HAVE_RSEQ
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UNSTABLE_SCHED_CLOCK
arch/x86/include/asm/preempt.h

@@ -108,16 +108,18 @@ static __always_inline bool should_resched(int preempt_offset)
 extern asmlinkage void preempt_schedule(void);
 extern asmlinkage void preempt_schedule_thunk(void);
 
-#define __preempt_schedule_func preempt_schedule_thunk
+#define preempt_schedule_dynamic_enabled preempt_schedule_thunk
+#define preempt_schedule_dynamic_disabled NULL
 
 extern asmlinkage void preempt_schedule_notrace(void);
 extern asmlinkage void preempt_schedule_notrace_thunk(void);
 
-#define __preempt_schedule_notrace_func preempt_schedule_notrace_thunk
+#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace_thunk
+#define preempt_schedule_notrace_dynamic_disabled NULL
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
 
-DECLARE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
+DECLARE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
 
 #define __preempt_schedule() \
 do { \

@@ -125,7 +127,7 @@ do { \
 	asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
 } while (0)
 
-DECLARE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+DECLARE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
 
 #define __preempt_schedule_notrace() \
 do { \
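The *_dynamic_enabled/*_dynamic_disabled pair is a naming contract: the
generic helpers added later in kernel/sched/core.c pick their
static_call_update() targets by token pasting, so each architecture only has
to supply these two macros. Tracing one expansion by hand on x86:

/* preempt_dynamic_enable(f) is defined later in this series as
 *   static_call_update(f, f##_dynamic_enabled)
 * so: */
preempt_dynamic_enable(preempt_schedule);
/* -> static_call_update(preempt_schedule, preempt_schedule_dynamic_enabled)
 * -> static_call_update(preempt_schedule, preempt_schedule_thunk) */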
include/linux/entry-common.h

@@ -454,10 +454,21 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
 *
 * Conditional reschedule with additional sanity checks.
 */
-void irqentry_exit_cond_resched(void);
+void raw_irqentry_exit_cond_resched(void);
 #ifdef CONFIG_PREEMPT_DYNAMIC
-DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
+#define irqentry_exit_cond_resched_dynamic_disabled NULL
+DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void);
+#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
+#endif
+#else /* CONFIG_PREEMPT_DYNAMIC */
+#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
+#endif /* CONFIG_PREEMPT_DYNAMIC */
 
 /**
  * irqentry_exit - Handle return from exception that used irqentry_enter()
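The net effect is that callers never name the mechanism; the one macro
resolves per configuration. A sketch of the call-site shape this enables
(the resolution table is read off the hunk above):

/* irqentry_exit_cond_resched() now resolves to:
 *   !PREEMPT_DYNAMIC          -> raw_irqentry_exit_cond_resched()
 *   HAVE_PREEMPT_DYNAMIC_CALL -> static_call(irqentry_exit_cond_resched)()
 *   HAVE_PREEMPT_DYNAMIC_KEY  -> dynamic_irqentry_exit_cond_resched()
 */
if (IS_ENABLED(CONFIG_PREEMPTION))
	irqentry_exit_cond_resched();	/* as used in kernel/entry/common.c below */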
include/linux/kernel.h

@@ -93,7 +93,7 @@ struct user;
 extern int __cond_resched(void);
 # define might_resched() __cond_resched()
 
-#elif defined(CONFIG_PREEMPT_DYNAMIC)
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 
 extern int __cond_resched(void);
 

@@ -104,6 +104,11 @@ static __always_inline void might_resched(void)
 	static_call_mod(might_resched)();
 }
 
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+extern int dynamic_might_resched(void);
+# define might_resched() dynamic_might_resched()
+
 #else
 
 # define might_resched() do { } while (0)
include/linux/sched.h

@@ -2032,7 +2032,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
 extern int __cond_resched(void);
 
-#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
 
 DECLARE_STATIC_CALL(cond_resched, __cond_resched);
 

@@ -2041,6 +2041,14 @@ static __always_inline int _cond_resched(void)
 	return static_call_mod(cond_resched)();
 }
 
+#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+extern int dynamic_cond_resched(void);
+
+static __always_inline int _cond_resched(void)
+{
+	return dynamic_cond_resched();
+}
+
 #else
 
 static inline int _cond_resched(void)
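For ordinary kernel code nothing changes at the source level; only what
cond_resched() compiles down to differs. A hypothetical caller, with
have_items() and handle_one_item() made up for illustration:

static int process_items(void)
{
	while (have_items()) {		/* hypothetical work loop */
		handle_one_item();
		/* static call, static-key wrapper, or plain __cond_resched(),
		 * chosen entirely by the config symbols above */
		cond_resched();
	}
	return 0;
}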
init/Makefile

@@ -31,7 +31,8 @@ quiet_cmd_compile.h = CHK $@
       cmd_compile.h = \
	$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT_BUILD)" \
-	"$(CONFIG_PREEMPT_RT)" $(CONFIG_CC_VERSION_TEXT) "$(LD)"
+	"${CONFIG_PREEMPT_DYNAMIC}" "$(CONFIG_PREEMPT_RT)" \
+	$(CONFIG_CC_VERSION_TEXT) "$(LD)"
 
 include/generated/compile.h: FORCE
	$(call cmd,compile.h)
kernel/Kconfig.preempt

@@ -96,8 +96,9 @@ config PREEMPTION
 config PREEMPT_DYNAMIC
	bool "Preemption behaviour defined on boot"
	depends on HAVE_PREEMPT_DYNAMIC && !PREEMPT_RT
+	select JUMP_LABEL if HAVE_PREEMPT_DYNAMIC_KEY
	select PREEMPT_BUILD
-	default y
+	default y if HAVE_PREEMPT_DYNAMIC_CALL
	help
	  This option allows to define the preemption model on the kernel
	  command line parameter and thus override the default preemption
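Two details worth noting here: JUMP_LABEL is pulled in only when the
static-key mechanism will actually be used, and `default y` is narrowed to
the static-call case, so static-key architectures such as arm64 opt in
explicitly (which the RHEL common configs already do, per the merge
description). The model itself is then chosen with preempt=none,
preempt=voluntary or preempt=full on the kernel command line, handled by
setup_preempt_mode() further down in this diff.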
kernel/entry/common.c

@@ -3,6 +3,7 @@
 #include <linux/context_tracking.h>
 #include <linux/entry-common.h>
 #include <linux/highmem.h>
+#include <linux/jump_label.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
 #include <linux/tick.h>

@@ -382,7 +383,7 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
 	return ret;
 }
 
-void irqentry_exit_cond_resched(void)
+void raw_irqentry_exit_cond_resched(void)
 {
 	if (!preempt_count()) {
 		/* Sanity check RCU and thread stack */

@@ -394,7 +395,17 @@ void irqentry_exit_cond_resched(void)
 	}
 }
 #ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
+void dynamic_irqentry_exit_cond_resched(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+		return;
+	raw_irqentry_exit_cond_resched();
+}
+#endif
 #endif
 
 noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)

@@ -422,13 +433,9 @@ noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
 	}
 
 	instrumentation_begin();
-	if (IS_ENABLED(CONFIG_PREEMPTION)) {
-#ifdef CONFIG_PREEMPT_DYNAMIC
-		static_call(irqentry_exit_cond_resched)();
-#else
+	if (IS_ENABLED(CONFIG_PREEMPTION))
 		irqentry_exit_cond_resched();
-#endif
-	}
+
 	/* Covers both tracing and lockdep */
 	trace_hardirqs_on();
 	instrumentation_end();
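This last hunk is the payoff of the header rework: irqentry_exit() drops its
open-coded #ifdef and calls irqentry_exit_cond_resched() unconditionally
under IS_ENABLED(CONFIG_PREEMPTION), trusting the macro to resolve to the raw
call, the static call, or the static-key wrapper.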
kernel/sched/core.c

@@ -14,6 +14,7 @@
 
 #include <linux/nospec.h>
 #include <linux/blkdev.h>
+#include <linux/jump_label.h>
 #include <linux/kcov.h>
 #include <linux/scs.h>
 

@@ -6492,17 +6493,31 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 	 */
 	if (likely(!preemptible()))
 		return;
 
 	preempt_schedule_common();
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
-EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#ifndef preempt_schedule_dynamic_enabled
+#define preempt_schedule_dynamic_enabled preempt_schedule
+#define preempt_schedule_dynamic_disabled NULL
+#endif
+DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
+EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
+void __sched notrace dynamic_preempt_schedule(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
+		return;
+	preempt_schedule();
+}
+NOKPROBE_SYMBOL(dynamic_preempt_schedule);
+EXPORT_SYMBOL(dynamic_preempt_schedule);
+#endif
 #endif
 
 
 /**
  * preempt_schedule_notrace - preempt_schedule called by tracing

@@ -6557,148 +6572,28 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#ifndef preempt_schedule_notrace_dynamic_enabled
+#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
+#define preempt_schedule_notrace_dynamic_disabled NULL
+#endif
+DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
 EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
+void __sched notrace dynamic_preempt_schedule_notrace(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
+		return;
+	preempt_schedule_notrace();
+}
+NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
+EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
+#endif
 #endif
 
 #endif /* CONFIG_PREEMPTION */
 
-#ifdef CONFIG_PREEMPT_DYNAMIC
-
-#include <linux/entry-common.h>
-
-/*
- * SC:cond_resched
- * SC:might_resched
- * SC:preempt_schedule
- * SC:preempt_schedule_notrace
- * SC:irqentry_exit_cond_resched
- *
- *
- * NONE:
- *   cond_resched               <- __cond_resched
- *   might_resched              <- RET0
- *   preempt_schedule           <- NOP
- *   preempt_schedule_notrace   <- NOP
- *   irqentry_exit_cond_resched <- NOP
- *
- * VOLUNTARY:
- *   cond_resched               <- __cond_resched
- *   might_resched              <- __cond_resched
- *   preempt_schedule           <- NOP
- *   preempt_schedule_notrace   <- NOP
- *   irqentry_exit_cond_resched <- NOP
- *
- * FULL:
- *   cond_resched               <- RET0
- *   might_resched              <- RET0
- *   preempt_schedule           <- preempt_schedule
- *   preempt_schedule_notrace   <- preempt_schedule_notrace
- *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
- */
-
-enum {
-	preempt_dynamic_undefined = -1,
-	preempt_dynamic_none,
-	preempt_dynamic_voluntary,
-	preempt_dynamic_full,
-};
-
-int preempt_dynamic_mode = preempt_dynamic_undefined;
-
-int sched_dynamic_mode(const char *str)
-{
-	if (!strcmp(str, "none"))
-		return preempt_dynamic_none;
-
-	if (!strcmp(str, "voluntary"))
-		return preempt_dynamic_voluntary;
-
-	if (!strcmp(str, "full"))
-		return preempt_dynamic_full;
-
-	return -EINVAL;
-}
-
-void sched_dynamic_update(int mode)
-{
-	/*
-	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
-	 * the ZERO state, which is invalid.
-	 */
-	static_call_update(cond_resched, __cond_resched);
-	static_call_update(might_resched, __cond_resched);
-	static_call_update(preempt_schedule, __preempt_schedule_func);
-	static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
-	static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
-
-	switch (mode) {
-	case preempt_dynamic_none:
-		static_call_update(cond_resched, __cond_resched);
-		static_call_update(might_resched, (void *)&__static_call_return0);
-		static_call_update(preempt_schedule, NULL);
-		static_call_update(preempt_schedule_notrace, NULL);
-		static_call_update(irqentry_exit_cond_resched, NULL);
-		pr_info("Dynamic Preempt: none\n");
-		break;
-
-	case preempt_dynamic_voluntary:
-		static_call_update(cond_resched, __cond_resched);
-		static_call_update(might_resched, __cond_resched);
-		static_call_update(preempt_schedule, NULL);
-		static_call_update(preempt_schedule_notrace, NULL);
-		static_call_update(irqentry_exit_cond_resched, NULL);
-		pr_info("Dynamic Preempt: voluntary\n");
-		break;
-
-	case preempt_dynamic_full:
-		static_call_update(cond_resched, (void *)&__static_call_return0);
-		static_call_update(might_resched, (void *)&__static_call_return0);
-		static_call_update(preempt_schedule, __preempt_schedule_func);
-		static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
-		static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
-		pr_info("Dynamic Preempt: full\n");
-		break;
-	}
-
-	preempt_dynamic_mode = mode;
-}
-
-static int __init setup_preempt_mode(char *str)
-{
-	int mode = sched_dynamic_mode(str);
-	if (mode < 0) {
-		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
-		return 0;
-	}
-
-	sched_dynamic_update(mode);
-	return 1;
-}
-__setup("preempt=", setup_preempt_mode);
-
-static void __init preempt_dynamic_init(void)
-{
-	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
-		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
-			sched_dynamic_update(preempt_dynamic_none);
-		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
-			sched_dynamic_update(preempt_dynamic_voluntary);
-		} else {
-			/* Default static call setting, nothing to do */
-			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
-			preempt_dynamic_mode = preempt_dynamic_full;
-			pr_info("Dynamic Preempt: full\n");
-		}
-	}
-}
-
-#else /* !CONFIG_PREEMPT_DYNAMIC */
-
-static inline void preempt_dynamic_init(void) { }
-
-#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
-
 /*
  * This is the entry point to schedule() from kernel preemption
  * off of irq context.

@@ -8204,11 +8099,35 @@ EXPORT_SYMBOL(__cond_resched);
 #endif
 
 #ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define cond_resched_dynamic_enabled __cond_resched
+#define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
 DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
 EXPORT_STATIC_CALL_TRAMP(cond_resched);
 
+#define might_resched_dynamic_enabled __cond_resched
+#define might_resched_dynamic_disabled ((void *)&__static_call_return0)
 DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
 EXPORT_STATIC_CALL_TRAMP(might_resched);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
+int __sched dynamic_cond_resched(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_cond_resched))
+		return 0;
+	return __cond_resched();
+}
+EXPORT_SYMBOL(dynamic_cond_resched);
+
+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
+int __sched dynamic_might_resched(void)
+{
+	if (!static_branch_unlikely(&sk_dynamic_might_resched))
+		return 0;
+	return __cond_resched();
+}
+EXPORT_SYMBOL(dynamic_might_resched);
+#endif
 #endif
 
 /*

@@ -8273,6 +8192,154 @@ int __cond_resched_rwlock_write(rwlock_t *lock)
 }
 EXPORT_SYMBOL(__cond_resched_rwlock_write);
 
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+#ifdef CONFIG_GENERIC_ENTRY
+#include <linux/entry-common.h>
+#endif
+
+/*
+ * SC:cond_resched
+ * SC:might_resched
+ * SC:preempt_schedule
+ * SC:preempt_schedule_notrace
+ * SC:irqentry_exit_cond_resched
+ *
+ *
+ * NONE:
+ *   cond_resched               <- __cond_resched
+ *   might_resched              <- RET0
+ *   preempt_schedule           <- NOP
+ *   preempt_schedule_notrace   <- NOP
+ *   irqentry_exit_cond_resched <- NOP
+ *
+ * VOLUNTARY:
+ *   cond_resched               <- __cond_resched
+ *   might_resched              <- __cond_resched
+ *   preempt_schedule           <- NOP
+ *   preempt_schedule_notrace   <- NOP
+ *   irqentry_exit_cond_resched <- NOP
+ *
+ * FULL:
+ *   cond_resched               <- RET0
+ *   might_resched              <- RET0
+ *   preempt_schedule           <- preempt_schedule
+ *   preempt_schedule_notrace   <- preempt_schedule_notrace
+ *   irqentry_exit_cond_resched <- irqentry_exit_cond_resched
+ */
+
+enum {
+	preempt_dynamic_undefined = -1,
+	preempt_dynamic_none,
+	preempt_dynamic_voluntary,
+	preempt_dynamic_full,
+};
+
+int preempt_dynamic_mode = preempt_dynamic_undefined;
+
+int sched_dynamic_mode(const char *str)
+{
+	if (!strcmp(str, "none"))
+		return preempt_dynamic_none;
+
+	if (!strcmp(str, "voluntary"))
+		return preempt_dynamic_voluntary;
+
+	if (!strcmp(str, "full"))
+		return preempt_dynamic_full;
+
+	return -EINVAL;
+}
+
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define preempt_dynamic_enable(f)	static_call_update(f, f##_dynamic_enabled)
+#define preempt_dynamic_disable(f)	static_call_update(f, f##_dynamic_disabled)
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+#define preempt_dynamic_enable(f)	static_key_enable(&sk_dynamic_##f.key)
+#define preempt_dynamic_disable(f)	static_key_disable(&sk_dynamic_##f.key)
+#else
+#error "Unsupported PREEMPT_DYNAMIC mechanism"
+#endif
+
+void sched_dynamic_update(int mode)
+{
+	/*
+	 * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
+	 * the ZERO state, which is invalid.
+	 */
+	preempt_dynamic_enable(cond_resched);
+	preempt_dynamic_enable(might_resched);
+	preempt_dynamic_enable(preempt_schedule);
+	preempt_dynamic_enable(preempt_schedule_notrace);
+	preempt_dynamic_enable(irqentry_exit_cond_resched);
+
+	switch (mode) {
+	case preempt_dynamic_none:
+		preempt_dynamic_enable(cond_resched);
+		preempt_dynamic_disable(might_resched);
+		preempt_dynamic_disable(preempt_schedule);
+		preempt_dynamic_disable(preempt_schedule_notrace);
+		preempt_dynamic_disable(irqentry_exit_cond_resched);
+		pr_info("Dynamic Preempt: none\n");
+		break;
+
+	case preempt_dynamic_voluntary:
+		preempt_dynamic_enable(cond_resched);
+		preempt_dynamic_enable(might_resched);
+		preempt_dynamic_disable(preempt_schedule);
+		preempt_dynamic_disable(preempt_schedule_notrace);
+		preempt_dynamic_disable(irqentry_exit_cond_resched);
+		pr_info("Dynamic Preempt: voluntary\n");
+		break;
+
+	case preempt_dynamic_full:
+		preempt_dynamic_disable(cond_resched);
+		preempt_dynamic_disable(might_resched);
+		preempt_dynamic_enable(preempt_schedule);
+		preempt_dynamic_enable(preempt_schedule_notrace);
+		preempt_dynamic_enable(irqentry_exit_cond_resched);
+		pr_info("Dynamic Preempt: full\n");
+		break;
+	}
+
+	preempt_dynamic_mode = mode;
+}
+
+static int __init setup_preempt_mode(char *str)
+{
+	int mode = sched_dynamic_mode(str);
+	if (mode < 0) {
+		pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
+		return 0;
+	}
+
+	sched_dynamic_update(mode);
+	return 1;
+}
+__setup("preempt=", setup_preempt_mode);
+
+static void __init preempt_dynamic_init(void)
+{
+	if (preempt_dynamic_mode == preempt_dynamic_undefined) {
+		if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
+			sched_dynamic_update(preempt_dynamic_none);
+		} else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
+			sched_dynamic_update(preempt_dynamic_voluntary);
+		} else {
+			/* Default static call setting, nothing to do */
+			WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
+			preempt_dynamic_mode = preempt_dynamic_full;
+			pr_info("Dynamic Preempt: full\n");
+		}
+	}
+}
+
+#else /* !CONFIG_PREEMPT_DYNAMIC */
+
+static inline void preempt_dynamic_init(void) { }
+
+#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
+
 /**
  * yield - yield the current processor to other threads.
  *
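The preempt= boot handler above is also the simplest way to see the whole
machine in motion: parse the mode string, then flip all five redirection
points in one place. On a static-key architecture such as arm64, for example:

/* What setup_preempt_mode("voluntary") amounts to: */
sched_dynamic_update(sched_dynamic_mode("voluntary"));
/* -> sk_dynamic_cond_resched  enabled
 *    sk_dynamic_might_resched enabled
 *    sk_dynamic_preempt_schedule, sk_dynamic_preempt_schedule_notrace,
 *    sk_dynamic_irqentry_exit_cond_resched disabled
 *    and "Dynamic Preempt: voluntary" logged */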
scripts/mkcompile_h

@@ -5,9 +5,10 @@ TARGET=$1
 ARCH=$2
 SMP=$3
 PREEMPT=$4
-PREEMPT_RT=$5
-CC_VERSION="$6"
-LD=$7
+PREEMPT_DYNAMIC=$5
+PREEMPT_RT=$6
+CC_VERSION="$7"
+LD=$8
 
 # Do not expand names
 set -f

@@ -41,8 +42,14 @@ fi
 UTS_VERSION="#$VERSION"
 CONFIG_FLAGS=""
 if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
-if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
-if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi
+
+if [ -n "$PREEMPT_RT" ] ; then
+	CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"
+elif [ -n "$PREEMPT_DYNAMIC" ] ; then
+	CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_DYNAMIC"
+elif [ -n "$PREEMPT" ] ; then
+	CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"
+fi
 
 # Truncate to maximum length
 UTS_LEN=64
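Because the flags are now mutually exclusive and ordered by precedence, a
PREEMPT_DYNAMIC build identifies itself as such rather than as plain PREEMPT;
`uname -v` on such a kernel reports something along the lines of
`#1 SMP PREEMPT_DYNAMIC ...` (illustrative; the exact string varies by build).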