Merge: Update intel_idle to upstream 6.17
MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/7315
JIRA: https://issues.redhat.com/browse/RHEL-113139

Some commits have been dropped; however, these have no functional impact.

Signed-off-by: David Arcari <darcari@redhat.com>
Approved-by: Steve Best <sbest@redhat.com>
Approved-by: Tony Camuso <tcamuso@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>
Merged-by: CKI GitLab Kmaint Pipeline Bot <26919896-cki-kmaint-pipeline-bot@users.noreply.gitlab.com>
commit 08cd8aaaef
@@ -299,3 +299,27 @@ struct smp_ops smp_ops = {
 	.send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
 EXPORT_SYMBOL_GPL(smp_ops);
+
+int arch_cpu_rescan_dead_smt_siblings(void)
+{
+	enum cpuhp_smt_control old = cpu_smt_control;
+	int ret;
+
+	/*
+	 * If SMT has been disabled and SMT siblings are in HLT, bring them back
+	 * online and offline them again so that they end up in MWAIT proper.
+	 *
+	 * Called with hotplug enabled.
+	 */
+	if (old != CPU_SMT_DISABLED && old != CPU_SMT_FORCE_DISABLED)
+		return 0;
+
+	ret = cpuhp_smt_enable();
+	if (ret)
+		return ret;
+
+	ret = cpuhp_smt_disable(old);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arch_cpu_rescan_dead_smt_siblings);
@@ -188,7 +188,8 @@ out:
 
 int arch_resume_nosmt(void)
 {
-	int ret = 0;
+	int ret;
+
 	/*
 	 * We reached this while coming out of hibernation. This means
 	 * that SMT siblings are sleeping in hlt, as mwait is not safe
@@ -202,18 +203,10 @@ int arch_resume_nosmt(void)
 	 * Called with hotplug disabled.
 	 */
 	cpu_hotplug_enable();
-	if (cpu_smt_control == CPU_SMT_DISABLED ||
-			cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
-		enum cpuhp_smt_control old = cpu_smt_control;
 
-		ret = cpuhp_smt_enable();
-		if (ret)
-			goto out;
-		ret = cpuhp_smt_disable(old);
-		if (ret)
-			goto out;
-	}
-out:
+	ret = arch_cpu_rescan_dead_smt_siblings();
+
 	cpu_hotplug_disable();
+
 	return ret;
 }
@@ -174,6 +174,12 @@ bool processor_physically_present(acpi_handle handle);
 static inline void acpi_early_processor_control_setup(void) {}
 #endif
 
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void);
+#else
+static inline void acpi_idle_rescan_dead_smt_siblings(void) {}
+#endif
+
 /* --------------------------------------------------------------------------
                                   Embedded Controller
    -------------------------------------------------------------------------- */
@@ -298,6 +298,9 @@ static int __init acpi_processor_driver_init(void)
 	 * after acpi_cppc_processor_probe() has been called for all online CPUs
 	 */
 	acpi_processor_init_invariance_cppc();
+
+	acpi_idle_rescan_dead_smt_siblings();
+
 	return 0;
 err:
 	driver_unregister(&acpi_processor_driver);
@@ -24,6 +24,8 @@
 #include <acpi/processor.h>
 #include <linux/context_tracking.h>
 
+#include "internal.h"
+
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
  * available also for UP (on SMP it gets magically included via linux/smp.h).
@@ -55,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = {
 };
 
 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void)
+{
+	if (cpuidle_get_driver() == &acpi_idle_driver)
+		arch_cpu_rescan_dead_smt_siblings();
+}
+
 static
 DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
 
@@ -48,9 +48,11 @@
 #include <trace/events/power.h>
 #include <linux/sched.h>
 #include <linux/sched/smt.h>
+#include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/moduleparam.h>
+#include <linux/sysfs.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 #include <asm/mwait.h>
@@ -91,9 +93,15 @@ struct idle_cpu {
 	 */
 	unsigned long auto_demotion_disable_flags;
 	bool disable_promotion_to_c1e;
+	bool c1_demotion_supported;
 	bool use_acpi;
 };
 
+static bool c1_demotion_supported;
+static DEFINE_MUTEX(c1_demotion_mutex);
+
+static struct device *sysfs_root __initdata;
+
 static const struct idle_cpu *icpu __initdata;
 static struct cpuidle_state *cpuidle_state_table __initdata;
 
@@ -142,8 +150,8 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev,
 					int index, bool irqoff)
 {
 	struct cpuidle_state *state = &drv->states[index];
-	unsigned long eax = flg2MWAIT(state->flags);
-	unsigned long ecx = 1*irqoff; /* break on interrupt flag */
+	unsigned int eax = flg2MWAIT(state->flags);
+	unsigned int ecx = 1*irqoff; /* break on interrupt flag */
 
 	mwait_idle_with_hints(eax, ecx);
 
@@ -216,9 +224,9 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
 static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
 				       struct cpuidle_driver *drv, int index)
 {
-	unsigned long ecx = 1; /* break on interrupt flag */
 	struct cpuidle_state *state = &drv->states[index];
-	unsigned long eax = flg2MWAIT(state->flags);
+	unsigned int eax = flg2MWAIT(state->flags);
+	unsigned int ecx = 1; /* break on interrupt flag */
 
 	if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
 		fpu_idle_fpregs();
@@ -1548,18 +1556,21 @@ static const struct idle_cpu idle_cpu_gmt __initconst = {
 static const struct idle_cpu idle_cpu_spr __initconst = {
 	.state_table = spr_cstates,
 	.disable_promotion_to_c1e = true,
+	.c1_demotion_supported = true,
 	.use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_gnr __initconst = {
 	.state_table = gnr_cstates,
 	.disable_promotion_to_c1e = true,
+	.c1_demotion_supported = true,
 	.use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_gnrd __initconst = {
 	.state_table = gnrd_cstates,
 	.disable_promotion_to_c1e = true,
+	.c1_demotion_supported = true,
 	.use_acpi = true,
 };
 
@@ -1598,12 +1609,14 @@ static const struct idle_cpu idle_cpu_snr __initconst = {
 static const struct idle_cpu idle_cpu_grr __initconst = {
 	.state_table = grr_cstates,
 	.disable_promotion_to_c1e = true,
+	.c1_demotion_supported = true,
 	.use_acpi = true,
 };
 
 static const struct idle_cpu idle_cpu_srf __initconst = {
 	.state_table = srf_cstates,
 	.disable_promotion_to_c1e = true,
+	.c1_demotion_supported = true,
 	.use_acpi = true,
 };
 
@@ -1664,7 +1677,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 };
 
 static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
-	X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
+	X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL),
 	{}
 };
 
@@ -2323,6 +2336,88 @@ static void __init intel_idle_cpuidle_devices_uninit(void)
 		cpuidle_unregister_device(per_cpu_ptr(intel_idle_cpuidle_devices, i));
 }
 
+static void intel_c1_demotion_toggle(void *enable)
+{
+	unsigned long long msr_val;
+
+	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+	/*
+	 * Enable/disable C1 undemotion along with C1 demotion, as this is the
+	 * most sensible configuration in general.
+	 */
+	if (enable)
+		msr_val |= NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE;
+	else
+		msr_val &= ~(NHM_C1_AUTO_DEMOTE | SNB_C1_AUTO_UNDEMOTE);
+	wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+}
+
+static ssize_t intel_c1_demotion_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	bool enable;
+	int err;
+
+	err = kstrtobool(buf, &enable);
+	if (err)
+		return err;
+
+	mutex_lock(&c1_demotion_mutex);
+	/* Enable/disable C1 demotion on all CPUs */
+	on_each_cpu(intel_c1_demotion_toggle, (void *)enable, 1);
+	mutex_unlock(&c1_demotion_mutex);
+
+	return count;
+}
+
+static ssize_t intel_c1_demotion_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	unsigned long long msr_val;
+
+	/*
+	 * Read the MSR value for a CPU and assume it is the same for all CPUs. Any other
+	 * configuration would be a BIOS bug.
+	 */
+	rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_val);
+	return sysfs_emit(buf, "%d\n", !!(msr_val & NHM_C1_AUTO_DEMOTE));
+}
+static DEVICE_ATTR_RW(intel_c1_demotion);
+
+static int __init intel_idle_sysfs_init(void)
+{
+	int err;
+
+	if (!c1_demotion_supported)
+		return 0;
+
+	sysfs_root = bus_get_dev_root(&cpu_subsys);
+	if (!sysfs_root)
+		return 0;
+
+	err = sysfs_add_file_to_group(&sysfs_root->kobj,
+				      &dev_attr_intel_c1_demotion.attr,
+				      "cpuidle");
+	if (err) {
+		put_device(sysfs_root);
+		return err;
+	}
+
+	return 0;
+}
+
+static void __init intel_idle_sysfs_uninit(void)
+{
+	if (!sysfs_root)
+		return;
+
+	sysfs_remove_file_from_group(&sysfs_root->kobj,
+				     &dev_attr_intel_c1_demotion.attr,
+				     "cpuidle");
+	put_device(sysfs_root);
+}
+
 static int __init intel_idle_init(void)
 {
 	const struct x86_cpu_id *id;
@@ -2373,6 +2468,8 @@ static int __init intel_idle_init(void)
 		auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
 		if (icpu->disable_promotion_to_c1e)
 			c1e_promotion = C1E_PROMOTION_DISABLE;
+		if (icpu->c1_demotion_supported)
+			c1_demotion_supported = true;
 		if (icpu->use_acpi || force_use_acpi)
 			intel_idle_acpi_cst_extract();
 	} else if (!intel_idle_acpi_cst_extract()) {
@@ -2386,6 +2483,10 @@ static int __init intel_idle_init(void)
 	if (!intel_idle_cpuidle_devices)
 		return -ENOMEM;
 
+	retval = intel_idle_sysfs_init();
+	if (retval)
+		pr_warn("failed to initialized sysfs");
+
 	intel_idle_cpuidle_driver_init(&intel_idle_driver);
 
 	retval = cpuidle_register_driver(&intel_idle_driver);
@@ -2404,17 +2505,20 @@ static int __init intel_idle_init(void)
 	pr_debug("Local APIC timer is reliable in %s\n",
 		 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");
 
+	arch_cpu_rescan_dead_smt_siblings();
+
 	return 0;
 
 hp_setup_fail:
 	intel_idle_cpuidle_devices_uninit();
 	cpuidle_unregister_driver(&intel_idle_driver);
 init_driver_fail:
+	intel_idle_sysfs_uninit();
 	free_percpu(intel_idle_cpuidle_devices);
 	return retval;
 
 }
-device_initcall(intel_idle_init);
+subsys_initcall_sync(intel_idle_init);
 
 /*
  * We are not really modular, but we used to support that. Meaning we also
@@ -116,6 +116,7 @@ extern void cpu_maps_update_begin(void);
 extern void cpu_maps_update_done(void);
 int bringup_hibernate_cpu(unsigned int sleep_cpu);
 void bringup_nonboot_cpus(unsigned int setup_max_cpus);
+int arch_cpu_rescan_dead_smt_siblings(void);
 
 #else /* CONFIG_SMP */
 #define cpuhp_tasks_frozen	0
@@ -130,6 +131,8 @@ static inline void cpu_maps_update_done(void)
 
 static inline int add_cpu(unsigned int cpu) { return 0;}
 
+static inline int arch_cpu_rescan_dead_smt_siblings(void) { return 0; }
+
 #endif /* CONFIG_SMP */
 extern struct bus_type cpu_subsys;
 