commit df86f912b4
Merge tag 'x86-urgent-2025-09-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:

 "Fix a CPU topology parsing bug on AMD guests, and address a lockdep
  warning in the resctrl filesystem"

Signed-off-by: Ingo Molnar <mingo@kernel.org>

* tag 'x86-urgent-2025-09-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  fs/resctrl: Eliminate false positive lockdep warning when reading SNC counters
  x86/cpu/topology: Always try cpu_parse_topology_ext() on AMD/Hygon
@@ -175,27 +175,30 @@ static void topoext_fixup(struct topo_scan *tscan)
 
 static void parse_topology_amd(struct topo_scan *tscan)
 {
-	bool has_topoext = false;
-
 	/*
-	 * If the extended topology leaf 0x8000_001e is available
-	 * try to get SMT, CORE, TILE, and DIE shifts from extended
+	 * Try to get SMT, CORE, TILE, and DIE shifts from extended
 	 * CPUID leaf 0x8000_0026 on supported processors first. If
 	 * extended CPUID leaf 0x8000_0026 is not supported, try to
-	 * get SMT and CORE shift from leaf 0xb first, then try to
-	 * get the CORE shift from leaf 0x8000_0008.
+	 * get SMT and CORE shift from leaf 0xb. If either leaf is
+	 * available, cpu_parse_topology_ext() will return true.
 	 */
-	if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
-		has_topoext = cpu_parse_topology_ext(tscan);
+	bool has_xtopology = cpu_parse_topology_ext(tscan);
 
 	if (cpu_feature_enabled(X86_FEATURE_AMD_HTR_CORES))
 		tscan->c->topo.cpu_type = cpuid_ebx(0x80000026);
 
-	if (!has_topoext && !parse_8000_0008(tscan))
+	/*
+	 * If XTOPOLOGY leaves (0x26/0xb) are not available, try to
+	 * get the CORE shift from leaf 0x8000_0008 first.
+	 */
+	if (!has_xtopology && !parse_8000_0008(tscan))
 		return;
 
-	/* Prefer leaf 0x8000001e if available */
-	if (parse_8000_001e(tscan, has_topoext))
+	/*
+	 * Prefer leaf 0x8000001e if available to get the SMT shift and
+	 * the initial APIC ID if XTOPOLOGY leaves are not available.
+	 */
+	if (parse_8000_001e(tscan, has_xtopology))
 		return;
 
 	/* Try the NODEID MSR */
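The functional change in this hunk is that cpu_parse_topology_ext() is no longer gated on X86_FEATURE_TOPOEXT, so AMD/Hygon guests that lack TOPOEXT but do expose leaf 0xb (or 0x8000_0026) now get correct shifts; that is the guest topology bug named in the pull summary. As a rough illustration of the probing order the rewritten function follows, here is a user-space sketch. The leaf-validity test is a simplified assumption, not the kernel's exact check, and probe_order.c is hypothetical, not part of the patch.

/* probe_order.c - hedged user-space sketch of the probing order used by
 * the patched parse_topology_amd(): leaf 0x8000_0026, then 0xb, then
 * 0x8000_0008. Build with: gcc probe_order.c -o probe_order (x86 only).
 */
#include <stdio.h>
#include <cpuid.h>

/* Simplified validity test: subleaf 0 EBX[15:0] (logical processor
 * count at this level) must be non-zero. Assumption for illustration;
 * the kernel's check is more thorough. */
static int xtopology_leaf_valid(unsigned int leaf)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid_count(leaf, 0, &eax, &ebx, &ecx, &edx))
		return 0;
	return (ebx & 0xffff) != 0;
}

int main(void)
{
	unsigned int max_ext, ebx, ecx, edx;

	if (!__get_cpuid(0x80000000, &max_ext, &ebx, &ecx, &edx))
		return 1;

	if (max_ext >= 0x80000026 && xtopology_leaf_valid(0x80000026))
		puts("topology from extended CPUID leaf 0x8000_0026");
	else if (xtopology_leaf_valid(0xb))
		puts("topology from CPUID leaf 0xb");
	else if (max_ext >= 0x80000008)
		puts("CORE shift from leaf 0x8000_0008 (fallback)");
	else
		puts("no topology leaf available");
	return 0;
}

The same fall-through shape appears in the hunk: XTOPOLOGY leaves first, 0x8000_0008 only as a fallback, and 0x8000_001e still preferred afterwards for the SMT shift and initial APIC ID.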
@@ -625,11 +625,11 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
 	 */
 	list_for_each_entry(d, &r->mon_domains, hdr.list) {
 		if (d->ci_id == domid) {
-			rr.ci_id = d->ci_id;
 			cpu = cpumask_any(&d->hdr.cpu_mask);
 			ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
 			if (!ci)
 				continue;
+			rr.ci = ci;
 			mon_event_read(&rr, r, NULL, rdtgrp,
 				       &ci->shared_cpu_map, evtid, false);
 			goto checkresult;
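Here the caller resolves the L3 cacheinfo itself and only publishes it into the request after the NULL check, so the reader never has to repeat the lookup; the matching struct rmid_read field swap appears in a later hunk. A minimal stand-alone sketch of that resolve-validate-publish pattern, with illustrative names rather than resctrl's types:

/* Sketch: resolve the lookup once in the caller, validate it, and only
 * then publish the pointer into the request. All names here are
 * illustrative stand-ins, not the kernel's. */
#include <stddef.h>
#include <stdio.h>

struct cache_info { int id; };

struct read_request {
	const struct cache_info *ci;	/* was: unsigned int ci_id */
};

/* Stand-in for get_cpu_cacheinfo_level(); returns NULL for unknown
 * CPUs in this toy model. */
static const struct cache_info *lookup_l3(unsigned int cpu)
{
	static const struct cache_info l3 = { .id = 0 };

	return cpu < 4 ? &l3 : NULL;
}

int main(void)
{
	struct read_request rr = { 0 };
	const struct cache_info *ci = lookup_l3(2);

	if (!ci)
		return 1;
	rr.ci = ci;	/* publish only after validation, as in the hunk */
	printf("reading via L3 id %d\n", rr.ci->id);
	return 0;
}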
@@ -98,7 +98,7 @@ struct mon_data {
  *	      domains in @r sharing L3 @ci.id
  * @evtid:    Which monitor event to read.
  * @first:    Initialize MBM counter when true.
- * @ci_id:    Cacheinfo id for L3. Only set when @d is NULL. Used when summing domains.
+ * @ci:       Cacheinfo for L3. Only set when @d is NULL. Used when summing domains.
  * @err:      Error encountered when reading counter.
  * @val:      Returned value of event counter. If @rgrp is a parent resource group,
  *	      @val includes the sum of event counts from its child resource groups.
@@ -112,7 +112,7 @@ struct rmid_read {
 	struct rdt_mon_domain	*d;
 	enum resctrl_event_id	evtid;
 	bool			first;
-	unsigned int		ci_id;
+	struct cacheinfo	*ci;
 	int			err;
 	u64			val;
 	void			*arch_mon_ctx;
@@ -361,7 +361,6 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
 {
 	int cpu = smp_processor_id();
 	struct rdt_mon_domain *d;
-	struct cacheinfo *ci;
 	struct mbm_state *m;
 	int err, ret;
 	u64 tval = 0;
@@ -389,8 +388,7 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
 	}
 
 	/* Summing domains that share a cache, must be on a CPU for that cache. */
-	ci = get_cpu_cacheinfo_level(cpu, RESCTRL_L3_CACHE);
-	if (!ci || ci->id != rr->ci_id)
+	if (!cpumask_test_cpu(cpu, &rr->ci->shared_cpu_map))
 		return -EINVAL;
 
 	/*
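This is the heart of the lockdep fix: rather than re-resolving the L3 cacheinfo on the interrupted CPU, where the lookup path's locking assertions presumably produced the false positive named in the commit subject, the code now only tests the CPU against the precomputed shared_cpu_map carried in rr->ci. A toy analogue, with a plain word standing in for struct cpumask and cpu_in_mask() a made-up helper rather than a kernel API:

/* Toy analogue of cpumask_test_cpu() on a precomputed sharing mask:
 * no lookup, no locking, just a lock-free membership test. */
#include <stdbool.h>
#include <stdio.h>

static bool cpu_in_mask(unsigned int cpu, unsigned long mask)
{
	return cpu < 8 * sizeof(mask) && ((mask >> cpu) & 1UL);
}

int main(void)
{
	unsigned long l3_shared_cpu_map = 0x0fUL;	/* CPUs 0-3 share the L3 */

	/* The summing read must run on a CPU of that cache, else -EINVAL. */
	printf("cpu 2: %s\n", cpu_in_mask(2, l3_shared_cpu_map) ? "ok" : "-EINVAL");
	printf("cpu 5: %s\n", cpu_in_mask(5, l3_shared_cpu_map) ? "ok" : "-EINVAL");
	return 0;
}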
@@ -402,7 +400,7 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
 	 */
 	ret = -EINVAL;
 	list_for_each_entry(d, &rr->r->mon_domains, hdr.list) {
-		if (d->ci_id != rr->ci_id)
+		if (d->ci_id != rr->ci->id)
 			continue;
 		err = resctrl_arch_rmid_read(rr->r, d, closid, rmid,
 					     rr->evtid, &tval, rr->arch_mon_ctx);
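For background, with Sub-NUMA Clustering a single L3 cache spans several monitoring domains, so a cache-scoped read sums every domain whose L3 id matches; the loop now compares against rr->ci->id instead of a separately stored ci_id. A self-contained sketch of that summing shape, with illustrative types rather than the kernel's:

/* Sketch: sum event counts across all monitoring domains that belong
 * to the same L3 cache instance, skipping the rest. */
#include <stddef.h>
#include <stdio.h>

struct mon_domain {
	int ci_id;			/* L3 cache instance of this domain */
	unsigned long long count;	/* event count for this domain */
};

int main(void)
{
	/* Two SNC domains share L3 id 0; the third is a different L3. */
	struct mon_domain domains[] = {
		{ .ci_id = 0, .count = 100 },
		{ .ci_id = 0, .count = 250 },
		{ .ci_id = 1, .count = 999 },
	};
	int target_l3_id = 0;
	unsigned long long sum = 0;

	for (size_t i = 0; i < sizeof(domains) / sizeof(domains[0]); i++) {
		if (domains[i].ci_id != target_l3_id)
			continue;	/* domain is on a different L3 */
		sum += domains[i].count;
	}
	printf("summed SNC counters for L3 %d: %llu\n", target_l3_id, sum);
	return 0;
}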