Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.13-rc3).

No conflicts or adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

commit 5098462fba

Documentation
admin-guide
arch/arm64
devicetree/bindings/power
networking
arch
arm64
x86
events/intel
include/asm
kernel
mm
block
drivers
acpi/arm64
base
block
bluetooth
clk
dma-buf
gpu/drm
amd
amdgpu
amdkfd
display
amdgpu_dm
dc
core
dc.h
dc_spl_translate.c
dml2
hwss
inc
link
resource
dcn20
dcn30
dcn302
dcn303
dcn31
dcn314
dcn315
dcn316
dcn32
dcn321
dcn35
dcn351
dcn401
modules/freesync
pm
display
sti
v3d
xe
iio/magnetometer
iommu/iommufd
irqchip
md/bcache
@@ -4822,6 +4822,11 @@
can be preempted anytime. Tasks will also yield
contended spinlocks (if the critical section isn't
explicitly preempt disabled beyond the lock itself).
lazy - Scheduler controlled. Similar to full but instead
of preempting the task immediately, the task gets
one HZ tick time to yield itself before the
preemption will be forced. One preemption is when the
task returns to user space.

print-fatal-signals=
[KNL] debug: print fatal signals

@@ -255,8 +255,9 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon      | Hip08 SMMU PMCG | #162001800      | N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon      | Hip{08,09,10,10C| #162001900      | N/A                         |
|                | ,11} SMMU PMCG  |                 |                             |
| Hisilicon      | Hip{08,09,09A,10| #162001900      | N/A                         |
|                | ,10C,11}        |                 |                             |
|                | SMMU PMCG       |                 |                             |
+----------------+-----------------+-----------------+-----------------------------+
| Hisilicon      | Hip09           | #162100801      | HISILICON_ERRATUM_162100801 |
+----------------+-----------------+-----------------+-----------------------------+

@@ -55,6 +55,10 @@ patternProperties:
patternProperties:
"^power-domain@[0-9a-f]+$":
$ref: "#/$defs/power-domain-node"
patternProperties:
"^power-domain@[0-9a-f]+$":
$ref: "#/$defs/power-domain-node"
unevaluatedProperties: false
unevaluatedProperties: false
unevaluatedProperties: false
unevaluatedProperties: false

@@ -2184,6 +2184,12 @@ nexthop_compat_mode - BOOLEAN
understands the new API, this sysctl can be disabled to achieve full
performance benefits of the new API by disabling the nexthop expansion
and extraneous notifications.

Note that as a backward-compatible mode, dumping of modern features
might be incomplete or wrong. For example, resilient groups will not be
shown as such, but rather as just a list of next hops. Also weights that
do not fit into 8 bits will show incorrectly.

Default: true (backward compat mode)

fib_notify_on_flag_change - INTEGER

@@ -3383,6 +3383,8 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
F: Documentation/arch/arm64/
F: arch/arm64/
F: drivers/virt/coco/arm-cca-guest/
F: drivers/virt/coco/pkvm-guest/
F: tools/testing/selftests/arm64/
X: arch/arm64/boot/dts/

@@ -15351,7 +15353,7 @@ M: Daniel Machon <daniel.machon@microchip.com>
M: UNGLinuxDriver@microchip.com
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/microchip/lan969x/*
F: drivers/net/ethernet/microchip/sparx5/lan969x/*

MICROCHIP LCDFB DRIVER
M: Nicolas Ferre <nicolas.ferre@microchip.com>

@@ -16343,6 +16345,7 @@ F: Documentation/networking/
F: Documentation/networking/net_cachelines/
F: Documentation/process/maintainer-netdev.rst
F: Documentation/userspace-api/netlink/
F: include/linux/ethtool.h
F: include/linux/framer/framer-provider.h
F: include/linux/framer/framer.h
F: include/linux/in.h

@@ -16357,6 +16360,7 @@ F: include/linux/rtnetlink.h
F: include/linux/seq_file_net.h
F: include/linux/skbuff*
F: include/net/
F: include/uapi/linux/ethtool.h
F: include/uapi/linux/genetlink.h
F: include/uapi/linux/hsr_netlink.h
F: include/uapi/linux/in.h

Makefile

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 13
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse

# *DOCUMENTATION*

@@ -44,6 +44,8 @@ cpucap_is_possible(const unsigned int cap)
return IS_ENABLED(CONFIG_ARM64_TLB_RANGE);
case ARM64_HAS_S1POE:
return IS_ENABLED(CONFIG_ARM64_POE);
case ARM64_HAS_GCS:
return IS_ENABLED(CONFIG_ARM64_GCS);
case ARM64_UNMAP_KERNEL_AT_EL0:
return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
case ARM64_WORKAROUND_843419:

@@ -847,8 +847,7 @@ static inline bool system_supports_poe(void)

static inline bool system_supports_gcs(void)
{
return IS_ENABLED(CONFIG_ARM64_GCS) &&
alternative_has_cap_unlikely(ARM64_HAS_GCS);
return alternative_has_cap_unlikely(ARM64_HAS_GCS);
}

static inline bool system_supports_haft(void)

@@ -7,6 +7,7 @@
#ifndef BUILD_VDSO
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/types.h>

@@ -44,7 +45,7 @@ static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
if (system_supports_mte()) {
if (flags & (MAP_ANONYMOUS | MAP_HUGETLB))
return VM_MTE_ALLOWED;
if (shmem_file(file))
if (shmem_file(file) || is_file_hugepages(file))
return VM_MTE_ALLOWED;
}

@@ -30,20 +30,17 @@ static bool is_image_text(unsigned long addr)

static void __kprobes *patch_map(void *addr, int fixmap)
{
unsigned long uintaddr = (uintptr_t) addr;
bool image = is_image_text(uintaddr);
struct page *page;
phys_addr_t phys;

if (image)
page = phys_to_page(__pa_symbol(addr));
else if (IS_ENABLED(CONFIG_EXECMEM))
page = vmalloc_to_page(addr);
else
return addr;
if (is_image_text((unsigned long)addr)) {
phys = __pa_symbol(addr);
} else {
struct page *page = vmalloc_to_page(addr);
BUG_ON(!page);
phys = page_to_phys(page) + offset_in_page(addr);
}

BUG_ON(!page);
return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
(uintaddr & ~PAGE_MASK));
return (void *)set_fixmap_offset(fixmap, phys);
}

static void __kprobes patch_unmap(int fixmap)

@@ -720,6 +720,8 @@ static int fpmr_set(struct task_struct *target, const struct user_regset *regset
if (!system_supports_fpmr())
return -EINVAL;

fpmr = target->thread.uw.fpmr;

ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpmr, 0, count);
if (ret)
return ret;

@@ -1427,7 +1429,7 @@ static int tagged_addr_ctrl_get(struct task_struct *target,
{
long ctrl = get_tagged_addr_ctrl(target);

if (IS_ERR_VALUE(ctrl))
if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
return ctrl;

return membuf_write(&to, &ctrl, sizeof(ctrl));

@@ -1441,6 +1443,10 @@ static int tagged_addr_ctrl_set(struct task_struct *target, const struct
int ret;
long ctrl;

ctrl = get_tagged_addr_ctrl(target);
if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl)))
return ctrl;

ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
if (ret)
return ret;

@@ -1472,6 +1478,8 @@ static int poe_set(struct task_struct *target, const struct
if (!system_supports_poe())
return -EINVAL;

ctrl = target->thread.por_el0;

ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1);
if (ret)
return ret;

@@ -1483,6 +1491,22 @@ static int poe_set(struct task_struct *target, const struct
#endif

#ifdef CONFIG_ARM64_GCS
static void task_gcs_to_user(struct user_gcs *user_gcs,
const struct task_struct *target)
{
user_gcs->features_enabled = target->thread.gcs_el0_mode;
user_gcs->features_locked = target->thread.gcs_el0_locked;
user_gcs->gcspr_el0 = target->thread.gcspr_el0;
}

static void task_gcs_from_user(struct task_struct *target,
const struct user_gcs *user_gcs)
{
target->thread.gcs_el0_mode = user_gcs->features_enabled;
target->thread.gcs_el0_locked = user_gcs->features_locked;
target->thread.gcspr_el0 = user_gcs->gcspr_el0;
}

static int gcs_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)

@@ -1495,9 +1519,7 @@ static int gcs_get(struct task_struct *target,
if (target == current)
gcs_preserve_current_state();

user_gcs.features_enabled = target->thread.gcs_el0_mode;
user_gcs.features_locked = target->thread.gcs_el0_locked;
user_gcs.gcspr_el0 = target->thread.gcspr_el0;
task_gcs_to_user(&user_gcs, target);

return membuf_write(&to, &user_gcs, sizeof(user_gcs));
}

@@ -1513,6 +1535,8 @@ static int gcs_set(struct task_struct *target, const struct
if (!system_supports_gcs())
return -EINVAL;

task_gcs_to_user(&user_gcs, target);

ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &user_gcs, 0, -1);
if (ret)
return ret;

@@ -1520,9 +1544,7 @@ static int gcs_set(struct task_struct *target, const struct
if (user_gcs.features_enabled & ~PR_SHADOW_STACK_SUPPORTED_STATUS_MASK)
return -EINVAL;

target->thread.gcs_el0_mode = user_gcs.features_enabled;
target->thread.gcs_el0_locked = user_gcs.features_locked;
target->thread.gcspr_el0 = user_gcs.gcspr_el0;
task_gcs_from_user(target, &user_gcs);

return 0;
}

@@ -32,9 +32,9 @@ static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;

#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION (1UL << asid_bits)
#define ASID_FIRST_VERSION (1UL << 16)

#define NUM_USER_ASIDS ASID_FIRST_VERSION
#define NUM_USER_ASIDS (1UL << asid_bits)
#define ctxid2asid(asid) ((asid) & ~ASID_MASK)
#define asid2ctxid(asid, genid) ((asid) | (genid))

@@ -30,11 +30,13 @@ void copy_highpage(struct page *to, struct page *from)
if (!system_supports_mte())
return;

if (folio_test_hugetlb(src) &&
folio_test_hugetlb_mte_tagged(src)) {
if (!folio_try_hugetlb_mte_tagging(dst))
if (folio_test_hugetlb(src)) {
if (!folio_test_hugetlb_mte_tagged(src) ||
from != folio_page(src, 0))
return;

WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));

/*
* Populate tags for all subpages.
*

@@ -117,15 +117,6 @@ static void __init arch_reserve_crashkernel(void)

static phys_addr_t __init max_zone_phys(phys_addr_t zone_limit)
{
/**
* Information we get from firmware (e.g. DT dma-ranges) describe DMA
* bus constraints. Devices using DMA might have their own limitations.
* Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
* DMA zone on platforms that have RAM there.
*/
if (memblock_start_of_DRAM() < U32_MAX)
zone_limit = min(zone_limit, U32_MAX);

return min(zone_limit, memblock_end_of_DRAM() - 1) + 1;
}

@@ -141,6 +132,14 @@ static void __init zone_sizes_init(void)
acpi_zone_dma_limit = acpi_iort_dma_get_max_cpu_address();
dt_zone_dma_limit = of_dma_get_max_cpu_address(NULL);
zone_dma_limit = min(dt_zone_dma_limit, acpi_zone_dma_limit);
/*
* Information we get from firmware (e.g. DT dma-ranges) describe DMA
* bus constraints. Devices using DMA might have their own limitations.
* Some of them rely on DMA zone in low 32-bit memory. Keep low RAM
* DMA zone on platforms that have RAM there.
*/
if (memblock_start_of_DRAM() < U32_MAX)
zone_dma_limit = min(zone_dma_limit, U32_MAX);
arm64_dma_phys_limit = max_zone_phys(zone_dma_limit);
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif

@@ -7135,6 +7135,7 @@ __init int intel_pmu_init(void)

case INTEL_METEORLAKE:
case INTEL_METEORLAKE_L:
case INTEL_ARROWLAKE_U:
intel_pmu_init_hybrid(hybrid_big_small);

x86_pmu.pebs_latency_data = cmt_latency_data;

@@ -1489,7 +1489,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
* hence we need to drain when changing said
* size.
*/
intel_pmu_drain_large_pebs(cpuc);
intel_pmu_drain_pebs_buffer();
adaptive_pebs_record_size_update();
wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
cpuc->active_pebs_data_cfg = pebs_data_cfg;

@@ -36,10 +36,12 @@
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4

#ifdef CONFIG_X86_64
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit */
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW5 /* Saved Dirty bit (leaf) */
#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW5 /* No PTI shadow (root PGD) */
#else
/* Shared with _PAGE_BIT_UFFD_WP which is not supported on 32 bit */
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit */
#define _PAGE_BIT_SAVED_DIRTY _PAGE_BIT_SOFTW2 /* Saved Dirty bit (leaf) */
#define _PAGE_BIT_NOPTISHADOW _PAGE_BIT_SOFTW2 /* No PTI shadow (root PGD) */
#endif

/* If _PAGE_BIT_PRESENT is clear, we use these: */

@@ -139,6 +141,8 @@

#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_NOPTISHADOW (_AT(pteval_t, 1) << _PAGE_BIT_NOPTISHADOW)

/*
* Set of bits not changed in pte_modify. The pte's
* protection key is treated like _PAGE_RW, for

@@ -1065,7 +1065,7 @@ static void init_amd(struct cpuinfo_x86 *c)
*/
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
cpu_has(c, X86_FEATURE_AUTOIBRS))
WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);

/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);

@ -178,8 +178,6 @@ struct _cpuid4_info_regs {
|
|||
struct amd_northbridge *nb;
|
||||
};
|
||||
|
||||
static unsigned short num_cache_leaves;
|
||||
|
||||
/* AMD doesn't have CPUID4. Emulate it here to report the same
|
||||
information to the user. This makes some assumptions about the machine:
|
||||
L2 not shared, no SMT etc. that is currently true on AMD CPUs.
|
||||
|
@ -717,20 +715,23 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c)
|
|||
|
||||
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
|
||||
{
|
||||
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
|
||||
num_cache_leaves = find_num_cache_leaves(c);
|
||||
ci->num_leaves = find_num_cache_leaves(c);
|
||||
} else if (c->extended_cpuid_level >= 0x80000006) {
|
||||
if (cpuid_edx(0x80000006) & 0xf000)
|
||||
num_cache_leaves = 4;
|
||||
ci->num_leaves = 4;
|
||||
else
|
||||
num_cache_leaves = 3;
|
||||
ci->num_leaves = 3;
|
||||
}
|
||||
}
|
||||
|
||||
void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
|
||||
{
|
||||
num_cache_leaves = find_num_cache_leaves(c);
|
||||
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
|
||||
|
||||
ci->num_leaves = find_num_cache_leaves(c);
|
||||
}
|
||||
|
||||
void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
||||
|
@ -740,21 +741,21 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
|||
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
|
||||
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
|
||||
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
|
||||
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(c->cpu_index);
|
||||
|
||||
if (c->cpuid_level > 3) {
|
||||
static int is_initialized;
|
||||
|
||||
if (is_initialized == 0) {
|
||||
/* Init num_cache_leaves from boot CPU */
|
||||
num_cache_leaves = find_num_cache_leaves(c);
|
||||
is_initialized++;
|
||||
}
|
||||
/*
|
||||
* There should be at least one leaf. A non-zero value means
|
||||
* that the number of leaves has been initialized.
|
||||
*/
|
||||
if (!ci->num_leaves)
|
||||
ci->num_leaves = find_num_cache_leaves(c);
|
||||
|
||||
/*
|
||||
* Whenever possible use cpuid(4), deterministic cache
|
||||
* parameters cpuid leaf to find the cache details
|
||||
*/
|
||||
for (i = 0; i < num_cache_leaves; i++) {
|
||||
for (i = 0; i < ci->num_leaves; i++) {
|
||||
struct _cpuid4_info_regs this_leaf = {};
|
||||
int retval;
|
||||
|
||||
|
@ -790,14 +791,14 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
|
|||
* Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
|
||||
* trace cache
|
||||
*/
|
||||
if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
|
||||
if ((!ci->num_leaves || c->x86 == 15) && c->cpuid_level > 1) {
|
||||
/* supports eax=2 call */
|
||||
int j, n;
|
||||
unsigned int regs[4];
|
||||
unsigned char *dp = (unsigned char *)regs;
|
||||
int only_trace = 0;
|
||||
|
||||
if (num_cache_leaves != 0 && c->x86 == 15)
|
||||
if (ci->num_leaves && c->x86 == 15)
|
||||
only_trace = 1;
|
||||
|
||||
/* Number of times to iterate */
|
||||
|
@ -991,14 +992,12 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
|
|||
|
||||
int init_cache_level(unsigned int cpu)
|
||||
{
|
||||
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
|
||||
struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
|
||||
|
||||
if (!num_cache_leaves)
|
||||
/* There should be at least one leaf. */
|
||||
if (!ci->num_leaves)
|
||||
return -ENOENT;
|
||||
if (!this_cpu_ci)
|
||||
return -EINVAL;
|
||||
this_cpu_ci->num_levels = 3;
|
||||
this_cpu_ci->num_leaves = num_cache_leaves;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -555,7 +555,9 @@ static void init_intel(struct cpuinfo_x86 *c)
|
|||
c->x86_vfm == INTEL_WESTMERE_EX))
|
||||
set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_MWAIT) && c->x86_vfm == INTEL_ATOM_GOLDMONT)
|
||||
if (boot_cpu_has(X86_FEATURE_MWAIT) &&
|
||||
(c->x86_vfm == INTEL_ATOM_GOLDMONT ||
|
||||
c->x86_vfm == INTEL_LUNARLAKE_M))
|
||||
set_cpu_bug(c, X86_BUG_MONITOR);
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
|
|
@ -428,8 +428,8 @@ void __init topology_apply_cmdline_limits_early(void)
|
|||
{
|
||||
unsigned int possible = nr_cpu_ids;
|
||||
|
||||
/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' 'noapic' */
|
||||
if (!setup_max_cpus || ioapic_is_disabled || apic_is_disabled)
|
||||
/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' */
|
||||
if (!setup_max_cpus || apic_is_disabled)
|
||||
possible = 1;
|
||||
|
||||
/* 'possible_cpus=N' */
|
||||
|
@ -443,7 +443,7 @@ void __init topology_apply_cmdline_limits_early(void)
|
|||
|
||||
static __init bool restrict_to_up(void)
|
||||
{
|
||||
if (!smp_found_config || ioapic_is_disabled)
|
||||
if (!smp_found_config)
|
||||
return true;
|
||||
/*
|
||||
* XEN PV is special as it does not advertise the local APIC
|
||||
|
|
|
@ -63,16 +63,6 @@ setfx:
|
|||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the value of PKRU register that was already pushed onto the signal frame.
|
||||
*/
|
||||
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u32 pkru)
|
||||
{
|
||||
if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
|
||||
return 0;
|
||||
return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
|
||||
}
|
||||
|
||||
/*
|
||||
* Signal frame handlers.
|
||||
*/
|
||||
|
@ -168,14 +158,8 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
|
|||
|
||||
static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (use_xsave()) {
|
||||
err = xsave_to_user_sigframe(buf);
|
||||
if (!err)
|
||||
err = update_pkru_in_sigframe(buf, pkru);
|
||||
return err;
|
||||
}
|
||||
if (use_xsave())
|
||||
return xsave_to_user_sigframe(buf, pkru);
|
||||
|
||||
if (use_fxsr())
|
||||
return fxsave_to_user_sigframe((struct fxregs_state __user *) buf);
|
||||
|
|
|
@ -69,6 +69,28 @@ static inline u64 xfeatures_mask_independent(void)
|
|||
return fpu_kernel_cfg.independent_features;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the value of PKRU register that was already pushed onto the signal frame.
|
||||
*/
|
||||
static inline int update_pkru_in_sigframe(struct xregs_state __user *buf, u64 mask, u32 pkru)
|
||||
{
|
||||
u64 xstate_bv;
|
||||
int err;
|
||||
|
||||
if (unlikely(!cpu_feature_enabled(X86_FEATURE_OSPKE)))
|
||||
return 0;
|
||||
|
||||
/* Mark PKRU as in-use so that it is restored correctly. */
|
||||
xstate_bv = (mask & xfeatures_in_use()) | XFEATURE_MASK_PKRU;
|
||||
|
||||
err = __put_user(xstate_bv, &buf->header.xfeatures);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* Update PKRU value in the userspace xsave buffer. */
|
||||
return __put_user(pkru, (unsigned int __user *)get_xsave_addr_user(buf, XFEATURE_PKRU));
|
||||
}
|
||||
|
||||
/* XSAVE/XRSTOR wrapper functions */
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
|
@ -256,7 +278,7 @@ static inline u64 xfeatures_need_sigframe_write(void)
|
|||
* The caller has to zero buf::header before calling this because XSAVE*
|
||||
* does not touch the reserved fields in the header.
|
||||
*/
|
||||
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
|
||||
static inline int xsave_to_user_sigframe(struct xregs_state __user *buf, u32 pkru)
|
||||
{
|
||||
/*
|
||||
* Include the features which are not xsaved/rstored by the kernel
|
||||
|
@ -281,6 +303,9 @@ static inline int xsave_to_user_sigframe(struct xregs_state __user *buf)
|
|||
XSTATE_OP(XSAVE, buf, lmask, hmask, err);
|
||||
clac();
|
||||
|
||||
if (!err)
|
||||
err = update_pkru_in_sigframe(buf, mask, pkru);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <asm/pgtable_types.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <asm/unwind_hints.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
/*
|
||||
* Must be relocatable PIC code callable as a C function, in particular
|
||||
|
@ -242,6 +243,13 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
|
|||
movq CR0(%r8), %r8
|
||||
movq %rax, %cr3
|
||||
movq %r8, %cr0
|
||||
|
||||
#ifdef CONFIG_KEXEC_JUMP
|
||||
/* Saved in save_processor_state. */
|
||||
movq $saved_context, %rax
|
||||
lgdt saved_context_gdt_desc(%rax)
|
||||
#endif
|
||||
|
||||
movq %rbp, %rax
|
||||
|
||||
popf
|
||||
|
|
|
@ -174,7 +174,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
|
|||
if (result)
|
||||
return result;
|
||||
|
||||
set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
|
||||
set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -218,14 +218,14 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
|
|||
if (result)
|
||||
return result;
|
||||
if (pgtable_l5_enabled()) {
|
||||
set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag));
|
||||
set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag | _PAGE_NOPTISHADOW));
|
||||
} else {
|
||||
/*
|
||||
* With p4d folded, pgd is equal to p4d.
|
||||
* The pgd entry has to point to the pud page table in this case.
|
||||
*/
|
||||
pud_t *pud = pud_offset(p4d, 0);
|
||||
set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag));
|
||||
set_pgd(pgd, __pgd(__pa(pud) | info->kernpg_flag | _PAGE_NOPTISHADOW));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -132,7 +132,7 @@ pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
|
|||
* Top-level entries added to init_mm's usermode pgd after boot
|
||||
* will not be automatically propagated to other mms.
|
||||
*/
|
||||
if (!pgdp_maps_userspace(pgdp))
|
||||
if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
|
||||
return pgd;
|
||||
|
||||
/*
|
||||
|
|
108
block/blk-mq.c
108
block/blk-mq.c
|
@ -43,6 +43,7 @@
|
|||
|
||||
static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
|
||||
static DEFINE_PER_CPU(call_single_data_t, blk_cpu_csd);
|
||||
static DEFINE_MUTEX(blk_mq_cpuhp_lock);
|
||||
|
||||
static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
|
||||
static void blk_mq_request_bypass_insert(struct request *rq,
|
||||
|
@ -3739,13 +3740,91 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||
static void __blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
if (!(hctx->flags & BLK_MQ_F_STACKING))
|
||||
lockdep_assert_held(&blk_mq_cpuhp_lock);
|
||||
|
||||
if (!(hctx->flags & BLK_MQ_F_STACKING) &&
|
||||
!hlist_unhashed(&hctx->cpuhp_online)) {
|
||||
cpuhp_state_remove_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
||||
&hctx->cpuhp_online);
|
||||
cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
|
||||
&hctx->cpuhp_dead);
|
||||
INIT_HLIST_NODE(&hctx->cpuhp_online);
|
||||
}
|
||||
|
||||
if (!hlist_unhashed(&hctx->cpuhp_dead)) {
|
||||
cpuhp_state_remove_instance_nocalls(CPUHP_BLK_MQ_DEAD,
|
||||
&hctx->cpuhp_dead);
|
||||
INIT_HLIST_NODE(&hctx->cpuhp_dead);
|
||||
}
|
||||
}
|
||||
|
||||
static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
mutex_lock(&blk_mq_cpuhp_lock);
|
||||
__blk_mq_remove_cpuhp(hctx);
|
||||
mutex_unlock(&blk_mq_cpuhp_lock);
|
||||
}
|
||||
|
||||
static void __blk_mq_add_cpuhp(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
lockdep_assert_held(&blk_mq_cpuhp_lock);
|
||||
|
||||
if (!(hctx->flags & BLK_MQ_F_STACKING) &&
|
||||
hlist_unhashed(&hctx->cpuhp_online))
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
||||
&hctx->cpuhp_online);
|
||||
|
||||
if (hlist_unhashed(&hctx->cpuhp_dead))
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD,
|
||||
&hctx->cpuhp_dead);
|
||||
}
|
||||
|
||||
static void __blk_mq_remove_cpuhp_list(struct list_head *head)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
|
||||
lockdep_assert_held(&blk_mq_cpuhp_lock);
|
||||
|
||||
list_for_each_entry(hctx, head, hctx_list)
|
||||
__blk_mq_remove_cpuhp(hctx);
|
||||
}
|
||||
|
||||
/*
|
||||
* Unregister cpuhp callbacks from exited hw queues
|
||||
*
|
||||
* Safe to call if this `request_queue` is live
|
||||
*/
|
||||
static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q)
|
||||
{
|
||||
LIST_HEAD(hctx_list);
|
||||
|
||||
spin_lock(&q->unused_hctx_lock);
|
||||
list_splice_init(&q->unused_hctx_list, &hctx_list);
|
||||
spin_unlock(&q->unused_hctx_lock);
|
||||
|
||||
mutex_lock(&blk_mq_cpuhp_lock);
|
||||
__blk_mq_remove_cpuhp_list(&hctx_list);
|
||||
mutex_unlock(&blk_mq_cpuhp_lock);
|
||||
|
||||
spin_lock(&q->unused_hctx_lock);
|
||||
list_splice(&hctx_list, &q->unused_hctx_list);
|
||||
spin_unlock(&q->unused_hctx_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Register cpuhp callbacks from all hw queues
|
||||
*
|
||||
* Safe to call if this `request_queue` is live
|
||||
*/
|
||||
static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q)
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
unsigned long i;
|
||||
|
||||
mutex_lock(&blk_mq_cpuhp_lock);
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
__blk_mq_add_cpuhp(hctx);
|
||||
mutex_unlock(&blk_mq_cpuhp_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3796,8 +3875,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
|
|||
if (set->ops->exit_hctx)
|
||||
set->ops->exit_hctx(hctx, hctx_idx);
|
||||
|
||||
blk_mq_remove_cpuhp(hctx);
|
||||
|
||||
xa_erase(&q->hctx_table, hctx_idx);
|
||||
|
||||
spin_lock(&q->unused_hctx_lock);
|
||||
|
@ -3814,6 +3891,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
|
|||
queue_for_each_hw_ctx(q, hctx, i) {
|
||||
if (i == nr_queue)
|
||||
break;
|
||||
blk_mq_remove_cpuhp(hctx);
|
||||
blk_mq_exit_hctx(q, set, hctx, i);
|
||||
}
|
||||
}
|
||||
|
@ -3824,16 +3902,11 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
|||
{
|
||||
hctx->queue_num = hctx_idx;
|
||||
|
||||
if (!(hctx->flags & BLK_MQ_F_STACKING))
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
|
||||
&hctx->cpuhp_online);
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
|
||||
|
||||
hctx->tags = set->tags[hctx_idx];
|
||||
|
||||
if (set->ops->init_hctx &&
|
||||
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
|
||||
goto unregister_cpu_notifier;
|
||||
goto fail;
|
||||
|
||||
if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
|
||||
hctx->numa_node))
|
||||
|
@ -3850,8 +3923,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
|
|||
exit_hctx:
|
||||
if (set->ops->exit_hctx)
|
||||
set->ops->exit_hctx(hctx, hctx_idx);
|
||||
unregister_cpu_notifier:
|
||||
blk_mq_remove_cpuhp(hctx);
|
||||
fail:
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -3877,6 +3949,8 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
|
|||
INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
|
||||
spin_lock_init(&hctx->lock);
|
||||
INIT_LIST_HEAD(&hctx->dispatch);
|
||||
INIT_HLIST_NODE(&hctx->cpuhp_dead);
|
||||
INIT_HLIST_NODE(&hctx->cpuhp_online);
|
||||
hctx->queue = q;
|
||||
hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED;
|
||||
|
||||
|
@ -4415,6 +4489,12 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
|
|||
xa_for_each_start(&q->hctx_table, j, hctx, j)
|
||||
blk_mq_exit_hctx(q, set, hctx, j);
|
||||
mutex_unlock(&q->sysfs_lock);
|
||||
|
||||
/* unregister cpuhp callbacks for exited hctxs */
|
||||
blk_mq_remove_hw_queues_cpuhp(q);
|
||||
|
||||
/* register cpuhp for new initialized hctxs */
|
||||
blk_mq_add_hw_queues_cpuhp(q);
|
||||
}
|
||||
|
||||
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
||||
|
|
|
@ -1716,6 +1716,8 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
|
|||
/* HiSilicon Hip09 Platform */
|
||||
{"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
|
||||
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
|
||||
{"HISI ", "HIP09A ", 0, ACPI_SIG_IORT, greater_than_or_equal,
|
||||
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
|
||||
/* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
|
||||
{"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
|
||||
"Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
|
||||
|
|
|
@ -208,6 +208,10 @@ static int __init numa_register_nodes(void)
|
|||
{
|
||||
int nid;
|
||||
|
||||
/* Check the validity of the memblock/node mapping */
|
||||
if (!memblock_validate_numa_coverage(0))
|
||||
return -EINVAL;
|
||||
|
||||
/* Finally register nodes. */
|
||||
for_each_node_mask(nid, numa_nodes_parsed) {
|
||||
unsigned long start_pfn, end_pfn;
|
||||
|
|
|
@ -58,7 +58,7 @@ bool last_level_cache_is_valid(unsigned int cpu)
|
|||
{
|
||||
struct cacheinfo *llc;
|
||||
|
||||
if (!cache_leaves(cpu))
|
||||
if (!cache_leaves(cpu) || !per_cpu_cacheinfo(cpu))
|
||||
return false;
|
||||
|
||||
llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
|
||||
|
@ -458,11 +458,9 @@ int __weak populate_cache_leaves(unsigned int cpu)
|
|||
return -ENOENT;
|
||||
}
|
||||
|
||||
static inline
|
||||
int allocate_cache_info(int cpu)
|
||||
static inline int allocate_cache_info(int cpu)
|
||||
{
|
||||
per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
|
||||
sizeof(struct cacheinfo), GFP_ATOMIC);
|
||||
per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_ATOMIC);
|
||||
if (!per_cpu_cacheinfo(cpu)) {
|
||||
cache_leaves(cpu) = 0;
|
||||
return -ENOMEM;
|
||||
|
@ -534,7 +532,11 @@ static inline int init_level_allocate_ci(unsigned int cpu)
|
|||
*/
|
||||
ci_cacheinfo(cpu)->early_ci_levels = false;
|
||||
|
||||
if (cache_leaves(cpu) <= early_leaves)
|
||||
/*
|
||||
* Some architectures (e.g., x86) do not use early initialization.
|
||||
* Allocate memory now in such case.
|
||||
*/
|
||||
if (cache_leaves(cpu) <= early_leaves && per_cpu_cacheinfo(cpu))
|
||||
return 0;
|
||||
|
||||
kfree(per_cpu_cacheinfo(cpu));
|
||||
|
|
|
@ -598,6 +598,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(regmap_attach_dev);
|
||||
|
||||
static int dev_get_regmap_match(struct device *dev, void *res, void *data);
|
||||
|
||||
static int regmap_detach_dev(struct device *dev, struct regmap *map)
|
||||
{
|
||||
if (!dev)
|
||||
return 0;
|
||||
|
||||
return devres_release(dev, dev_get_regmap_release,
|
||||
dev_get_regmap_match, (void *)map->name);
|
||||
}
|
||||
|
||||
static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
|
||||
const struct regmap_config *config)
|
||||
{
|
||||
|
@ -1052,13 +1063,13 @@ skip_format_initialization:
|
|||
|
||||
/* Sanity check */
|
||||
if (range_cfg->range_max < range_cfg->range_min) {
|
||||
dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
|
||||
dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
|
||||
range_cfg->range_max, range_cfg->range_min);
|
||||
goto err_range;
|
||||
}
|
||||
|
||||
if (range_cfg->range_max > map->max_register) {
|
||||
dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
|
||||
dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
|
||||
range_cfg->range_max, map->max_register);
|
||||
goto err_range;
|
||||
}
|
||||
|
@ -1445,6 +1456,7 @@ void regmap_exit(struct regmap *map)
|
|||
{
|
||||
struct regmap_async *async;
|
||||
|
||||
regmap_detach_dev(map->dev, map);
|
||||
regcache_exit(map);
|
||||
|
||||
regmap_debugfs_exit(map);
|
||||
|
|
|
@ -28,6 +28,7 @@ module! {
|
|||
type: NullBlkModule,
|
||||
name: "rnull_mod",
|
||||
author: "Andreas Hindborg",
|
||||
description: "Rust implementation of the C null block driver",
|
||||
license: "GPL v2",
|
||||
}
|
||||
|
||||
|
|
|
@ -1586,9 +1586,12 @@ static void virtblk_remove(struct virtio_device *vdev)
|
|||
static int virtblk_freeze(struct virtio_device *vdev)
|
||||
{
|
||||
struct virtio_blk *vblk = vdev->priv;
|
||||
struct request_queue *q = vblk->disk->queue;
|
||||
|
||||
/* Ensure no requests in virtqueues before deleting vqs. */
|
||||
blk_mq_freeze_queue(vblk->disk->queue);
|
||||
blk_mq_freeze_queue(q);
|
||||
blk_mq_quiesce_queue_nowait(q);
|
||||
blk_mq_unfreeze_queue(q);
|
||||
|
||||
/* Ensure we don't receive any more interrupts */
|
||||
virtio_reset_device(vdev);
|
||||
|
@ -1612,8 +1615,8 @@ static int virtblk_restore(struct virtio_device *vdev)
|
|||
return ret;
|
||||
|
||||
virtio_device_ready(vdev);
|
||||
blk_mq_unquiesce_queue(vblk->disk->queue);
|
||||
|
||||
blk_mq_unfreeze_queue(vblk->disk->queue);
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -395,6 +395,7 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
{
|
||||
struct btmtk_data *data = hci_get_priv(hdev);
|
||||
int err;
|
||||
bool complete = false;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
|
||||
kfree_skb(skb);
|
||||
|
@ -416,19 +417,22 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
fallthrough;
|
||||
case HCI_DEVCOREDUMP_ACTIVE:
|
||||
default:
|
||||
/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
|
||||
if (data->cd_info.cnt >= MTK_COREDUMP_NUM &&
|
||||
skb->len > MTK_COREDUMP_END_LEN)
|
||||
if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
|
||||
MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1))
|
||||
complete = true;
|
||||
|
||||
err = hci_devcd_append(hdev, skb);
|
||||
if (err < 0)
|
||||
break;
|
||||
data->cd_info.cnt++;
|
||||
|
||||
/* Mediatek coredump data would be more than MTK_COREDUMP_NUM */
|
||||
if (data->cd_info.cnt > MTK_COREDUMP_NUM &&
|
||||
skb->len > MTK_COREDUMP_END_LEN)
|
||||
if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN],
|
||||
MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) {
|
||||
bt_dev_info(hdev, "Mediatek coredump end");
|
||||
hci_devcd_complete(hdev);
|
||||
}
|
||||
if (complete) {
|
||||
bt_dev_info(hdev, "Mediatek coredump end");
|
||||
hci_devcd_complete(hdev);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -87,6 +87,7 @@ static const u32 slic_base[] = { 100000000, 3125000 };
|
|||
static const u32 npu_base[] = { 333000000, 400000000, 500000000 };
|
||||
/* EN7581 */
|
||||
static const u32 emi7581_base[] = { 540000000, 480000000, 400000000, 300000000 };
|
||||
static const u32 bus7581_base[] = { 600000000, 540000000 };
|
||||
static const u32 npu7581_base[] = { 800000000, 750000000, 720000000, 600000000 };
|
||||
static const u32 crypto_base[] = { 540000000, 480000000 };
|
||||
|
||||
|
@ -222,8 +223,8 @@ static const struct en_clk_desc en7581_base_clks[] = {
|
|||
.base_reg = REG_BUS_CLK_DIV_SEL,
|
||||
.base_bits = 1,
|
||||
.base_shift = 8,
|
||||
.base_values = bus_base,
|
||||
.n_base_values = ARRAY_SIZE(bus_base),
|
||||
.base_values = bus7581_base,
|
||||
.n_base_values = ARRAY_SIZE(bus7581_base),
|
||||
|
||||
.div_bits = 3,
|
||||
.div_shift = 0,
|
||||
|
@ -503,6 +504,8 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
|
|||
u32 rate;
|
||||
int i;
|
||||
|
||||
clk_data->num = EN7523_NUM_CLOCKS;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) {
|
||||
const struct en_clk_desc *desc = &en7523_base_clks[i];
|
||||
u32 reg = desc->div_reg ? desc->div_reg : desc->base_reg;
|
||||
|
@ -524,8 +527,6 @@ static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_dat
|
|||
|
||||
hw = en7523_register_pcie_clk(dev, np_base);
|
||||
clk_data->hws[EN7523_CLK_PCIE] = hw;
|
||||
|
||||
clk_data->num = EN7523_NUM_CLOCKS;
|
||||
}
|
||||
|
||||
static int en7523_clk_hw_init(struct platform_device *pdev,
|
||||
|
|
|
@ -2530,7 +2530,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
|
|||
rate = clk_core_req_round_rate_nolock(core, req_rate);
|
||||
|
||||
/* bail early if nothing to do */
|
||||
if (rate == clk_core_get_rate_recalc(core))
|
||||
if (rate == clk_core_get_rate_nolock(core))
|
||||
return 0;
|
||||
|
||||
/* fail on a direct rate set of a protected provider */
|
||||
|
|
|
@ -106,7 +106,7 @@ config COMMON_CLK_AXG_AUDIO
|
|||
select COMMON_CLK_MESON_SCLK_DIV
|
||||
select COMMON_CLK_MESON_CLKC_UTILS
|
||||
select REGMAP_MMIO
|
||||
depends on RESET_MESON_AUX
|
||||
select RESET_CONTROLLER
|
||||
help
|
||||
Support for the audio clock controller on AmLogic A113D devices,
|
||||
aka axg, Say Y if you want audio subsystem to work.
|
||||
|
|
|
@ -15,8 +15,6 @@
|
|||
#include <linux/reset-controller.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <soc/amlogic/reset-meson-aux.h>
|
||||
|
||||
#include "meson-clkc-utils.h"
|
||||
#include "axg-audio.h"
|
||||
#include "clk-regmap.h"
|
||||
|
@ -1680,6 +1678,84 @@ static struct clk_regmap *const sm1_clk_regmaps[] = {
|
|||
&sm1_earcrx_dmac_clk,
|
||||
};
|
||||
|
||||
struct axg_audio_reset_data {
|
||||
struct reset_controller_dev rstc;
|
||||
struct regmap *map;
|
||||
unsigned int offset;
|
||||
};
|
||||
|
||||
static void axg_audio_reset_reg_and_bit(struct axg_audio_reset_data *rst,
|
||||
unsigned long id,
|
||||
unsigned int *reg,
|
||||
unsigned int *bit)
|
||||
{
|
||||
unsigned int stride = regmap_get_reg_stride(rst->map);
|
||||
|
||||
*reg = (id / (stride * BITS_PER_BYTE)) * stride;
|
||||
*reg += rst->offset;
|
||||
*bit = id % (stride * BITS_PER_BYTE);
|
||||
}
|
||||
|
||||
static int axg_audio_reset_update(struct reset_controller_dev *rcdev,
|
||||
unsigned long id, bool assert)
|
||||
{
|
||||
struct axg_audio_reset_data *rst =
|
||||
container_of(rcdev, struct axg_audio_reset_data, rstc);
|
||||
unsigned int offset, bit;
|
||||
|
||||
axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);
|
||||
|
||||
regmap_update_bits(rst->map, offset, BIT(bit),
|
||||
assert ? BIT(bit) : 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int axg_audio_reset_status(struct reset_controller_dev *rcdev,
|
||||
unsigned long id)
|
||||
{
|
||||
struct axg_audio_reset_data *rst =
|
||||
container_of(rcdev, struct axg_audio_reset_data, rstc);
|
||||
unsigned int val, offset, bit;
|
||||
|
||||
axg_audio_reset_reg_and_bit(rst, id, &offset, &bit);
|
||||
|
||||
regmap_read(rst->map, offset, &val);
|
||||
|
||||
return !!(val & BIT(bit));
|
||||
}
|
||||
|
||||
static int axg_audio_reset_assert(struct reset_controller_dev *rcdev,
|
||||
unsigned long id)
|
||||
{
|
||||
return axg_audio_reset_update(rcdev, id, true);
|
||||
}
|
||||
|
||||
static int axg_audio_reset_deassert(struct reset_controller_dev *rcdev,
|
||||
unsigned long id)
|
||||
{
|
||||
return axg_audio_reset_update(rcdev, id, false);
|
||||
}
|
||||
|
||||
static int axg_audio_reset_toggle(struct reset_controller_dev *rcdev,
|
||||
unsigned long id)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = axg_audio_reset_assert(rcdev, id);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return axg_audio_reset_deassert(rcdev, id);
|
||||
}
|
||||
|
||||
static const struct reset_control_ops axg_audio_rstc_ops = {
|
||||
.assert = axg_audio_reset_assert,
|
||||
.deassert = axg_audio_reset_deassert,
|
||||
.reset = axg_audio_reset_toggle,
|
||||
.status = axg_audio_reset_status,
|
||||
};
|
||||
|
||||
static struct regmap_config axg_audio_regmap_cfg = {
|
||||
.reg_bits = 32,
|
||||
.val_bits = 32,
|
||||
|
@ -1690,14 +1766,16 @@ struct audioclk_data {
|
|||
struct clk_regmap *const *regmap_clks;
|
||||
unsigned int regmap_clk_num;
|
||||
struct meson_clk_hw_data hw_clks;
|
||||
unsigned int reset_offset;
|
||||
unsigned int reset_num;
|
||||
unsigned int max_register;
|
||||
const char *rst_drvname;
|
||||
};
|
||||
|
||||
static int axg_audio_clkc_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
const struct audioclk_data *data;
|
||||
struct axg_audio_reset_data *rst;
|
||||
struct regmap *map;
|
||||
void __iomem *regs;
|
||||
struct clk_hw *hw;
|
||||
|
@ -1756,11 +1834,22 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Register auxiliary reset driver when applicable */
|
||||
if (data->rst_drvname)
|
||||
ret = devm_meson_rst_aux_register(dev, map, data->rst_drvname);
|
||||
/* Stop here if there is no reset */
|
||||
if (!data->reset_num)
|
||||
return 0;
|
||||
|
||||
return ret;
|
||||
rst = devm_kzalloc(dev, sizeof(*rst), GFP_KERNEL);
|
||||
if (!rst)
|
||||
return -ENOMEM;
|
||||
|
||||
rst->map = map;
|
||||
rst->offset = data->reset_offset;
|
||||
rst->rstc.nr_resets = data->reset_num;
|
||||
rst->rstc.ops = &axg_audio_rstc_ops;
|
||||
rst->rstc.of_node = dev->of_node;
|
||||
rst->rstc.owner = THIS_MODULE;
|
||||
|
||||
return devm_reset_controller_register(dev, &rst->rstc);
|
||||
}
|
||||
|
||||
static const struct audioclk_data axg_audioclk_data = {
|
||||
|
@ -1780,8 +1869,9 @@ static const struct audioclk_data g12a_audioclk_data = {
|
|||
.hws = g12a_audio_hw_clks,
|
||||
.num = ARRAY_SIZE(g12a_audio_hw_clks),
|
||||
},
|
||||
.reset_offset = AUDIO_SW_RESET,
|
||||
.reset_num = 26,
|
||||
.max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
|
||||
.rst_drvname = "rst-g12a",
|
||||
};
|
||||
|
||||
static const struct audioclk_data sm1_audioclk_data = {
|
||||
|
@ -1791,8 +1881,9 @@ static const struct audioclk_data sm1_audioclk_data = {
|
|||
.hws = sm1_audio_hw_clks,
|
||||
.num = ARRAY_SIZE(sm1_audio_hw_clks),
|
||||
},
|
||||
.reset_offset = AUDIO_SM1_SW_RESET0,
|
||||
.reset_num = 39,
|
||||
.max_register = AUDIO_EARCRX_DMAC_CLK_CTRL,
|
||||
.rst_drvname = "rst-sm1",
|
||||
};
|
||||
|
||||
static const struct of_device_id clkc_match_table[] = {
|
||||
|
|
|
@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
|
|||
static bool dma_fence_array_signaled(struct dma_fence *fence)
|
||||
{
|
||||
struct dma_fence_array *array = to_dma_fence_array(fence);
|
||||
int num_pending;
|
||||
unsigned int i;
|
||||
|
||||
if (atomic_read(&array->num_pending) > 0)
|
||||
/*
|
||||
* We need to read num_pending before checking the enable_signal bit
|
||||
* to avoid racing with the enable_signaling() implementation, which
|
||||
* might decrement the counter, and cause a partial check.
|
||||
* atomic_read_acquire() pairs with atomic_dec_and_test() in
|
||||
* dma_fence_array_enable_signaling()
|
||||
*
|
||||
* The !--num_pending check is here to account for the any_signaled case
|
||||
* if we race with enable_signaling(), that means the !num_pending check
|
||||
* in the is_signalling_enabled branch might be outdated (num_pending
|
||||
* might have been decremented), but that's fine. The user will get the
|
||||
* right value when testing again later.
|
||||
*/
|
||||
num_pending = atomic_read_acquire(&array->num_pending);
|
||||
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
|
||||
if (num_pending <= 0)
|
||||
goto signal;
|
||||
return false;
|
||||
}
|
||||
|
||||
for (i = 0; i < array->num_fences; ++i) {
|
||||
if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
|
||||
goto signal;
|
||||
}
|
||||
return false;
|
||||
|
||||
signal:
|
||||
dma_fence_array_clear_pending_error(array);
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#include <linux/dma-fence-chain.h>
|
||||
#include <linux/dma-fence-unwrap.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/sort.h>
|
||||
|
||||
/* Internal helper to start new array iteration, don't use directly */
|
||||
static struct dma_fence *
|
||||
|
@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
|
||||
|
||||
|
||||
static int fence_cmp(const void *_a, const void *_b)
|
||||
{
|
||||
struct dma_fence *a = *(struct dma_fence **)_a;
|
||||
struct dma_fence *b = *(struct dma_fence **)_b;
|
||||
|
||||
if (a->context < b->context)
|
||||
return -1;
|
||||
else if (a->context > b->context)
|
||||
return 1;
|
||||
|
||||
if (dma_fence_is_later(b, a))
|
||||
return 1;
|
||||
else if (dma_fence_is_later(a, b))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Implementation for the dma_fence_merge() marco, don't use directly */
|
||||
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
|
||||
struct dma_fence **fences,
|
||||
|
@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
|
|||
struct dma_fence_array *result;
|
||||
struct dma_fence *tmp, **array;
|
||||
ktime_t timestamp;
|
||||
unsigned int i;
|
||||
size_t count;
|
||||
int i, j, count;
|
||||
|
||||
count = 0;
|
||||
timestamp = ns_to_ktime(0);
|
||||
|
@ -96,78 +115,55 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
|
|||
if (!array)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* This trashes the input fence array and uses it as position for the
|
||||
* following merge loop. This works because the dma_fence_merge()
|
||||
* wrapper macro is creating this temporary array on the stack together
|
||||
* with the iterators.
|
||||
*/
|
||||
for (i = 0; i < num_fences; ++i)
|
||||
fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
|
||||
|
||||
count = 0;
|
||||
do {
|
||||
unsigned int sel;
|
||||
|
||||
restart:
|
||||
tmp = NULL;
|
||||
for (i = 0; i < num_fences; ++i) {
|
||||
struct dma_fence *next;
|
||||
|
||||
while (fences[i] && dma_fence_is_signaled(fences[i]))
|
||||
fences[i] = dma_fence_unwrap_next(&iter[i]);
|
||||
|
||||
next = fences[i];
|
||||
if (!next)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* We can't guarantee that inpute fences are ordered by
|
||||
* context, but it is still quite likely when this
|
||||
* function is used multiple times. So attempt to order
|
||||
* the fences by context as we pass over them and merge
|
||||
* fences with the same context.
|
||||
*/
|
||||
if (!tmp || tmp->context > next->context) {
|
||||
tmp = next;
|
||||
sel = i;
|
||||
|
||||
} else if (tmp->context < next->context) {
|
||||
continue;
|
||||
|
||||
} else if (dma_fence_is_later(tmp, next)) {
|
||||
fences[i] = dma_fence_unwrap_next(&iter[i]);
|
||||
goto restart;
|
||||
for (i = 0; i < num_fences; ++i) {
|
||||
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
|
||||
if (!dma_fence_is_signaled(tmp)) {
|
||||
array[count++] = dma_fence_get(tmp);
|
||||
} else {
|
||||
fences[sel] = dma_fence_unwrap_next(&iter[sel]);
|
||||
goto restart;
|
||||
ktime_t t = dma_fence_timestamp(tmp);
|
||||
|
||||
if (ktime_after(t, timestamp))
|
||||
timestamp = t;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (tmp) {
|
||||
array[count++] = dma_fence_get(tmp);
|
||||
fences[sel] = dma_fence_unwrap_next(&iter[sel]);
|
||||
if (count == 0 || count == 1)
|
||||
goto return_fastpath;
|
||||
|
||||
sort(array, count, sizeof(*array), fence_cmp, NULL);
|
||||
|
||||
/*
|
||||
* Only keep the most recent fence for each context.
|
||||
*/
|
||||
j = 0;
|
||||
for (i = 1; i < count; i++) {
|
||||
if (array[i]->context == array[j]->context)
|
||||
dma_fence_put(array[i]);
|
||||
else
|
||||
array[++j] = array[i];
|
||||
}
|
||||
count = ++j;
|
||||
|
||||
if (count > 1) {
|
||||
result = dma_fence_array_create(count, array,
|
||||
dma_fence_context_alloc(1),
|
||||
1, false);
|
||||
if (!result) {
|
||||
for (i = 0; i < count; i++)
|
||||
dma_fence_put(array[i]);
|
||||
tmp = NULL;
|
||||
goto return_tmp;
|
||||
}
|
||||
} while (tmp);
|
||||
|
||||
if (count == 0) {
|
||||
tmp = dma_fence_allocate_private_stub(ktime_get());
|
||||
goto return_tmp;
|
||||
return &result->base;
|
||||
}
|
||||
|
||||
if (count == 1) {
|
||||
return_fastpath:
|
||||
if (count == 0)
|
||||
tmp = dma_fence_allocate_private_stub(timestamp);
|
||||
else
|
||||
tmp = array[0];
|
||||
goto return_tmp;
|
||||
}
|
||||
|
||||
result = dma_fence_array_create(count, array,
|
||||
dma_fence_context_alloc(1),
|
||||
1, false);
|
||||
if (!result) {
|
||||
tmp = NULL;
|
||||
goto return_tmp;
|
||||
}
|
||||
return &result->base;
|
||||
|
||||
return_tmp:
|
||||
kfree(array);
|
||||
|
|
|
@ -145,7 +145,7 @@ const char *amdgpu_asic_name[] = {
|
|||
"LAST",
|
||||
};
|
||||
|
||||
#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM - 1, 0)
|
||||
#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM, 0)
|
||||
/*
|
||||
* Default init level where all blocks are expected to be initialized. This is
|
||||
* the level of initialization expected by default and also after a full reset
|
||||
|
@ -3670,9 +3670,11 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
|
|||
continue;
|
||||
|
||||
r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
|
||||
DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
|
||||
if (r)
|
||||
if (r) {
|
||||
dev_err(adev->dev, "RE-INIT-early: %s failed\n",
|
||||
block->version->funcs->name);
|
||||
return r;
|
||||
}
|
||||
block->status.hw = true;
|
||||
}
|
||||
}
|
||||
|
@ -3682,7 +3684,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
|
|||
|
||||
static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
|
||||
{
|
||||
int i, r;
|
||||
struct amdgpu_ip_block *block;
|
||||
int i, r = 0;
|
||||
|
||||
static enum amd_ip_block_type ip_order[] = {
|
||||
AMD_IP_BLOCK_TYPE_SMC,
|
||||
|
@ -3697,34 +3700,28 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
|
|||
};
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
|
||||
int j;
|
||||
struct amdgpu_ip_block *block;
|
||||
block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
|
||||
|
||||
for (j = 0; j < adev->num_ip_blocks; j++) {
|
||||
block = &adev->ip_blocks[j];
|
||||
|
||||
if (block->version->type != ip_order[i] ||
|
||||
!block->status.valid ||
|
||||
block->status.hw)
|
||||
continue;
|
||||
if (!block)
|
||||
continue;
|
||||
|
||||
if (block->status.valid && !block->status.hw) {
|
||||
if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
|
||||
r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
|
||||
if (r)
|
||||
return r;
|
||||
r = amdgpu_ip_block_resume(block);
|
||||
} else {
|
||||
r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
|
||||
if (r) {
|
||||
DRM_ERROR("hw_init of IP block <%s> failed %d\n",
|
||||
adev->ip_blocks[i].version->funcs->name, r);
|
||||
return r;
|
||||
}
|
||||
block->status.hw = true;
r = block->version->funcs->hw_init(block);
}
if (r) {
dev_err(adev->dev, "RE-INIT-late: %s failed\n",
block->version->funcs->name);
break;
}
block->status.hw = true;
}
}
return 0;
return r;
}
/**
@@ -3765,7 +3762,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
* First resume function for hardware IPs. The list of all the hardware
* Second resume function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the resume callbacks are run for
* all blocks except COMMON, GMC, and IH. resume puts the hardware into a
* functional state after a suspend and updates the software state as
@@ -3783,6 +3780,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
@@ -3793,6 +3791,36 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
return 0;
}
/**
* amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
*
* @adev: amdgpu_device pointer
*
* Third resume function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the resume callbacks are run for
* all DCE. resume puts the hardware into a functional state after a suspend
* and updates the software state as necessary. This function is also used
* for restoring the GPU after a GPU reset.
*
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
if (r)
return r;
}
}
return 0;
}
/**
* amdgpu_device_ip_resume - run resume for hardware IPs
*
@@ -3822,6 +3850,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
if (r)
return r;
amdgpu_fence_driver_hw_init(adev);
r = amdgpu_device_ip_resume_phase3(adev);
return r;
}
@@ -4902,7 +4937,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
goto exit;
}
amdgpu_fence_driver_hw_init(adev);
if (!adev->in_s0ix) {
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
@@ -5487,6 +5521,10 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
r = amdgpu_device_ip_resume_phase3(tmp_adev);
if (r)
goto out;
if (vram_lost)
amdgpu_device_fill_reset_magic(tmp_adev);
@@ -40,10 +40,12 @@
static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
}
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
@@ -54,11 +56,13 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5))
return;
if (!ring || !ring->funcs->emit_wreg)
if (!ring || !ring->funcs->emit_wreg) {
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
else
RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
} else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
}
static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,

@@ -31,10 +31,12 @@
static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
}
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
@@ -42,6 +44,7 @@ static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
} else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);

@@ -31,13 +31,15 @@
static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
else
RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
}
}
static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,

@@ -34,10 +34,12 @@
static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
}
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,

@@ -31,10 +31,12 @@
static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg)
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
else
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
}
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,
@@ -604,7 +604,7 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
bool set_clocks = !cancel_delayed_work_sync(&adev->jpeg.idle_work);
bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
int cnt = 0;
mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);

@@ -1510,6 +1510,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_tcp_size_per_cu) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
pcache_info[i].cache_level = 1;
/* Cacheline size not available in IP discovery for gc943,gc944 */
pcache_info[i].cache_line_size = 128;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1521,6 +1523,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
pcache_info[i].cache_size =
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1531,6 +1534,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1541,6 +1545,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_tcc_size) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
pcache_info[i].cache_level = 2;
pcache_info[i].cache_line_size = 128;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@@ -1551,6 +1556,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gmc.mall_size) {
pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
pcache_info[i].cache_level = 3;
pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);

@@ -235,6 +235,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
*/
kfd->device_info.needs_pci_atomics = true;
kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
} else if (gc_version < IP_VERSION(13, 0, 0)) {
kfd->device_info.needs_pci_atomics = true;
kfd->device_info.no_atomic_fw_version = 2090;
} else {
kfd->device_info.needs_pci_atomics = true;
}
@@ -3481,6 +3481,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_support = false;
else if (amdgpu_backlight == 1)
caps->aux_support = true;
if (caps->aux_support)
aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX;
luminance_range = &conn_base->display_info.luminance_range;

@@ -907,14 +907,14 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
struct drm_connector *connector = data;
struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
unsigned char start = block * EDID_LENGTH;
void *edid;
struct edid *edid;
int r;
if (!acpidev)
return -ENODEV;
/* fetch the entire edid from BIOS */
r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid);
r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, (void *)&edid);
if (r < 0) {
drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r);
return r;
@@ -924,7 +924,14 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
goto cleanup;
}
memcpy(buf, edid + start, len);
/* sanity check */
if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) ||
(edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) {
r = -EINVAL;
goto cleanup;
}
memcpy(buf, (void *)edid + start, len);
r = 0;
cleanup:

@@ -6109,3 +6109,21 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
profile.power_level = dc->res_pool->funcs->get_power_profile(context);
return profile;
}
/*
**********************************************************************************
* dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
*
* Called when DM wants to log detile buffer size from dc_state
*
**********************************************************************************
*/
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
{
struct dc *dc = context->clk_mgr->ctx->dc;
if (dc->res_pool->funcs->get_det_buffer_size)
return dc->res_pool->funcs->get_det_buffer_size(context);
else
return 0;
}
@@ -2094,7 +2094,8 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
count = resource_get_odm_slice_count(otg_master);
h_active = timing->h_addressable +
timing->h_border_left +
timing->h_border_right;
timing->h_border_right +
otg_master->hblank_borrow;
width = h_active / count;
if (otg_master->stream_res.tg)
@@ -4026,6 +4027,41 @@ fail:
return res;
}
/**
* decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context.
* @pipe_ctx: Pointer to the pipe context structure.
*
* This function calculates the horizontal blanking borrow value for a given pipe context based on the
* display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less
* than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the
* total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow
* value to 0.
*/
static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
{
uint32_t hactive;
uint32_t ceil_slice_width;
struct dc_stream_state *stream = NULL;
if (!pipe_ctx)
return;
stream = pipe_ctx->stream;
if (stream->timing.flags.DSC) {
hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
/* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/
if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) {
ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1;
pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive;
if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32)
pipe_ctx->hblank_borrow = 0;
}
}
}
/**
* dc_validate_global_state() - Determine if hardware can support a given state
*
@@ -4064,6 +4100,10 @@ enum dc_status dc_validate_global_state(
if (pipe_ctx->stream != stream)
continue;
/* Decide whether hblank borrow is needed and save it in pipe_ctx */
if (dc->debug.enable_hblank_borrow)
decide_hblank_borrow(pipe_ctx);
if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {

@@ -290,6 +290,7 @@ struct dc_caps {
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
uint32_t max_v_total;
bool vtotal_limited_by_fp2;
uint32_t max_disp_clock_khz_at_vmin;
uint8_t subvp_drr_vblank_start_margin_us;
bool cursor_not_scaled;
@@ -1068,6 +1069,7 @@ struct dc_debug_options {
unsigned int scale_to_sharpness_policy;
bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
};
@@ -2550,6 +2552,8 @@ struct dc_power_profile {
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context);
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
/* DSC Interfaces */
#include "dc_dsc.h"

@@ -120,7 +120,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
// Make spl input basic out info output_size width point to stream h active
spl_in->basic_out.output_size.width =
stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow;
// Make spl input basic out info output_size height point to v active
spl_in->basic_out.output_size.height =
stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@@ -1222,6 +1222,7 @@ static dml_bool_t CalculatePrefetchSchedule(struct display_mode_lib_scratch_st *
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + dml_max(p->TWait + p->TCalc, *p->Tdmdl)) / s->LineTime - (*p->DSTYAfterScaler + (dml_float_t) *p->DSTXAfterScaler / (dml_float_t)p->myPipe->HTotal);
s->dst_y_prefetch_equ = dml_min(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);

@@ -339,11 +339,22 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
// }
}
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
{
unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
max_hw_v_total -= stream->timing.v_front_porch + 1;
}
return max_hw_v_total;
}
static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
struct dc_stream_state *stream,
struct dml2_context *dml_ctx)
{
unsigned int hblank_start, vblank_start;
unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@@ -371,11 +382,23 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
- stream->timing.v_border_top - stream->timing.v_border_bottom;
timing->drr_config.enabled = stream->ignore_msa_timing_param;
timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
timing->drr_config.drr_active_variable = stream->vrr_active_variable;
timing->drr_config.drr_active_fixed = stream->vrr_active_fixed;
timing->drr_config.disallowed = !stream->allow_freesync;
/* limit min refresh rate to DC cap */
min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
if (stream->ctx->dc->caps.max_v_total != 0) {
min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
(stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
}
if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
} else {
timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
}
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
timing->drr_config.max_instant_vtotal_delta = dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase(stream, false);
@@ -422,6 +445,21 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
timing->vblank_nom = timing->v_total - timing->v_active;
}
/**
* adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration
* based on the pipe context.
* @timing: Pointer to the dml2_timing_cfg structure to be adjusted.
* @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value.
*
* This function modifies the horizontal active and blank end timings by adding and subtracting
* the horizontal blanking borrow value from the pipe context, respectively.
*/
static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe)
{
timing->h_active += pipe->hblank_borrow;
timing->h_blank_end -= pipe->hblank_borrow;
}
static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output,
struct dc_stream_state *stream, const struct pipe_ctx *pipe)
{
@@ -709,6 +747,7 @@ static const struct scaler_data *get_scaler_data_for_plane(
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
temp_pipe->stream_res = pipe->stream_res;
temp_pipe->hblank_borrow = pipe->hblank_borrow;
dml_ctx->config.callbacks.build_scaling_params(temp_pipe);
break;
}
@@ -973,6 +1012,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]);
@@ -1111,12 +1151,12 @@ void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right;
hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right + pipe_ctx->hblank_borrow;
vactive = timing->v_addressable + timing->v_border_bottom + timing->v_border_top;
hblank_start = pipe_ctx->stream->timing.h_total - pipe_ctx->stream->timing.h_front_porch;
vblank_start = pipe_ctx->stream->timing.v_total - pipe_ctx->stream->timing.v_front_porch;
hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right;
hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right - pipe_ctx->hblank_borrow;
vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
@@ -1049,7 +1049,8 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;

@@ -820,6 +820,7 @@ enum dc_status dcn401_enable_stream_timing(
int opp_cnt = 1;
int opp_inst[MAX_PIPES] = {0};
struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
struct dc_crtc_timing patched_crtc_timing = stream->timing;
bool manual_mode;
unsigned int tmds_div = PIXEL_RATE_DIV_NA;
unsigned int unused_div = PIXEL_RATE_DIV_NA;
@@ -874,9 +875,13 @@ enum dc_status dcn401_enable_stream_timing(
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
/* if we are borrowing from hblank, h_addressable needs to be adjusted */
if (dc->debug.enable_hblank_borrow)
patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
&patched_crtc_timing,
pipe_ctx->pipe_dlg_param.vready_offset,
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,

@@ -219,6 +219,7 @@ struct resource_funcs {
* Get indicator of power from a context that went through full validation
*/
int (*get_power_profile)(const struct dc_state *context);
unsigned int (*get_det_buffer_size)(const struct dc_state *context);
};
struct audio_support{
@@ -477,6 +478,8 @@ struct pipe_ctx {
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
uint8_t subvp_index;
struct pixel_rate_divider pixel_rate_divider;
/* pixels borrowed from hblank to hactive */
uint8_t hblank_borrow;
};
/* Data used for dynamic link encoder assignment.

@@ -808,7 +808,8 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
@@ -1510,6 +1510,7 @@ bool dcn20_split_stream_for_odm(
if (prev_odm_pipe->plane_state) {
struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
struct output_pixel_processor *opp = next_odm_pipe->stream_res.opp;
int new_width;
/* HACTIVE halved for odm combine */
@@ -1543,7 +1544,28 @@ bool dcn20_split_stream_for_odm(
sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
sd->ratios.horz_c, sd->h_active - sd->recout.x));
sd->recout.x = 0;
/*
* When odm is used in YcbCr422 or 420 colour space, a split screen
* will be seen with the previous calculations since the extra left
* edge pixel is accounted for in fmt but not in viewport.
*
* Below are calculations which fix the split by fixing the calculations
* if there is an extra left edge pixel.
*/
if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count
&& opp->funcs->opp_get_left_edge_extra_pixel_count(
opp, next_odm_pipe->stream->timing.pixel_encoding,
resource_is_pipe_type(next_odm_pipe, OTG_MASTER)) == 1) {
sd->h_active += 1;
sd->recout.width += 1;
sd->viewport.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
sd->viewport_c.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
sd->viewport_c.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
sd->viewport.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
}
}
if (!next_odm_pipe->top_pipe)
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
else
@@ -2132,6 +2154,7 @@ bool dcn20_fast_validate_bw(
ASSERT(0);
}
}
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =

@@ -2353,6 +2353,7 @@ static bool dcn30_resource_construct(
dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* read VBIOS LTTPR caps */
{

@@ -1233,6 +1233,7 @@ static bool dcn302_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@@ -1178,6 +1178,7 @@ static bool dcn303_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@@ -1720,6 +1720,12 @@ int dcn31_populate_dml_pipes_from_context(
return pipe_cnt;
}
unsigned int dcn31_get_det_buffer_size(
const struct dc_state *context)
{
return context->bw_ctx.dml.ip.det_buffer_size_kbytes;
}
void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1842,6 +1848,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.update_bw_bounding_box = dcn31_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static struct clock_source *dcn30_clock_source_create(

@@ -63,6 +63,9 @@ struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
unsigned int dcn31_get_det_buffer_size(
const struct dc_state *context);
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e
@@ -1777,6 +1777,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static struct clock_source *dcn30_clock_source_create(

@@ -1845,6 +1845,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
.get_power_profile = dcn315_get_power_profile,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn315_resource_construct(

@@ -1719,6 +1719,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn316_resource_construct(

@@ -2189,6 +2189,7 @@ static bool dcn32_resource_construct(
dc->caps.dmcub_support = true;
dc->caps.seamless_odm = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -2803,6 +2804,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head(
free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx];
free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx];
free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst;
free_pipe->hblank_borrow = otg_master->hblank_borrow;
if (free_pipe->stream->timing.flags.DSC == 1) {
dcn20_acquire_dsc(free_pipe->stream->ctx->dc,
&new_ctx->res_ctx,

@@ -1742,6 +1742,7 @@ static bool dcn321_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@@ -1778,6 +1778,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn35_resource_construct(
@@ -1849,6 +1850,7 @@ static bool dcn35_resource_construct(
dc->caps.zstate_support = true;
dc->caps.ips_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@@ -1757,6 +1757,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn351_resource_construct(
@@ -1828,6 +1829,7 @@ static bool dcn351_resource_construct(
dc->caps.zstate_support = true;
dc->caps.ips_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@@ -1864,6 +1864,7 @@ static bool dcn401_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev))
dc->caps.dcc_plane_width_limit = 7680;
@@ -122,6 +122,17 @@ static unsigned int calc_duration_in_us_from_v_total(
return duration_in_us;
}
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
{
unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
max_hw_v_total -= stream->timing.v_front_porch + 1;
}
return max_hw_v_total;
}
unsigned int mod_freesync_calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz)
@@ -1016,7 +1027,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) {
min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
(stream->timing.h_total * (long long)stream->ctx->dc->caps.max_v_total));
(stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
}
/* Limit minimum refresh rate to what can be supported by hardware */
min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ?
@@ -1361,7 +1361,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
* create a custom set of heuristics, write a string of numbers to the file
* starting with the number of the custom profile along with a setting
* for each heuristic parameter. Due to differences across asic families
* the heuristic parameters vary from family to family.
* the heuristic parameters vary from family to family. Additionally,
* you can apply the custom heuristics to different clock domains. Each
* clock domain is considered a distinct operation so if you modify the
* gfxclk heuristics and then the memclk heuristics, all of the
* custom heuristics will be retained until you switch to another profile.
*
*/
@@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -1259,42 +1263,19 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
atomic64_set(&smu->throttle_int_counter, 0);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->user_dpm_profile.user_workload_mask = 0;
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4;
smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
if (smu->is_apu ||
!smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) {
smu->driver_workload_mask =
1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
} else {
smu->driver_workload_mask =
1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
}
!smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
else
smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
smu_power_profile_mode_get(smu, smu->power_profile_mode);
smu->workload_mask = smu->driver_workload_mask |
smu->user_dpm_profile.user_workload_mask;
smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
smu->display_config = &adev->pm.pm_display_cfg;
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@@ -1347,6 +1328,11 @@ static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
return ret;
}
if (smu->custom_profile_params) {
kfree(smu->custom_profile_params);
smu->custom_profile_params = NULL;
}
smu_fini_microcode(smu);
return 0;
@@ -2131,6 +2117,9 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block)
if (!ret)
adev->gfx.gfx_off_entrycount = count;
/* clear this on suspend so it will get reprogrammed on resume */
smu->workload_mask = 0;
return 0;
}
@@ -2243,25 +2232,49 @@ static int smu_enable_umd_pstate(void *handle,
}
static int smu_bump_power_profile_mode(struct smu_context *smu,
long *param,
uint32_t param_size)
long *custom_params,
u32 custom_params_max_idx)
{
int ret = 0;
u32 workload_mask = 0;
int i, ret = 0;
for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
if (smu->workload_refcount[i])
workload_mask |= 1 << i;
}
if (smu->workload_mask == workload_mask)
return 0;
if (smu->ppt_funcs->set_power_profile_mode)
ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
custom_params,
custom_params_max_idx);
if (!ret)
smu->workload_mask = workload_mask;
return ret;
}
static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode)
{
smu->workload_refcount[profile_mode]++;
}
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode)
{
if (smu->workload_refcount[profile_mode])
smu->workload_refcount[profile_mode]--;
}
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings,
bool init)
bool skip_display_settings)
{
int ret = 0;
int index = 0;
long workload[1];
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (!skip_display_settings) {
@@ -2298,14 +2311,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
if (init || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, NULL, 0);
return ret;
}
@@ -2324,13 +2331,13 @@ static int smu_handle_task(struct smu_context *smu,
ret = smu_pre_display_config_changed(smu);
if (ret)
return ret;
ret = smu_adjust_power_state_dynamic(smu, level, false, false);
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
ret = smu_adjust_power_state_dynamic(smu, level, true, true);
ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
case AMD_PP_TASK_READJUST_POWER_STATE:
ret = smu_adjust_power_state_dynamic(smu, level, true, false);
ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
default:
break;
@@ -2352,12 +2359,11 @@ static int smu_handle_dpm_task(void *handle,
static int smu_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type,
bool en)
bool enable)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
long workload[1];
uint32_t index;
int ret;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -2365,24 +2371,21 @@ static int smu_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
if (!en) {
smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]);
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
} else {
smu->driver_workload_mask |= (1 << smu->workload_priority[type]);
index = fls(smu->workload_mask);
index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
}
smu->workload_mask = smu->driver_workload_mask |
smu->user_dpm_profile.user_workload_mask;
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, workload, 0);
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
if (enable)
smu_power_profile_mode_get(smu, type);
else
smu_power_profile_mode_put(smu, type);
ret = smu_bump_power_profile_mode(smu, NULL, 0);
if (ret) {
if (enable)
smu_power_profile_mode_put(smu, type);
else
smu_power_profile_mode_get(smu, type);
return ret;
}
}
return 0;
}
@@ -3074,21 +3077,33 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
int ret;
bool custom = false;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
if (smu->user_dpm_profile.user_workload_mask &
(1 << smu->workload_priority[param[param_size]]))
return 0;
if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
custom = true;
/* clear frontend mask so custom changes propagate */
smu->workload_mask = 0;
}
smu->user_dpm_profile.user_workload_mask =
(1 << smu->workload_priority[param[param_size]]);
smu->workload_mask = smu->user_dpm_profile.user_workload_mask |
smu->driver_workload_mask;
ret = smu_bump_power_profile_mode(smu, param, param_size);
if ((param[param_size] != smu->power_profile_mode) || custom) {
/* clear the old user preference */
smu_power_profile_mode_put(smu, smu->power_profile_mode);
/* set the new user preference */
smu_power_profile_mode_get(smu, param[param_size]);
ret = smu_bump_power_profile_mode(smu,
custom ? param : NULL,
custom ? param_size : 0);
if (ret)
smu_power_profile_mode_put(smu, param[param_size]);
else
/* store the user's preference */
smu->power_profile_mode = param[param_size];
}
return ret;
}
@@ -240,7 +240,6 @@ struct smu_user_dpm_profile {
/* user clock state information */
uint32_t clk_mask[SMU_CLK_COUNT];
uint32_t clk_dependency;
uint32_t user_workload_mask;
};
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
@@ -557,12 +556,13 @@ struct smu_context {
uint32_t hard_min_uclk_req_from_dal;
bool disable_uclk_switch;
/* asic agnostic workload mask */
uint32_t workload_mask;
uint32_t driver_workload_mask;
uint32_t workload_priority[WORKLOAD_POLICY_MAX];
uint32_t workload_setting[WORKLOAD_POLICY_MAX];
/* default/user workload preference */
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
/* backend specific custom workload settings */
long *custom_profile_params;
bool pm_enabled;
bool is_apu;
@@ -733,9 +733,12 @@ struct pptable_funcs {
* @set_power_profile_mode: Set a power profile mode. Also used to
* create/set custom power profile modes.
* &input: Power profile mode parameters.
* &size: Size of &input.
* &workload_mask: mask of workloads to enable
* &custom_params: custom profile parameters
* &custom_params_max_idx: max valid idx into custom_params
*/
int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
long *custom_params, u32 custom_params_max_idx);
/**
* @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power
@@ -1445,97 +1445,120 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
return size;
}
static int arcturus_set_power_profile_mode(struct smu_context *smu,
long *input,
uint32_t size)
#define ARCTURUS_CUSTOM_PARAMS_COUNT 10
#define ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT 2
#define ARCTURUS_CUSTOM_PARAMS_SIZE (ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT * ARCTURUS_CUSTOM_PARAMS_COUNT * sizeof(long))
static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu,
long *input)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type = 0;
uint32_t profile_mode = input[size];
int ret = 0;
int ret, idx;
if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
return -EINVAL;
}
if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
(smu->smc_fw_version >= 0x360d00)) {
if (size != 10)
return -EINVAL;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
switch (input[0]) {
case 0: /* Gfxclk */
activity_monitor.Gfx_FPS = input[1];
activity_monitor.Gfx_UseRlcBusy = input[2];
activity_monitor.Gfx_MinActiveFreqType = input[3];
activity_monitor.Gfx_MinActiveFreq = input[4];
activity_monitor.Gfx_BoosterFreqType = input[5];
activity_monitor.Gfx_BoosterFreq = input[6];
activity_monitor.Gfx_PD_Data_limit_c = input[7];
activity_monitor.Gfx_PD_Data_error_coeff = input[8];
activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
break;
case 1: /* Uclk */
activity_monitor.Mem_FPS = input[1];
activity_monitor.Mem_UseRlcBusy = input[2];
activity_monitor.Mem_MinActiveFreqType = input[3];
activity_monitor.Mem_MinActiveFreq = input[4];
activity_monitor.Mem_BoosterFreqType = input[5];
activity_monitor.Mem_BoosterFreq = input[6];
activity_monitor.Mem_PD_Data_limit_c = input[7];
activity_monitor.Mem_PD_Data_error_coeff = input[8];
activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
break;
default:
return -EINVAL;
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
}
/*
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on arcturus.
*/
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
profile_mode);
if (workload_type < 0) {
dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
return -EINVAL;
}
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
smu->workload_mask,
NULL);
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
false);
if (ret) {
dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
smu_cmn_assign_power_profile(smu);
idx = 0 * ARCTURUS_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Gfxclk */
activity_monitor.Gfx_FPS = input[idx + 1];
activity_monitor.Gfx_UseRlcBusy = input[idx + 2];
activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
activity_monitor.Gfx_BoosterFreq = input[idx + 6];
activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
}
idx = 1 * ARCTURUS_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Uclk */
activity_monitor.Mem_FPS = input[idx + 1];
activity_monitor.Mem_UseRlcBusy = input[idx + 2];
activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
activity_monitor.Mem_MinActiveFreq = input[idx + 4];
activity_monitor.Mem_BoosterFreqType = input[idx + 5];
activity_monitor.Mem_BoosterFreq = input[idx + 6];
activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
}
return 0;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
return ret;
}
static int arcturus_set_power_profile_mode(struct smu_context *smu,
u32 workload_mask,
long *custom_params,
u32 custom_params_max_idx)
{
u32 backend_workload_mask = 0;
int ret, idx = -1, i;
smu_cmn_get_backend_workload_mask(smu, workload_mask,
&backend_workload_mask);
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
if (smu->smc_fw_version < 0x360d00)
return -EINVAL;
if (!smu->custom_profile_params) {
smu->custom_profile_params =
kzalloc(ARCTURUS_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
if (!smu->custom_profile_params)
return -ENOMEM;
}
if (custom_params && custom_params_max_idx) {
if (custom_params_max_idx != ARCTURUS_CUSTOM_PARAMS_COUNT)
return -EINVAL;
if (custom_params[0] >= ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT)
return -EINVAL;
idx = custom_params[0] * ARCTURUS_CUSTOM_PARAMS_COUNT;
smu->custom_profile_params[idx] = 1;
for (i = 1; i < custom_params_max_idx; i++)
smu->custom_profile_params[idx + i] = custom_params[i];
}
ret = arcturus_set_power_profile_mode_coeff(smu,
smu->custom_profile_params);
if (ret) {
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
} else if (smu->custom_profile_params) {
memset(smu->custom_profile_params, 0, ARCTURUS_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
backend_workload_mask,
NULL);
if (ret) {
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
workload_mask);
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
return ret;
}
static int arcturus_set_performance_level(struct smu_context *smu,
@@ -2006,90 +2006,122 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
return size;
}
static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
#define NAVI10_CUSTOM_PARAMS_COUNT 10
#define NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT 3
#define NAVI10_CUSTOM_PARAMS_SIZE (NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT * sizeof(long))
static int navi10_set_power_profile_mode_coeff(struct smu_context *smu,
long *input)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type, ret = 0;
int ret, idx;
smu->power_profile_mode = input[size];
if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
return -EINVAL;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (size != 10)
return -EINVAL;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
switch (input[0]) {
case 0: /* Gfxclk */
activity_monitor.Gfx_FPS = input[1];
activity_monitor.Gfx_MinFreqStep = input[2];
activity_monitor.Gfx_MinActiveFreqType = input[3];
activity_monitor.Gfx_MinActiveFreq = input[4];
activity_monitor.Gfx_BoosterFreqType = input[5];
activity_monitor.Gfx_BoosterFreq = input[6];
activity_monitor.Gfx_PD_Data_limit_c = input[7];
activity_monitor.Gfx_PD_Data_error_coeff = input[8];
activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
break;
case 1: /* Socclk */
activity_monitor.Soc_FPS = input[1];
activity_monitor.Soc_MinFreqStep = input[2];
activity_monitor.Soc_MinActiveFreqType = input[3];
activity_monitor.Soc_MinActiveFreq = input[4];
activity_monitor.Soc_BoosterFreqType = input[5];
activity_monitor.Soc_BoosterFreq = input[6];
activity_monitor.Soc_PD_Data_limit_c = input[7];
activity_monitor.Soc_PD_Data_error_coeff = input[8];
activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
break;
case 2: /* Memclk */
activity_monitor.Mem_FPS = input[1];
activity_monitor.Mem_MinFreqStep = input[2];
activity_monitor.Mem_MinActiveFreqType = input[3];
activity_monitor.Mem_MinActiveFreq = input[4];
activity_monitor.Mem_BoosterFreqType = input[5];
activity_monitor.Mem_BoosterFreq = input[6];
activity_monitor.Mem_PD_Data_limit_c = input[7];
activity_monitor.Mem_PD_Data_error_coeff = input[8];
activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
break;
default:
return -EINVAL;
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
idx = 0 * NAVI10_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Gfxclk */
activity_monitor.Gfx_FPS = input[idx + 1];
activity_monitor.Gfx_MinFreqStep = input[idx + 2];
activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
activity_monitor.Gfx_BoosterFreq = input[idx + 6];
activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
}
idx = 1 * NAVI10_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Socclk */
activity_monitor.Soc_FPS = input[idx + 1];
activity_monitor.Soc_MinFreqStep = input[idx + 2];
activity_monitor.Soc_MinActiveFreqType = input[idx + 3];
activity_monitor.Soc_MinActiveFreq = input[idx + 4];
activity_monitor.Soc_BoosterFreqType = input[idx + 5];
activity_monitor.Soc_BoosterFreq = input[idx + 6];
activity_monitor.Soc_PD_Data_limit_c = input[idx + 7];
activity_monitor.Soc_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Soc_PD_Data_error_rate_coeff = input[idx + 9];
}
idx = 2 * NAVI10_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Memclk */
activity_monitor.Mem_FPS = input[idx + 1];
activity_monitor.Mem_MinFreqStep = input[idx + 2];
activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
activity_monitor.Mem_MinActiveFreq = input[idx + 4];
activity_monitor.Mem_BoosterFreqType = input[idx + 5];
activity_monitor.Mem_BoosterFreq = input[idx + 6];
activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
}
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
smu->power_profile_mode);
|
||||
if (workload_type < 0)
|
||||
return -EINVAL;
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor), true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int navi10_set_power_profile_mode(struct smu_context *smu,
|
||||
u32 workload_mask,
|
||||
long *custom_params,
|
||||
u32 custom_params_max_idx)
|
||||
{
|
||||
u32 backend_workload_mask = 0;
|
||||
int ret, idx = -1, i;
|
||||
|
||||
smu_cmn_get_backend_workload_mask(smu, workload_mask,
|
||||
&backend_workload_mask);
|
||||
|
||||
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
|
||||
if (!smu->custom_profile_params) {
|
||||
smu->custom_profile_params = kzalloc(NAVI10_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
|
||||
if (!smu->custom_profile_params)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (custom_params && custom_params_max_idx) {
|
||||
if (custom_params_max_idx != NAVI10_CUSTOM_PARAMS_COUNT)
|
||||
return -EINVAL;
|
||||
if (custom_params[0] >= NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT)
|
||||
return -EINVAL;
|
||||
idx = custom_params[0] * NAVI10_CUSTOM_PARAMS_COUNT;
|
||||
smu->custom_profile_params[idx] = 1;
|
||||
for (i = 1; i < custom_params_max_idx; i++)
|
||||
smu->custom_profile_params[idx + i] = custom_params[i];
|
||||
}
|
||||
ret = navi10_set_power_profile_mode_coeff(smu,
|
||||
smu->custom_profile_params);
|
||||
if (ret) {
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
} else if (smu->custom_profile_params) {
|
||||
memset(smu->custom_profile_params, 0, NAVI10_CUSTOM_PARAMS_SIZE);
|
||||
}
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
|
||||
smu->workload_mask, NULL);
|
||||
if (ret)
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
|
||||
else
|
||||
smu_cmn_assign_power_profile(smu);
|
||||
backend_workload_mask, NULL);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
|
||||
workload_mask);
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -1708,93 +1708,126 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
|
|||
return size;
|
||||
}
|
||||
|
||||
static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
|
||||
#define SIENNA_CICHLID_CUSTOM_PARAMS_COUNT 10
|
||||
#define SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT 3
|
||||
#define SIENNA_CICHLID_CUSTOM_PARAMS_SIZE (SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT * sizeof(long))
|
||||
|
||||
static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu,
|
||||
long *input)
|
||||
{
|
||||
|
||||
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
|
||||
DpmActivityMonitorCoeffInt_t *activity_monitor =
|
||||
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
|
||||
int workload_type, ret = 0;
|
||||
int ret, idx;
|
||||
|
||||
smu->power_profile_mode = input[size];
|
||||
|
||||
if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
|
||||
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
|
||||
return -EINVAL;
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external), false);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
|
||||
if (size != 10)
|
||||
return -EINVAL;
|
||||
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external), false);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
switch (input[0]) {
|
||||
case 0: /* Gfxclk */
|
||||
activity_monitor->Gfx_FPS = input[1];
|
||||
activity_monitor->Gfx_MinFreqStep = input[2];
|
||||
activity_monitor->Gfx_MinActiveFreqType = input[3];
|
||||
activity_monitor->Gfx_MinActiveFreq = input[4];
|
||||
activity_monitor->Gfx_BoosterFreqType = input[5];
|
||||
activity_monitor->Gfx_BoosterFreq = input[6];
|
||||
activity_monitor->Gfx_PD_Data_limit_c = input[7];
|
||||
activity_monitor->Gfx_PD_Data_error_coeff = input[8];
|
||||
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9];
|
||||
break;
|
||||
case 1: /* Socclk */
|
||||
activity_monitor->Fclk_FPS = input[1];
|
||||
activity_monitor->Fclk_MinFreqStep = input[2];
|
||||
activity_monitor->Fclk_MinActiveFreqType = input[3];
|
||||
activity_monitor->Fclk_MinActiveFreq = input[4];
|
||||
activity_monitor->Fclk_BoosterFreqType = input[5];
|
||||
activity_monitor->Fclk_BoosterFreq = input[6];
|
||||
activity_monitor->Fclk_PD_Data_limit_c = input[7];
|
||||
activity_monitor->Fclk_PD_Data_error_coeff = input[8];
|
||||
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
|
||||
break;
|
||||
case 2: /* Memclk */
|
||||
activity_monitor->Mem_FPS = input[1];
|
||||
activity_monitor->Mem_MinFreqStep = input[2];
|
||||
activity_monitor->Mem_MinActiveFreqType = input[3];
|
||||
activity_monitor->Mem_MinActiveFreq = input[4];
|
||||
activity_monitor->Mem_BoosterFreqType = input[5];
|
||||
activity_monitor->Mem_BoosterFreq = input[6];
|
||||
activity_monitor->Mem_PD_Data_limit_c = input[7];
|
||||
activity_monitor->Mem_PD_Data_error_coeff = input[8];
|
||||
activity_monitor->Mem_PD_Data_error_rate_coeff = input[9];
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external), true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
idx = 0 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
|
||||
if (input[idx]) {
|
||||
/* Gfxclk */
|
||||
activity_monitor->Gfx_FPS = input[idx + 1];
|
||||
activity_monitor->Gfx_MinFreqStep = input[idx + 2];
|
||||
activity_monitor->Gfx_MinActiveFreqType = input[idx + 3];
|
||||
activity_monitor->Gfx_MinActiveFreq = input[idx + 4];
|
||||
activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
|
||||
activity_monitor->Gfx_BoosterFreq = input[idx + 6];
|
||||
activity_monitor->Gfx_PD_Data_limit_c = input[idx + 7];
|
||||
activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 8];
|
||||
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 9];
|
||||
}
|
||||
idx = 1 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
|
||||
if (input[idx]) {
|
||||
/* Socclk */
|
||||
activity_monitor->Fclk_FPS = input[idx + 1];
|
||||
activity_monitor->Fclk_MinFreqStep = input[idx + 2];
|
||||
activity_monitor->Fclk_MinActiveFreqType = input[idx + 3];
|
||||
activity_monitor->Fclk_MinActiveFreq = input[idx + 4];
|
||||
activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
|
||||
activity_monitor->Fclk_BoosterFreq = input[idx + 6];
|
||||
activity_monitor->Fclk_PD_Data_limit_c = input[idx + 7];
|
||||
activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 8];
|
||||
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 9];
|
||||
}
|
||||
idx = 2 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
|
||||
if (input[idx]) {
|
||||
/* Memclk */
|
||||
activity_monitor->Mem_FPS = input[idx + 1];
|
||||
activity_monitor->Mem_MinFreqStep = input[idx + 2];
|
||||
activity_monitor->Mem_MinActiveFreqType = input[idx + 3];
|
||||
activity_monitor->Mem_MinActiveFreq = input[idx + 4];
|
||||
activity_monitor->Mem_BoosterFreqType = input[idx + 5];
|
||||
activity_monitor->Mem_BoosterFreq = input[idx + 6];
|
||||
activity_monitor->Mem_PD_Data_limit_c = input[idx + 7];
|
||||
activity_monitor->Mem_PD_Data_error_coeff = input[idx + 8];
|
||||
activity_monitor->Mem_PD_Data_error_rate_coeff = input[idx + 9];
|
||||
}
|
||||
|
||||
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
smu->power_profile_mode);
|
||||
if (workload_type < 0)
|
||||
return -EINVAL;
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external), true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu,
|
||||
u32 workload_mask,
|
||||
long *custom_params,
|
||||
u32 custom_params_max_idx)
|
||||
{
|
||||
u32 backend_workload_mask = 0;
|
||||
int ret, idx = -1, i;
|
||||
|
||||
smu_cmn_get_backend_workload_mask(smu, workload_mask,
|
||||
&backend_workload_mask);
|
||||
|
||||
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
|
||||
if (!smu->custom_profile_params) {
|
||||
smu->custom_profile_params =
|
||||
kzalloc(SIENNA_CICHLID_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
|
||||
if (!smu->custom_profile_params)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (custom_params && custom_params_max_idx) {
|
||||
if (custom_params_max_idx != SIENNA_CICHLID_CUSTOM_PARAMS_COUNT)
|
||||
return -EINVAL;
|
||||
if (custom_params[0] >= SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT)
|
||||
return -EINVAL;
|
||||
idx = custom_params[0] * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
|
||||
smu->custom_profile_params[idx] = 1;
|
||||
for (i = 1; i < custom_params_max_idx; i++)
|
||||
smu->custom_profile_params[idx + i] = custom_params[i];
|
||||
}
|
||||
ret = sienna_cichlid_set_power_profile_mode_coeff(smu,
|
||||
smu->custom_profile_params);
|
||||
if (ret) {
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
} else if (smu->custom_profile_params) {
|
||||
memset(smu->custom_profile_params, 0, SIENNA_CICHLID_CUSTOM_PARAMS_SIZE);
|
||||
}
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
|
||||
smu->workload_mask, NULL);
|
||||
if (ret)
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
|
||||
else
|
||||
smu_cmn_assign_power_profile(smu);
|
||||
backend_workload_mask, NULL);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
|
||||
workload_mask);
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -1056,42 +1056,27 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
	return size;
}

static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
static int vangogh_set_power_profile_mode(struct smu_context *smu,
					  u32 workload_mask,
					  long *custom_params,
					  u32 custom_params_max_idx)
{
	int workload_type, ret;
	uint32_t profile_mode = input[size];
	u32 backend_workload_mask = 0;
	int ret;

	if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
		return -EINVAL;
	}

	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
	    profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
		return 0;

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_WORKLOAD,
						       profile_mode);
	if (workload_type < 0) {
		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
			profile_mode);
		return -EINVAL;
	}
	smu_cmn_get_backend_workload_mask(smu, workload_mask,
					  &backend_workload_mask);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
					      smu->workload_mask,
					      NULL);
					      backend_workload_mask,
					      NULL);
	if (ret) {
		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
			     workload_type);
		dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n",
			     workload_mask);
		return ret;
	}

	smu_cmn_assign_power_profile(smu);

	return 0;
	return ret;
}

static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
@ -864,44 +864,27 @@ static int renoir_force_clk_levels(struct smu_context *smu,
	return ret;
}

static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
static int renoir_set_power_profile_mode(struct smu_context *smu,
					 u32 workload_mask,
					 long *custom_params,
					 u32 custom_params_max_idx)
{
	int workload_type, ret;
	uint32_t profile_mode = input[size];
	int ret;
	u32 backend_workload_mask = 0;

	if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
		return -EINVAL;
	}

	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
	    profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
		return 0;

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_WORKLOAD,
						       profile_mode);
	if (workload_type < 0) {
		/*
		 * TODO: If some case need switch to powersave/default power mode
		 * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
		 */
		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on RENOIR\n", profile_mode);
		return -EINVAL;
	}
	smu_cmn_get_backend_workload_mask(smu, workload_mask,
					  &backend_workload_mask);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
					      smu->workload_mask,
					      NULL);
					      backend_workload_mask,
					      NULL);
	if (ret) {
		dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
		dev_err_once(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
			     workload_mask);
		return ret;
	}

	smu_cmn_assign_power_profile(smu);

	return 0;
	return ret;
}

static int renoir_set_peak_clock_by_device(struct smu_context *smu)
@ -2571,82 +2571,76 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
|
|||
return size;
|
||||
}
|
||||
|
||||
static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
|
||||
long *input,
|
||||
uint32_t size)
|
||||
#define SMU_13_0_0_CUSTOM_PARAMS_COUNT 9
|
||||
#define SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT 2
|
||||
#define SMU_13_0_0_CUSTOM_PARAMS_SIZE (SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_0_CUSTOM_PARAMS_COUNT * sizeof(long))
|
||||
|
||||
static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu,
|
||||
long *input)
|
||||
{
|
||||
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
|
||||
DpmActivityMonitorCoeffInt_t *activity_monitor =
|
||||
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
|
||||
int workload_type, ret = 0;
|
||||
u32 workload_mask;
|
||||
int ret, idx;
|
||||
|
||||
smu->power_profile_mode = input[size];
|
||||
|
||||
if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
|
||||
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
|
||||
return -EINVAL;
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external),
|
||||
false);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
|
||||
if (size != 9)
|
||||
return -EINVAL;
|
||||
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external),
|
||||
false);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
switch (input[0]) {
|
||||
case 0: /* Gfxclk */
|
||||
activity_monitor->Gfx_FPS = input[1];
|
||||
activity_monitor->Gfx_MinActiveFreqType = input[2];
|
||||
activity_monitor->Gfx_MinActiveFreq = input[3];
|
||||
activity_monitor->Gfx_BoosterFreqType = input[4];
|
||||
activity_monitor->Gfx_BoosterFreq = input[5];
|
||||
activity_monitor->Gfx_PD_Data_limit_c = input[6];
|
||||
activity_monitor->Gfx_PD_Data_error_coeff = input[7];
|
||||
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
|
||||
break;
|
||||
case 1: /* Fclk */
|
||||
activity_monitor->Fclk_FPS = input[1];
|
||||
activity_monitor->Fclk_MinActiveFreqType = input[2];
|
||||
activity_monitor->Fclk_MinActiveFreq = input[3];
|
||||
activity_monitor->Fclk_BoosterFreqType = input[4];
|
||||
activity_monitor->Fclk_BoosterFreq = input[5];
|
||||
activity_monitor->Fclk_PD_Data_limit_c = input[6];
|
||||
activity_monitor->Fclk_PD_Data_error_coeff = input[7];
|
||||
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external),
|
||||
true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
idx = 0 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
|
||||
if (input[idx]) {
|
||||
/* Gfxclk */
|
||||
activity_monitor->Gfx_FPS = input[idx + 1];
|
||||
activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
|
||||
activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
|
||||
activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
|
||||
activity_monitor->Gfx_BoosterFreq = input[idx + 5];
|
||||
activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
|
||||
activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
|
||||
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
|
||||
}
|
||||
idx = 1 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
|
||||
if (input[idx]) {
|
||||
/* Fclk */
|
||||
activity_monitor->Fclk_FPS = input[idx + 1];
|
||||
activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
|
||||
activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
|
||||
activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
|
||||
activity_monitor->Fclk_BoosterFreq = input[idx + 5];
|
||||
activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
|
||||
activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
|
||||
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
|
||||
}
|
||||
|
||||
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
smu->power_profile_mode);
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external),
|
||||
true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (workload_type < 0)
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
workload_mask = 1 << workload_type;
|
||||
static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
|
||||
u32 workload_mask,
|
||||
long *custom_params,
|
||||
u32 custom_params_max_idx)
|
||||
{
|
||||
u32 backend_workload_mask = 0;
|
||||
int workload_type, ret, idx = -1, i;
|
||||
|
||||
smu_cmn_get_backend_workload_mask(smu, workload_mask,
|
||||
&backend_workload_mask);
|
||||
|
||||
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
|
||||
if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
|
||||
|
@ -2658,24 +2652,47 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
|
|||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
PP_SMC_POWER_PROFILE_POWERSAVING);
|
||||
if (workload_type >= 0)
|
||||
workload_mask |= 1 << workload_type;
|
||||
backend_workload_mask |= 1 << workload_type;
|
||||
}
|
||||
|
||||
smu->workload_mask |= workload_mask;
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetWorkloadMask,
|
||||
smu->workload_mask,
|
||||
NULL);
|
||||
if (!ret) {
|
||||
smu_cmn_assign_power_profile(smu);
|
||||
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) {
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
PP_SMC_POWER_PROFILE_FULLSCREEN3D);
|
||||
smu->power_profile_mode = smu->workload_mask & (1 << workload_type)
|
||||
? PP_SMC_POWER_PROFILE_FULLSCREEN3D
|
||||
: PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
|
||||
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
|
||||
if (!smu->custom_profile_params) {
|
||||
smu->custom_profile_params =
|
||||
kzalloc(SMU_13_0_0_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
|
||||
if (!smu->custom_profile_params)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (custom_params && custom_params_max_idx) {
|
||||
if (custom_params_max_idx != SMU_13_0_0_CUSTOM_PARAMS_COUNT)
|
||||
return -EINVAL;
|
||||
if (custom_params[0] >= SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT)
|
||||
return -EINVAL;
|
||||
idx = custom_params[0] * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
|
||||
smu->custom_profile_params[idx] = 1;
|
||||
for (i = 1; i < custom_params_max_idx; i++)
|
||||
smu->custom_profile_params[idx + i] = custom_params[i];
|
||||
}
|
||||
ret = smu_v13_0_0_set_power_profile_mode_coeff(smu,
|
||||
smu->custom_profile_params);
|
||||
if (ret) {
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
} else if (smu->custom_profile_params) {
|
||||
memset(smu->custom_profile_params, 0, SMU_13_0_0_CUSTOM_PARAMS_SIZE);
|
||||
}
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu,
|
||||
SMU_MSG_SetWorkloadMask,
|
||||
backend_workload_mask,
|
||||
NULL);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
|
||||
workload_mask);
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -2530,79 +2530,110 @@ out:
|
|||
return result;
|
||||
}
|
||||
|
||||
static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
|
||||
#define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8
|
||||
#define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2
|
||||
#define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long))
|
||||
|
||||
static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu,
|
||||
long *input)
|
||||
{
|
||||
|
||||
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
|
||||
DpmActivityMonitorCoeffInt_t *activity_monitor =
|
||||
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
|
||||
int workload_type, ret = 0;
|
||||
int ret, idx;
|
||||
|
||||
smu->power_profile_mode = input[size];
|
||||
|
||||
if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) {
|
||||
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
|
||||
return -EINVAL;
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external), false);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
|
||||
if (size != 8)
|
||||
return -EINVAL;
|
||||
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external), false);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
switch (input[0]) {
|
||||
case 0: /* Gfxclk */
|
||||
activity_monitor->Gfx_ActiveHystLimit = input[1];
|
||||
activity_monitor->Gfx_IdleHystLimit = input[2];
|
||||
activity_monitor->Gfx_FPS = input[3];
|
||||
activity_monitor->Gfx_MinActiveFreqType = input[4];
|
||||
activity_monitor->Gfx_BoosterFreqType = input[5];
|
||||
activity_monitor->Gfx_MinActiveFreq = input[6];
|
||||
activity_monitor->Gfx_BoosterFreq = input[7];
|
||||
break;
|
||||
case 1: /* Fclk */
|
||||
activity_monitor->Fclk_ActiveHystLimit = input[1];
|
||||
activity_monitor->Fclk_IdleHystLimit = input[2];
|
||||
activity_monitor->Fclk_FPS = input[3];
|
||||
activity_monitor->Fclk_MinActiveFreqType = input[4];
|
||||
activity_monitor->Fclk_BoosterFreqType = input[5];
|
||||
activity_monitor->Fclk_MinActiveFreq = input[6];
|
||||
activity_monitor->Fclk_BoosterFreq = input[7];
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external), true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
|
||||
if (input[idx]) {
|
||||
/* Gfxclk */
|
||||
activity_monitor->Gfx_ActiveHystLimit = input[idx + 1];
|
||||
activity_monitor->Gfx_IdleHystLimit = input[idx + 2];
|
||||
activity_monitor->Gfx_FPS = input[idx + 3];
|
||||
activity_monitor->Gfx_MinActiveFreqType = input[idx + 4];
|
||||
activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
|
||||
activity_monitor->Gfx_MinActiveFreq = input[idx + 6];
|
||||
activity_monitor->Gfx_BoosterFreq = input[idx + 7];
|
||||
}
|
||||
idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
|
||||
if (input[idx]) {
|
||||
/* Fclk */
|
||||
activity_monitor->Fclk_ActiveHystLimit = input[idx + 1];
|
||||
activity_monitor->Fclk_IdleHystLimit = input[idx + 2];
|
||||
activity_monitor->Fclk_FPS = input[idx + 3];
|
||||
activity_monitor->Fclk_MinActiveFreqType = input[idx + 4];
|
||||
activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
|
||||
activity_monitor->Fclk_MinActiveFreq = input[idx + 6];
|
||||
activity_monitor->Fclk_BoosterFreq = input[idx + 7];
|
||||
}
|
||||
|
||||
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
smu->power_profile_mode);
|
||||
if (workload_type < 0)
|
||||
return -EINVAL;
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external), true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu,
|
||||
u32 workload_mask,
|
||||
long *custom_params,
|
||||
u32 custom_params_max_idx)
|
||||
{
|
||||
u32 backend_workload_mask = 0;
|
||||
int ret, idx = -1, i;
|
||||
|
||||
smu_cmn_get_backend_workload_mask(smu, workload_mask,
|
||||
&backend_workload_mask);
|
||||
|
||||
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
|
||||
if (!smu->custom_profile_params) {
|
||||
smu->custom_profile_params =
|
||||
kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
|
||||
if (!smu->custom_profile_params)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (custom_params && custom_params_max_idx) {
|
||||
if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT)
|
||||
return -EINVAL;
|
||||
if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT)
|
||||
return -EINVAL;
|
||||
idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
|
||||
smu->custom_profile_params[idx] = 1;
|
||||
for (i = 1; i < custom_params_max_idx; i++)
|
||||
smu->custom_profile_params[idx + i] = custom_params[i];
|
||||
}
|
||||
ret = smu_v13_0_7_set_power_profile_mode_coeff(smu,
|
||||
smu->custom_profile_params);
|
||||
if (ret) {
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
} else if (smu->custom_profile_params) {
|
||||
memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE);
|
||||
}
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
|
||||
smu->workload_mask, NULL);
|
||||
backend_workload_mask, NULL);
|
||||
|
||||
if (ret)
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__);
|
||||
else
|
||||
smu_cmn_assign_power_profile(smu);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
|
||||
workload_mask);
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -1739,89 +1739,120 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
|
|||
return size;
|
||||
}
|
||||
|
||||
static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
|
||||
long *input,
|
||||
uint32_t size)
|
||||
#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9
|
||||
#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2
|
||||
#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long))
|
||||
|
||||
static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
|
||||
long *input)
|
||||
{
|
||||
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
|
||||
DpmActivityMonitorCoeffInt_t *activity_monitor =
|
||||
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
|
||||
int workload_type, ret = 0;
|
||||
uint32_t current_profile_mode = smu->power_profile_mode;
|
||||
smu->power_profile_mode = input[size];
|
||||
int ret, idx;
|
||||
|
||||
if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
|
||||
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
|
||||
return -EINVAL;
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external),
|
||||
false);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
|
||||
if (size != 9)
|
||||
return -EINVAL;
|
||||
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external),
|
||||
false);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
switch (input[0]) {
|
||||
case 0: /* Gfxclk */
|
||||
activity_monitor->Gfx_FPS = input[1];
|
||||
activity_monitor->Gfx_MinActiveFreqType = input[2];
|
||||
activity_monitor->Gfx_MinActiveFreq = input[3];
|
||||
activity_monitor->Gfx_BoosterFreqType = input[4];
|
||||
activity_monitor->Gfx_BoosterFreq = input[5];
|
||||
activity_monitor->Gfx_PD_Data_limit_c = input[6];
|
||||
activity_monitor->Gfx_PD_Data_error_coeff = input[7];
|
||||
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
|
||||
break;
|
||||
case 1: /* Fclk */
|
||||
activity_monitor->Fclk_FPS = input[1];
|
||||
activity_monitor->Fclk_MinActiveFreqType = input[2];
|
||||
activity_monitor->Fclk_MinActiveFreq = input[3];
|
||||
activity_monitor->Fclk_BoosterFreqType = input[4];
|
||||
activity_monitor->Fclk_BoosterFreq = input[5];
|
||||
activity_monitor->Fclk_PD_Data_limit_c = input[6];
|
||||
activity_monitor->Fclk_PD_Data_error_coeff = input[7];
|
||||
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external),
|
||||
true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
|
||||
if (input[idx]) {
|
||||
/* Gfxclk */
|
||||
activity_monitor->Gfx_FPS = input[idx + 1];
|
||||
activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
|
||||
activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
|
||||
activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
|
||||
activity_monitor->Gfx_BoosterFreq = input[idx + 5];
|
||||
activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
|
||||
activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
|
||||
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
|
||||
}
|
||||
idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
|
||||
if (input[idx]) {
|
||||
/* Fclk */
|
||||
activity_monitor->Fclk_FPS = input[idx + 1];
|
||||
activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
|
||||
activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
|
||||
activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
|
||||
activity_monitor->Fclk_BoosterFreq = input[idx + 5];
|
||||
activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
|
||||
activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
|
||||
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
|
||||
}
|
||||
|
||||
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
|
||||
ret = smu_cmn_update_table(smu,
|
||||
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
|
||||
WORKLOAD_PPLIB_CUSTOM_BIT,
|
||||
(void *)(&activity_monitor_external),
|
||||
true);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
|
||||
u32 workload_mask,
|
||||
long *custom_params,
|
||||
u32 custom_params_max_idx)
|
||||
{
|
||||
u32 backend_workload_mask = 0;
|
||||
int ret, idx = -1, i;
|
||||
|
||||
smu_cmn_get_backend_workload_mask(smu, workload_mask,
|
||||
&backend_workload_mask);
|
||||
|
||||
/* disable deep sleep if compute is enabled */
|
||||
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
|
||||
smu_v14_0_deep_sleep_control(smu, false);
|
||||
else if (current_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE)
|
||||
else
|
||||
smu_v14_0_deep_sleep_control(smu, true);
|
||||
|
||||
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
|
||||
workload_type = smu_cmn_to_asic_specific_index(smu,
|
||||
CMN2ASIC_MAPPING_WORKLOAD,
|
||||
smu->power_profile_mode);
|
||||
if (workload_type < 0)
|
||||
return -EINVAL;
|
||||
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
|
||||
if (!smu->custom_profile_params) {
|
||||
smu->custom_profile_params =
|
||||
kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
|
||||
if (!smu->custom_profile_params)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (custom_params && custom_params_max_idx) {
|
||||
if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
|
||||
return -EINVAL;
|
||||
if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
|
||||
return -EINVAL;
|
||||
idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
|
||||
smu->custom_profile_params[idx] = 1;
|
||||
for (i = 1; i < custom_params_max_idx; i++)
|
||||
smu->custom_profile_params[idx + i] = custom_params[i];
|
||||
}
|
||||
ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
|
||||
smu->custom_profile_params);
|
||||
if (ret) {
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
} else if (smu->custom_profile_params) {
|
||||
memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
|
||||
}
|
||||
|
||||
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
|
||||
smu->workload_mask, NULL);
|
||||
|
||||
if (!ret)
|
||||
smu_cmn_assign_power_profile(smu);
|
||||
backend_workload_mask, NULL);
|
||||
if (ret) {
|
||||
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
|
||||
workload_mask);
|
||||
if (idx != -1)
|
||||
smu->custom_profile_params[idx] = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -1144,14 +1144,6 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
	return ret;
}

void smu_cmn_assign_power_profile(struct smu_context *smu)
{
	uint32_t index;
	index = fls(smu->workload_mask);
	index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
	smu->power_profile_mode = smu->workload_setting[index];
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

@ -1229,3 +1221,28 @@ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &xgmi_plpd_policy_desc;
}

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask)
{
	int workload_type;
	u32 profile_mode;

	*backend_workload_mask = 0;

	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
		if (!(workload_mask & (1 << profile_mode)))
			continue;

		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       profile_mode);

		if (workload_type < 0)
			continue;

		*backend_workload_mask |= 1 << workload_type;
	}
}

@ -130,8 +130,6 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state);

void smu_cmn_assign_power_profile(struct smu_context *smu);

/*
 * Helper function to make sysfs_emit_at() happy. Align buf to
 * the current page boundary and record the offset.

@ -149,5 +147,9 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask);

#endif
#endif
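smu_cmn_get_backend_workload_mask() walks every generic profile bit and ORs in the corresponding firmware workload bit, silently skipping profiles this ASIC does not map. Below is a self-contained sketch of that translation using a plain lookup table; the table contents, counts and function names are invented for illustration, since the real mapping comes from smu_cmn_to_asic_specific_index().

#include <stdint.h>
#include <stdio.h>

#define PROFILE_COUNT 8

/* Illustrative generic-profile -> firmware-workload-bit table; -1 means unmapped. */
static const int asic_workload_bit[PROFILE_COUNT] = { 0, 1, 2, -1, 4, 5, -1, 6 };

static uint32_t backend_workload_mask(uint32_t workload_mask)
{
        uint32_t backend = 0;
        int profile;

        for (profile = 0; profile < PROFILE_COUNT; profile++) {
                if (!(workload_mask & (1u << profile)))
                        continue;
                if (asic_workload_bit[profile] < 0)
                        continue;       /* profile not supported by this ASIC */
                backend |= 1u << asic_workload_bit[profile];
        }
        return backend;
}

int main(void)
{
        /* profiles 1 and 3 requested; 3 is unmapped, so only bit 1 survives */
        printf("0x%08x\n", backend_workload_mask((1u << 1) | (1u << 3)));
        return 0;
}
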
@ -320,6 +320,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	if (hdr->msg_len < 1) /* min space for body CRC */
		return false;

	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
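The header byte decoded above packs three fields into one octet: broadcast in bit 7, path_msg in bit 6 and the body length in the low 6 bits; a body shorter than one byte cannot even hold its CRC, which is what the new msg_len check rejects. A standalone sketch of that unpacking follows; the struct and field names are illustrative, not the DRM ones.

#include <stdbool.h>
#include <stdint.h>

struct sb_hdr {
        bool broadcast;
        bool path_msg;
        uint8_t msg_len;
};

/* Decode one sideband header byte; reject bodies too short for the CRC. */
static bool decode_hdr_byte(uint8_t byte, struct sb_hdr *hdr)
{
        hdr->broadcast = (byte >> 7) & 0x1;
        hdr->path_msg = (byte >> 6) & 0x1;
        hdr->msg_len = byte & 0x3f;
        return hdr->msg_len >= 1;       /* min space for body CRC */
}

int main(void)
{
        struct sb_hdr hdr;

        return decode_hdr_byte(0x85, &hdr) ? 0 : 1;     /* broadcast set, len 5 */
}
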
@ -3697,8 +3700,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
|
|||
ret = 0;
|
||||
mgr->payload_id_table_cleared = false;
|
||||
|
||||
memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
|
||||
memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
|
||||
mgr->reset_rx_state = true;
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
|
@ -3856,6 +3858,11 @@ out_fail:
|
|||
}
|
||||
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
|
||||
|
||||
static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
|
||||
{
|
||||
memset(msg, 0, sizeof(*msg));
|
||||
}
|
||||
|
||||
static bool
|
||||
drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
|
||||
struct drm_dp_mst_branch **mstb)
|
||||
|
@ -3934,6 +3941,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
|
|||
return true;
|
||||
}
|
||||
|
||||
static int get_msg_request_type(u8 data)
|
||||
{
|
||||
return data & 0x7f;
|
||||
}
|
||||
|
||||
static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
|
||||
const struct drm_dp_sideband_msg_tx *txmsg,
|
||||
const struct drm_dp_sideband_msg_rx *rxmsg)
|
||||
{
|
||||
const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
|
||||
const struct drm_dp_mst_branch *mstb = txmsg->dst;
|
||||
int tx_req_type = get_msg_request_type(txmsg->msg[0]);
|
||||
int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
|
||||
char rad_str[64];
|
||||
|
||||
if (tx_req_type == rx_req_type)
|
||||
return true;
|
||||
|
||||
drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
|
||||
drm_dbg_kms(mgr->dev,
|
||||
"Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
|
||||
mstb, hdr->seqno, mstb->lct, rad_str,
|
||||
drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
|
||||
drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
|
||||
{
|
||||
struct drm_dp_sideband_msg_tx *txmsg;
|
||||
|
@ -3949,9 +3984,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
|
|||
|
||||
/* find the message */
|
||||
mutex_lock(&mgr->qlock);
|
||||
|
||||
txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
|
||||
struct drm_dp_sideband_msg_tx, next);
|
||||
mutex_unlock(&mgr->qlock);
|
||||
|
||||
/* Were we actually expecting a response, and from this mstb? */
|
||||
if (!txmsg || txmsg->dst != mstb) {
|
||||
|
@ -3960,6 +3995,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
|
|||
hdr = &msg->initial_hdr;
|
||||
drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
|
||||
mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
|
||||
|
||||
mutex_unlock(&mgr->qlock);
|
||||
|
||||
goto out_clear_reply;
|
||||
}
|
||||
|
||||
if (!verify_rx_request_type(mgr, txmsg, msg)) {
|
||||
mutex_unlock(&mgr->qlock);
|
||||
|
||||
goto out_clear_reply;
|
||||
}
|
||||
|
||||
|
@ -3975,20 +4019,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
|
|||
txmsg->reply.u.nak.nak_data);
|
||||
}
|
||||
|
||||
memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
|
||||
drm_dp_mst_topology_put_mstb(mstb);
|
||||
|
||||
mutex_lock(&mgr->qlock);
|
||||
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
|
||||
list_del(&txmsg->next);
|
||||
|
||||
mutex_unlock(&mgr->qlock);
|
||||
|
||||
wake_up_all(&mgr->tx_waitq);
|
||||
|
||||
return 0;
|
||||
|
||||
out_clear_reply:
|
||||
memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
|
||||
reset_msg_rx_state(msg);
|
||||
out:
|
||||
if (mstb)
|
||||
drm_dp_mst_topology_put_mstb(mstb);
|
||||
|
@ -4070,16 +4109,20 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
|
|||
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
|
||||
{
|
||||
struct drm_dp_pending_up_req *up_req;
|
||||
struct drm_dp_mst_branch *mst_primary;
|
||||
int ret = 0;
|
||||
|
||||
if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
|
||||
goto out;
|
||||
goto out_clear_reply;
|
||||
|
||||
if (!mgr->up_req_recv.have_eomt)
|
||||
return 0;
|
||||
|
||||
up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
|
||||
if (!up_req)
|
||||
return -ENOMEM;
|
||||
if (!up_req) {
|
||||
ret = -ENOMEM;
|
||||
goto out_clear_reply;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&up_req->next);
|
||||
|
||||
|
@ -4090,10 +4133,19 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
|
|||
drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
|
||||
up_req->msg.req_type);
|
||||
kfree(up_req);
|
||||
goto out;
|
||||
goto out_clear_reply;
|
||||
}
|
||||
|
||||
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
|
||||
mutex_lock(&mgr->lock);
|
||||
mst_primary = mgr->mst_primary;
|
||||
if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) {
|
||||
mutex_unlock(&mgr->lock);
|
||||
kfree(up_req);
|
||||
goto out_clear_reply;
|
||||
}
|
||||
mutex_unlock(&mgr->lock);
|
||||
|
||||
drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
|
||||
false);
|
||||
|
||||
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
|
||||
|
@ -4110,13 +4162,13 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
|
|||
conn_stat->peer_device_type);
|
||||
|
||||
mutex_lock(&mgr->probe_lock);
|
||||
handle_csn = mgr->mst_primary->link_address_sent;
|
||||
handle_csn = mst_primary->link_address_sent;
|
||||
mutex_unlock(&mgr->probe_lock);
|
||||
|
||||
if (!handle_csn) {
|
||||
drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
|
||||
kfree(up_req);
|
||||
goto out;
|
||||
goto out_put_primary;
|
||||
}
|
||||
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
|
||||
const struct drm_dp_resource_status_notify *res_stat =
|
||||
|
@ -4133,9 +4185,22 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
|
|||
mutex_unlock(&mgr->up_req_lock);
|
||||
queue_work(system_long_wq, &mgr->up_req_work);
|
||||
|
||||
out:
|
||||
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
|
||||
return 0;
|
||||
out_put_primary:
|
||||
drm_dp_mst_topology_put_mstb(mst_primary);
|
||||
out_clear_reply:
|
||||
reset_msg_rx_state(&mgr->up_req_recv);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
|
||||
{
|
||||
mutex_lock(&mgr->lock);
|
||||
if (mgr->reset_rx_state) {
|
||||
mgr->reset_rx_state = false;
|
||||
reset_msg_rx_state(&mgr->down_rep_recv);
|
||||
reset_msg_rx_state(&mgr->up_req_recv);
|
||||
}
|
||||
mutex_unlock(&mgr->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -4172,6 +4237,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u
|
|||
*handled = true;
|
||||
}
|
||||
|
||||
update_msg_rx_state(mgr);
|
||||
|
||||
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
|
||||
ret = drm_dp_mst_handle_down_rep(mgr);
|
||||
*handled = true;
|
||||
|
|
|
@ -137,7 +137,7 @@ static void mixer_dbg_crb(struct seq_file *s, int val)
	}
}

static void mixer_dbg_mxn(struct seq_file *s, void *addr)
static void mixer_dbg_mxn(struct seq_file *s, void __iomem *addr)
{
	int i;

@ -254,9 +254,9 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
		V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel);
	}

	V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);
	V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask);
	V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask);
	V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);

	v3d->active_perfmon = perfmon;
}
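The reordered writes above clear the selected counters and acknowledge any stale overflow bits before the enable mask is written, so counting starts from a clean state. A tiny model of that ordering with a fake register file; the register indices and the write helper are invented for illustration.

#include <stdint.h>
#include <stdio.h>

enum { REG_PCTR_EN, REG_PCTR_CLR, REG_PCTR_OVERFLOW, REG_MAX };

static uint32_t regs[REG_MAX];

static void core_write(int reg, uint32_t val)
{
        regs[reg] = val;        /* stand-in for an MMIO write */
}

/* Enable a set of performance counters: clear state first, enable last. */
static void perfmon_start(uint32_t mask)
{
        core_write(REG_PCTR_CLR, mask);         /* reset the selected counters */
        core_write(REG_PCTR_OVERFLOW, mask);    /* ack any stale overflow bits */
        core_write(REG_PCTR_EN, mask);          /* only now start counting     */
}

int main(void)
{
        perfmon_start(0xf);
        printf("EN=0x%x\n", regs[REG_PCTR_EN]);
        return 0;
}
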
@ -155,36 +155,6 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
	ss->vm = NULL;
}

static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
{
	struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
	struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
	struct xe_device *xe = coredump_to_xe(coredump);
	unsigned int fw_ref;

	xe_pm_runtime_get(xe);

	/* keep going if fw fails as we still want to save the memory and SW data */
	fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
	xe_vm_snapshot_capture_delayed(ss->vm);
	xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
	xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);

	xe_pm_runtime_put(xe);

	/* Calculate devcoredump size */
	ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);

	ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
	if (!ss->read.buffer)
		return;

	__xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
	xe_devcoredump_snapshot_free(ss);
}

static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
				   size_t count, void *data, size_t datalen)
{

@ -234,6 +204,45 @@ static void xe_devcoredump_free(void *data)
		 "Xe device coredump has been deleted.\n");
}

static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
{
	struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
	struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
	struct xe_device *xe = coredump_to_xe(coredump);
	unsigned int fw_ref;

	/*
	 * NB: Despite passing a GFP_ flags parameter here, more allocations are done
	 * internally using GFP_KERNEL explicitly. Hence this call must be in the worker
	 * thread and not in the initial capture call.
	 */
	dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
			      xe_devcoredump_read, xe_devcoredump_free,
			      XE_COREDUMP_TIMEOUT_JIFFIES);

	xe_pm_runtime_get(xe);

	/* keep going if fw fails as we still want to save the memory and SW data */
	fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
	xe_vm_snapshot_capture_delayed(ss->vm);
	xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
	xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);

	xe_pm_runtime_put(xe);

	/* Calculate devcoredump size */
	ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);

	ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
	if (!ss->read.buffer)
		return;

	__xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
	xe_devcoredump_snapshot_free(ss);
}

static void devcoredump_snapshot(struct xe_devcoredump *coredump,
				 struct xe_sched_job *job)
{

@ -310,10 +319,6 @@ void xe_devcoredump(struct xe_sched_job *job)
	drm_info(&xe->drm, "Xe device coredump has been created\n");
	drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
		 xe->drm.primary->index);

	dev_coredumpm_timeout(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
			      xe_devcoredump_read, xe_devcoredump_free,
			      XE_COREDUMP_TIMEOUT_JIFFIES);
}

static void xe_driver_devcoredump_fini(void *arg)
@ -102,6 +102,7 @@ struct __guc_capture_parsed_output {
 *    A 64 bit register define requires 2 consecutive entries,
 *    with low dword first and hi dword the second.
 * 2. Register name: null for incomplete define
 * 3. Incorrect order will trigger XE_WARN.
 */
#define COMMON_XELP_BASE_GLOBAL \
	{ FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"}
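A 64-bit register therefore arrives as two consecutive 32-bit snapshot entries, and the printer further down recombines them only once the low dword has been seen first. The recombination itself is just a shift-and-or, sketched here standalone; the helper name is illustrative.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Combine the two dwords of a 64-bit register: low entry first, high second. */
static uint64_t combine_reg64(uint32_t low, uint32_t high)
{
        return ((uint64_t)high << 32) | low;
}

int main(void)
{
        printf("0x%016" PRIx64 "\n", combine_reg64(0xdeadbeef, 0x00000001));
        return 0;
}
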
@ -1675,10 +1676,10 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
|
|||
struct xe_devcoredump *devcoredump = &xe->devcoredump;
|
||||
struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot;
|
||||
struct gcap_reg_list_info *reginfo = NULL;
|
||||
u32 last_value, i;
|
||||
bool is_ext;
|
||||
u32 i, last_value = 0;
|
||||
bool is_ext, low32_ready = false;
|
||||
|
||||
if (!list || list->num_regs == 0)
|
||||
if (!list || !list->list || list->num_regs == 0)
|
||||
return;
|
||||
XE_WARN_ON(!devcore_snapshot->matched_node);
|
||||
|
||||
|
@ -1701,29 +1702,75 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
|
|||
continue;
|
||||
|
||||
value = reg->value;
|
||||
if (reg_desc->data_type == REG_64BIT_LOW_DW) {
|
||||
switch (reg_desc->data_type) {
|
||||
case REG_64BIT_LOW_DW:
|
||||
last_value = value;
|
||||
|
||||
/*
|
||||
* A 64 bit register define requires 2 consecutive
|
||||
* entries in register list, with low dword first
|
||||
* and hi dword the second, like:
|
||||
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
|
||||
* { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, "XXX_REG"},
|
||||
*
|
||||
* Incorrect order will trigger XE_WARN.
|
||||
*
|
||||
* Possible double low here, for example:
|
||||
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
|
||||
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
|
||||
*/
|
||||
XE_WARN_ON(low32_ready);
|
||||
low32_ready = true;
|
||||
/* Low 32 bit dword saved, continue for high 32 bit */
|
||||
continue;
|
||||
} else if (reg_desc->data_type == REG_64BIT_HI_DW) {
|
||||
break;
|
||||
|
||||
case REG_64BIT_HI_DW: {
|
||||
u64 value_qw = ((u64)value << 32) | last_value;
|
||||
|
||||
/*
|
||||
* Incorrect 64bit register order. Possible missing low.
|
||||
* for example:
|
||||
* { XXX_REG(0), REG_32BIT, 0, 0, NULL},
|
||||
* { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, NULL},
|
||||
*/
|
||||
XE_WARN_ON(!low32_ready);
|
||||
low32_ready = false;
|
||||
|
||||
drm_printf(p, "\t%s: 0x%016llx\n", reg_desc->regname, value_qw);
|
||||
continue;
|
||||
break;
|
||||
}
|
||||
|
||||
if (is_ext) {
|
||||
int dss, group, instance;
|
||||
case REG_32BIT:
|
||||
/*
|
||||
* Incorrect 64bit register order. Possible missing high.
|
||||
* for example:
|
||||
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
|
||||
* { XXX_REG(0), REG_32BIT, 0, 0, "XXX_REG"},
|
||||
*/
|
||||
XE_WARN_ON(low32_ready);
|
||||
|
||||
group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
|
||||
instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
|
||||
dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
|
||||
if (is_ext) {
|
||||
int dss, group, instance;
|
||||
|
||||
drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
|
||||
} else {
|
||||
drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
|
||||
group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
|
||||
instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
|
||||
dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
|
||||
|
||||
drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
|
||||
} else {
|
||||
drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Incorrect 64bit register order. Possible missing high.
|
||||
* for example:
|
||||
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
|
||||
* } // <- Register list end
|
||||
*/
|
||||
XE_WARN_ON(low32_ready);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
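The switch added above enforces the register-list convention documented in the comments: a 64-bit register is described by two consecutive 32-bit entries, low dword first, and low32_ready tracks whether a low half is still waiting for its high half. The following standalone sketch shows the same pairing and ordering checks in plain C; struct reg_entry, the sample names, and the values are invented for this example and are not the driver's types.

/* Minimal userspace sketch of the low/high dword pairing check. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum reg_type { REG_32BIT, REG_64BIT_LOW_DW, REG_64BIT_HI_DW };

struct reg_entry {
	enum reg_type type;
	const char *name;	/* NULL for the low half of a 64-bit pair */
	uint32_t value;
};

static void print_regs(const struct reg_entry *regs, int n)
{
	uint32_t last_low = 0;
	bool low_ready = false;

	for (int i = 0; i < n; i++) {
		switch (regs[i].type) {
		case REG_64BIT_LOW_DW:
			assert(!low_ready);	/* double low would be a list bug */
			last_low = regs[i].value;
			low_ready = true;
			continue;		/* wait for the high half */
		case REG_64BIT_HI_DW:
			assert(low_ready);	/* high half must follow a low half */
			low_ready = false;
			printf("%s: 0x%016llx\n", regs[i].name,
			       ((unsigned long long)regs[i].value << 32) | last_low);
			break;
		case REG_32BIT:
			assert(!low_ready);	/* a low half without its high half */
			printf("%s: 0x%08x\n", regs[i].name, regs[i].value);
			break;
		}
	}
	assert(!low_ready);	/* list must not end on a dangling low half */
}

int main(void)
{
	const struct reg_entry regs[] = {
		{ REG_64BIT_LOW_DW, NULL,             0xdeadbeef },
		{ REG_64BIT_HI_DW,  "EXAMPLE_REG64",  0x00000001 },
		{ REG_32BIT,        "EXAMPLE_REG32",  0x00001234 },
	};

	print_regs(regs, 3);
	return 0;
}
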
@@ -372,6 +372,7 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
	u8 data[8];
	u16 xy1y2[3];
	s32 h[3], s[3];
	int half_range = BIT(13);
	int i, ret;

	mutex_lock(&yas5xx->lock);

@@ -406,13 +407,13 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
	/* The second version of YAS537 needs to include calibration coefficients */
	if (yas5xx->version == YAS537_VERSION_1) {
		for (i = 0; i < 3; i++)
			s[i] = xy1y2[i] - BIT(13);
		h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / BIT(13);
		h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / BIT(13);
		h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / BIT(13);
			s[i] = xy1y2[i] - half_range;
		h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / half_range;
		h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / half_range;
		h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / half_range;
		for (i = 0; i < 3; i++) {
			clamp_val(h[i], -BIT(13), BIT(13) - 1);
			xy1y2[i] = h[i] + BIT(13);
			h[i] = clamp(h[i], -half_range, half_range - 1);
			xy1y2[i] = h[i] + half_range;
		}
	}

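The yas537 hunk above replaces the open-coded BIT(13) with half_range and, more importantly, assigns the clamped result back to h[i] (the old clamp_val() call discarded it). A small userspace sketch of the same fixed-point step is shown below; HALF_RANGE stands in for BIT(13), and the calibration coefficients are made up for illustration.

/* Userspace sketch: re-centre raw 14-bit samples, mix, clamp, shift back. */
#include <stdint.h>
#include <stdio.h>

#define HALF_RANGE 8192			/* BIT(13) */

static int32_t clamp_s32(int32_t v, int32_t lo, int32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint16_t xy1y2[3] = { 9000, 8000, 7000 };	/* raw unsigned samples */
	int32_t k = 130, a2 = 10, a3 = -5, a4 = 3, a5 = 120, a6 = 2,
		a7 = -4, a8 = 6, a9 = 125;		/* invented coefficients */
	int32_t s[3], h[3];

	for (int i = 0; i < 3; i++)
		s[i] = xy1y2[i] - HALF_RANGE;		/* centre around zero */

	h[0] = (k * (128 * s[0] + a2 * s[1] + a3 * s[2])) / HALF_RANGE;
	h[1] = (k * (a4 * s[0] + a5 * s[1] + a6 * s[2])) / HALF_RANGE;
	h[2] = (k * (a7 * s[0] + a8 * s[1] + a9 * s[2])) / HALF_RANGE;

	for (int i = 0; i < 3; i++) {
		h[i] = clamp_s32(h[i], -HALF_RANGE, HALF_RANGE - 1);	/* keep signed 14-bit */
		xy1y2[i] = h[i] + HALF_RANGE;				/* back to unsigned */
		printf("axis %d: %u\n", i, (unsigned int)xy1y2[i]);
	}
	return 0;
}
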
@@ -420,8 +420,6 @@ out_put_fdno:
	put_unused_fd(fdno);
out_fput:
	fput(filep);
	refcount_dec(&fault->obj.users);
	iommufd_ctx_put(fault->ictx);
out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &fault->obj);

@@ -415,7 +415,7 @@ config PARTITION_PERCPU
config STM32MP_EXTI
	tristate "STM32MP extended interrupts and event controller"
	depends on (ARCH_STM32 && !ARM_SINGLE_ARMV7M) || COMPILE_TEST
	default y
	default ARCH_STM32 && !ARM_SINGLE_ARMV7M
	select IRQ_DOMAIN_HIERARCHY
	select GENERIC_IRQ_CHIP
	help

@@ -58,6 +58,7 @@ static struct irq_chip bcm2836_arm_irqchip_timer = {
	.name = "bcm2836-timer",
	.irq_mask = bcm2836_arm_irqchip_mask_timer_irq,
	.irq_unmask = bcm2836_arm_irqchip_unmask_timer_irq,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};

static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d)

@@ -74,6 +75,7 @@ static struct irq_chip bcm2836_arm_irqchip_pmu = {
	.name = "bcm2836-pmu",
	.irq_mask = bcm2836_arm_irqchip_mask_pmu_irq,
	.irq_unmask = bcm2836_arm_irqchip_unmask_pmu_irq,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};

static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d)

@@ -88,6 +90,7 @@ static struct irq_chip bcm2836_arm_irqchip_gpu = {
	.name = "bcm2836-gpu",
	.irq_mask = bcm2836_arm_irqchip_mask_gpu_irq,
	.irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
};

static void bcm2836_arm_irqchip_dummy_op(struct irq_data *d)

@@ -817,7 +817,7 @@ static void gic_deactivate_unhandled(u32 irqnr)
 * register state is not stale, as these may have been indirectly written
 * *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 * (2) Execute an interrupt priority drop when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{

@@ -1718,7 +1718,7 @@ static CLOSURE_CALLBACK(cache_set_flush)
	if (!IS_ERR_OR_NULL(c->gc_thread))
		kthread_stop(c->gc_thread);

	if (!IS_ERR(c->root))
	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/*