diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.54-55.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.54-55.patch new file mode 100644 index 000000000..e5fd421f2 --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.54-55.patch @@ -0,0 +1,19054 @@ +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt +index a7fe1138973611..d83a3f47e20074 100644 +--- a/Documentation/admin-guide/kernel-parameters.txt ++++ b/Documentation/admin-guide/kernel-parameters.txt +@@ -4639,6 +4639,16 @@ + printk.time= Show timing data prefixed to each printk message line + Format: (1/Y/y=enable, 0/N/n=disable) + ++ proc_mem.force_override= [KNL] ++ Format: {always | ptrace | never} ++ Traditionally /proc/pid/mem allows memory permissions to be ++ overridden without restrictions. This option may be set to ++ restrict that. Can be one of: ++ - 'always': traditional behavior always allows mem overrides. ++ - 'ptrace': only allow mem overrides for active ptracers. ++ - 'never': never allow mem overrides. ++ If not specified, default is the CONFIG_PROC_MEM_* choice. ++ + processor.max_cstate= [HW,ACPI] + Limit processor to maximum C-state + max_cstate=9 overrides any DMI blacklist limit. +diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst +index 3084f5cf5e40db..3cf806733083c7 100644 +--- a/Documentation/arch/arm64/silicon-errata.rst ++++ b/Documentation/arch/arm64/silicon-errata.rst +@@ -141,6 +141,8 @@ stable kernels. 
+ +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Cortex-A715 | #3456084 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A720 | #3456091 | ARM64_ERRATUM_3194386 | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Cortex-A725 | #3456106 | ARM64_ERRATUM_3194386 | +@@ -177,6 +179,8 @@ stable kernels. + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-N2 | #3324339 | ARM64_ERRATUM_3194386 | + +----------------+-----------------+-----------------+-----------------------------+ ++| ARM | Neoverse-N3 | #3456111 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-V1 | #3324341 | ARM64_ERRATUM_3194386 | + +----------------+-----------------+-----------------+-----------------------------+ + | ARM | Neoverse-V2 | #3324336 | ARM64_ERRATUM_3194386 | +@@ -280,3 +284,5 @@ stable kernels. 
+ +----------------+-----------------+-----------------+-----------------------------+ + | Microsoft | Azure Cobalt 100| #2253138 | ARM64_ERRATUM_2253138 | + +----------------+-----------------+-----------------+-----------------------------+ ++| Microsoft | Azure Cobalt 100| #3324339 | ARM64_ERRATUM_3194386 | +++----------------+-----------------+-----------------+-----------------------------+ +diff --git a/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml b/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml +index 1d33d80af11c3c..652d696bc9e90b 100644 +--- a/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml ++++ b/Documentation/devicetree/bindings/net/xlnx,axi-ethernet.yaml +@@ -34,6 +34,7 @@ properties: + and length of the AXI DMA controller IO space, unless + axistream-connected is specified, in which case the reg + attribute of the node referenced by it is used. ++ minItems: 1 + maxItems: 2 + + interrupts: +@@ -165,7 +166,7 @@ examples: + clock-names = "s_axi_lite_clk", "axis_clk", "ref_clk", "mgt_clk"; + clocks = <&axi_clk>, <&axi_clk>, <&pl_enet_ref_clk>, <&mgt_clk>; + phy-mode = "mii"; +- reg = <0x00 0x40000000 0x00 0x40000>; ++ reg = <0x40000000 0x40000>; + xlnx,rxcsum = <0x2>; + xlnx,rxmem = <0x800>; + xlnx,txcsum = <0x2>; +diff --git a/Makefile b/Makefile +index 1e382bacd8eac0..6e297758842d9c 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 6 +-SUBLEVEL = 54 ++SUBLEVEL = 55 + EXTRAVERSION = + NAME = Hurr durr I'ma ninja sloth + +diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c +index b668c97663ec0c..f5b66f4cf45d96 100644 +--- a/arch/arm/crypto/aes-ce-glue.c ++++ b/arch/arm/crypto/aes-ce-glue.c +@@ -711,7 +711,7 @@ static int __init aes_init(void) + algname = aes_algs[i].base.cra_name + 2; + drvname = aes_algs[i].base.cra_driver_name + 2; + basename = aes_algs[i].base.cra_driver_name; +- simd = 
simd_skcipher_create_compat(algname, drvname, basename); ++ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename); + err = PTR_ERR(simd); + if (IS_ERR(simd)) + goto unregister_simds; +diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c +index f00f042ef3570e..0ca94b90bc4ec5 100644 +--- a/arch/arm/crypto/aes-neonbs-glue.c ++++ b/arch/arm/crypto/aes-neonbs-glue.c +@@ -539,7 +539,7 @@ static int __init aes_init(void) + algname = aes_algs[i].base.cra_name + 2; + drvname = aes_algs[i].base.cra_driver_name + 2; + basename = aes_algs[i].base.cra_driver_name; +- simd = simd_skcipher_create_compat(algname, drvname, basename); ++ simd = simd_skcipher_create_compat(aes_algs + i, algname, drvname, basename); + err = PTR_ERR(simd); + if (IS_ERR(simd)) + goto unregister_simds; +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 5ea7b331967108..eab866d6903347 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -191,7 +191,8 @@ config ARM64 + select HAVE_DMA_CONTIGUOUS + select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_ARGS \ +- if $(cc-option,-fpatchable-function-entry=2) ++ if (GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS || \ ++ CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS) + select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS \ + if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS + select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \ +@@ -262,12 +263,10 @@ config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS + def_bool CC_IS_CLANG + # https://github.com/ClangBuiltLinux/linux/issues/1507 + depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600)) +- select HAVE_DYNAMIC_FTRACE_WITH_ARGS + + config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS + def_bool CC_IS_GCC + depends on $(cc-option,-fpatchable-function-entry=2) +- select HAVE_DYNAMIC_FTRACE_WITH_ARGS + + config 64BIT + def_bool y +@@ -1080,6 +1079,7 @@ config ARM64_ERRATUM_3194386 + * ARM Cortex-A78C erratum 3324346 + * ARM Cortex-A78C erratum 3324347 + * 
ARM Cortex-A710 erratum 3324338 ++ * ARM Cortex-A715 erratum 3456084 + * ARM Cortex-A720 erratum 3456091 + * ARM Cortex-A725 erratum 3456106 + * ARM Cortex-X1 erratum 3324344 +@@ -1090,6 +1090,7 @@ config ARM64_ERRATUM_3194386 + * ARM Cortex-X925 erratum 3324334 + * ARM Neoverse-N1 erratum 3324349 + * ARM Neoverse N2 erratum 3324339 ++ * ARM Neoverse-N3 erratum 3456111 + * ARM Neoverse-V1 erratum 3324341 + * ARM Neoverse V2 erratum 3324336 + * ARM Neoverse-V3 erratum 3312417 +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index 5a7dfeb8e8eb55..488f8e75134959 100644 +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -94,6 +94,7 @@ + #define ARM_CPU_PART_NEOVERSE_V3 0xD84 + #define ARM_CPU_PART_CORTEX_X925 0xD85 + #define ARM_CPU_PART_CORTEX_A725 0xD87 ++#define ARM_CPU_PART_NEOVERSE_N3 0xD8E + + #define APM_CPU_PART_XGENE 0x000 + #define APM_CPU_VAR_POTENZA 0x00 +@@ -176,6 +177,7 @@ + #define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3) + #define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925) + #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725) ++#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3) + #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) + #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) + #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c +index f8d94902fbb59c..463b48d0f92500 100644 +--- a/arch/arm64/kernel/cpu_errata.c ++++ b/arch/arm64/kernel/cpu_errata.c +@@ -455,6 +455,7 @@ static const struct midr_range erratum_spec_ssbs_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), ++ 
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A720), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A725), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), +@@ -463,8 +464,10 @@ static const struct midr_range erratum_spec_ssbs_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_X3), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X4), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X925), ++ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N3), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V3), +diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig +index a3b52aaa83b336..e5f70642ed2062 100644 +--- a/arch/loongarch/configs/loongson3_defconfig ++++ b/arch/loongarch/configs/loongson3_defconfig +@@ -83,7 +83,6 @@ CONFIG_ZPOOL=y + CONFIG_ZSWAP=y + CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y + CONFIG_ZBUD=y +-CONFIG_Z3FOLD=y + CONFIG_ZSMALLOC=m + # CONFIG_COMPAT_BRK is not set + CONFIG_MEMORY_HOTPLUG=y +diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h +index 47c5a1991d1034..89b6beeda0b869 100644 +--- a/arch/parisc/include/asm/mman.h ++++ b/arch/parisc/include/asm/mman.h +@@ -11,4 +11,18 @@ static inline bool arch_memory_deny_write_exec_supported(void) + } + #define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported + ++static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags) ++{ ++ /* ++ * The stack on parisc grows upwards, so if userspace requests memory ++ * for a stack, mark it with VM_GROWSUP so that the stack expansion in ++ * the fault handler will work. 
++ */ ++ if (flags & MAP_STACK) ++ return VM_GROWSUP; ++ ++ return 0; ++} ++#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags) ++ + #endif /* __ASM_MMAN_H__ */ +diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S +index ab23e61a6f016a..ea57bcc21dc5fe 100644 +--- a/arch/parisc/kernel/entry.S ++++ b/arch/parisc/kernel/entry.S +@@ -1051,8 +1051,7 @@ ENTRY_CFI(intr_save) /* for os_hpmc */ + STREG %r16, PT_ISR(%r29) + STREG %r17, PT_IOR(%r29) + +-#if 0 && defined(CONFIG_64BIT) +- /* Revisit when we have 64-bit code above 4Gb */ ++#if defined(CONFIG_64BIT) + b,n intr_save2 + + skip_save_ior: +@@ -1060,8 +1059,7 @@ skip_save_ior: + * need to adjust iasq/iaoq here in the same way we adjusted isr/ior + * above. + */ +- extrd,u,* %r8,PSW_W_BIT,1,%r1 +- cmpib,COND(=),n 1,%r1,intr_save2 ++ bb,COND(>=),n %r8,PSW_W_BIT,intr_save2 + LDREG PT_IASQ0(%r29), %r16 + LDREG PT_IAOQ0(%r29), %r17 + /* adjust iasq/iaoq */ +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S +index 1f51aa9c8230cc..0fa81bf1466b15 100644 +--- a/arch/parisc/kernel/syscall.S ++++ b/arch/parisc/kernel/syscall.S +@@ -243,10 +243,10 @@ linux_gateway_entry: + + #ifdef CONFIG_64BIT + ldil L%sys_call_table, %r1 +- or,= %r2,%r2,%r2 +- addil L%(sys_call_table64-sys_call_table), %r1 ++ or,ev %r2,%r2,%r2 ++ ldil L%sys_call_table64, %r1 + ldo R%sys_call_table(%r1), %r19 +- or,= %r2,%r2,%r2 ++ or,ev %r2,%r2,%r2 + ldo R%sys_call_table64(%r1), %r19 + #else + load32 sys_call_table, %r19 +@@ -379,10 +379,10 @@ tracesys_next: + extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */ + + ldil L%sys_call_table, %r1 +- or,= %r2,%r2,%r2 +- addil L%(sys_call_table64-sys_call_table), %r1 ++ or,ev %r2,%r2,%r2 ++ ldil L%sys_call_table64, %r1 + ldo R%sys_call_table(%r1), %r19 +- or,= %r2,%r2,%r2 ++ or,ev %r2,%r2,%r2 + ldo R%sys_call_table64(%r1), %r19 + #else + load32 sys_call_table, %r19 +@@ -1327,6 +1327,8 @@ ENTRY(sys_call_table) + END(sys_call_table) + + #ifdef CONFIG_64BIT ++#undef 
__SYSCALL_WITH_COMPAT ++#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) + .align 8 + ENTRY(sys_call_table64) + #include /* 64-bit syscalls */ +diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig +index 6e7b9e8fd2251a..65e518dde2c2fe 100644 +--- a/arch/powerpc/configs/ppc64_defconfig ++++ b/arch/powerpc/configs/ppc64_defconfig +@@ -81,7 +81,6 @@ CONFIG_MODULE_SIG_SHA512=y + CONFIG_PARTITION_ADVANCED=y + CONFIG_BINFMT_MISC=m + CONFIG_ZSWAP=y +-CONFIG_Z3FOLD=y + CONFIG_ZSMALLOC=y + # CONFIG_SLAB_MERGE_DEFAULT is not set + CONFIG_SLAB_FREELIST_RANDOM=y +diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h +index a585c8e538ff0f..939daf6b695ef1 100644 +--- a/arch/powerpc/include/asm/vdso_datapage.h ++++ b/arch/powerpc/include/asm/vdso_datapage.h +@@ -111,6 +111,21 @@ extern struct vdso_arch_data *vdso_data; + addi \ptr, \ptr, (_vdso_datapage - 999b)@l + .endm + ++#include ++#include ++ ++.macro get_realdatapage ptr scratch ++ get_datapage \ptr ++#ifdef CONFIG_TIME_NS ++ lwz \scratch, VDSO_CLOCKMODE_OFFSET(\ptr) ++ xoris \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@h ++ xori \scratch, \scratch, VDSO_CLOCKMODE_TIMENS@l ++ cntlzw \scratch, \scratch ++ rlwinm \scratch, \scratch, PAGE_SHIFT - 5, 1 << PAGE_SHIFT ++ add \ptr, \ptr, \scratch ++#endif ++.endm ++ + #endif /* __ASSEMBLY__ */ + + #endif /* __KERNEL__ */ +diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c +index 9f14d95b8b32fd..2affd30468bc4c 100644 +--- a/arch/powerpc/kernel/asm-offsets.c ++++ b/arch/powerpc/kernel/asm-offsets.c +@@ -348,6 +348,8 @@ int main(void) + #else + OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, syscall_map); + #endif ++ OFFSET(VDSO_CLOCKMODE_OFFSET, vdso_arch_data, data[0].clock_mode); ++ DEFINE(VDSO_CLOCKMODE_TIMENS, VDSO_CLOCKMODE_TIMENS); + + #ifdef CONFIG_BUG + DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); +diff --git 
a/arch/powerpc/kernel/vdso/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S +index 0085ae464dac9c..3b2479bd2f9a1d 100644 +--- a/arch/powerpc/kernel/vdso/cacheflush.S ++++ b/arch/powerpc/kernel/vdso/cacheflush.S +@@ -30,7 +30,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE) + #ifdef CONFIG_PPC64 + mflr r12 + .cfi_register lr,r12 +- get_datapage r10 ++ get_realdatapage r10, r11 + mtlr r12 + .cfi_restore lr + #endif +diff --git a/arch/powerpc/kernel/vdso/datapage.S b/arch/powerpc/kernel/vdso/datapage.S +index db8e167f01667e..2b19b6201a33a8 100644 +--- a/arch/powerpc/kernel/vdso/datapage.S ++++ b/arch/powerpc/kernel/vdso/datapage.S +@@ -28,7 +28,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map) + mflr r12 + .cfi_register lr,r12 + mr. r4,r3 +- get_datapage r3 ++ get_realdatapage r3, r11 + mtlr r12 + #ifdef __powerpc64__ + addi r3,r3,CFG_SYSCALL_MAP64 +@@ -52,7 +52,7 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq) + .cfi_startproc + mflr r12 + .cfi_register lr,r12 +- get_datapage r3 ++ get_realdatapage r3, r11 + #ifndef __powerpc64__ + lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3) + #endif +diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c +index 47f8eabd1bee31..9873b916b23704 100644 +--- a/arch/powerpc/platforms/pseries/dlpar.c ++++ b/arch/powerpc/platforms/pseries/dlpar.c +@@ -334,23 +334,6 @@ int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) + { + int rc; + +- /* pseries error logs are in BE format, convert to cpu type */ +- switch (hp_elog->id_type) { +- case PSERIES_HP_ELOG_ID_DRC_COUNT: +- hp_elog->_drc_u.drc_count = +- be32_to_cpu(hp_elog->_drc_u.drc_count); +- break; +- case PSERIES_HP_ELOG_ID_DRC_INDEX: +- hp_elog->_drc_u.drc_index = +- be32_to_cpu(hp_elog->_drc_u.drc_index); +- break; +- case PSERIES_HP_ELOG_ID_DRC_IC: +- hp_elog->_drc_u.ic.count = +- be32_to_cpu(hp_elog->_drc_u.ic.count); +- hp_elog->_drc_u.ic.index = +- be32_to_cpu(hp_elog->_drc_u.ic.index); +- } +- + switch (hp_elog->resource) { + case 
PSERIES_HP_ELOG_RESOURCE_MEM: + rc = dlpar_memory(hp_elog); +diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c +index e62835a12d73fc..6838a0fcda296b 100644 +--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c ++++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c +@@ -757,7 +757,7 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) + u32 drc_index; + int rc; + +- drc_index = hp_elog->_drc_u.drc_index; ++ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); + + lock_device_hotplug(); + +diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c +index 4adca5b61daba8..95ff84c55cb144 100644 +--- a/arch/powerpc/platforms/pseries/hotplug-memory.c ++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c +@@ -811,16 +811,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) + case PSERIES_HP_ELOG_ACTION_ADD: + switch (hp_elog->id_type) { + case PSERIES_HP_ELOG_ID_DRC_COUNT: +- count = hp_elog->_drc_u.drc_count; ++ count = be32_to_cpu(hp_elog->_drc_u.drc_count); + rc = dlpar_memory_add_by_count(count); + break; + case PSERIES_HP_ELOG_ID_DRC_INDEX: +- drc_index = hp_elog->_drc_u.drc_index; ++ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); + rc = dlpar_memory_add_by_index(drc_index); + break; + case PSERIES_HP_ELOG_ID_DRC_IC: +- count = hp_elog->_drc_u.ic.count; +- drc_index = hp_elog->_drc_u.ic.index; ++ count = be32_to_cpu(hp_elog->_drc_u.ic.count); ++ drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); + rc = dlpar_memory_add_by_ic(count, drc_index); + break; + default: +@@ -832,16 +832,16 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) + case PSERIES_HP_ELOG_ACTION_REMOVE: + switch (hp_elog->id_type) { + case PSERIES_HP_ELOG_ID_DRC_COUNT: +- count = hp_elog->_drc_u.drc_count; ++ count = be32_to_cpu(hp_elog->_drc_u.drc_count); + rc = dlpar_memory_remove_by_count(count); + break; + case PSERIES_HP_ELOG_ID_DRC_INDEX: +- drc_index = 
hp_elog->_drc_u.drc_index; ++ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); + rc = dlpar_memory_remove_by_index(drc_index); + break; + case PSERIES_HP_ELOG_ID_DRC_IC: +- count = hp_elog->_drc_u.ic.count; +- drc_index = hp_elog->_drc_u.ic.index; ++ count = be32_to_cpu(hp_elog->_drc_u.ic.count); ++ drc_index = be32_to_cpu(hp_elog->_drc_u.ic.index); + rc = dlpar_memory_remove_by_ic(count, drc_index); + break; + default: +diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c +index 3c290b9ed01b39..0f1d45f32e4a44 100644 +--- a/arch/powerpc/platforms/pseries/pmem.c ++++ b/arch/powerpc/platforms/pseries/pmem.c +@@ -121,7 +121,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) + return -EINVAL; + } + +- drc_index = hp_elog->_drc_u.drc_index; ++ drc_index = be32_to_cpu(hp_elog->_drc_u.drc_index); + + lock_device_hotplug(); + +diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig +index fe30b24d52e189..1304992232adbe 100644 +--- a/arch/riscv/Kconfig ++++ b/arch/riscv/Kconfig +@@ -259,6 +259,11 @@ config GENERIC_HWEIGHT + config FIX_EARLYCON_MEM + def_bool MMU + ++config ILLEGAL_POINTER_VALUE ++ hex ++ default 0 if 32BIT ++ default 0xdead000000000000 if 64BIT ++ + config PGTABLE_LEVELS + int + default 5 if 64BIT +@@ -628,8 +633,7 @@ config IRQ_STACKS + config THREAD_SIZE_ORDER + int "Kernel stack size (in power-of-two numbers of page size)" if VMAP_STACK && EXPERT + range 0 4 +- default 1 if 32BIT && !KASAN +- default 3 if 64BIT && KASAN ++ default 1 if 32BIT + default 2 + help + Specify the Pages of thread stack size (from 4KB to 64KB), which also +diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h +index d18ce0113ca1f1..4fb84c2e94c650 100644 +--- a/arch/riscv/include/asm/thread_info.h ++++ b/arch/riscv/include/asm/thread_info.h +@@ -12,7 +12,12 @@ + #include + + /* thread information allocation */ +-#define THREAD_SIZE_ORDER CONFIG_THREAD_SIZE_ORDER ++#ifdef CONFIG_KASAN ++#define 
KASAN_STACK_ORDER 1 ++#else ++#define KASAN_STACK_ORDER 0 ++#endif ++#define THREAD_SIZE_ORDER (CONFIG_THREAD_SIZE_ORDER + KASAN_STACK_ORDER) + #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) + + /* +diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S +index 0ffb072be95615..0bbec1c75cd0be 100644 +--- a/arch/x86/crypto/sha256-avx2-asm.S ++++ b/arch/x86/crypto/sha256-avx2-asm.S +@@ -592,22 +592,22 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx) + leaq K256+0*32(%rip), INP ## reuse INP as scratch reg + vpaddd (INP, SRND), X0, XFER + vmovdqa XFER, 0*32+_XFER(%rsp, SRND) +- FOUR_ROUNDS_AND_SCHED _XFER + 0*32 ++ FOUR_ROUNDS_AND_SCHED (_XFER + 0*32) + + leaq K256+1*32(%rip), INP + vpaddd (INP, SRND), X0, XFER + vmovdqa XFER, 1*32+_XFER(%rsp, SRND) +- FOUR_ROUNDS_AND_SCHED _XFER + 1*32 ++ FOUR_ROUNDS_AND_SCHED (_XFER + 1*32) + + leaq K256+2*32(%rip), INP + vpaddd (INP, SRND), X0, XFER + vmovdqa XFER, 2*32+_XFER(%rsp, SRND) +- FOUR_ROUNDS_AND_SCHED _XFER + 2*32 ++ FOUR_ROUNDS_AND_SCHED (_XFER + 2*32) + + leaq K256+3*32(%rip), INP + vpaddd (INP, SRND), X0, XFER + vmovdqa XFER, 3*32+_XFER(%rsp, SRND) +- FOUR_ROUNDS_AND_SCHED _XFER + 3*32 ++ FOUR_ROUNDS_AND_SCHED (_XFER + 3*32) + + add $4*32, SRND + cmp $3*4*32, SRND +@@ -618,12 +618,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx) + leaq K256+0*32(%rip), INP + vpaddd (INP, SRND), X0, XFER + vmovdqa XFER, 0*32+_XFER(%rsp, SRND) +- DO_4ROUNDS _XFER + 0*32 ++ DO_4ROUNDS (_XFER + 0*32) + + leaq K256+1*32(%rip), INP + vpaddd (INP, SRND), X1, XFER + vmovdqa XFER, 1*32+_XFER(%rsp, SRND) +- DO_4ROUNDS _XFER + 1*32 ++ DO_4ROUNDS (_XFER + 1*32) + add $2*32, SRND + + vmovdqa X2, X0 +@@ -651,8 +651,8 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx) + xor SRND, SRND + .align 16 + .Lloop3: +- DO_4ROUNDS _XFER + 0*32 + 16 +- DO_4ROUNDS _XFER + 1*32 + 16 ++ DO_4ROUNDS (_XFER + 0*32 + 16) ++ DO_4ROUNDS (_XFER + 1*32 + 16) + add $2*32, SRND + cmp $4*4*32, SRND + jb .Lloop3 +diff --git a/arch/x86/events/core.c 
b/arch/x86/events/core.c +index 8811fedc9776a2..150a365b4fbc89 100644 +--- a/arch/x86/events/core.c ++++ b/arch/x86/events/core.c +@@ -41,6 +41,8 @@ + #include + #include + #include ++#include ++#include + + #include "perf_event.h" + +@@ -2816,6 +2818,46 @@ static unsigned long get_segment_base(unsigned int segment) + return get_desc_base(desc); + } + ++#ifdef CONFIG_UPROBES ++/* ++ * Heuristic-based check if uprobe is installed at the function entry. ++ * ++ * Under assumption of user code being compiled with frame pointers, ++ * `push %rbp/%ebp` is a good indicator that we indeed are. ++ * ++ * Similarly, `endbr64` (assuming 64-bit mode) is also a common pattern. ++ * If we get this wrong, captured stack trace might have one extra bogus ++ * entry, but the rest of stack trace will still be meaningful. ++ */ ++static bool is_uprobe_at_func_entry(struct pt_regs *regs) ++{ ++ struct arch_uprobe *auprobe; ++ ++ if (!current->utask) ++ return false; ++ ++ auprobe = current->utask->auprobe; ++ if (!auprobe) ++ return false; ++ ++ /* push %rbp/%ebp */ ++ if (auprobe->insn[0] == 0x55) ++ return true; ++ ++ /* endbr64 (64-bit only) */ ++ if (user_64bit_mode(regs) && is_endbr(*(u32 *)auprobe->insn)) ++ return true; ++ ++ return false; ++} ++ ++#else ++static bool is_uprobe_at_func_entry(struct pt_regs *regs) ++{ ++ return false; ++} ++#endif /* CONFIG_UPROBES */ ++ + #ifdef CONFIG_IA32_EMULATION + + #include +@@ -2827,6 +2869,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent + unsigned long ss_base, cs_base; + struct stack_frame_ia32 frame; + const struct stack_frame_ia32 __user *fp; ++ u32 ret_addr; + + if (user_64bit_mode(regs)) + return 0; +@@ -2836,6 +2879,12 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent + + fp = compat_ptr(ss_base + regs->bp); + pagefault_disable(); ++ ++ /* see perf_callchain_user() below for why we do this */ ++ if (is_uprobe_at_func_entry(regs) && ++ !get_user(ret_addr, 
(const u32 __user *)regs->sp)) ++ perf_callchain_store(entry, ret_addr); ++ + while (entry->nr < entry->max_stack) { + if (!valid_user_frame(fp, sizeof(frame))) + break; +@@ -2864,6 +2913,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs + { + struct stack_frame frame; + const struct stack_frame __user *fp; ++ unsigned long ret_addr; + + if (perf_guest_state()) { + /* TODO: We don't support guest os callchain now */ +@@ -2887,6 +2937,19 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs + return; + + pagefault_disable(); ++ ++ /* ++ * If we are called from uprobe handler, and we are indeed at the very ++ * entry to user function (which is normally a `push %rbp` instruction, ++ * under assumption of application being compiled with frame pointers), ++ * we should read return address from *regs->sp before proceeding ++ * to follow frame pointers, otherwise we'll skip immediate caller ++ * as %rbp is not yet setup. ++ */ ++ if (is_uprobe_at_func_entry(regs) && ++ !get_user(ret_addr, (const unsigned long __user *)regs->sp)) ++ perf_callchain_store(entry, ret_addr); ++ + while (entry->nr < entry->max_stack) { + if (!valid_user_frame(fp, sizeof(frame))) + break; +diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h +index 611fa41711affd..eccc75bc9c4f3d 100644 +--- a/arch/x86/include/asm/fpu/signal.h ++++ b/arch/x86/include/asm/fpu/signal.h +@@ -29,7 +29,7 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame, + + unsigned long fpu__get_fpstate_size(void); + +-extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size); ++extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size, u32 pkru); + extern void fpu__clear_user_states(struct fpu *fpu); + extern bool fpu__restore_sig(void __user *buf, int ia32_frame); + +diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h +index 03bb950eba690f..228a42585d5c97 
100644 +--- a/arch/x86/include/asm/syscall.h ++++ b/arch/x86/include/asm/syscall.h +@@ -82,7 +82,12 @@ static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) + { +- memcpy(args, ®s->bx, 6 * sizeof(args[0])); ++ args[0] = regs->bx; ++ args[1] = regs->cx; ++ args[2] = regs->dx; ++ args[3] = regs->si; ++ args[4] = regs->di; ++ args[5] = regs->bp; + } + + static inline int syscall_get_arch(struct task_struct *task) +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index 00da6cf6b07dcb..d0c5325d175102 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -352,27 +352,26 @@ static void ioapic_mask_entry(int apic, int pin) + * shared ISA-space IRQs, so we have to support them. We are super + * fast in the common case, and fast for shared ISA-space IRQs. + */ +-static int __add_pin_to_irq_node(struct mp_chip_data *data, +- int node, int apic, int pin) ++static bool add_pin_to_irq_node(struct mp_chip_data *data, int node, int apic, int pin) + { + struct irq_pin_list *entry; + +- /* don't allow duplicates */ +- for_each_irq_pin(entry, data->irq_2_pin) ++ /* Don't allow duplicates */ ++ for_each_irq_pin(entry, data->irq_2_pin) { + if (entry->apic == apic && entry->pin == pin) +- return 0; ++ return true; ++ } + + entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node); + if (!entry) { +- pr_err("can not alloc irq_pin_list (%d,%d,%d)\n", +- node, apic, pin); +- return -ENOMEM; ++ pr_err("Cannot allocate irq_pin_list (%d,%d,%d)\n", node, apic, pin); ++ return false; + } ++ + entry->apic = apic; + entry->pin = pin; + list_add_tail(&entry->list, &data->irq_2_pin); +- +- return 0; ++ return true; + } + + static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin) +@@ -387,13 +386,6 @@ static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin) + } + } + +-static void add_pin_to_irq_node(struct mp_chip_data 
*data, +- int node, int apic, int pin) +-{ +- if (__add_pin_to_irq_node(data, node, apic, pin)) +- panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); +-} +- + /* + * Reroute an IRQ to a different pin. + */ +@@ -1002,8 +994,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain, + if (irq_data && irq_data->parent_data) { + if (!mp_check_pin_attr(irq, info)) + return -EBUSY; +- if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic, +- info->ioapic.pin)) ++ if (!add_pin_to_irq_node(irq_data->chip_data, node, ioapic, info->ioapic.pin)) + return -ENOMEM; + } else { + info->flags |= X86_IRQ_ALLOC_LEGACY; +@@ -3037,10 +3028,8 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, + return -ENOMEM; + + ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info); +- if (ret < 0) { +- kfree(data); +- return ret; +- } ++ if (ret < 0) ++ goto free_data; + + INIT_LIST_HEAD(&data->irq_2_pin); + irq_data->hwirq = info->ioapic.pin; +@@ -3049,7 +3038,10 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, + irq_data->chip_data = data; + mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info); + +- add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); ++ if (!add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin)) { ++ ret = -ENOMEM; ++ goto free_irqs; ++ } + + mp_preconfigure_entry(data); + mp_register_handler(virq, data->is_level); +@@ -3064,6 +3056,12 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, + ioapic, mpc_ioapic_id(ioapic), pin, virq, + data->is_level, data->active_low); + return 0; ++ ++free_irqs: ++ irq_domain_free_irqs_parent(domain, virq, nr_irqs); ++free_data: ++ kfree(data); ++ return ret; + } + + void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, +diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c +index 247f2225aa9f36..2b3b9e140dd41b 100644 +--- a/arch/x86/kernel/fpu/signal.c ++++ 
b/arch/x86/kernel/fpu/signal.c +@@ -156,7 +156,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, + return !err; + } + +-static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) ++static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru) + { + if (use_xsave()) + return xsave_to_user_sigframe(buf); +@@ -185,7 +185,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf) + * For [f]xsave state, update the SW reserved fields in the [f]xsave frame + * indicating the absence/presence of the extended state to the user. + */ +-bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) ++bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size, u32 pkru) + { + struct task_struct *tsk = current; + struct fpstate *fpstate = tsk->thread.fpu.fpstate; +@@ -228,7 +228,7 @@ bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size) + fpregs_restore_userregs(); + + pagefault_disable(); +- ret = copy_fpregs_to_sigframe(buf_fx); ++ ret = copy_fpregs_to_sigframe(buf_fx, pkru); + pagefault_enable(); + fpregs_unlock(); + +diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c +index d287fe290c9ab5..2fa12d1dc67602 100644 +--- a/arch/x86/kernel/machine_kexec_64.c ++++ b/arch/x86/kernel/machine_kexec_64.c +@@ -28,6 +28,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_ACPI + /* +@@ -90,6 +91,8 @@ map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p) + { + #ifdef CONFIG_EFI + unsigned long mstart, mend; ++ void *kaddr; ++ int ret; + + if (!efi_enabled(EFI_BOOT)) + return 0; +@@ -105,6 +108,30 @@ map_efi_systab(struct x86_mapping_info *info, pgd_t *level4p) + if (!mstart) + return 0; + ++ ret = kernel_ident_mapping_init(info, level4p, mstart, mend); ++ if (ret) ++ return ret; ++ ++ kaddr = memremap(mstart, mend - mstart, MEMREMAP_WB); ++ if (!kaddr) { ++ pr_err("Could not map UEFI system 
table\n"); ++ return -ENOMEM; ++ } ++ ++ mstart = efi_config_table; ++ ++ if (efi_enabled(EFI_64BIT)) { ++ efi_system_table_64_t *stbl = (efi_system_table_64_t *)kaddr; ++ ++ mend = mstart + sizeof(efi_config_table_64_t) * stbl->nr_tables; ++ } else { ++ efi_system_table_32_t *stbl = (efi_system_table_32_t *)kaddr; ++ ++ mend = mstart + sizeof(efi_config_table_32_t) * stbl->nr_tables; ++ } ++ ++ memunmap(kaddr); ++ + return kernel_ident_mapping_init(info, level4p, mstart, mend); + #endif + return 0; +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c +index 65fe2094da59b8..876d3b30c2c774 100644 +--- a/arch/x86/kernel/signal.c ++++ b/arch/x86/kernel/signal.c +@@ -83,6 +83,7 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size, + unsigned long math_size = 0; + unsigned long sp = regs->sp; + unsigned long buf_fx = 0; ++ u32 pkru = read_pkru(); + + /* redzone */ + if (!ia32_frame) +@@ -138,7 +139,7 @@ get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size, + } + + /* save i387 and extended state */ +- if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size)) ++ if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size, pkru)) + return (void __user *)-1L; + + return (void __user *)sp; +diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c +index 23d8aaf8d9fd19..449a6ed0b8c982 100644 +--- a/arch/x86/kernel/signal_64.c ++++ b/arch/x86/kernel/signal_64.c +@@ -260,13 +260,13 @@ SYSCALL_DEFINE0(rt_sigreturn) + + set_current_blocked(&set); + +- if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) ++ if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + +- if (restore_signal_shadow_stack()) ++ if (!restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags)) + goto badframe; + +- if (restore_altstack(&frame->uc.uc_stack)) ++ if (restore_signal_shadow_stack()) + goto badframe; + + return regs->ax; +diff --git a/block/blk-iocost.c b/block/blk-iocost.c 
+index 0dca77591d66c9..c3cb9c20b306cf 100644 +--- a/block/blk-iocost.c ++++ b/block/blk-iocost.c +@@ -2076,7 +2076,7 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, + struct ioc_now *now) + { + struct ioc_gq *iocg; +- u64 dur, usage_pct, nr_cycles; ++ u64 dur, usage_pct, nr_cycles, nr_cycles_shift; + + /* if no debtor, reset the cycle */ + if (!nr_debtors) { +@@ -2138,10 +2138,12 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, + old_debt = iocg->abs_vdebt; + old_delay = iocg->delay; + ++ nr_cycles_shift = min_t(u64, nr_cycles, BITS_PER_LONG - 1); + if (iocg->abs_vdebt) +- iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1; ++ iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles_shift ?: 1; ++ + if (iocg->delay) +- iocg->delay = iocg->delay >> nr_cycles ?: 1; ++ iocg->delay = iocg->delay >> nr_cycles_shift ?: 1; + + iocg_kick_waitq(iocg, true, now); + +diff --git a/crypto/simd.c b/crypto/simd.c +index edaa479a1ec5e5..d109866641a265 100644 +--- a/crypto/simd.c ++++ b/crypto/simd.c +@@ -136,27 +136,19 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm) + return 0; + } + +-struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, ++struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg, ++ const char *algname, + const char *drvname, + const char *basename) + { + struct simd_skcipher_alg *salg; +- struct crypto_skcipher *tfm; +- struct skcipher_alg *ialg; + struct skcipher_alg *alg; + int err; + +- tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL, +- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); +- if (IS_ERR(tfm)) +- return ERR_CAST(tfm); +- +- ialg = crypto_skcipher_alg(tfm); +- + salg = kzalloc(sizeof(*salg), GFP_KERNEL); + if (!salg) { + salg = ERR_PTR(-ENOMEM); +- goto out_put_tfm; ++ goto out; + } + + salg->ialg_name = basename; +@@ -195,30 +187,16 @@ struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname, + if (err) + goto 
out_free_salg; + +-out_put_tfm: +- crypto_free_skcipher(tfm); ++out: + return salg; + + out_free_salg: + kfree(salg); + salg = ERR_PTR(err); +- goto out_put_tfm; ++ goto out; + } + EXPORT_SYMBOL_GPL(simd_skcipher_create_compat); + +-struct simd_skcipher_alg *simd_skcipher_create(const char *algname, +- const char *basename) +-{ +- char drvname[CRYPTO_MAX_ALG_NAME]; +- +- if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= +- CRYPTO_MAX_ALG_NAME) +- return ERR_PTR(-ENAMETOOLONG); +- +- return simd_skcipher_create_compat(algname, drvname, basename); +-} +-EXPORT_SYMBOL_GPL(simd_skcipher_create); +- + void simd_skcipher_free(struct simd_skcipher_alg *salg) + { + crypto_unregister_skcipher(&salg->alg); +@@ -246,7 +224,7 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count, + algname = algs[i].base.cra_name + 2; + drvname = algs[i].base.cra_driver_name + 2; + basename = algs[i].base.cra_driver_name; +- simd = simd_skcipher_create_compat(algname, drvname, basename); ++ simd = simd_skcipher_create_compat(algs + i, algname, drvname, basename); + err = PTR_ERR(simd); + if (IS_ERR(simd)) + goto err_unregister; +@@ -383,27 +361,19 @@ static int simd_aead_init(struct crypto_aead *tfm) + return 0; + } + +-struct simd_aead_alg *simd_aead_create_compat(const char *algname, +- const char *drvname, +- const char *basename) ++static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg, ++ const char *algname, ++ const char *drvname, ++ const char *basename) + { + struct simd_aead_alg *salg; +- struct crypto_aead *tfm; +- struct aead_alg *ialg; + struct aead_alg *alg; + int err; + +- tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL, +- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC); +- if (IS_ERR(tfm)) +- return ERR_CAST(tfm); +- +- ialg = crypto_aead_alg(tfm); +- + salg = kzalloc(sizeof(*salg), GFP_KERNEL); + if (!salg) { + salg = ERR_PTR(-ENOMEM); +- goto out_put_tfm; ++ goto out; + } + + salg->ialg_name = basename; +@@ -442,36 
+412,20 @@ struct simd_aead_alg *simd_aead_create_compat(const char *algname, + if (err) + goto out_free_salg; + +-out_put_tfm: +- crypto_free_aead(tfm); ++out: + return salg; + + out_free_salg: + kfree(salg); + salg = ERR_PTR(err); +- goto out_put_tfm; +-} +-EXPORT_SYMBOL_GPL(simd_aead_create_compat); +- +-struct simd_aead_alg *simd_aead_create(const char *algname, +- const char *basename) +-{ +- char drvname[CRYPTO_MAX_ALG_NAME]; +- +- if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >= +- CRYPTO_MAX_ALG_NAME) +- return ERR_PTR(-ENAMETOOLONG); +- +- return simd_aead_create_compat(algname, drvname, basename); ++ goto out; + } +-EXPORT_SYMBOL_GPL(simd_aead_create); + +-void simd_aead_free(struct simd_aead_alg *salg) ++static void simd_aead_free(struct simd_aead_alg *salg) + { + crypto_unregister_aead(&salg->alg); + kfree(salg); + } +-EXPORT_SYMBOL_GPL(simd_aead_free); + + int simd_register_aeads_compat(struct aead_alg *algs, int count, + struct simd_aead_alg **simd_algs) +@@ -493,7 +447,7 @@ int simd_register_aeads_compat(struct aead_alg *algs, int count, + algname = algs[i].base.cra_name + 2; + drvname = algs[i].base.cra_driver_name + 2; + basename = algs[i].base.cra_driver_name; +- simd = simd_aead_create_compat(algname, drvname, basename); ++ simd = simd_aead_create_compat(algs + i, algname, drvname, basename); + err = PTR_ERR(simd); + if (IS_ERR(simd)) + goto err_unregister; +diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c +index a277bbae78fc45..3b35d262ddd43a 100644 +--- a/drivers/accel/ivpu/ivpu_fw.c ++++ b/drivers/accel/ivpu/ivpu_fw.c +@@ -55,6 +55,10 @@ static struct { + { IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" }, + }; + ++/* Production fw_names from the table above */ ++MODULE_FIRMWARE("intel/vpu/vpu_37xx_v0.0.bin"); ++MODULE_FIRMWARE("intel/vpu/vpu_40xx_v0.0.bin"); ++ + static int ivpu_fw_request(struct ivpu_device *vdev) + { + int ret = -ENOENT; +diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c 
+index 7a453c5ff303a9..71e25c79897628 100644 +--- a/drivers/acpi/acpi_pad.c ++++ b/drivers/acpi/acpi_pad.c +@@ -131,8 +131,10 @@ static void exit_round_robin(unsigned int tsk_index) + { + struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); + +- cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); +- tsk_in_cpu[tsk_index] = -1; ++ if (tsk_in_cpu[tsk_index] != -1) { ++ cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); ++ tsk_in_cpu[tsk_index] = -1; ++ } + } + + static unsigned int idle_pct = 5; /* percentage */ +diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c +index 2b84ac093698a3..8dbab693204998 100644 +--- a/drivers/acpi/acpica/dbconvert.c ++++ b/drivers/acpi/acpica/dbconvert.c +@@ -174,6 +174,8 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object) + elements = + ACPI_ALLOCATE_ZEROED(DB_DEFAULT_PKG_ELEMENTS * + sizeof(union acpi_object)); ++ if (!elements) ++ return (AE_NO_MEMORY); + + this = string; + for (i = 0; i < (DB_DEFAULT_PKG_ELEMENTS - 1); i++) { +diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c +index 08196fa17080e2..82b1fa2d201fed 100644 +--- a/drivers/acpi/acpica/exprep.c ++++ b/drivers/acpi/acpica/exprep.c +@@ -437,6 +437,9 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info) + + if (info->connection_node) { + second_desc = info->connection_node->object; ++ if (second_desc == NULL) { ++ break; ++ } + if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) { + status = + acpi_ds_get_buffer_arguments(second_desc); +diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c +index 422c074ed2897b..28582adfc0acaf 100644 +--- a/drivers/acpi/acpica/psargs.c ++++ b/drivers/acpi/acpica/psargs.c +@@ -25,6 +25,8 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state); + static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state + *parser_state); + ++static void 
acpi_ps_free_field_list(union acpi_parse_object *start); ++ + /******************************************************************************* + * + * FUNCTION: acpi_ps_get_next_package_length +@@ -683,6 +685,39 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state + return_PTR(field); + } + ++/******************************************************************************* ++ * ++ * FUNCTION: acpi_ps_free_field_list ++ * ++ * PARAMETERS: start - First Op in field list ++ * ++ * RETURN: None. ++ * ++ * DESCRIPTION: Free all Op objects inside a field list. ++ * ++ ******************************************************************************/ ++ ++static void acpi_ps_free_field_list(union acpi_parse_object *start) ++{ ++ union acpi_parse_object *cur = start; ++ union acpi_parse_object *next; ++ union acpi_parse_object *arg; ++ ++ while (cur) { ++ next = cur->common.next; ++ ++ /* AML_INT_CONNECTION_OP can have a single argument */ ++ ++ arg = acpi_ps_get_arg(cur, 0); ++ if (arg) { ++ acpi_ps_free_op(arg); ++ } ++ ++ acpi_ps_free_op(cur); ++ cur = next; ++ } ++} ++ + /******************************************************************************* + * + * FUNCTION: acpi_ps_get_next_arg +@@ -751,6 +786,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, + while (parser_state->aml < parser_state->pkg_end) { + field = acpi_ps_get_next_field(parser_state); + if (!field) { ++ if (arg) { ++ acpi_ps_free_field_list(arg); ++ } ++ + return_ACPI_STATUS(AE_NO_MEMORY); + } + +@@ -820,6 +859,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, + acpi_ps_get_next_namepath(walk_state, parser_state, + arg, + ACPI_NOT_METHOD_CALL); ++ if (ACPI_FAILURE(status)) { ++ acpi_ps_free_op(arg); ++ return_ACPI_STATUS(status); ++ } + } else { + /* Single complex argument, nothing returned */ + +@@ -854,6 +897,10 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state, + acpi_ps_get_next_namepath(walk_state, parser_state, + arg, + 
ACPI_POSSIBLE_METHOD_CALL); ++ if (ACPI_FAILURE(status)) { ++ acpi_ps_free_op(arg); ++ return_ACPI_STATUS(status); ++ } + + if (arg->common.aml_opcode == AML_INT_METHODCALL_OP) { + +diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c +index 7f7ad94f22b911..e3cbaf3c3bbc15 100644 +--- a/drivers/acpi/battery.c ++++ b/drivers/acpi/battery.c +@@ -703,28 +703,35 @@ static LIST_HEAD(acpi_battery_list); + static LIST_HEAD(battery_hook_list); + static DEFINE_MUTEX(hook_mutex); + +-static void __battery_hook_unregister(struct acpi_battery_hook *hook, int lock) ++static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook) + { + struct acpi_battery *battery; ++ + /* + * In order to remove a hook, we first need to + * de-register all the batteries that are registered. + */ +- if (lock) +- mutex_lock(&hook_mutex); + list_for_each_entry(battery, &acpi_battery_list, list) { + if (!hook->remove_battery(battery->bat, hook)) + power_supply_changed(battery->bat); + } +- list_del(&hook->list); +- if (lock) +- mutex_unlock(&hook_mutex); ++ list_del_init(&hook->list); ++ + pr_info("extension unregistered: %s\n", hook->name); + } + + void battery_hook_unregister(struct acpi_battery_hook *hook) + { +- __battery_hook_unregister(hook, 1); ++ mutex_lock(&hook_mutex); ++ /* ++ * Ignore already unregistered battery hooks. This might happen ++ * if a battery hook was previously unloaded due to an error when ++ * adding a new battery. ++ */ ++ if (!list_empty(&hook->list)) ++ battery_hook_unregister_unlocked(hook); ++ ++ mutex_unlock(&hook_mutex); + } + EXPORT_SYMBOL_GPL(battery_hook_unregister); + +@@ -733,7 +740,6 @@ void battery_hook_register(struct acpi_battery_hook *hook) + struct acpi_battery *battery; + + mutex_lock(&hook_mutex); +- INIT_LIST_HEAD(&hook->list); + list_add(&hook->list, &battery_hook_list); + /* + * Now that the driver is registered, we need +@@ -750,7 +756,7 @@ void battery_hook_register(struct acpi_battery_hook *hook) + * hooks. 
+ */ + pr_err("extension failed to load: %s", hook->name); +- __battery_hook_unregister(hook, 0); ++ battery_hook_unregister_unlocked(hook); + goto end; + } + +@@ -789,7 +795,7 @@ static void battery_hook_add_battery(struct acpi_battery *battery) + */ + pr_err("error in extension, unloading: %s", + hook_node->name); +- __battery_hook_unregister(hook_node, 0); ++ battery_hook_unregister_unlocked(hook_node); + } + } + mutex_unlock(&hook_mutex); +@@ -822,7 +828,7 @@ static void __exit battery_hook_exit(void) + * need to remove the hooks. + */ + list_for_each_entry_safe(hook, ptr, &battery_hook_list, list) { +- __battery_hook_unregister(hook, 1); ++ battery_hook_unregister(hook); + } + mutex_destroy(&hook_mutex); + } +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c +index 28217a995f795c..7aced0b9bad7cc 100644 +--- a/drivers/acpi/cppc_acpi.c ++++ b/drivers/acpi/cppc_acpi.c +@@ -100,6 +100,11 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); + (cpc)->cpc_entry.reg.space_id == \ + ACPI_ADR_SPACE_PLATFORM_COMM) + ++/* Check if a CPC register is in FFH */ ++#define CPC_IN_FFH(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ ++ (cpc)->cpc_entry.reg.space_id == \ ++ ACPI_ADR_SPACE_FIXED_HARDWARE) ++ + /* Check if a CPC register is in SystemMemory */ + #define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ + (cpc)->cpc_entry.reg.space_id == \ +@@ -1514,9 +1519,12 @@ int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable) + /* after writing CPC, transfer the ownership of PCC to platform */ + ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE); + up_write(&pcc_ss_data->pcc_lock); ++ } else if (osc_cpc_flexible_adr_space_confirmed && ++ CPC_SUPPORTED(epp_set_reg) && CPC_IN_FFH(epp_set_reg)) { ++ ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf); + } else { + ret = -ENOTSUPP; +- pr_debug("_CPC in PCC is not supported\n"); ++ pr_debug("_CPC in PCC and _CPC in FFH are not supported\n"); + } + + return ret; +diff --git 
a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index 35e22a2af4e4b9..115994dfefec1e 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -783,6 +783,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, + unsigned long tmp; + int ret = 0; + ++ if (t->rdata) ++ memset(t->rdata, 0, t->rlen); ++ + /* start transaction */ + spin_lock_irqsave(&ec->lock, tmp); + /* Enable GPE for command processing (IBF=0/OBF=1) */ +@@ -819,8 +822,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) + + if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) + return -EINVAL; +- if (t->rdata) +- memset(t->rdata, 0, t->rlen); + + mutex_lock(&ec->mutex); + if (ec->global_lock) { +@@ -847,7 +848,7 @@ static int acpi_ec_burst_enable(struct acpi_ec *ec) + .wdata = NULL, .rdata = &d, + .wlen = 0, .rlen = 1}; + +- return acpi_ec_transaction(ec, &t); ++ return acpi_ec_transaction_unlocked(ec, &t); + } + + static int acpi_ec_burst_disable(struct acpi_ec *ec) +@@ -857,7 +858,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec) + .wlen = 0, .rlen = 0}; + + return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ? 
+- acpi_ec_transaction(ec, &t) : 0; ++ acpi_ec_transaction_unlocked(ec, &t) : 0; + } + + static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) +@@ -873,6 +874,19 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data) + return result; + } + ++static int acpi_ec_read_unlocked(struct acpi_ec *ec, u8 address, u8 *data) ++{ ++ int result; ++ u8 d; ++ struct transaction t = {.command = ACPI_EC_COMMAND_READ, ++ .wdata = &address, .rdata = &d, ++ .wlen = 1, .rlen = 1}; ++ ++ result = acpi_ec_transaction_unlocked(ec, &t); ++ *data = d; ++ return result; ++} ++ + static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) + { + u8 wdata[2] = { address, data }; +@@ -883,6 +897,16 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) + return acpi_ec_transaction(ec, &t); + } + ++static int acpi_ec_write_unlocked(struct acpi_ec *ec, u8 address, u8 data) ++{ ++ u8 wdata[2] = { address, data }; ++ struct transaction t = {.command = ACPI_EC_COMMAND_WRITE, ++ .wdata = wdata, .rdata = NULL, ++ .wlen = 2, .rlen = 0}; ++ ++ return acpi_ec_transaction_unlocked(ec, &t); ++} ++ + int ec_read(u8 addr, u8 *val) + { + int err; +@@ -1323,6 +1347,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, + struct acpi_ec *ec = handler_context; + int result = 0, i, bytes = bits / 8; + u8 *value = (u8 *)value64; ++ u32 glk; + + if ((address > 0xFF) || !value || !handler_context) + return AE_BAD_PARAMETER; +@@ -1330,13 +1355,25 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, + if (function != ACPI_READ && function != ACPI_WRITE) + return AE_BAD_PARAMETER; + ++ mutex_lock(&ec->mutex); ++ ++ if (ec->global_lock) { ++ acpi_status status; ++ ++ status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); ++ if (ACPI_FAILURE(status)) { ++ result = -ENODEV; ++ goto unlock; ++ } ++ } ++ + if (ec->busy_polling || bits > 8) + acpi_ec_burst_enable(ec); + + for (i = 0; i < bytes; ++i, ++address, ++value) { + result = 
(function == ACPI_READ) ? +- acpi_ec_read(ec, address, value) : +- acpi_ec_write(ec, address, *value); ++ acpi_ec_read_unlocked(ec, address, value) : ++ acpi_ec_write_unlocked(ec, address, *value); + if (result < 0) + break; + } +@@ -1344,6 +1381,12 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, + if (ec->busy_polling || bits > 8) + acpi_ec_burst_disable(ec); + ++ if (ec->global_lock) ++ acpi_release_global_lock(glk); ++ ++unlock: ++ mutex_unlock(&ec->mutex); ++ + switch (result) { + case -EINVAL: + return AE_BAD_PARAMETER; +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c +index 3a13b22c8d9a54..95233b413c1ac5 100644 +--- a/drivers/acpi/resource.c ++++ b/drivers/acpi/resource.c +@@ -439,6 +439,13 @@ static const struct dmi_system_id asus_laptop[] = { + DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), + }, + }, ++ { ++ /* Asus Vivobook X1704VAP */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_MATCH(DMI_BOARD_NAME, "X1704VAP"), ++ }, ++ }, + { + .ident = "Asus ExpertBook B1402CBA", + .matches = { +@@ -502,6 +509,13 @@ static const struct dmi_system_id maingear_laptop[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"), + } + }, ++ { ++ /* Asus ExpertBook B2502CVA */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_MATCH(DMI_BOARD_NAME, "B2502CVA"), ++ }, ++ }, + { + /* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */ + .matches = { +diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c +index 16ab2d9ef67f34..e96afb1622f95f 100644 +--- a/drivers/acpi/video_detect.c ++++ b/drivers/acpi/video_detect.c +@@ -260,6 +260,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "PCG-FRV35"), + }, + }, ++ { ++ .callback = video_detect_force_vendor, ++ /* Panasonic Toughbook CF-18 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Matsushita Electric Industrial"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), ++ }, ++ }, + + /* + * 
Toshiba models with Transflective display, these need to use +diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c +index 549ff24a982311..4edddf6bcc1507 100644 +--- a/drivers/ata/pata_serverworks.c ++++ b/drivers/ata/pata_serverworks.c +@@ -46,10 +46,11 @@ + #define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */ + #define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */ + +-/* Seagate Barracuda ATA IV Family drives in UDMA mode 5 +- * can overrun their FIFOs when used with the CSB5 */ +- +-static const char *csb_bad_ata100[] = { ++/* ++ * Seagate Barracuda ATA IV Family drives in UDMA mode 5 ++ * can overrun their FIFOs when used with the CSB5. ++ */ ++static const char * const csb_bad_ata100[] = { + "ST320011A", + "ST340016A", + "ST360021A", +@@ -163,10 +164,11 @@ static unsigned int serverworks_osb4_filter(struct ata_device *adev, unsigned in + * @adev: ATA device + * @mask: Mask of proposed modes + * +- * Check the blacklist and disable UDMA5 if matched ++ * Check the list of devices with broken UDMA5 and ++ * disable UDMA5 if matched. 
+ */ +- +-static unsigned int serverworks_csb_filter(struct ata_device *adev, unsigned int mask) ++static unsigned int serverworks_csb_filter(struct ata_device *adev, ++ unsigned int mask) + { + const char *p; + char model_num[ATA_ID_PROD_LEN + 1]; +diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c +index cc77c024828431..df095659bae0f5 100644 +--- a/drivers/ata/sata_sil.c ++++ b/drivers/ata/sata_sil.c +@@ -128,7 +128,7 @@ static const struct pci_device_id sil_pci_tbl[] = { + static const struct sil_drivelist { + const char *product; + unsigned int quirk; +-} sil_blacklist [] = { ++} sil_quirks[] = { + { "ST320012AS", SIL_QUIRK_MOD15WRITE }, + { "ST330013AS", SIL_QUIRK_MOD15WRITE }, + { "ST340017AS", SIL_QUIRK_MOD15WRITE }, +@@ -600,8 +600,8 @@ static void sil_thaw(struct ata_port *ap) + * list, and apply the fixups to only the specific + * devices/hosts/firmwares that need it. + * +- * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted +- * The Maxtor quirk is in the blacklist, but I'm keeping the original ++ * 20040111 - Seagate drives affected by the Mod15Write bug are quirked ++ * The Maxtor quirk is in sil_quirks, but I'm keeping the original + * pessimistic fix for the following reasons... + * - There seems to be less info on it, only one device gleaned off the + * Windows driver, maybe only one is affected. 
More info would be greatly +@@ -620,9 +620,9 @@ static void sil_dev_config(struct ata_device *dev) + + ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); + +- for (n = 0; sil_blacklist[n].product; n++) +- if (!strcmp(sil_blacklist[n].product, model_num)) { +- quirks = sil_blacklist[n].quirk; ++ for (n = 0; sil_quirks[n].product; n++) ++ if (!strcmp(sil_quirks[n].product, model_num)) { ++ quirks = sil_quirks[n].quirk; + break; + } + +diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c +index cc9077b588d7e7..d1f4ddc576451a 100644 +--- a/drivers/block/aoe/aoecmd.c ++++ b/drivers/block/aoe/aoecmd.c +@@ -361,6 +361,7 @@ ata_rw_frameinit(struct frame *f) + } + + ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit; ++ dev_hold(t->ifp->nd); + skb->dev = t->ifp->nd; + } + +@@ -401,6 +402,8 @@ aoecmd_ata_rw(struct aoedev *d) + __skb_queue_head_init(&queue); + __skb_queue_tail(&queue, skb); + aoenet_xmit(&queue); ++ } else { ++ dev_put(f->t->ifp->nd); + } + return 1; + } +@@ -483,10 +486,13 @@ resend(struct aoedev *d, struct frame *f) + memcpy(h->dst, t->addr, sizeof h->dst); + memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); + ++ dev_hold(t->ifp->nd); + skb->dev = t->ifp->nd; + skb = skb_clone(skb, GFP_ATOMIC); +- if (skb == NULL) ++ if (skb == NULL) { ++ dev_put(t->ifp->nd); + return; ++ } + f->sent = ktime_get(); + __skb_queue_head_init(&queue); + __skb_queue_tail(&queue, skb); +@@ -617,6 +623,8 @@ probe(struct aoetgt *t) + __skb_queue_head_init(&queue); + __skb_queue_tail(&queue, skb); + aoenet_xmit(&queue); ++ } else { ++ dev_put(f->t->ifp->nd); + } + } + +@@ -1395,6 +1403,7 @@ aoecmd_ata_id(struct aoedev *d) + ah->cmdstat = ATA_CMD_ID_ATA; + ah->lba3 = 0xa0; + ++ dev_hold(t->ifp->nd); + skb->dev = t->ifp->nd; + + d->rttavg = RTTAVG_INIT; +@@ -1404,6 +1413,8 @@ aoecmd_ata_id(struct aoedev *d) + skb = skb_clone(skb, GFP_ATOMIC); + if (skb) + f->sent = ktime_get(); ++ else ++ dev_put(t->ifp->nd); + + return skb; + } +diff --git 
a/drivers/block/loop.c b/drivers/block/loop.c +index 552f56a84a7eb2..886c6359903779 100644 +--- a/drivers/block/loop.c ++++ b/drivers/block/loop.c +@@ -211,13 +211,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) + if (lo->lo_state == Lo_bound) + blk_mq_freeze_queue(lo->lo_queue); + lo->use_dio = use_dio; +- if (use_dio) { +- blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue); ++ if (use_dio) + lo->lo_flags |= LO_FLAGS_DIRECT_IO; +- } else { +- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); ++ else + lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; +- } + if (lo->lo_state == Lo_bound) + blk_mq_unfreeze_queue(lo->lo_queue); + } +@@ -2038,14 +2035,6 @@ static int loop_add(int i) + + blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS); + +- /* +- * By default, we do buffer IO, so it doesn't make sense to enable +- * merge because the I/O submitted to backing file is handled page by +- * page. For directio mode, merge does help to dispatch bigger request +- * to underlayer disk. We will enable merge once directio is enabled. +- */ +- blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); +- + /* + * Disable partition scanning by default. 
The in-kernel partition + * scanning can be requested individually per-device during its +diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c +index f1b7d7fdffec84..97ed3bd9707f41 100644 +--- a/drivers/block/null_blk/main.c ++++ b/drivers/block/null_blk/main.c +@@ -392,13 +392,25 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev, + static int nullb_apply_submit_queues(struct nullb_device *dev, + unsigned int submit_queues) + { +- return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); ++ int ret; ++ ++ mutex_lock(&lock); ++ ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); ++ mutex_unlock(&lock); ++ ++ return ret; + } + + static int nullb_apply_poll_queues(struct nullb_device *dev, + unsigned int poll_queues) + { +- return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); ++ int ret; ++ ++ mutex_lock(&lock); ++ ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); ++ mutex_unlock(&lock); ++ ++ return ret; + } + + NULLB_DEVICE_ATTR(size, ulong, NULL); +@@ -444,28 +456,32 @@ static ssize_t nullb_device_power_store(struct config_item *item, + if (ret < 0) + return ret; + ++ ret = count; ++ mutex_lock(&lock); + if (!dev->power && newp) { + if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) +- return count; ++ goto out; ++ + ret = null_add_dev(dev); + if (ret) { + clear_bit(NULLB_DEV_FL_UP, &dev->flags); +- return ret; ++ goto out; + } + + set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); + dev->power = newp; ++ ret = count; + } else if (dev->power && !newp) { + if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { +- mutex_lock(&lock); + dev->power = newp; + null_del_dev(dev->nullb); +- mutex_unlock(&lock); + } + clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); + } + +- return count; ++out: ++ mutex_unlock(&lock); ++ return ret; + } + + CONFIGFS_ATTR(nullb_device_, power); +@@ -1819,7 +1835,7 @@ static void null_del_dev(struct nullb *nullb) + + dev = nullb->dev; + 
+- ida_simple_remove(&nullb_indexes, nullb->index); ++ ida_free(&nullb_indexes, nullb->index); + + list_del_init(&nullb->list); + +@@ -2153,15 +2169,12 @@ static int null_add_dev(struct nullb_device *dev) + nullb->q->queuedata = nullb; + blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); + +- mutex_lock(&lock); +- rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); +- if (rv < 0) { +- mutex_unlock(&lock); ++ rv = ida_alloc(&nullb_indexes, GFP_KERNEL); ++ if (rv < 0) + goto out_cleanup_zone; +- } ++ + nullb->index = rv; + dev->index = rv; +- mutex_unlock(&lock); + + blk_queue_logical_block_size(nullb->q, dev->blocksize); + blk_queue_physical_block_size(nullb->q, dev->blocksize); +@@ -2185,9 +2198,7 @@ static int null_add_dev(struct nullb_device *dev) + if (rv) + goto out_ida_free; + +- mutex_lock(&lock); + list_add_tail(&nullb->list, &nullb_list); +- mutex_unlock(&lock); + + pr_info("disk %s created\n", nullb->disk_name); + +@@ -2236,7 +2247,9 @@ static int null_create_dev(void) + if (!dev) + return -ENOMEM; + ++ mutex_lock(&lock); + ret = null_add_dev(dev); ++ mutex_unlock(&lock); + if (ret) { + null_free_dev(dev); + return ret; +diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c +index d76c799553aaa1..468e4165c7cc0e 100644 +--- a/drivers/bluetooth/btmrvl_sdio.c ++++ b/drivers/bluetooth/btmrvl_sdio.c +@@ -92,7 +92,7 @@ static int btmrvl_sdio_probe_of(struct device *dev, + } else { + ret = devm_request_irq(dev, cfg->irq_bt, + btmrvl_wake_irq_bt, +- 0, "bt_wake", card); ++ IRQF_NO_AUTOEN, "bt_wake", card); + if (ret) { + dev_err(dev, + "Failed to request irq_bt %d (%d)\n", +@@ -101,7 +101,6 @@ static int btmrvl_sdio_probe_of(struct device *dev, + + /* Configure wakeup (enabled by default) */ + device_init_wakeup(dev, true); +- disable_irq(cfg->irq_bt); + } + } + +diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c +index 277d039ecbb429..1e7c1f9db9e4b9 100644 +--- a/drivers/bluetooth/btrtl.c ++++ b/drivers/bluetooth/btrtl.c 
+@@ -1285,6 +1285,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) + btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP); + + if (btrtl_dev->project_id == CHIP_ID_8852A || ++ btrtl_dev->project_id == CHIP_ID_8852B || + btrtl_dev->project_id == CHIP_ID_8852C) + set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks); + +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 0a58106207b0c3..bc53da383f855a 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -537,6 +537,8 @@ static const struct usb_device_id quirks_table[] = { + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, ++ { USB_DEVICE(0x0489, 0xe122), .driver_info = BTUSB_REALTEK | ++ BTUSB_WIDEBAND_SPEECH }, + + /* Realtek 8852BE Bluetooth devices */ + { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK | +diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c +index 7464cf64803fd7..8b3e5f84e89a77 100644 +--- a/drivers/clk/qcom/clk-alpha-pll.c ++++ b/drivers/clk/qcom/clk-alpha-pll.c +@@ -1638,7 +1638,7 @@ static int __alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate, + if (ret < 0) + return ret; + +- regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); ++ regmap_update_bits(pll->clkr.regmap, PLL_L_VAL(pll), LUCID_EVO_PLL_L_VAL_MASK, l); + regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a); + + /* Latch the PLL input */ +diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c +index 4c5b552b47b6a1..a556c9e77d192d 100644 +--- a/drivers/clk/qcom/clk-rpmh.c ++++ b/drivers/clk/qcom/clk-rpmh.c +@@ -263,6 +263,8 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable) + cmd_state = 0; + } + ++ cmd_state = min(cmd_state, BCM_TCS_CMD_VOTE_MASK); ++ + if (c->last_sent_aggr_state != cmd_state) { + cmd.addr = c->res_addr; + cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state); +diff --git 
a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c +index 9a9e0852c91f23..317a7e2b50bfbc 100644 +--- a/drivers/clk/qcom/dispcc-sm8250.c ++++ b/drivers/clk/qcom/dispcc-sm8250.c +@@ -851,6 +851,7 @@ static struct clk_branch disp_cc_mdss_dp_link1_intf_clk = { + &disp_cc_mdss_dp_link1_div_clk_src.clkr.hw, + }, + .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -886,6 +887,7 @@ static struct clk_branch disp_cc_mdss_dp_link_intf_clk = { + &disp_cc_mdss_dp_link_div_clk_src.clkr.hw, + }, + .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +@@ -1011,6 +1013,7 @@ static struct clk_branch disp_cc_mdss_mdp_lut_clk = { + &disp_cc_mdss_mdp_clk_src.clkr.hw, + }, + .num_parents = 1, ++ .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +diff --git a/drivers/clk/qcom/gcc-sc8180x.c b/drivers/clk/qcom/gcc-sc8180x.c +index ae21473815596d..ec0c45881c67a7 100644 +--- a/drivers/clk/qcom/gcc-sc8180x.c ++++ b/drivers/clk/qcom/gcc-sc8180x.c +@@ -142,6 +142,23 @@ static struct clk_alpha_pll gpll7 = { + }, + }; + ++static struct clk_alpha_pll gpll9 = { ++ .offset = 0x1c000, ++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], ++ .clkr = { ++ .enable_reg = 0x52000, ++ .enable_mask = BIT(9), ++ .hw.init = &(const struct clk_init_data) { ++ .name = "gpll9", ++ .parent_data = &(const struct clk_parent_data) { ++ .fw_name = "bi_tcxo", ++ }, ++ .num_parents = 1, ++ .ops = &clk_alpha_pll_fixed_trion_ops, ++ }, ++ }, ++}; ++ + static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, +@@ -241,7 +258,7 @@ static const struct parent_map gcc_parent_map_7[] = { + static const struct clk_parent_data gcc_parents_7[] = { + { .fw_name = "bi_tcxo", }, + { .hw = &gpll0.clkr.hw }, +- { .name = "gppl9" }, ++ { .hw = &gpll9.clkr.hw }, + { .hw = &gpll4.clkr.hw }, + { .hw = &gpll0_out_even.clkr.hw }, + }; +@@ -260,28 +277,6 @@ static const struct 
clk_parent_data gcc_parents_8[] = { + { .hw = &gpll0_out_even.clkr.hw }, + }; + +-static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = { +- F(19200000, P_BI_TCXO, 1, 0, 0), +- F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), +- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), +- { } +-}; +- +-static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { +- .cmd_rcgr = 0x48014, +- .mnd_width = 0, +- .hid_width = 5, +- .parent_map = gcc_parent_map_0, +- .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, +- .clkr.hw.init = &(struct clk_init_data){ +- .name = "gcc_cpuss_ahb_clk_src", +- .parent_data = gcc_parents_0, +- .num_parents = ARRAY_SIZE(gcc_parents_0), +- .flags = CLK_SET_RATE_PARENT, +- .ops = &clk_rcg2_ops, +- }, +-}; +- + static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), +@@ -916,7 +911,7 @@ static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { + F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2), + F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), +- F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), ++ F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0), + { } + }; + +@@ -939,9 +934,8 @@ static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), +- F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0), + F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0), +- F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0), ++ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } + }; + +@@ -1599,25 +1593,6 @@ static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = { + }, + }; + +-/* For CPUSS functionality the AHB clock needs to be left enabled */ +-static struct clk_branch gcc_cpuss_ahb_clk = { +- .halt_reg = 0x48000, +- .halt_check = BRANCH_HALT_VOTED, +- .clkr = { +- .enable_reg = 0x52004, +- .enable_mask = BIT(21), +- .hw.init = &(struct clk_init_data){ +- .name = "gcc_cpuss_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ +- 
&gcc_cpuss_ahb_clk_src.clkr.hw +- }, +- .num_parents = 1, +- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, +- .ops = &clk_branch2_ops, +- }, +- }, +-}; +- + static struct clk_branch gcc_cpuss_rbcpr_clk = { + .halt_reg = 0x48008, + .halt_check = BRANCH_HALT, +@@ -3150,25 +3125,6 @@ static struct clk_branch gcc_sdcc4_apps_clk = { + }, + }; + +-/* For CPUSS functionality the SYS NOC clock needs to be left enabled */ +-static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = { +- .halt_reg = 0x4819c, +- .halt_check = BRANCH_HALT_VOTED, +- .clkr = { +- .enable_reg = 0x52004, +- .enable_mask = BIT(0), +- .hw.init = &(struct clk_init_data){ +- .name = "gcc_sys_noc_cpuss_ahb_clk", +- .parent_hws = (const struct clk_hw *[]){ +- &gcc_cpuss_ahb_clk_src.clkr.hw +- }, +- .num_parents = 1, +- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, +- .ops = &clk_branch2_ops, +- }, +- }, +-}; +- + static struct clk_branch gcc_tsif_ahb_clk = { + .halt_reg = 0x36004, + .halt_check = BRANCH_HALT, +@@ -4258,8 +4214,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = { + [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr, + [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, + [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr, +- [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr, +- [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr, + [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr, + [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, + [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, +@@ -4396,7 +4350,6 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = { + [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr, + [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr, + [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr, +- [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr, + [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr, + [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr, + [GCC_TSIF_REF_CLK] = 
&gcc_tsif_ref_clk.clkr, +@@ -4483,6 +4436,7 @@ static struct clk_regmap *gcc_sc8180x_clocks[] = { + [GPLL1] = &gpll1.clkr, + [GPLL4] = &gpll4.clkr, + [GPLL7] = &gpll7.clkr, ++ [GPLL9] = &gpll9.clkr, + }; + + static const struct qcom_reset_map gcc_sc8180x_resets[] = { +diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c +index c6c5261264f118..272da807c6945d 100644 +--- a/drivers/clk/qcom/gcc-sm8250.c ++++ b/drivers/clk/qcom/gcc-sm8250.c +@@ -3226,7 +3226,7 @@ static struct gdsc pcie_0_gdsc = { + .pd = { + .name = "pcie_0_gdsc", + }, +- .pwrsts = PWRSTS_OFF_ON, ++ .pwrsts = PWRSTS_RET_ON, + }; + + static struct gdsc pcie_1_gdsc = { +@@ -3234,7 +3234,7 @@ static struct gdsc pcie_1_gdsc = { + .pd = { + .name = "pcie_1_gdsc", + }, +- .pwrsts = PWRSTS_OFF_ON, ++ .pwrsts = PWRSTS_RET_ON, + }; + + static struct gdsc pcie_2_gdsc = { +@@ -3242,7 +3242,7 @@ static struct gdsc pcie_2_gdsc = { + .pd = { + .name = "pcie_2_gdsc", + }, +- .pwrsts = PWRSTS_OFF_ON, ++ .pwrsts = PWRSTS_RET_ON, + }; + + static struct gdsc ufs_card_gdsc = { +diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c +index 56354298255160..4c55df89ddca7d 100644 +--- a/drivers/clk/qcom/gcc-sm8450.c ++++ b/drivers/clk/qcom/gcc-sm8450.c +@@ -2974,7 +2974,7 @@ static struct gdsc pcie_0_gdsc = { + .pd = { + .name = "pcie_0_gdsc", + }, +- .pwrsts = PWRSTS_OFF_ON, ++ .pwrsts = PWRSTS_RET_ON, + }; + + static struct gdsc pcie_1_gdsc = { +@@ -2982,7 +2982,7 @@ static struct gdsc pcie_1_gdsc = { + .pd = { + .name = "pcie_1_gdsc", + }, +- .pwrsts = PWRSTS_OFF_ON, ++ .pwrsts = PWRSTS_RET_ON, + }; + + static struct gdsc ufs_phy_gdsc = { +diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c +index 4059d9365ae642..b9d6ffcb340912 100644 +--- a/drivers/clk/rockchip/clk.c ++++ b/drivers/clk/rockchip/clk.c +@@ -433,12 +433,13 @@ void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx, + struct rockchip_clk_branch *list, + unsigned int nr_clk) + { +- 
struct clk *clk = NULL; ++ struct clk *clk; + unsigned int idx; + unsigned long flags; + + for (idx = 0; idx < nr_clk; idx++, list++) { + flags = list->flags; ++ clk = NULL; + + /* catch simple muxes */ + switch (list->branch_type) { +diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c +index f7d7427a558ba0..87387d4cbf48a2 100644 +--- a/drivers/clk/samsung/clk-exynos7885.c ++++ b/drivers/clk/samsung/clk-exynos7885.c +@@ -20,7 +20,7 @@ + #define CLKS_NR_TOP (CLK_GOUT_FSYS_USB30DRD + 1) + #define CLKS_NR_CORE (CLK_GOUT_TREX_P_CORE_PCLK_P_CORE + 1) + #define CLKS_NR_PERI (CLK_GOUT_WDT1_PCLK + 1) +-#define CLKS_NR_FSYS (CLK_GOUT_MMC_SDIO_SDCLKIN + 1) ++#define CLKS_NR_FSYS (CLK_MOUT_FSYS_USB30DRD_USER + 1) + + /* ---- CMU_TOP ------------------------------------------------------------- */ + +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index 0ee3a04bb1022f..8a4fdf212ce0de 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -1632,7 +1632,7 @@ static void intel_pstate_notify_work(struct work_struct *work) + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); + } + +-static DEFINE_SPINLOCK(hwp_notify_lock); ++static DEFINE_RAW_SPINLOCK(hwp_notify_lock); + static cpumask_t hwp_intr_enable_mask; + + void notify_hwp_interrupt(void) +@@ -1649,7 +1649,7 @@ void notify_hwp_interrupt(void) + if (!(value & 0x01)) + return; + +- spin_lock_irqsave(&hwp_notify_lock, flags); ++ raw_spin_lock_irqsave(&hwp_notify_lock, flags); + + if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask)) + goto ack_intr; +@@ -1673,13 +1673,13 @@ void notify_hwp_interrupt(void) + + schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10)); + +- spin_unlock_irqrestore(&hwp_notify_lock, flags); ++ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); + + return; + + ack_intr: + wrmsrl_safe(MSR_HWP_STATUS, 0); +- spin_unlock_irqrestore(&hwp_notify_lock, flags); ++ 
raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); + } + + static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) +@@ -1692,10 +1692,10 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) + /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); + +- spin_lock_irqsave(&hwp_notify_lock, flags); ++ raw_spin_lock_irqsave(&hwp_notify_lock, flags); + if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask)) + cancel_delayed_work(&cpudata->hwp_notify_work); +- spin_unlock_irqrestore(&hwp_notify_lock, flags); ++ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); + } + + static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) +@@ -1704,10 +1704,10 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) + if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { + unsigned long flags; + +- spin_lock_irqsave(&hwp_notify_lock, flags); ++ raw_spin_lock_irqsave(&hwp_notify_lock, flags); + INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); + cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); +- spin_unlock_irqrestore(&hwp_notify_lock, flags); ++ raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); + + /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); +@@ -3136,10 +3136,10 @@ static void intel_pstate_driver_cleanup(void) + if (intel_pstate_driver == &intel_pstate) + intel_pstate_clear_update_util_hook(cpu); + +- spin_lock(&hwp_notify_lock); ++ raw_spin_lock(&hwp_notify_lock); + kfree(all_cpu_data[cpu]); + WRITE_ONCE(all_cpu_data[cpu], NULL); +- spin_unlock(&hwp_notify_lock); ++ raw_spin_unlock(&hwp_notify_lock); + } + } + cpus_read_unlock(); +diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig +index a48591af12d025..78217577aa5403 100644 +--- a/drivers/crypto/marvell/Kconfig ++++ 
b/drivers/crypto/marvell/Kconfig +@@ -28,6 +28,7 @@ config CRYPTO_DEV_OCTEONTX_CPT + select CRYPTO_SKCIPHER + select CRYPTO_HASH + select CRYPTO_AEAD ++ select CRYPTO_AUTHENC + select CRYPTO_DEV_MARVELL + help + This driver allows you to utilize the Marvell Cryptographic +@@ -47,6 +48,7 @@ config CRYPTO_DEV_OCTEONTX2_CPT + select CRYPTO_SKCIPHER + select CRYPTO_HASH + select CRYPTO_AEAD ++ select CRYPTO_AUTHENC + select NET_DEVLINK + help + This driver allows you to utilize the Marvell Cryptographic +diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +index 1c2c870e887aab..f64b72398eced9 100644 +--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c ++++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +@@ -17,7 +17,6 @@ + #include + #include + #include +-#include + #include + #include + #include "otx_cptvf.h" +@@ -66,6 +65,8 @@ static struct cpt_device_table ae_devices = { + .count = ATOMIC_INIT(0) + }; + ++static struct otx_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg); ++ + static inline int get_se_device(struct pci_dev **pdev, int *cpu_num) + { + int count, ret = 0; +@@ -515,44 +516,61 @@ static int cpt_aead_init(struct crypto_aead *tfm, u8 cipher_type, u8 mac_type) + ctx->cipher_type = cipher_type; + ctx->mac_type = mac_type; + ++ switch (ctx->mac_type) { ++ case OTX_CPT_SHA1: ++ ctx->hashalg = crypto_alloc_shash("sha1", 0, 0); ++ break; ++ ++ case OTX_CPT_SHA256: ++ ctx->hashalg = crypto_alloc_shash("sha256", 0, 0); ++ break; ++ ++ case OTX_CPT_SHA384: ++ ctx->hashalg = crypto_alloc_shash("sha384", 0, 0); ++ break; ++ ++ case OTX_CPT_SHA512: ++ ctx->hashalg = crypto_alloc_shash("sha512", 0, 0); ++ break; ++ } ++ ++ if (IS_ERR(ctx->hashalg)) ++ return PTR_ERR(ctx->hashalg); ++ ++ crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx)); ++ ++ if (!ctx->hashalg) ++ return 0; ++ + /* + * When selected cipher is NULL we use HMAC opcode instead of + * FLEXICRYPTO opcode therefore we don't 
need to use HASH algorithms + * for calculating ipad and opad + */ + if (ctx->cipher_type != OTX_CPT_CIPHER_NULL) { +- switch (ctx->mac_type) { +- case OTX_CPT_SHA1: +- ctx->hashalg = crypto_alloc_shash("sha1", 0, +- CRYPTO_ALG_ASYNC); +- if (IS_ERR(ctx->hashalg)) +- return PTR_ERR(ctx->hashalg); +- break; +- +- case OTX_CPT_SHA256: +- ctx->hashalg = crypto_alloc_shash("sha256", 0, +- CRYPTO_ALG_ASYNC); +- if (IS_ERR(ctx->hashalg)) +- return PTR_ERR(ctx->hashalg); +- break; ++ int ss = crypto_shash_statesize(ctx->hashalg); + +- case OTX_CPT_SHA384: +- ctx->hashalg = crypto_alloc_shash("sha384", 0, +- CRYPTO_ALG_ASYNC); +- if (IS_ERR(ctx->hashalg)) +- return PTR_ERR(ctx->hashalg); +- break; ++ ctx->ipad = kzalloc(ss, GFP_KERNEL); ++ if (!ctx->ipad) { ++ crypto_free_shash(ctx->hashalg); ++ return -ENOMEM; ++ } + +- case OTX_CPT_SHA512: +- ctx->hashalg = crypto_alloc_shash("sha512", 0, +- CRYPTO_ALG_ASYNC); +- if (IS_ERR(ctx->hashalg)) +- return PTR_ERR(ctx->hashalg); +- break; ++ ctx->opad = kzalloc(ss, GFP_KERNEL); ++ if (!ctx->opad) { ++ kfree(ctx->ipad); ++ crypto_free_shash(ctx->hashalg); ++ return -ENOMEM; + } + } + +- crypto_aead_set_reqsize_dma(tfm, sizeof(struct otx_cpt_req_ctx)); ++ ctx->sdesc = alloc_sdesc(ctx->hashalg); ++ if (!ctx->sdesc) { ++ kfree(ctx->opad); ++ kfree(ctx->ipad); ++ crypto_free_shash(ctx->hashalg); ++ return -ENOMEM; ++ } + + return 0; + } +@@ -608,8 +626,7 @@ static void otx_cpt_aead_exit(struct crypto_aead *tfm) + + kfree(ctx->ipad); + kfree(ctx->opad); +- if (ctx->hashalg) +- crypto_free_shash(ctx->hashalg); ++ crypto_free_shash(ctx->hashalg); + kfree(ctx->sdesc); + } + +@@ -705,7 +722,7 @@ static inline void swap_data64(void *buf, u32 len) + *dst = cpu_to_be64p(src); + } + +-static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) ++static int swap_pad(u8 mac_type, u8 *pad) + { + struct sha512_state *sha512; + struct sha256_state *sha256; +@@ -713,22 +730,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) + + switch 
(mac_type) { + case OTX_CPT_SHA1: +- sha1 = (struct sha1_state *) in_pad; ++ sha1 = (struct sha1_state *)pad; + swap_data32(sha1->state, SHA1_DIGEST_SIZE); +- memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE); + break; + + case OTX_CPT_SHA256: +- sha256 = (struct sha256_state *) in_pad; ++ sha256 = (struct sha256_state *)pad; + swap_data32(sha256->state, SHA256_DIGEST_SIZE); +- memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE); + break; + + case OTX_CPT_SHA384: + case OTX_CPT_SHA512: +- sha512 = (struct sha512_state *) in_pad; ++ sha512 = (struct sha512_state *)pad; + swap_data64(sha512->state, SHA512_DIGEST_SIZE); +- memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE); + break; + + default: +@@ -738,55 +752,53 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) + return 0; + } + +-static int aead_hmac_init(struct crypto_aead *cipher) ++static int aead_hmac_init(struct crypto_aead *cipher, ++ struct crypto_authenc_keys *keys) + { + struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); +- int state_size = crypto_shash_statesize(ctx->hashalg); + int ds = crypto_shash_digestsize(ctx->hashalg); + int bs = crypto_shash_blocksize(ctx->hashalg); +- int authkeylen = ctx->auth_key_len; ++ int authkeylen = keys->authkeylen; + u8 *ipad = NULL, *opad = NULL; +- int ret = 0, icount = 0; ++ int icount = 0; ++ int ret; + +- ctx->sdesc = alloc_sdesc(ctx->hashalg); +- if (!ctx->sdesc) +- return -ENOMEM; ++ if (authkeylen > bs) { ++ ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey, ++ authkeylen, ctx->key); ++ if (ret) ++ return ret; ++ authkeylen = ds; ++ } else ++ memcpy(ctx->key, keys->authkey, authkeylen); + +- ctx->ipad = kzalloc(bs, GFP_KERNEL); +- if (!ctx->ipad) { +- ret = -ENOMEM; +- goto calc_fail; +- } ++ ctx->enc_key_len = keys->enckeylen; ++ ctx->auth_key_len = authkeylen; + +- ctx->opad = kzalloc(bs, GFP_KERNEL); +- if (!ctx->opad) { +- ret = -ENOMEM; +- goto calc_fail; +- } ++ if (ctx->cipher_type == OTX_CPT_CIPHER_NULL) ++ return 
keys->enckeylen ? -EINVAL : 0; + +- ipad = kzalloc(state_size, GFP_KERNEL); +- if (!ipad) { +- ret = -ENOMEM; +- goto calc_fail; ++ switch (keys->enckeylen) { ++ case AES_KEYSIZE_128: ++ ctx->key_type = OTX_CPT_AES_128_BIT; ++ break; ++ case AES_KEYSIZE_192: ++ ctx->key_type = OTX_CPT_AES_192_BIT; ++ break; ++ case AES_KEYSIZE_256: ++ ctx->key_type = OTX_CPT_AES_256_BIT; ++ break; ++ default: ++ /* Invalid key length */ ++ return -EINVAL; + } + +- opad = kzalloc(state_size, GFP_KERNEL); +- if (!opad) { +- ret = -ENOMEM; +- goto calc_fail; +- } ++ memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen); + +- if (authkeylen > bs) { +- ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key, +- authkeylen, ipad); +- if (ret) +- goto calc_fail; +- +- authkeylen = ds; +- } else { +- memcpy(ipad, ctx->key, authkeylen); +- } ++ ipad = ctx->ipad; ++ opad = ctx->opad; + ++ memcpy(ipad, ctx->key, authkeylen); + memset(ipad + authkeylen, 0, bs - authkeylen); + memcpy(opad, ipad, bs); + +@@ -804,7 +816,7 @@ static int aead_hmac_init(struct crypto_aead *cipher) + crypto_shash_init(&ctx->sdesc->shash); + crypto_shash_update(&ctx->sdesc->shash, ipad, bs); + crypto_shash_export(&ctx->sdesc->shash, ipad); +- ret = copy_pad(ctx->mac_type, ctx->ipad, ipad); ++ ret = swap_pad(ctx->mac_type, ipad); + if (ret) + goto calc_fail; + +@@ -812,25 +824,9 @@ static int aead_hmac_init(struct crypto_aead *cipher) + crypto_shash_init(&ctx->sdesc->shash); + crypto_shash_update(&ctx->sdesc->shash, opad, bs); + crypto_shash_export(&ctx->sdesc->shash, opad); +- ret = copy_pad(ctx->mac_type, ctx->opad, opad); +- if (ret) +- goto calc_fail; +- +- kfree(ipad); +- kfree(opad); +- +- return 0; ++ ret = swap_pad(ctx->mac_type, opad); + + calc_fail: +- kfree(ctx->ipad); +- ctx->ipad = NULL; +- kfree(ctx->opad); +- ctx->opad = NULL; +- kfree(ipad); +- kfree(opad); +- kfree(ctx->sdesc); +- ctx->sdesc = NULL; +- + return ret; + } + +@@ -838,57 +834,15 @@ static int otx_cpt_aead_cbc_aes_sha_setkey(struct 
crypto_aead *cipher, + const unsigned char *key, + unsigned int keylen) + { +- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); +- struct crypto_authenc_key_param *param; +- int enckeylen = 0, authkeylen = 0; +- struct rtattr *rta = (void *)key; +- int status = -EINVAL; +- +- if (!RTA_OK(rta, keylen)) +- goto badkey; +- +- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) +- goto badkey; +- +- if (RTA_PAYLOAD(rta) < sizeof(*param)) +- goto badkey; +- +- param = RTA_DATA(rta); +- enckeylen = be32_to_cpu(param->enckeylen); +- key += RTA_ALIGN(rta->rta_len); +- keylen -= RTA_ALIGN(rta->rta_len); +- if (keylen < enckeylen) +- goto badkey; ++ struct crypto_authenc_keys authenc_keys; ++ int status; + +- if (keylen > OTX_CPT_MAX_KEY_SIZE) +- goto badkey; +- +- authkeylen = keylen - enckeylen; +- memcpy(ctx->key, key, keylen); +- +- switch (enckeylen) { +- case AES_KEYSIZE_128: +- ctx->key_type = OTX_CPT_AES_128_BIT; +- break; +- case AES_KEYSIZE_192: +- ctx->key_type = OTX_CPT_AES_192_BIT; +- break; +- case AES_KEYSIZE_256: +- ctx->key_type = OTX_CPT_AES_256_BIT; +- break; +- default: +- /* Invalid key length */ +- goto badkey; +- } +- +- ctx->enc_key_len = enckeylen; +- ctx->auth_key_len = authkeylen; +- +- status = aead_hmac_init(cipher); ++ status = crypto_authenc_extractkeys(&authenc_keys, key, keylen); + if (status) + goto badkey; + +- return 0; ++ status = aead_hmac_init(cipher, &authenc_keys); ++ + badkey: + return status; + } +@@ -897,36 +851,7 @@ static int otx_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher, + const unsigned char *key, + unsigned int keylen) + { +- struct otx_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); +- struct crypto_authenc_key_param *param; +- struct rtattr *rta = (void *)key; +- int enckeylen = 0; +- +- if (!RTA_OK(rta, keylen)) +- goto badkey; +- +- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) +- goto badkey; +- +- if (RTA_PAYLOAD(rta) < sizeof(*param)) +- goto badkey; +- +- param = RTA_DATA(rta); +- enckeylen = 
be32_to_cpu(param->enckeylen); +- key += RTA_ALIGN(rta->rta_len); +- keylen -= RTA_ALIGN(rta->rta_len); +- if (enckeylen != 0) +- goto badkey; +- +- if (keylen > OTX_CPT_MAX_KEY_SIZE) +- goto badkey; +- +- memcpy(ctx->key, key, keylen); +- ctx->enc_key_len = enckeylen; +- ctx->auth_key_len = keylen; +- return 0; +-badkey: +- return -EINVAL; ++ return otx_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen); + } + + static int otx_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, +diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +index e27ddd3c4e5581..4385d3df52b4d4 100644 +--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c ++++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +@@ -11,7 +11,6 @@ + #include + #include + #include +-#include + #include + #include + #include "otx2_cptvf.h" +@@ -54,6 +53,8 @@ static struct cpt_device_table se_devices = { + .count = ATOMIC_INIT(0) + }; + ++static struct otx2_cpt_sdesc *alloc_sdesc(struct crypto_shash *alg); ++ + static inline int get_se_device(struct pci_dev **pdev, int *cpu_num) + { + int count; +@@ -580,40 +581,56 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type) + ctx->cipher_type = cipher_type; + ctx->mac_type = mac_type; + ++ switch (ctx->mac_type) { ++ case OTX2_CPT_SHA1: ++ ctx->hashalg = crypto_alloc_shash("sha1", 0, 0); ++ break; ++ ++ case OTX2_CPT_SHA256: ++ ctx->hashalg = crypto_alloc_shash("sha256", 0, 0); ++ break; ++ ++ case OTX2_CPT_SHA384: ++ ctx->hashalg = crypto_alloc_shash("sha384", 0, 0); ++ break; ++ ++ case OTX2_CPT_SHA512: ++ ctx->hashalg = crypto_alloc_shash("sha512", 0, 0); ++ break; ++ } ++ ++ if (IS_ERR(ctx->hashalg)) ++ return PTR_ERR(ctx->hashalg); ++ ++ if (ctx->hashalg) { ++ ctx->sdesc = alloc_sdesc(ctx->hashalg); ++ if (!ctx->sdesc) { ++ crypto_free_shash(ctx->hashalg); ++ return -ENOMEM; ++ } ++ } ++ + /* + * When selected cipher is NULL we use HMAC opcode instead of + * 
FLEXICRYPTO opcode therefore we don't need to use HASH algorithms + * for calculating ipad and opad + */ +- if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL) { +- switch (ctx->mac_type) { +- case OTX2_CPT_SHA1: +- ctx->hashalg = crypto_alloc_shash("sha1", 0, +- CRYPTO_ALG_ASYNC); +- if (IS_ERR(ctx->hashalg)) +- return PTR_ERR(ctx->hashalg); +- break; +- +- case OTX2_CPT_SHA256: +- ctx->hashalg = crypto_alloc_shash("sha256", 0, +- CRYPTO_ALG_ASYNC); +- if (IS_ERR(ctx->hashalg)) +- return PTR_ERR(ctx->hashalg); +- break; ++ if (ctx->cipher_type != OTX2_CPT_CIPHER_NULL && ctx->hashalg) { ++ int ss = crypto_shash_statesize(ctx->hashalg); + +- case OTX2_CPT_SHA384: +- ctx->hashalg = crypto_alloc_shash("sha384", 0, +- CRYPTO_ALG_ASYNC); +- if (IS_ERR(ctx->hashalg)) +- return PTR_ERR(ctx->hashalg); +- break; ++ ctx->ipad = kzalloc(ss, GFP_KERNEL); ++ if (!ctx->ipad) { ++ kfree(ctx->sdesc); ++ crypto_free_shash(ctx->hashalg); ++ return -ENOMEM; ++ } + +- case OTX2_CPT_SHA512: +- ctx->hashalg = crypto_alloc_shash("sha512", 0, +- CRYPTO_ALG_ASYNC); +- if (IS_ERR(ctx->hashalg)) +- return PTR_ERR(ctx->hashalg); +- break; ++ ctx->opad = kzalloc(ss, GFP_KERNEL); ++ if (!ctx->opad) { ++ kfree(ctx->ipad); ++ kfree(ctx->sdesc); ++ crypto_free_shash(ctx->hashalg); ++ return -ENOMEM; + } + } + switch (ctx->cipher_type) { +@@ -686,8 +703,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm) + + kfree(ctx->ipad); + kfree(ctx->opad); +- if (ctx->hashalg) +- crypto_free_shash(ctx->hashalg); ++ crypto_free_shash(ctx->hashalg); + kfree(ctx->sdesc); + + if (ctx->fbk_cipher) { +@@ -760,7 +776,7 @@ static inline void swap_data64(void *buf, u32 len) + cpu_to_be64s(src); + } + +-static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) ++static int swap_pad(u8 mac_type, u8 *pad) + { + struct sha512_state *sha512; + struct sha256_state *sha256; +@@ -768,22 +784,19 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) + + switch (mac_type) { + case OTX2_CPT_SHA1: +- sha1 = (struct 
sha1_state *) in_pad; ++ sha1 = (struct sha1_state *)pad; + swap_data32(sha1->state, SHA1_DIGEST_SIZE); +- memcpy(out_pad, &sha1->state, SHA1_DIGEST_SIZE); + break; + + case OTX2_CPT_SHA256: +- sha256 = (struct sha256_state *) in_pad; ++ sha256 = (struct sha256_state *)pad; + swap_data32(sha256->state, SHA256_DIGEST_SIZE); +- memcpy(out_pad, &sha256->state, SHA256_DIGEST_SIZE); + break; + + case OTX2_CPT_SHA384: + case OTX2_CPT_SHA512: +- sha512 = (struct sha512_state *) in_pad; ++ sha512 = (struct sha512_state *)pad; + swap_data64(sha512->state, SHA512_DIGEST_SIZE); +- memcpy(out_pad, &sha512->state, SHA512_DIGEST_SIZE); + break; + + default: +@@ -793,55 +806,54 @@ static int copy_pad(u8 mac_type, u8 *out_pad, u8 *in_pad) + return 0; + } + +-static int aead_hmac_init(struct crypto_aead *cipher) ++static int aead_hmac_init(struct crypto_aead *cipher, ++ struct crypto_authenc_keys *keys) + { + struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); +- int state_size = crypto_shash_statesize(ctx->hashalg); + int ds = crypto_shash_digestsize(ctx->hashalg); + int bs = crypto_shash_blocksize(ctx->hashalg); +- int authkeylen = ctx->auth_key_len; ++ int authkeylen = keys->authkeylen; + u8 *ipad = NULL, *opad = NULL; +- int ret = 0, icount = 0; ++ int icount = 0; ++ int ret; + +- ctx->sdesc = alloc_sdesc(ctx->hashalg); +- if (!ctx->sdesc) +- return -ENOMEM; ++ if (authkeylen > bs) { ++ ret = crypto_shash_digest(&ctx->sdesc->shash, keys->authkey, ++ authkeylen, ctx->key); ++ if (ret) ++ goto calc_fail; + +- ctx->ipad = kzalloc(bs, GFP_KERNEL); +- if (!ctx->ipad) { +- ret = -ENOMEM; +- goto calc_fail; +- } ++ authkeylen = ds; ++ } else ++ memcpy(ctx->key, keys->authkey, authkeylen); + +- ctx->opad = kzalloc(bs, GFP_KERNEL); +- if (!ctx->opad) { +- ret = -ENOMEM; +- goto calc_fail; +- } ++ ctx->enc_key_len = keys->enckeylen; ++ ctx->auth_key_len = authkeylen; + +- ipad = kzalloc(state_size, GFP_KERNEL); +- if (!ipad) { +- ret = -ENOMEM; +- goto calc_fail; +- } ++ if 
(ctx->cipher_type == OTX2_CPT_CIPHER_NULL) ++ return keys->enckeylen ? -EINVAL : 0; + +- opad = kzalloc(state_size, GFP_KERNEL); +- if (!opad) { +- ret = -ENOMEM; +- goto calc_fail; ++ switch (keys->enckeylen) { ++ case AES_KEYSIZE_128: ++ ctx->key_type = OTX2_CPT_AES_128_BIT; ++ break; ++ case AES_KEYSIZE_192: ++ ctx->key_type = OTX2_CPT_AES_192_BIT; ++ break; ++ case AES_KEYSIZE_256: ++ ctx->key_type = OTX2_CPT_AES_256_BIT; ++ break; ++ default: ++ /* Invalid key length */ ++ return -EINVAL; + } + +- if (authkeylen > bs) { +- ret = crypto_shash_digest(&ctx->sdesc->shash, ctx->key, +- authkeylen, ipad); +- if (ret) +- goto calc_fail; ++ memcpy(ctx->key + authkeylen, keys->enckey, keys->enckeylen); + +- authkeylen = ds; +- } else { +- memcpy(ipad, ctx->key, authkeylen); +- } ++ ipad = ctx->ipad; ++ opad = ctx->opad; + ++ memcpy(ipad, ctx->key, authkeylen); + memset(ipad + authkeylen, 0, bs - authkeylen); + memcpy(opad, ipad, bs); + +@@ -859,7 +871,7 @@ static int aead_hmac_init(struct crypto_aead *cipher) + crypto_shash_init(&ctx->sdesc->shash); + crypto_shash_update(&ctx->sdesc->shash, ipad, bs); + crypto_shash_export(&ctx->sdesc->shash, ipad); +- ret = copy_pad(ctx->mac_type, ctx->ipad, ipad); ++ ret = swap_pad(ctx->mac_type, ipad); + if (ret) + goto calc_fail; + +@@ -867,25 +879,9 @@ static int aead_hmac_init(struct crypto_aead *cipher) + crypto_shash_init(&ctx->sdesc->shash); + crypto_shash_update(&ctx->sdesc->shash, opad, bs); + crypto_shash_export(&ctx->sdesc->shash, opad); +- ret = copy_pad(ctx->mac_type, ctx->opad, opad); +- if (ret) +- goto calc_fail; +- +- kfree(ipad); +- kfree(opad); +- +- return 0; ++ ret = swap_pad(ctx->mac_type, opad); + + calc_fail: +- kfree(ctx->ipad); +- ctx->ipad = NULL; +- kfree(ctx->opad); +- ctx->opad = NULL; +- kfree(ipad); +- kfree(opad); +- kfree(ctx->sdesc); +- ctx->sdesc = NULL; +- + return ret; + } + +@@ -893,87 +889,17 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher, + const unsigned char *key, 
+ unsigned int keylen) + { +- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); +- struct crypto_authenc_key_param *param; +- int enckeylen = 0, authkeylen = 0; +- struct rtattr *rta = (void *)key; +- +- if (!RTA_OK(rta, keylen)) +- return -EINVAL; ++ struct crypto_authenc_keys authenc_keys; + +- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) +- return -EINVAL; +- +- if (RTA_PAYLOAD(rta) < sizeof(*param)) +- return -EINVAL; +- +- param = RTA_DATA(rta); +- enckeylen = be32_to_cpu(param->enckeylen); +- key += RTA_ALIGN(rta->rta_len); +- keylen -= RTA_ALIGN(rta->rta_len); +- if (keylen < enckeylen) +- return -EINVAL; +- +- if (keylen > OTX2_CPT_MAX_KEY_SIZE) +- return -EINVAL; +- +- authkeylen = keylen - enckeylen; +- memcpy(ctx->key, key, keylen); +- +- switch (enckeylen) { +- case AES_KEYSIZE_128: +- ctx->key_type = OTX2_CPT_AES_128_BIT; +- break; +- case AES_KEYSIZE_192: +- ctx->key_type = OTX2_CPT_AES_192_BIT; +- break; +- case AES_KEYSIZE_256: +- ctx->key_type = OTX2_CPT_AES_256_BIT; +- break; +- default: +- /* Invalid key length */ +- return -EINVAL; +- } +- +- ctx->enc_key_len = enckeylen; +- ctx->auth_key_len = authkeylen; +- +- return aead_hmac_init(cipher); ++ return crypto_authenc_extractkeys(&authenc_keys, key, keylen) ?: ++ aead_hmac_init(cipher, &authenc_keys); + } + + static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher, + const unsigned char *key, + unsigned int keylen) + { +- struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(cipher); +- struct crypto_authenc_key_param *param; +- struct rtattr *rta = (void *)key; +- int enckeylen = 0; +- +- if (!RTA_OK(rta, keylen)) +- return -EINVAL; +- +- if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) +- return -EINVAL; +- +- if (RTA_PAYLOAD(rta) < sizeof(*param)) +- return -EINVAL; +- +- param = RTA_DATA(rta); +- enckeylen = be32_to_cpu(param->enckeylen); +- key += RTA_ALIGN(rta->rta_len); +- keylen -= RTA_ALIGN(rta->rta_len); +- if (enckeylen != 0) +- return -EINVAL; +- +- if 
(keylen > OTX2_CPT_MAX_KEY_SIZE) +- return -EINVAL; +- +- memcpy(ctx->key, key, keylen); +- ctx->enc_key_len = enckeylen; +- ctx->auth_key_len = keylen; +- +- return 0; ++ return otx2_cpt_aead_cbc_aes_sha_setkey(cipher, key, keylen); + } + + static int otx2_cpt_aead_gcm_aes_setkey(struct crypto_aead *cipher, +diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c +index 79fb687bb90f96..6c3d84d9bcc16f 100644 +--- a/drivers/firmware/efi/unaccepted_memory.c ++++ b/drivers/firmware/efi/unaccepted_memory.c +@@ -3,6 +3,7 @@ + #include + #include + #include ++#include + #include + + /* Protects unaccepted memory bitmap and accepting_list */ +@@ -148,6 +149,9 @@ void accept_memory(phys_addr_t start, phys_addr_t end) + } + + list_del(&range.list); ++ ++ touch_softlockup_watchdog(); ++ + spin_unlock_irqrestore(&unaccepted_memory_lock, flags); + } + +diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c +index c1590d3aa9cb78..c3a1dc3449617f 100644 +--- a/drivers/firmware/tegra/bpmp.c ++++ b/drivers/firmware/tegra/bpmp.c +@@ -24,12 +24,6 @@ + #define MSG_RING BIT(1) + #define TAG_SZ 32 + +-static inline struct tegra_bpmp * +-mbox_client_to_bpmp(struct mbox_client *client) +-{ +- return container_of(client, struct tegra_bpmp, mbox.client); +-} +- + static inline const struct tegra_bpmp_ops * + channel_to_ops(struct tegra_bpmp_channel *channel) + { +diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c +index aa3ce8aa99dc83..75107ede3bf8f4 100644 +--- a/drivers/gpio/gpio-davinci.c ++++ b/drivers/gpio/gpio-davinci.c +@@ -289,7 +289,7 @@ static int davinci_gpio_probe(struct platform_device *pdev) + * serve as EDMA event triggers. 
+ */ + +-static void gpio_irq_disable(struct irq_data *d) ++static void gpio_irq_mask(struct irq_data *d) + { + struct davinci_gpio_regs __iomem *g = irq2regs(d); + uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d); +@@ -298,7 +298,7 @@ static void gpio_irq_disable(struct irq_data *d) + writel_relaxed(mask, &g->clr_rising); + } + +-static void gpio_irq_enable(struct irq_data *d) ++static void gpio_irq_unmask(struct irq_data *d) + { + struct davinci_gpio_regs __iomem *g = irq2regs(d); + uintptr_t mask = (uintptr_t)irq_data_get_irq_handler_data(d); +@@ -324,8 +324,8 @@ static int gpio_irq_type(struct irq_data *d, unsigned trigger) + + static struct irq_chip gpio_irqchip = { + .name = "GPIO", +- .irq_enable = gpio_irq_enable, +- .irq_disable = gpio_irq_disable, ++ .irq_unmask = gpio_irq_unmask, ++ .irq_mask = gpio_irq_mask, + .irq_set_type = gpio_irq_type, + .flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_SKIP_SET_WAKE, + }; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +index 25d5fda5b243e3..af6c6d89e63afb 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +@@ -335,15 +335,15 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, + return r; + } + +-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj) ++void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj) + { +- struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj; ++ struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj; + +- amdgpu_bo_reserve(bo, true); +- amdgpu_bo_kunmap(bo); +- amdgpu_bo_unpin(bo); +- amdgpu_bo_unreserve(bo); +- amdgpu_bo_unref(&(bo)); ++ amdgpu_bo_reserve(*bo, true); ++ amdgpu_bo_kunmap(*bo); ++ amdgpu_bo_unpin(*bo); ++ amdgpu_bo_unreserve(*bo); ++ amdgpu_bo_unref(bo); + } + + int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size, +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +index db5b1c6beba75b..3134e6ad81d1d4 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +@@ -221,7 +221,7 @@ int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, + int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, + void **mem_obj, uint64_t *gpu_addr, + void **cpu_ptr, bool mqd_gfx9); +-void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj); ++void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj); + int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size, + void **mem_obj); + void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +index e361dc37a0890b..7abcd618e70bd1 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +@@ -263,6 +263,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, + if (size < sizeof(struct drm_amdgpu_bo_list_in)) + goto free_partial_kdata; + ++ /* Only a single BO list is allowed to simplify handling. 
*/ ++ if (p->bo_list) ++ ret = -EINVAL; ++ + ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata); + if (ret) + goto free_partial_kdata; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +index 0ca51df46cc0d3..e7b053898f9e90 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +@@ -793,8 +793,11 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r + int r; + + if (amdgpu_ras_is_supported(adev, ras_block->block)) { +- if (!amdgpu_persistent_edc_harvesting_supported(adev)) +- amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); ++ if (!amdgpu_persistent_edc_harvesting_supported(adev)) { ++ r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); ++ if (r) ++ return r; ++ } + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) +@@ -938,7 +941,10 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) + pr_err("critical bug! too many kiq readers\n"); + goto failed_unlock; + } +- amdgpu_ring_alloc(ring, 32); ++ r = amdgpu_ring_alloc(ring, 32); ++ if (r) ++ goto failed_unlock; ++ + amdgpu_ring_emit_rreg(ring, reg, reg_val_offs); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) +@@ -1004,7 +1010,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) + } + + spin_lock_irqsave(&kiq->ring_lock, flags); +- amdgpu_ring_alloc(ring, 32); ++ r = amdgpu_ring_alloc(ring, 32); ++ if (r) ++ goto failed_unlock; ++ + amdgpu_ring_emit_wreg(ring, reg, v); + r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT); + if (r) +@@ -1040,6 +1049,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) + + failed_undo: + amdgpu_ring_undo(ring); ++failed_unlock: + spin_unlock_irqrestore(&kiq->ring_lock, flags); + failed_kiq_write: + dev_err(adev->dev, "failed to write reg:%x\n", reg); +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +index 58dab4f73a9a2a..5797055b1148f7 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +@@ -43,6 +43,7 @@ + #include "amdgpu_gem.h" + #include "amdgpu_display.h" + #include "amdgpu_ras.h" ++#include "amdgpu_reset.h" + #include "amd_pcie.h" + + void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev) +@@ -722,6 +723,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) + ? -EFAULT : 0; + } + case AMDGPU_INFO_READ_MMR_REG: { ++ int ret = 0; + unsigned int n, alloc_size; + uint32_t *regs; + unsigned int se_num = (info->read_mmr_reg.instance >> +@@ -731,24 +733,37 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) + AMDGPU_INFO_MMR_SH_INDEX_SHIFT) & + AMDGPU_INFO_MMR_SH_INDEX_MASK; + ++ if (!down_read_trylock(&adev->reset_domain->sem)) ++ return -ENOENT; ++ + /* set full masks if the userspace set all bits + * in the bitfields + */ +- if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) ++ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) { + se_num = 0xffffffff; +- else if (se_num >= AMDGPU_GFX_MAX_SE) +- return -EINVAL; +- if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) ++ } else if (se_num >= AMDGPU_GFX_MAX_SE) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) { + sh_num = 0xffffffff; +- else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) +- return -EINVAL; ++ } else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) { ++ ret = -EINVAL; ++ goto out; ++ } + +- if (info->read_mmr_reg.count > 128) +- return -EINVAL; ++ if (info->read_mmr_reg.count > 128) { ++ ret = -EINVAL; ++ goto out; ++ } + + regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL); +- if (!regs) +- return -ENOMEM; ++ if (!regs) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ + alloc_size = info->read_mmr_reg.count * sizeof(*regs); + + amdgpu_gfx_off_ctrl(adev, false); +@@ -760,13 +775,17 @@ int amdgpu_info_ioctl(struct 
drm_device *dev, void *data, struct drm_file *filp) + info->read_mmr_reg.dword_offset + i); + kfree(regs); + amdgpu_gfx_off_ctrl(adev, true); +- return -EFAULT; ++ ret = -EFAULT; ++ goto out; + } + } + amdgpu_gfx_off_ctrl(adev, true); + n = copy_to_user(out, regs, min(size, alloc_size)); + kfree(regs); +- return n ? -EFAULT : 0; ++ ret = (n ? -EFAULT : 0); ++out: ++ up_read(&adev->reset_domain->sem); ++ return ret; + } + case AMDGPU_INFO_DEV_INFO: { + struct drm_amdgpu_info_device *dev_info; +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +index 9a1036aeec2a0b..9142238e7791a5 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +@@ -179,6 +179,6 @@ amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from) + + #define for_each_xcp(xcp_mgr, xcp, i) \ + for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \ +- xcp = amdgpu_get_next_xcp(xcp_mgr, &i)) ++ ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i)) + + #endif +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +index cd594b92c61298..53c99bc6abb333 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +@@ -8748,7 +8748,9 @@ static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring, + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); ++ amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + WREG32_SOC15(GC, 0, mmSQ_CMD, value); ++ amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + } + + static void +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +index c813cd7b015e14..54ec9b32562c28 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +@@ -5701,7 +5701,9 @@ static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, + value = 
REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); ++ amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + WREG32_SOC15(GC, 0, regSQ_CMD, value); ++ amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + } + + static void +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +index 8168836a08d2ef..895060f6948f30 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +@@ -1172,6 +1172,10 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 }, + /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */ + { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 }, ++ /* https://bbs.openkylin.top/t/topic/171497 */ ++ { 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 }, ++ /* HP 705G4 DM with R5 2400G */ ++ { 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 }, + { 0, 0, 0, 0, 0 }, + }; + +@@ -5705,7 +5709,9 @@ static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) + value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); + value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); + value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); ++ amdgpu_gfx_rlc_enter_safe_mode(adev, 0); + WREG32_SOC15(GC, 0, mmSQ_CMD, value); ++ amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + } + + static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +index 9d105302837054..19d46be6394295 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +@@ -417,7 +417,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, + + err_create_queue: + if (wptr_bo) +- amdgpu_amdkfd_free_gtt_mem(dev->adev, wptr_bo); ++ amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&wptr_bo); + err_wptr_map_gart: + err_bind_process: + err_pdd: +diff 
--git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +index 0c94bdfadaabf7..9d0b0bf70ad1ea 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c +@@ -838,7 +838,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, + kfd_doorbell_error: + kfd_gtt_sa_fini(kfd); + kfd_gtt_sa_init_error: +- amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); ++ amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem); + alloc_gtt_mem_failure: + dev_err(kfd_device, + "device %x:%x NOT added due to errors\n", +@@ -856,7 +856,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) + kfd_doorbell_fini(kfd); + ida_destroy(&kfd->doorbell_ida); + kfd_gtt_sa_fini(kfd); +- amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); ++ amdgpu_amdkfd_free_gtt_mem(kfd->adev, &kfd->gtt_mem); + } + + kfree(kfd); +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +index 60d98301ef0418..4d9a406925e189 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +@@ -2610,7 +2610,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, + { + WARN(!mqd, "No hiq sdma mqd trunk to free"); + +- amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem); ++ amdgpu_amdkfd_free_gtt_mem(dev->adev, &mqd->gtt_mem); + } + + void device_queue_manager_uninit(struct device_queue_manager *dqm) +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +index 447829c22295c6..4c3f379803117e 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +@@ -223,7 +223,7 @@ void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd, + struct kfd_mem_obj *mqd_mem_obj) + { + if (mqd_mem_obj->gtt_mem) { +- amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, mqd_mem_obj->gtt_mem); ++ amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, 
&mqd_mem_obj->gtt_mem); + kfree(mqd_mem_obj); + } else { + kfd_gtt_sa_free(mm->dev, mqd_mem_obj); +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c +index d98e45aec76b45..43f520b3796700 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c +@@ -1047,7 +1047,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) + + if (pdd->dev->kfd->shared_resources.enable_mes) + amdgpu_amdkfd_free_gtt_mem(pdd->dev->adev, +- pdd->proc_ctx_bo); ++ &pdd->proc_ctx_bo); + /* + * before destroying pdd, make sure to report availability + * for auto suspend +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +index 8aca92624a77ea..0583af4e84fa3f 100644 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +@@ -199,9 +199,9 @@ static void pqm_clean_queue_resource(struct process_queue_manager *pqm, + } + + if (dev->kfd->shared_resources.enable_mes) { +- amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo); ++ amdgpu_amdkfd_free_gtt_mem(dev->adev, &pqn->q->gang_ctx_bo); + if (pqn->q->wptr_bo) +- amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo); ++ amdgpu_amdkfd_free_gtt_mem(dev->adev, (void **)&pqn->q->wptr_bo); + } + } + +@@ -982,6 +982,7 @@ int kfd_criu_restore_queue(struct kfd_process *p, + pr_debug("Queue id %d was restored successfully\n", queue_id); + + kfree(q_data); ++ kfree(q_extra_data); + + return ret; + } +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index 7731ce7762b77d..a3f17c572bf06e 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -714,6 +714,12 @@ static void dmub_hpd_callback(struct amdgpu_device *adev, + return; + } + ++ /* Skip DMUB HPD IRQ in suspend/resume. 
We will probe them later. */ ++ if (notify->type == DMUB_NOTIFICATION_HPD && adev->in_suspend) { ++ DRM_INFO("Skip DMUB HPD IRQ callback in suspend/resume\n"); ++ return; ++ } ++ + link_index = notify->link_index; + link = adev->dm.dc->links[link_index]; + dev = adev->dm.ddev; +@@ -4058,7 +4064,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, + int spread = caps.max_input_signal - caps.min_input_signal; + + if (caps.max_input_signal > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || +- caps.min_input_signal < AMDGPU_DM_DEFAULT_MIN_BACKLIGHT || ++ caps.min_input_signal < 0 || + spread > AMDGPU_DM_DEFAULT_MAX_BACKLIGHT || + spread < AMDGPU_DM_MIN_SPREAD) { + DRM_DEBUG_KMS("DM: Invalid backlight caps: min=%d, max=%d\n", +@@ -6153,12 +6159,21 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + stream->signal == SIGNAL_TYPE_EDP) { ++ const struct dc_edid_caps *edid_caps; ++ unsigned int disable_colorimetry = 0; ++ ++ if (aconnector->dc_sink) { ++ edid_caps = &aconnector->dc_sink->edid_caps; ++ disable_colorimetry = edid_caps->panel_patch.disable_colorimetry; ++ } ++ + // + // should decide stream support vsc sdp colorimetry capability + // before building vsc info packet + // + stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && +- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; ++ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && ++ !disable_colorimetry; + + if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) + tf = TRANSFER_FUNC_GAMMA_22; +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +index fdd2d16b859f21..227a148b0f82a5 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c ++++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +@@ -71,6 +71,10 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps) + DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id); + edid_caps->panel_patch.remove_sink_ext_caps = true; + break; ++ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154): ++ DRM_DEBUG_DRIVER("Disabling VSC on monitor with panel id %X\n", panel_id); ++ edid_caps->panel_patch.disable_colorimetry = true; ++ break; + default: + return; + } +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +index 7fb11445a28fa5..d390e3d62e56e3 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +@@ -1266,9 +1266,6 @@ static bool is_dsc_need_re_compute( + } + } + +- if (new_stream_on_link_num == 0) +- return false; +- + if (new_stream_on_link_num == 0) + return false; + +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +index fa9f53b3107938..d1329f20b7bd4b 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +@@ -1281,7 +1281,8 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, + adev->dm.dc->caps.color.dpp.gamma_corr) + attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; + +- attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; ++ if (afb) ++ attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; + + if (crtc_state->stream) { + mutex_lock(&adev->dm.dc_lock); +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c +index 50e643bfdfbad1..a7a6f6c5c76556 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c +@@ -3797,7 +3797,8 @@ 
static void commit_planes_for_stream(struct dc *dc, + } + + if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) +- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { ++ if (top_pipe_to_program && ++ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { + top_pipe_to_program->stream_res.tg->funcs->wait_for_state( + top_pipe_to_program->stream_res.tg, + CRTC_STATE_VACTIVE); +@@ -4715,7 +4716,8 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) + if (allow == dc->idle_optimizations_allowed) + return; + +- if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow)) ++ if (dc->hwss.apply_idle_power_optimizations && dc->clk_mgr != NULL && ++ dc->hwss.apply_idle_power_optimizations(dc, allow)) + dc->idle_optimizations_allowed = allow; + } + +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +index 733e445331ea50..99fcd39bb15e0d 100644 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +@@ -2154,6 +2154,8 @@ static bool are_stream_backends_same( + bool dc_is_stream_unchanged( + struct dc_stream_state *old_stream, struct dc_stream_state *stream) + { ++ if (!old_stream || !stream) ++ return false; + + if (!are_stream_backends_same(old_stream, stream)) + return false; +@@ -2877,8 +2879,10 @@ static bool planes_changed_for_existing_stream(struct dc_state *context, + } + } + +- if (!stream_status) ++ if (!stream_status) { + ASSERT(0); ++ return false; ++ } + + for (i = 0; i < set_count; i++) + if (set[i].stream == stream) +diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h +index cc173ecf78e0cf..6eaa02a80344bc 100644 +--- a/drivers/gpu/drm/amd/display/dc/dc_types.h ++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h +@@ -190,6 +190,7 @@ struct dc_panel_patch { + unsigned int skip_avmute; + 
unsigned int mst_start_top_delay; + unsigned int remove_sink_ext_caps; ++ unsigned int disable_colorimetry; + }; + + struct dc_edid_caps { +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +index c0372aa4ec8380..684e30f9cf8985 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +@@ -571,6 +571,8 @@ bool cm_helper_translate_curve_to_degamma_hw_format( + i += increment) { + if (j == hw_points - 1) + break; ++ if (i >= TRANSFER_FUNC_POINTS) ++ return false; + rgb_resulted[j].red = output_tf->tf_pts.red[i]; + rgb_resulted[j].green = output_tf->tf_pts.green[i]; + rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; +diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +index e0df9b0065f9c0..62c02adae7e76e 100644 +--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c ++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +@@ -178,6 +178,8 @@ bool cm3_helper_translate_curve_to_hw_format( + i += increment) { + if (j == hw_points - 1) + break; ++ if (i >= TRANSFER_FUNC_POINTS) ++ return false; + rgb_resulted[j].red = output_tf->tf_pts.red[i]; + rgb_resulted[j].green = output_tf->tf_pts.green[i]; + rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; +@@ -355,6 +357,8 @@ bool cm3_helper_translate_curve_to_degamma_hw_format( + i += increment) { + if (j == hw_points - 1) + break; ++ if (i >= TRANSFER_FUNC_POINTS) ++ return false; + rgb_resulted[j].red = output_tf->tf_pts.red[i]; + rgb_resulted[j].green = output_tf->tf_pts.green[i]; + rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +index 0fc9f3e3ffaefd..f603486af6e306 100644 +--- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +@@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, + + static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma) + { +- unsigned int ret_val = 0; ++ unsigned int ret_val = 1; + + if (source_format == dm_444_16) { + if (!is_chroma) +diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +index 618f4b682ab1b1..9f28e4d3c664c7 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c ++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +@@ -53,7 +53,7 @@ static void calculate_ttu_cursor( + + static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma) + { +- unsigned int ret_val = 0; ++ unsigned int ret_val = 1; + + if (source_format == dm_444_16) { + if (!is_chroma) +diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +index e1257404357b11..cec68c5dba1322 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c ++++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c +@@ -28,6 +28,8 @@ + #include "dccg.h" + #include "clk_mgr.h" + ++#define DC_LOGGER link->ctx->logger ++ + void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size) + { +@@ -108,6 +110,11 @@ void enable_hpo_dp_link_output(struct dc_link *link, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) + { ++ if (!link_res->hpo_dp_link_enc) { ++ DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__); ++ return; ++ } ++ + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + 
link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, +@@ -124,6 +131,11 @@ void disable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) + { ++ if (!link_res->hpo_dp_link_enc) { ++ DC_LOG_ERROR("%s: invalid hpo_dp_link_enc\n", __func__); ++ return; ++ } ++ + link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); + link_res->hpo_dp_link_enc->funcs->disable_link_phy( + link_res->hpo_dp_link_enc, signal); +diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c +index 33bb96f770b866..eb7c9f226af5cb 100644 +--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c ++++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c +@@ -403,7 +403,7 @@ static void link_destruct(struct dc_link *link) + if (link->panel_cntl) + link->panel_cntl->funcs->destroy(&link->panel_cntl); + +- if (link->link_enc) { ++ if (link->link_enc && !link->is_dig_mapping_flexible) { + /* Update link encoder resource tracking variables. These are used for + * the dynamic assignment of link encoders to streams. Virtual links + * are not assigned encoder resources on creation. 
+diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c +index 5794b64507bf94..56a22575258064 100644 +--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c ++++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c +@@ -1185,6 +1185,8 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, + fw_info = smu_atom_get_data_table(hwmgr->adev, + GetIndexIntoMasterTable(DATA, FirmwareInfo), + &size, &frev, &crev); ++ PP_ASSERT_WITH_CODE(fw_info != NULL, ++ "Missing firmware info!", return -EINVAL); + + if ((fw_info->ucTableFormatRevision == 1) + && (le16_to_cpu(fw_info->usStructureSize) >= sizeof(ATOM_FIRMWARE_INFO_V1_4))) +diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +index 2611afd2c1c136..ef2b6ce544d0a8 100644 +--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c ++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +@@ -1291,17 +1291,6 @@ static int adv7511_probe(struct i2c_client *i2c) + + INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work); + +- if (i2c->irq) { +- init_waitqueue_head(&adv7511->wq); +- +- ret = devm_request_threaded_irq(dev, i2c->irq, NULL, +- adv7511_irq_handler, +- IRQF_ONESHOT, dev_name(dev), +- adv7511); +- if (ret) +- goto err_unregister_cec; +- } +- + adv7511_power_off(adv7511); + + i2c_set_clientdata(i2c, adv7511); +@@ -1325,6 +1314,17 @@ static int adv7511_probe(struct i2c_client *i2c) + + adv7511_audio_init(dev, adv7511); + ++ if (i2c->irq) { ++ init_waitqueue_head(&adv7511->wq); ++ ++ ret = devm_request_threaded_irq(dev, i2c->irq, NULL, ++ adv7511_irq_handler, ++ IRQF_ONESHOT, dev_name(dev), ++ adv7511); ++ if (ret) ++ goto err_unregister_audio; ++ } ++ + if (adv7511->type == ADV7533 || adv7511->type == ADV7535) { + ret = adv7533_attach_dsi(adv7511); + if (ret) +diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c +index 98d3b10c08ae19..ab03b08433f8f3 100644 +--- 
a/drivers/gpu/drm/drm_atomic_uapi.c ++++ b/drivers/gpu/drm/drm_atomic_uapi.c +@@ -585,7 +585,7 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane, + &state->fb_damage_clips, + val, + -1, +- sizeof(struct drm_rect), ++ sizeof(struct drm_mode_rect), + &replaced); + return ret; + } else if (property == plane->scaling_filter_property) { +diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c +index 5b93c11895bb1e..aab76334083e8a 100644 +--- a/drivers/gpu/drm/drm_print.c ++++ b/drivers/gpu/drm/drm_print.c +@@ -100,8 +100,9 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str) + copy = iterator->remain; + + /* Copy out the bit of the string that we need */ +- memcpy(iterator->data, +- str + (iterator->start - iterator->offset), copy); ++ if (iterator->data) ++ memcpy(iterator->data, ++ str + (iterator->start - iterator->offset), copy); + + iterator->offset = iterator->start + copy; + iterator->remain -= copy; +@@ -110,7 +111,8 @@ void __drm_puts_coredump(struct drm_printer *p, const char *str) + + len = min_t(ssize_t, strlen(str), iterator->remain); + +- memcpy(iterator->data + pos, str, len); ++ if (iterator->data) ++ memcpy(iterator->data + pos, str, len); + + iterator->offset += len; + iterator->remain -= len; +@@ -140,8 +142,9 @@ void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf) + if ((iterator->offset >= iterator->start) && (len < iterator->remain)) { + ssize_t pos = iterator->offset - iterator->start; + +- snprintf(((char *) iterator->data) + pos, +- iterator->remain, "%pV", vaf); ++ if (iterator->data) ++ snprintf(((char *) iterator->data) + pos, ++ iterator->remain, "%pV", vaf); + + iterator->offset += len; + iterator->remain -= len; +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +index 9227f8146a583f..6dc097a2ac07b3 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +@@ -1136,7 +1136,7 @@ static 
vm_fault_t vm_fault_ttm(struct vm_fault *vmf) + GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(bo->resource)); + } + +- if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) ++ if (wakeref && CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND != 0) + intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref, + msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); + +diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +index 8f0c47e8687485..036028b8f5248d 100644 +--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c ++++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c +@@ -436,8 +436,10 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma + } + + comp_pdev = of_find_device_by_node(node); +- if (!comp_pdev) ++ if (!comp_pdev) { ++ of_node_put(node); + return -EPROBE_DEFER; ++ } + + priv->ovl_adaptor_comp[id] = &comp_pdev->dev; + +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +index 4127e2762dcd10..a2df8bd7aa940f 100644 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c +@@ -1071,6 +1071,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, + adreno_gpu->chip_id = config->chip_id; + + gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1; ++ gpu->pdev = pdev; + + /* Only handle the core clock when GMU is not in use (or is absent). 
*/ + if (adreno_has_gmu_wrapper(adreno_gpu) || +diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c +index 5c10b559a5957a..5a7541597d0ce8 100644 +--- a/drivers/gpu/drm/msm/msm_gpu.c ++++ b/drivers/gpu/drm/msm/msm_gpu.c +@@ -927,7 +927,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, + if (IS_ERR(gpu->gpu_cx)) + gpu->gpu_cx = NULL; + +- gpu->pdev = pdev; + platform_set_drvdata(pdev, &gpu->adreno_smmu); + + msm_devfreq_init(gpu); +diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c +index e000577a95dd75..21996b713d1c3a 100644 +--- a/drivers/gpu/drm/omapdrm/omap_drv.c ++++ b/drivers/gpu/drm/omapdrm/omap_drv.c +@@ -695,6 +695,10 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) + soc = soc_device_match(omapdrm_soc_devices); + priv->omaprev = soc ? (uintptr_t)soc->data : 0; + priv->wq = alloc_ordered_workqueue("omapdrm", 0); ++ if (!priv->wq) { ++ ret = -ENOMEM; ++ goto err_alloc_workqueue; ++ } + + mutex_init(&priv->list_lock); + INIT_LIST_HEAD(&priv->obj_list); +@@ -753,6 +757,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) + drm_mode_config_cleanup(ddev); + omap_gem_deinit(ddev); + destroy_workqueue(priv->wq); ++err_alloc_workqueue: + omap_disconnect_pipelines(ddev); + drm_dev_put(ddev); + return ret; +diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c +index cfeca2694d5f91..b63b6b4e9b2818 100644 +--- a/drivers/gpu/drm/radeon/r100.c ++++ b/drivers/gpu/drm/radeon/r100.c +@@ -1015,45 +1015,65 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) + + DRM_DEBUG_KMS("\n"); + +- if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) || +- (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) || +- (rdev->family == CHIP_RS200)) { ++ switch (rdev->family) { ++ case CHIP_R100: ++ case CHIP_RV100: ++ case CHIP_RV200: ++ case CHIP_RS100: ++ case CHIP_RS200: + DRM_INFO("Loading R100 
Microcode\n"); + fw_name = FIRMWARE_R100; +- } else if ((rdev->family == CHIP_R200) || +- (rdev->family == CHIP_RV250) || +- (rdev->family == CHIP_RV280) || +- (rdev->family == CHIP_RS300)) { ++ break; ++ ++ case CHIP_R200: ++ case CHIP_RV250: ++ case CHIP_RV280: ++ case CHIP_RS300: + DRM_INFO("Loading R200 Microcode\n"); + fw_name = FIRMWARE_R200; +- } else if ((rdev->family == CHIP_R300) || +- (rdev->family == CHIP_R350) || +- (rdev->family == CHIP_RV350) || +- (rdev->family == CHIP_RV380) || +- (rdev->family == CHIP_RS400) || +- (rdev->family == CHIP_RS480)) { ++ break; ++ ++ case CHIP_R300: ++ case CHIP_R350: ++ case CHIP_RV350: ++ case CHIP_RV380: ++ case CHIP_RS400: ++ case CHIP_RS480: + DRM_INFO("Loading R300 Microcode\n"); + fw_name = FIRMWARE_R300; +- } else if ((rdev->family == CHIP_R420) || +- (rdev->family == CHIP_R423) || +- (rdev->family == CHIP_RV410)) { ++ break; ++ ++ case CHIP_R420: ++ case CHIP_R423: ++ case CHIP_RV410: + DRM_INFO("Loading R400 Microcode\n"); + fw_name = FIRMWARE_R420; +- } else if ((rdev->family == CHIP_RS690) || +- (rdev->family == CHIP_RS740)) { ++ break; ++ ++ case CHIP_RS690: ++ case CHIP_RS740: + DRM_INFO("Loading RS690/RS740 Microcode\n"); + fw_name = FIRMWARE_RS690; +- } else if (rdev->family == CHIP_RS600) { ++ break; ++ ++ case CHIP_RS600: + DRM_INFO("Loading RS600 Microcode\n"); + fw_name = FIRMWARE_RS600; +- } else if ((rdev->family == CHIP_RV515) || +- (rdev->family == CHIP_R520) || +- (rdev->family == CHIP_RV530) || +- (rdev->family == CHIP_R580) || +- (rdev->family == CHIP_RV560) || +- (rdev->family == CHIP_RV570)) { ++ break; ++ ++ case CHIP_RV515: ++ case CHIP_R520: ++ case CHIP_RV530: ++ case CHIP_R580: ++ case CHIP_RV560: ++ case CHIP_RV570: + DRM_INFO("Loading R500 Microcode\n"); + fw_name = FIRMWARE_R520; ++ break; ++ ++ default: ++ DRM_ERROR("Unsupported Radeon family %u\n", rdev->family); ++ return -EINVAL; + } + + err = request_firmware(&rdev->me_fw, fw_name, rdev->dev); +diff --git 
a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +index c6fbfc0baeccd4..ee72e8c6ad69bd 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +@@ -1566,6 +1566,10 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc, + VOP_AFBC_SET(vop, enable, s->enable_afbc); + vop_cfg_done(vop); + ++ /* Ack the DMA transfer of the previous frame (RK3066). */ ++ if (VOP_HAS_REG(vop, common, dma_stop)) ++ VOP_REG_SET(vop, common, dma_stop, 0); ++ + spin_unlock(&vop->reg_lock); + + /* +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +index 5f56e0597df84a..c5c716a69171a8 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +@@ -122,6 +122,7 @@ struct vop_common { + struct vop_reg lut_buffer_index; + struct vop_reg gate_en; + struct vop_reg mmu_en; ++ struct vop_reg dma_stop; + struct vop_reg out_mode; + struct vop_reg standby; + }; +diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +index 7b28050067769d..f7d0edd762b36c 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c ++++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +@@ -435,6 +435,7 @@ static const struct vop_output rk3066_output = { + }; + + static const struct vop_common rk3066_common = { ++ .dma_stop = VOP_REG(RK3066_SYS_CTRL0, 0x1, 0), + .standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1), + .out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0), + .cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0), +@@ -483,6 +484,7 @@ static const struct vop_data rk3066_vop = { + .output = &rk3066_output, + .win = rk3066_vop_win_data, + .win_size = ARRAY_SIZE(rk3066_vop_win_data), ++ .feature = VOP_FEATURE_INTERNAL_RGB, + .max_output = { 1920, 1080 }, + }; + +diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c +index 
a42763e1429dc1..d3462be7493037 100644 +--- a/drivers/gpu/drm/scheduler/sched_entity.c ++++ b/drivers/gpu/drm/scheduler/sched_entity.c +@@ -111,8 +111,10 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, + { + WARN_ON(!num_sched_list || !sched_list); + ++ spin_lock(&entity->rq_lock); + entity->sched_list = sched_list; + entity->num_sched_list = num_sched_list; ++ spin_unlock(&entity->rq_lock); + } + EXPORT_SYMBOL(drm_sched_entity_modify_sched); + +diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c +index 4d2db079ad4ff3..e1232f74dfa537 100644 +--- a/drivers/gpu/drm/stm/drv.c ++++ b/drivers/gpu/drm/stm/drv.c +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + + #include "ltdc.h" + +@@ -75,7 +76,7 @@ static int drv_load(struct drm_device *ddev) + + DRM_DEBUG("%s\n", __func__); + +- ldev = devm_kzalloc(ddev->dev, sizeof(*ldev), GFP_KERNEL); ++ ldev = drmm_kzalloc(ddev, sizeof(*ldev), GFP_KERNEL); + if (!ldev) + return -ENOMEM; + +diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c +index 5aec1e58c968c2..0832b749b66e7f 100644 +--- a/drivers/gpu/drm/stm/ltdc.c ++++ b/drivers/gpu/drm/stm/ltdc.c +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + + #include