diff -u linux-6.2.0/Documentation/admin-guide/hw-vuln/index.rst linux-6.2.0/Documentation/admin-guide/hw-vuln/index.rst --- linux-6.2.0/Documentation/admin-guide/hw-vuln/index.rst +++ linux-6.2.0/Documentation/admin-guide/hw-vuln/index.rst @@ -16,7 +16,8 @@ - multihit.rst - special-register-buffer-data-sampling.rst - core-scheduling.rst - l1d_flush.rst - processor_mmio_stale_data.rst - cross-thread-rsb.rst - gather_data_sampling.rst + multihit + special-register-buffer-data-sampling + core-scheduling + l1d_flush + processor_mmio_stale_data + cross-thread-rsb + srso + gather_data_sampling diff -u linux-6.2.0/Documentation/admin-guide/kernel-parameters.txt linux-6.2.0/Documentation/admin-guide/kernel-parameters.txt --- linux-6.2.0/Documentation/admin-guide/kernel-parameters.txt +++ linux-6.2.0/Documentation/admin-guide/kernel-parameters.txt @@ -5809,6 +5809,17 @@ Not specifying this option is equivalent to spectre_v2_user=auto. + spec_rstack_overflow= + [X86] Control RAS overflow mitigation on AMD Zen CPUs + + off - Disable mitigation + microcode - Enable microcode mitigation only + safe-ret - Enable sw-only safe RET mitigation (default) + ibpb - Enable mitigation by issuing IBPB on + kernel entry + ibpb-vmexit - Issue IBPB only on VMEXIT + (cloud-specific mitigation) + spec_store_bypass_disable= [HW] Control Speculative Store Bypass (SSB) Disable mitigation (Speculative Store Bypass vulnerability) diff -u linux-6.2.0/Documentation/networking/ip-sysctl.rst linux-6.2.0/Documentation/networking/ip-sysctl.rst --- linux-6.2.0/Documentation/networking/ip-sysctl.rst +++ linux-6.2.0/Documentation/networking/ip-sysctl.rst @@ -1349,8 +1349,8 @@ Restrict ICMP_PROTO datagram sockets to users in the group range. The default is "1 0", meaning, that nobody (not even root) may create ping sockets. Setting it to "100 100" would grant permissions - to the single group. "0 4294967295" would enable it for the world, "100 - 4294967295" would enable it for the users, but not daemons. + to the single group. "0 4294967294" would enable it for the world, "100 + 4294967294" would enable it for the users, but not daemons. tcp_early_demux - BOOLEAN Enable early demux for established TCP sockets. diff -u linux-6.2.0/arch/arm64/boot/dts/qcom/sc8280xp.dtsi linux-6.2.0/arch/arm64/boot/dts/qcom/sc8280xp.dtsi --- linux-6.2.0/arch/arm64/boot/dts/qcom/sc8280xp.dtsi +++ linux-6.2.0/arch/arm64/boot/dts/qcom/sc8280xp.dtsi @@ -2308,6 +2308,7 @@ qcom,tcs-config = , , , ; label = "apps_rsc"; + power-domains = <&CLUSTER_PD>; apps_bcm_voter: bcm-voter { compatible = "qcom,bcm-voter"; diff -u linux-6.2.0/arch/arm64/include/asm/kvm_pgtable.h linux-6.2.0/arch/arm64/include/asm/kvm_pgtable.h --- linux-6.2.0/arch/arm64/include/asm/kvm_pgtable.h +++ linux-6.2.0/arch/arm64/include/asm/kvm_pgtable.h @@ -624,9 +624,9 @@ * * The walker will walk the page-table entries corresponding to the input * address range specified, visiting entries according to the walker flags. - * Invalid entries are treated as leaf entries. Leaf entries are reloaded - * after invoking the walker callback, allowing the walker to descend into - * a newly installed table. + * Invalid entries are treated as leaf entries. The visited page table entry is + * reloaded after invoking the walker callback, allowing the walker to descend + * into a newly installed table. * * Returning a negative error code from the walker callback function will * terminate the walk immediately with the same error code. 
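[Editor's note, illustrative only] The kvm_pgtable.h hunk above changes the documented walker semantics: the visited page-table entry is reloaded after the walker callback, so a callback that installs a table behind a previously invalid entry gets descended into. A minimal userspace sketch of that reload-then-descend pattern follows; the toy_* names and the two-level structure are invented for the example and are not the kernel's walker.

/*
 * Userspace sketch of "invoke callback, reload entry, descend if a table
 * was installed".  Not kernel code; mirrors the behaviour documented for
 * kvm_pgtable_walk() and implemented in __kvm_pgtable_visit().
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_ENTRIES 4

struct toy_pte {
	int valid;
	struct toy_pte *table;	/* non-NULL when the entry points to a table */
};

/* Callback: may install a table behind an invalid (leaf) entry. */
static int toy_visit(struct toy_pte *pte, int level)
{
	if (!pte->valid && level == 0) {
		pte->table = calloc(TOY_ENTRIES, sizeof(*pte->table));
		pte->valid = 1;
		printf("installed table at level %d\n", level);
	}
	return 0;
}

static void toy_walk(struct toy_pte *table, int level)
{
	for (int i = 0; i < TOY_ENTRIES; i++) {
		struct toy_pte *pte = &table[i];

		/* Invalid entries are visited as leaves. */
		toy_visit(pte, level);

		/*
		 * "Reload" the visited entry after the callback: if the
		 * callback installed a table, descend into it now.
		 */
		if (pte->valid && pte->table)
			toy_walk(pte->table, level + 1);
	}
}

int main(void)
{
	struct toy_pte root[TOY_ENTRIES] = { 0 };

	toy_walk(root, 0);
	return 0;
}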
diff -u linux-6.2.0/arch/arm64/kvm/hyp/pgtable.c linux-6.2.0/arch/arm64/kvm/hyp/pgtable.c --- linux-6.2.0/arch/arm64/kvm/hyp/pgtable.c +++ linux-6.2.0/arch/arm64/kvm/hyp/pgtable.c @@ -190,14 +190,26 @@ .flags = flags, }; int ret = 0; + bool reload = false; kvm_pteref_t childp; bool table = kvm_pte_table(ctx.old, level); - if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) + if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) { ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE); + reload = true; + } if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) { ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF); + reload = true; + } + + /* + * Reload the page table after invoking the walker callback for leaf + * entries or after pre-order traversal, to allow the walker to descend + * into a newly installed or replaced table. + */ + if (reload) { ctx.old = READ_ONCE(*ptep); table = kvm_pte_table(ctx.old, level); } @@ -1292,2 +1304,5 @@ WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1)); + + WARN_ON(mm_ops->page_count(pgtable) != 1); + mm_ops->put_page(pgtable); } diff -u linux-6.2.0/arch/arm64/kvm/vgic/vgic-init.c linux-6.2.0/arch/arm64/kvm/vgic/vgic-init.c --- linux-6.2.0/arch/arm64/kvm/vgic/vgic-init.c +++ linux-6.2.0/arch/arm64/kvm/vgic/vgic-init.c @@ -235,9 +235,9 @@ * KVM io device for the redistributor that belongs to this VCPU. */ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { - mutex_lock(&vcpu->kvm->arch.config_lock); + mutex_lock(&vcpu->kvm->slots_lock); ret = vgic_register_redist_iodev(vcpu); - mutex_unlock(&vcpu->kvm->arch.config_lock); + mutex_unlock(&vcpu->kvm->slots_lock); } return ret; } @@ -446,11 +446,13 @@ int kvm_vgic_map_resources(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; + gpa_t dist_base; int ret = 0; if (likely(vgic_ready(kvm))) return 0; + mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->arch.config_lock); if (vgic_ready(kvm)) goto out; @@ -463,13 +465,26 @@ else ret = vgic_v3_map_resources(kvm); - if (ret) + if (ret) { __kvm_vgic_destroy(kvm); - else - dist->ready = true; + goto out; + } + dist->ready = true; + dist_base = dist->vgic_dist_base; + mutex_unlock(&kvm->arch.config_lock); + + ret = vgic_register_dist_iodev(kvm, dist_base, + kvm_vgic_global_state.type); + if (ret) { + kvm_err("Unable to register VGIC dist MMIO regions\n"); + kvm_vgic_destroy(kvm); + } + mutex_unlock(&kvm->slots_lock); + return ret; out: mutex_unlock(&kvm->arch.config_lock); + mutex_unlock(&kvm->slots_lock); return ret; } diff -u linux-6.2.0/arch/arm64/kvm/vgic/vgic-its.c linux-6.2.0/arch/arm64/kvm/vgic/vgic-its.c --- linux-6.2.0/arch/arm64/kvm/vgic/vgic-its.c +++ linux-6.2.0/arch/arm64/kvm/vgic/vgic-its.c @@ -1936,6 +1936,7 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) { + int ret; struct vgic_its *its; if (type != KVM_DEV_TYPE_ARM_VGIC_ITS) @@ -1945,9 +1946,12 @@ if (!its) return -ENOMEM; + mutex_lock(&dev->kvm->arch.config_lock); + if (vgic_initialized(dev->kvm)) { - int ret = vgic_v4_init(dev->kvm); + ret = vgic_v4_init(dev->kvm); if (ret < 0) { + mutex_unlock(&dev->kvm->arch.config_lock); kfree(its); return ret; } @@ -1960,12 +1964,10 @@ /* Yep, even more trickery for lock ordering... 
*/ #ifdef CONFIG_LOCKDEP - mutex_lock(&dev->kvm->arch.config_lock); mutex_lock(&its->cmd_lock); mutex_lock(&its->its_lock); mutex_unlock(&its->its_lock); mutex_unlock(&its->cmd_lock); - mutex_unlock(&dev->kvm->arch.config_lock); #endif its->vgic_its_base = VGIC_ADDR_UNDEF; @@ -1986,7 +1988,11 @@ dev->private = its; - return vgic_its_set_abi(its, NR_ITS_ABIS - 1); + ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1); + + mutex_unlock(&dev->kvm->arch.config_lock); + + return ret; } static void vgic_its_destroy(struct kvm_device *kvm_dev) diff -u linux-6.2.0/arch/arm64/kvm/vgic/vgic-kvm-device.c linux-6.2.0/arch/arm64/kvm/vgic/vgic-kvm-device.c --- linux-6.2.0/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ linux-6.2.0/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -102,7 +102,11 @@ if (get_user(addr, uaddr)) return -EFAULT; - mutex_lock(&kvm->arch.config_lock); + /* + * Since we can't hold config_lock while registering the redistributor + * iodevs, take the slots_lock immediately. + */ + mutex_lock(&kvm->slots_lock); switch (attr->attr) { case KVM_VGIC_V2_ADDR_TYPE_DIST: r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); @@ -182,6 +186,7 @@ if (r) goto out; + mutex_lock(&kvm->arch.config_lock); if (write) { r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size); if (!r) @@ -189,9 +194,10 @@ } else { addr = *addr_ptr; } + mutex_unlock(&kvm->arch.config_lock); out: - mutex_unlock(&kvm->arch.config_lock); + mutex_unlock(&kvm->slots_lock); if (!r && !write) r = put_user(addr, uaddr); diff -u linux-6.2.0/arch/arm64/kvm/vgic/vgic-mmio-v3.c linux-6.2.0/arch/arm64/kvm/vgic/vgic-mmio-v3.c --- linux-6.2.0/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ linux-6.2.0/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -769,10 +769,13 @@ struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev; struct vgic_redist_region *rdreg; gpa_t rd_base; - int ret; + int ret = 0; + + lockdep_assert_held(&kvm->slots_lock); + mutex_lock(&kvm->arch.config_lock); if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) - return 0; + goto out_unlock; /* * We may be creating VCPUs before having set the base address for the @@ -782,10 +785,12 @@ */ rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions); if (!rdreg) - return 0; + goto out_unlock; - if (!vgic_v3_check_base(kvm)) - return -EINVAL; + if (!vgic_v3_check_base(kvm)) { + ret = -EINVAL; + goto out_unlock; + } vgic_cpu->rdreg = rdreg; vgic_cpu->rdreg_index = rdreg->free_index; @@ -799,16 +804,20 @@ rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers); rd_dev->redist_vcpu = vcpu; - mutex_lock(&kvm->slots_lock); + mutex_unlock(&kvm->arch.config_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base, 2 * SZ_64K, &rd_dev->dev); - mutex_unlock(&kvm->slots_lock); - if (ret) return ret; + /* Protected by slots_lock */ rdreg->free_index++; return 0; + +out_unlock: + mutex_unlock(&kvm->arch.config_lock); + return ret; } static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) @@ -834,12 +843,10 @@ /* The current c failed, so iterate over the previous ones. 
*/ int i; - mutex_lock(&kvm->slots_lock); for (i = 0; i < c; i++) { vcpu = kvm_get_vcpu(kvm, i); vgic_unregister_redist_iodev(vcpu); } - mutex_unlock(&kvm->slots_lock); } return ret; @@ -938,7 +945,9 @@ { int ret; + mutex_lock(&kvm->arch.config_lock); ret = vgic_v3_alloc_redist_region(kvm, index, addr, count); + mutex_unlock(&kvm->arch.config_lock); if (ret) return ret; @@ -950,8 +959,10 @@ if (ret) { struct vgic_redist_region *rdreg; + mutex_lock(&kvm->arch.config_lock); rdreg = vgic_v3_rdist_region_from_index(kvm, index); vgic_v3_free_redist_region(rdreg); + mutex_unlock(&kvm->arch.config_lock); return ret; } diff -u linux-6.2.0/arch/arm64/kvm/vgic/vgic-mmio.c linux-6.2.0/arch/arm64/kvm/vgic/vgic-mmio.c --- linux-6.2.0/arch/arm64/kvm/vgic/vgic-mmio.c +++ linux-6.2.0/arch/arm64/kvm/vgic/vgic-mmio.c @@ -1093,7 +1093,6 @@ enum vgic_type type) { struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev; - int ret = 0; unsigned int len; switch (type) { @@ -1113,8 +1112,4 @@ - mutex_lock(&kvm->slots_lock); - ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address, - len, &io_device->dev); - mutex_unlock(&kvm->slots_lock); - - return ret; + return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address, + len, &io_device->dev); } diff -u linux-6.2.0/arch/arm64/kvm/vgic/vgic-v4.c linux-6.2.0/arch/arm64/kvm/vgic/vgic-v4.c --- linux-6.2.0/arch/arm64/kvm/vgic/vgic-v4.c +++ linux-6.2.0/arch/arm64/kvm/vgic/vgic-v4.c @@ -184,13 +184,14 @@ } } -/* Must be called with the kvm lock held */ void vgic_v4_configure_vsgis(struct kvm *kvm) { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; unsigned long i; + lockdep_assert_held(&kvm->arch.config_lock); + kvm_arm_halt_guest(kvm); kvm_for_each_vcpu(i, vcpu, kvm) { diff -u linux-6.2.0/arch/arm64/mm/fault.c linux-6.2.0/arch/arm64/mm/fault.c --- linux-6.2.0/arch/arm64/mm/fault.c +++ linux-6.2.0/arch/arm64/mm/fault.c @@ -480,8 +480,8 @@ } } -#define VM_FAULT_BADMAP 0x010000 -#define VM_FAULT_BADACCESS 0x020000 +#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) +#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) static vm_fault_t __do_page_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, diff -u linux-6.2.0/arch/powerpc/platforms/pseries/iommu.c linux-6.2.0/arch/powerpc/platforms/pseries/iommu.c --- linux-6.2.0/arch/powerpc/platforms/pseries/iommu.c +++ linux-6.2.0/arch/powerpc/platforms/pseries/iommu.c @@ -311,13 +311,22 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages) { u64 rc; + long rpages = npages; + unsigned long limit; if (!firmware_has_feature(FW_FEATURE_STUFF_TCE)) return tce_free_pSeriesLP(tbl->it_index, tcenum, tbl->it_page_shift, npages); - rc = plpar_tce_stuff((u64)tbl->it_index, - (u64)tcenum << tbl->it_page_shift, 0, npages); + do { + limit = min_t(unsigned long, rpages, 512); + + rc = plpar_tce_stuff((u64)tbl->it_index, + (u64)tcenum << tbl->it_page_shift, 0, limit); + + rpages -= limit; + tcenum += limit; + } while (rpages > 0 && !rc); if (rc && printk_ratelimit()) { printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n"); diff -u linux-6.2.0/arch/powerpc/xmon/xmon.c linux-6.2.0/arch/powerpc/xmon/xmon.c --- linux-6.2.0/arch/powerpc/xmon/xmon.c +++ linux-6.2.0/arch/powerpc/xmon/xmon.c @@ -91,7 +91,7 @@ static unsigned long nidump = 16; static unsigned long ncsum = 4096; static int termch; -static char tmpstr[128]; +static char tmpstr[KSYM_NAME_LEN]; static int tracing_enabled; static long bus_error_jmp[JMP_BUF_LEN]; diff -u 
linux-6.2.0/arch/riscv/Kconfig linux-6.2.0/arch/riscv/Kconfig --- linux-6.2.0/arch/riscv/Kconfig +++ linux-6.2.0/arch/riscv/Kconfig @@ -25,6 +25,7 @@ select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_KCOV select ARCH_HAS_MMIOWB + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PMEM_API select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_SET_DIRECT_MAP if MMU diff -u linux-6.2.0/arch/riscv/include/asm/pgtable.h linux-6.2.0/arch/riscv/include/asm/pgtable.h --- linux-6.2.0/arch/riscv/include/asm/pgtable.h +++ linux-6.2.0/arch/riscv/include/asm/pgtable.h @@ -165,8 +165,7 @@ _PAGE_EXEC | _PAGE_WRITE) #define PAGE_COPY PAGE_READ -#define PAGE_COPY_EXEC PAGE_EXEC -#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC +#define PAGE_COPY_EXEC PAGE_READ_EXEC #define PAGE_SHARED PAGE_WRITE #define PAGE_SHARED_EXEC PAGE_WRITE_EXEC diff -u linux-6.2.0/arch/riscv/mm/init.c linux-6.2.0/arch/riscv/mm/init.c --- linux-6.2.0/arch/riscv/mm/init.c +++ linux-6.2.0/arch/riscv/mm/init.c @@ -286,7 +286,7 @@ [VM_EXEC] = PAGE_EXEC, [VM_EXEC | VM_READ] = PAGE_READ_EXEC, [VM_EXEC | VM_WRITE] = PAGE_COPY_EXEC, - [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_READ_EXEC, + [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_EXEC, [VM_SHARED] = PAGE_NONE, [VM_SHARED | VM_READ] = PAGE_READ, [VM_SHARED | VM_WRITE] = PAGE_SHARED, @@ -850,9 +850,9 @@ static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va, uintptr_t dtb_pa) { +#ifndef CONFIG_BUILTIN_DTB uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1); -#ifndef CONFIG_BUILTIN_DTB /* Make sure the fdt fixmap address is always aligned on PMD size */ BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE)); diff -u linux-6.2.0/arch/x86/Kconfig linux-6.2.0/arch/x86/Kconfig --- linux-6.2.0/arch/x86/Kconfig +++ linux-6.2.0/arch/x86/Kconfig @@ -2630,6 +2630,13 @@ This mitigates both spectre_v2 and retbleed at great cost to performance. +config CPU_SRSO + bool "Mitigate speculative RAS overflow on AMD" + depends on CPU_SUP_AMD && X86_64 && RETHUNK + default y + help + Enable the SRSO mitigation needed on AMD Zen1-4 machines. 
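[Editor's note, illustrative only] The CPU_SRSO option above and the spec_rstack_overflow= parameter documented earlier select one of the srso_strings[] states added later in this patch ("Mitigation: safe RET", "Mitigation: IBPB", ...). A small check program follows; it assumes the conventional sysfs location for cpu_show_* vulnerability attributes, which may differ on some kernels.

/*
 * Read the SRSO status reported by the cpu_show_spec_rstack_overflow()
 * handler added in this patch.  Path is an assumption based on the usual
 * /sys/devices/system/cpu/vulnerabilities/ layout.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow";
	char line[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* kernels without this patch lack the file */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("SRSO status: %s", line);	/* e.g. "Mitigation: safe RET" */
	fclose(f);
	return 0;
}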
+ config SLS bool "Mitigate Straight-Line-Speculation" depends on CC_HAS_SLS && X86_64 diff -u linux-6.2.0/arch/x86/include/asm/cpufeatures.h linux-6.2.0/arch/x86/include/asm/cpufeatures.h --- linux-6.2.0/arch/x86/include/asm/cpufeatures.h +++ linux-6.2.0/arch/x86/include/asm/cpufeatures.h @@ -13,8 +13,8 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 20 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NCAPINTS 21 /* N 32-bit words worth of info */ +#define NBUGINTS 2 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used @@ -308,6 +308,10 @@ #define X86_FEATURE_CALL_DEPTH (11*32+19) /* "" Call depth tracking for RSB stuffing */ #define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */ +#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ +#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ +#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ + /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ @@ -426,6 +430,12 @@ #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ +/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */ + +#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */ +#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ +#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ + /* * BUG word(s) */ @@ -471,2 +481,5 @@ +/* BUG word 2 */ +#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ +#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ #endif /* _ASM_X86_CPUFEATURES_H */ diff -u linux-6.2.0/arch/x86/include/asm/msr-index.h linux-6.2.0/arch/x86/include/asm/msr-index.h --- linux-6.2.0/arch/x86/include/asm/msr-index.h +++ linux-6.2.0/arch/x86/include/asm/msr-index.h @@ -55,6 +55,7 @@ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ +#define PRED_CMD_SBPB BIT(7) /* Selective Branch Prediction Barrier */ #define MSR_PPIN_CTL 0x0000004e #define MSR_PPIN 0x0000004f diff -u linux-6.2.0/arch/x86/include/asm/processor.h linux-6.2.0/arch/x86/include/asm/processor.h --- linux-6.2.0/arch/x86/include/asm/processor.h +++ linux-6.2.0/arch/x86/include/asm/processor.h @@ -680,9 +680,13 @@ #ifdef CONFIG_CPU_SUP_AMD extern u32 amd_get_nodes_per_socket(void); extern u32 amd_get_highest_perf(void); +extern bool cpu_has_ibpb_brtype_microcode(void); +extern void amd_clear_divider(void); #else static inline u32 amd_get_nodes_per_socket(void) { return 0; } static inline u32 amd_get_highest_perf(void) { return 0; } +static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; } +static inline void amd_clear_divider(void) { } #endif extern unsigned long arch_align_stack(unsigned long sp); @@ -727,2 +731,4 @@ +extern bool gds_ucode_mitigated(void); + #endif /* _ASM_X86_PROCESSOR_H */ diff -u linux-6.2.0/arch/x86/kernel/alternative.c linux-6.2.0/arch/x86/kernel/alternative.c --- linux-6.2.0/arch/x86/kernel/alternative.c +++ linux-6.2.0/arch/x86/kernel/alternative.c @@ -573,10 +573,6 @@ #ifdef 
CONFIG_RETHUNK -#ifdef CONFIG_CALL_THUNKS -void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; -#endif - /* * Rewrite the compiler generated return thunk tail-calls. * @@ -592,13 +588,12 @@ { int i = 0; + /* Patch the custom return thunks... */ if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { - if (x86_return_thunk == __x86_return_thunk) - return -1; - i = JMP32_INSN_SIZE; __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i); } else { + /* ... or patch them out if not needed. */ bytes[i++] = RET_INSN_OPCODE; } @@ -611,6 +606,14 @@ { s32 *s; + /* + * Do not patch out the default return thunks if those needed are the + * ones generated by the compiler. + */ + if (cpu_feature_enabled(X86_FEATURE_RETHUNK) && + (x86_return_thunk == __x86_return_thunk)) + return; + for (s = start; s < end; s++) { void *dest = NULL, *addr = (void *)s + *s; struct insn insn; diff -u linux-6.2.0/arch/x86/kernel/cpu/amd.c linux-6.2.0/arch/x86/kernel/cpu/amd.c --- linux-6.2.0/arch/x86/kernel/cpu/amd.c +++ linux-6.2.0/arch/x86/kernel/cpu/amd.c @@ -75,6 +75,10 @@ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); +static const int amd_div0[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), + AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { int osvw_id = *erratum++; @@ -1115,6 +1119,11 @@ check_null_seg_clears_base(c); zenbleed_check(c); + + if (cpu_has_amd_erratum(c, amd_div0)) { + pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); + setup_force_cpu_bug(X86_BUG_DIV0); + } } #ifdef CONFIG_X86_32 @@ -1258,0 +1268,30 @@ + +bool cpu_has_ibpb_brtype_microcode(void) +{ + switch (boot_cpu_data.x86) { + /* Zen1/2 IBPB flushes branch type predictions too. */ + case 0x17: + return boot_cpu_has(X86_FEATURE_AMD_IBPB); + case 0x19: + /* Poke the MSR bit on Zen3/4 to check its presence. */ + if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) { + setup_force_cpu_cap(X86_FEATURE_SBPB); + return true; + } else { + return false; + } + default: + return false; + } +} + +/* + * Issue a DIV 0/1 insn to clear any division data from previous DIV + * operations. + */ +void noinstr amd_clear_divider(void) +{ + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) + :: "a" (0), "d" (0), "r" (1)); +} +EXPORT_SYMBOL_GPL(amd_clear_divider); diff -u linux-6.2.0/arch/x86/kernel/cpu/bugs.c linux-6.2.0/arch/x86/kernel/cpu/bugs.c --- linux-6.2.0/arch/x86/kernel/cpu/bugs.c +++ linux-6.2.0/arch/x86/kernel/cpu/bugs.c @@ -46,6 +46,7 @@ static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); static void __init l1d_flush_select_mitigation(void); +static void __init srso_select_mitigation(void); static void __init gds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ @@ -56,8 +57,13 @@ DEFINE_PER_CPU(u64, x86_spec_ctrl_current); EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); +u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB; +EXPORT_SYMBOL_GPL(x86_pred_cmd); + static DEFINE_MUTEX(spec_ctrl_mutex); +void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { @@ -160,6 +166,12 @@ md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); + + /* + * srso_select_mitigation() depends and must run after + * retbleed_select_mitigation(). 
+ */ + srso_select_mitigation(); gds_select_mitigation(); } @@ -1030,6 +1042,9 @@ setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); + if (IS_ENABLED(CONFIG_RETHUNK)) + x86_return_thunk = retbleed_return_thunk; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); @@ -2334,6 +2349,170 @@ early_param("l1tf", l1tf_cmdline); #undef pr_fmt +#define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt + +enum srso_mitigation { + SRSO_MITIGATION_NONE, + SRSO_MITIGATION_MICROCODE, + SRSO_MITIGATION_SAFE_RET, + SRSO_MITIGATION_IBPB, + SRSO_MITIGATION_IBPB_ON_VMEXIT, +}; + +enum srso_mitigation_cmd { + SRSO_CMD_OFF, + SRSO_CMD_MICROCODE, + SRSO_CMD_SAFE_RET, + SRSO_CMD_IBPB, + SRSO_CMD_IBPB_ON_VMEXIT, +}; + +static const char * const srso_strings[] = { + [SRSO_MITIGATION_NONE] = "Vulnerable", + [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", + [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", + [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", + [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only" +}; + +static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; +static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET; + +static int __init srso_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + srso_cmd = SRSO_CMD_OFF; + else if (!strcmp(str, "microcode")) + srso_cmd = SRSO_CMD_MICROCODE; + else if (!strcmp(str, "safe-ret")) + srso_cmd = SRSO_CMD_SAFE_RET; + else if (!strcmp(str, "ibpb")) + srso_cmd = SRSO_CMD_IBPB; + else if (!strcmp(str, "ibpb-vmexit")) + srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT; + else + pr_err("Ignoring unknown SRSO option (%s).", str); + + return 0; +} +early_param("spec_rstack_overflow", srso_parse_cmdline); + +#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." + +static void __init srso_select_mitigation(void) +{ + bool has_microcode; + + if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) + goto pred_cmd; + + /* + * The first check is for the kernel running as a guest in order + * for guests to verify whether IBPB is a viable mitigation. + */ + has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode(); + if (!has_microcode) { + pr_warn("IBPB-extending microcode not applied!\n"); + pr_warn(SRSO_NOTICE); + } else { + /* + * Enable the synthetic (even if in a real CPUID leaf) + * flags for guests. + */ + setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); + + /* + * Zen1/2 with SMT off aren't vulnerable after the right + * IBPB microcode has been applied. + */ + if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { + setup_force_cpu_cap(X86_FEATURE_SRSO_NO); + return; + } + } + + if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { + if (has_microcode) { + pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n"); + srso_mitigation = SRSO_MITIGATION_IBPB; + goto pred_cmd; + } + } + + switch (srso_cmd) { + case SRSO_CMD_OFF: + return; + + case SRSO_CMD_MICROCODE: + if (has_microcode) { + srso_mitigation = SRSO_MITIGATION_MICROCODE; + pr_warn(SRSO_NOTICE); + } + break; + + case SRSO_CMD_SAFE_RET: + if (IS_ENABLED(CONFIG_CPU_SRSO)) { + /* + * Enable the return thunk for generated code + * like ftrace, static_call, etc. 
+ */ + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); + + if (boot_cpu_data.x86 == 0x19) { + setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); + x86_return_thunk = srso_alias_return_thunk; + } else { + setup_force_cpu_cap(X86_FEATURE_SRSO); + x86_return_thunk = srso_return_thunk; + } + srso_mitigation = SRSO_MITIGATION_SAFE_RET; + } else { + pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); + goto pred_cmd; + } + break; + + case SRSO_CMD_IBPB: + if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { + if (has_microcode) { + setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + srso_mitigation = SRSO_MITIGATION_IBPB; + } + } else { + pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); + goto pred_cmd; + } + break; + + case SRSO_CMD_IBPB_ON_VMEXIT: + if (IS_ENABLED(CONFIG_CPU_SRSO)) { + if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); + srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; + } + } else { + pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); + goto pred_cmd; + } + break; + + default: + break; + } + + pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); + +pred_cmd: + if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) && + boot_cpu_has(X86_FEATURE_SBPB)) + x86_pred_cmd = PRED_CMD_SBPB; +} + +#undef pr_fmt #define pr_fmt(fmt) fmt #ifdef CONFIG_SYSFS @@ -2530,6 +2709,16 @@ return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); } +static ssize_t srso_show_state(char *buf) +{ + if (boot_cpu_has(X86_FEATURE_SRSO_NO)) + return sysfs_emit(buf, "Mitigation: SMT disabled\n"); + + return sysfs_emit(buf, "%s%s\n", + srso_strings[srso_mitigation], + (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); +} + static ssize_t gds_show_state(char *buf) { return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); @@ -2584,6 +2773,9 @@ case X86_BUG_RETBLEED: return retbleed_show_state(buf); + case X86_BUG_SRSO: + return srso_show_state(buf); + case X86_BUG_GDS: return gds_show_state(buf); @@ -2652,6 +2844,11 @@ return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); } +ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); +} + ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) { return cpu_show_common(dev, attr, buf, X86_BUG_GDS); diff -u linux-6.2.0/arch/x86/kernel/cpu/common.c linux-6.2.0/arch/x86/kernel/cpu/common.c --- linux-6.2.0/arch/x86/kernel/cpu/common.c +++ linux-6.2.0/arch/x86/kernel/cpu/common.c @@ -1097,6 +1097,9 @@ if (c->extended_cpuid_level >= 0x8000001f) c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f); + if (c->extended_cpuid_level >= 0x80000021) + c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021); + init_scattered_cpuid_features(c); init_speculation_control(c); @@ -1262,8 +1265,10 @@ #define RETBLEED BIT(3) /* CPU is affected by SMT (cross-thread) return predictions */ #define SMT_RSB BIT(4) +/* CPU is affected by SRSO */ +#define SRSO BIT(5) /* CPU is affected by GDS */ -#define GDS BIT(5) +#define GDS BIT(6) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), @@ -1297,8 +1302,9 @@ VULNBL_AMD(0x15, RETBLEED), VULNBL_AMD(0x16, RETBLEED), - VULNBL_AMD(0x17, RETBLEED | SMT_RSB), + VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), VULNBL_HYGON(0x18, RETBLEED | SMT_RSB), + 
VULNBL_AMD(0x19, SRSO), {} }; @@ -1419,6 +1425,11 @@ if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) setup_force_cpu_bug(X86_BUG_SMT_RSB); + if (!cpu_has(c, X86_FEATURE_SRSO_NO)) { + if (cpu_matches(cpu_vuln_blacklist, SRSO)) + setup_force_cpu_bug(X86_BUG_SRSO); + } + /* * Check if CPU is vulnerable to GDS. If running in a virtual machine on * an affected processor, the VMM may have disabled the use of GATHER by diff -u linux-6.2.0/arch/x86/kernel/kprobes/opt.c linux-6.2.0/arch/x86/kernel/kprobes/opt.c --- linux-6.2.0/arch/x86/kernel/kprobes/opt.c +++ linux-6.2.0/arch/x86/kernel/kprobes/opt.c @@ -226,7 +226,7 @@ } /* Check whether insn is indirect jump */ -static int __insn_is_indirect_jump(struct insn *insn) +static int insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -260,26 +260,6 @@ return (start <= target && target <= start + len); } -static int insn_is_indirect_jump(struct insn *insn) -{ - int ret = __insn_is_indirect_jump(insn); - -#ifdef CONFIG_RETPOLINE - /* - * Jump to x86_indirect_thunk_* is treated as an indirect jump. - * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with - * older gcc may use indirect jump. So we add this check instead of - * replace indirect-jump check. - */ - if (!ret) - ret = insn_jump_into_range(insn, - (unsigned long)__indirect_thunk_start, - (unsigned long)__indirect_thunk_end - - (unsigned long)__indirect_thunk_start); -#endif - return ret; -} - /* Decode whole function to ensure any instructions don't jump into target */ static int can_optimize(unsigned long paddr) { @@ -334,9 +314,21 @@ /* Recover address */ insn.kaddr = (void *)addr; insn.next_byte = (void *)(addr + insn.length); - /* Check any instructions don't jump into target */ - if (insn_is_indirect_jump(&insn) || - insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, + /* + * Check any instructions don't jump into target, indirectly or + * directly. + * + * The indirect case is present to handle a code with jump + * tables. When the kernel uses retpolines, the check should in + * theory additionally look for jumps to indirect thunks. + * However, the kernel built with retpolines or IBT has jump + * tables disabled so the check can be skipped altogether. + */ + if (!IS_ENABLED(CONFIG_RETPOLINE) && + !IS_ENABLED(CONFIG_X86_KERNEL_IBT) && + insn_is_indirect_jump(&insn)) + return 0; + if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE, DISP32_SIZE)) return 0; addr += insn.length; diff -u linux-6.2.0/arch/x86/kernel/static_call.c linux-6.2.0/arch/x86/kernel/static_call.c --- linux-6.2.0/arch/x86/kernel/static_call.c +++ linux-6.2.0/arch/x86/kernel/static_call.c @@ -186,6 +186,19 @@ */ bool __static_call_fixup(void *tramp, u8 op, void *dest) { + unsigned long addr = (unsigned long)tramp; + /* + * Not all .return_sites are a static_call trampoline (most are not). + * Check if the 3 bytes after the return are still kernel text, if not, + * then this definitely is not a trampoline and we need not worry + * further. + * + * This avoids the memcmp() below tripping over pagefaults etc.. + */ + if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) && + !kernel_text_address(addr + 7)) + return false; + if (memcmp(tramp+5, tramp_ud, 3)) { /* Not a trampoline site, not our problem. 
*/ return false; diff -u linux-6.2.0/arch/x86/kvm/mmu/mmu.c linux-6.2.0/arch/x86/kvm/mmu/mmu.c --- linux-6.2.0/arch/x86/kvm/mmu/mmu.c +++ linux-6.2.0/arch/x86/kvm/mmu/mmu.c @@ -6945,7 +6945,10 @@ */ slot = NULL; if (atomic_read(&kvm->nr_memslots_dirty_logging)) { - slot = gfn_to_memslot(kvm, sp->gfn); + struct kvm_memslots *slots; + + slots = kvm_memslots_for_spte_role(kvm, sp->role); + slot = __gfn_to_memslot(slots, sp->gfn); WARN_ON_ONCE(!slot); } diff -u linux-6.2.0/arch/x86/kvm/svm/sev.c linux-6.2.0/arch/x86/kvm/svm/sev.c --- linux-6.2.0/arch/x86/kvm/svm/sev.c +++ linux-6.2.0/arch/x86/kvm/svm/sev.c @@ -2410,15 +2410,18 @@ */ memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); - vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb); - vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb); + BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap)); + memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap)); - svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb); + vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb); + vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb); - if (ghcb_xcr0_is_valid(ghcb)) { + svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb); + + if (kvm_ghcb_xcr0_is_valid(svm)) { vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb); kvm_update_cpuid_runtime(vcpu); } @@ -2429,14 +2432,21 @@ control->exit_code_hi = upper_32_bits(exit_code); control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb); control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb); + svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb); /* Clear the valid entries fields */ memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); } +static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control) +{ + return (((u64)control->exit_code_hi) << 32) | control->exit_code; +} + static int sev_es_validate_vmgexit(struct vcpu_svm *svm) { - struct kvm_vcpu *vcpu; + struct vmcb_control_area *control = &svm->vmcb->control; + struct kvm_vcpu *vcpu = &svm->vcpu; struct ghcb *ghcb; u64 exit_code; u64 reason; @@ -2447,7 +2457,7 @@ * Retrieve the exit code now even though it may not be marked valid * as it could help with debugging. 
*/ - exit_code = ghcb_get_sw_exit_code(ghcb); + exit_code = kvm_ghcb_get_sw_exit_code(control); /* Only GHCB Usage code 0 is supported */ if (ghcb->ghcb_usage) { @@ -2457,56 +2467,56 @@ reason = GHCB_ERR_MISSING_INPUT; - if (!ghcb_sw_exit_code_is_valid(ghcb) || - !ghcb_sw_exit_info_1_is_valid(ghcb) || - !ghcb_sw_exit_info_2_is_valid(ghcb)) + if (!kvm_ghcb_sw_exit_code_is_valid(svm) || + !kvm_ghcb_sw_exit_info_1_is_valid(svm) || + !kvm_ghcb_sw_exit_info_2_is_valid(svm)) goto vmgexit_err; - switch (ghcb_get_sw_exit_code(ghcb)) { + switch (exit_code) { case SVM_EXIT_READ_DR7: break; case SVM_EXIT_WRITE_DR7: - if (!ghcb_rax_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_RDTSC: break; case SVM_EXIT_RDPMC: - if (!ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_CPUID: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; - if (ghcb_get_rax(ghcb) == 0xd) - if (!ghcb_xcr0_is_valid(ghcb)) + if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd) + if (!kvm_ghcb_xcr0_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_INVD: break; case SVM_EXIT_IOIO: - if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) { - if (!ghcb_sw_scratch_is_valid(ghcb)) + if (control->exit_info_1 & SVM_IOIO_STR_MASK) { + if (!kvm_ghcb_sw_scratch_is_valid(svm)) goto vmgexit_err; } else { - if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK)) - if (!ghcb_rax_is_valid(ghcb)) + if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK)) + if (!kvm_ghcb_rax_is_valid(svm)) goto vmgexit_err; } break; case SVM_EXIT_MSR: - if (!ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; - if (ghcb_get_sw_exit_info_1(ghcb)) { - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rdx_is_valid(ghcb)) + if (control->exit_info_1) { + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rdx_is_valid(svm)) goto vmgexit_err; } break; case SVM_EXIT_VMMCALL: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_cpl_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_cpl_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_RDTSCP: @@ -2514,19 +2524,19 @@ case SVM_EXIT_WBINVD: break; case SVM_EXIT_MONITOR: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rcx_is_valid(ghcb) || - !ghcb_rdx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rcx_is_valid(svm) || + !kvm_ghcb_rdx_is_valid(svm)) goto vmgexit_err; break; case SVM_EXIT_MWAIT: - if (!ghcb_rax_is_valid(ghcb) || - !ghcb_rcx_is_valid(ghcb)) + if (!kvm_ghcb_rax_is_valid(svm) || + !kvm_ghcb_rcx_is_valid(svm)) goto vmgexit_err; break; case SVM_VMGEXIT_MMIO_READ: case SVM_VMGEXIT_MMIO_WRITE: - if (!ghcb_sw_scratch_is_valid(ghcb)) + if (!kvm_ghcb_sw_scratch_is_valid(svm)) goto vmgexit_err; break; case SVM_VMGEXIT_NMI_COMPLETE: @@ -2542,8 +2552,6 @@ return 0; vmgexit_err: - vcpu = &svm->vcpu; - if (reason == GHCB_ERR_INVALID_USAGE) { vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n", ghcb->ghcb_usage); @@ -2556,9 +2564,6 @@ dump_ghcb(svm); } - /* Clear the valid entries fields */ - memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap)); - ghcb_set_sw_exit_info_1(ghcb, 2); ghcb_set_sw_exit_info_2(ghcb, reason); @@ -2579,7 +2584,7 @@ */ if (svm->sev_es.ghcb_sa_sync) { kvm_write_guest(svm->vcpu.kvm, - ghcb_get_sw_scratch(svm->sev_es.ghcb), + svm->sev_es.sw_scratch, svm->sev_es.ghcb_sa, svm->sev_es.ghcb_sa_len); svm->sev_es.ghcb_sa_sync = false; @@ -2630,7 +2635,7 @@ u64 scratch_gpa_beg, 
scratch_gpa_end; void *scratch_va; - scratch_gpa_beg = ghcb_get_sw_scratch(ghcb); + scratch_gpa_beg = svm->sev_es.sw_scratch; if (!scratch_gpa_beg) { pr_err("vmgexit: scratch gpa not provided\n"); goto e_scratch; @@ -2844,16 +2849,15 @@ trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb); - exit_code = ghcb_get_sw_exit_code(ghcb); - + sev_es_sync_from_ghcb(svm); ret = sev_es_validate_vmgexit(svm); if (ret) return ret; - sev_es_sync_from_ghcb(svm); ghcb_set_sw_exit_info_1(ghcb, 0); ghcb_set_sw_exit_info_2(ghcb, 0); + exit_code = kvm_ghcb_get_sw_exit_code(control); switch (exit_code) { case SVM_VMGEXIT_MMIO_READ: ret = setup_vmgexit_scratch(svm, true, control->exit_info_2); diff -u linux-6.2.0/arch/x86/kvm/svm/svm.c linux-6.2.0/arch/x86/kvm/svm/svm.c --- linux-6.2.0/arch/x86/kvm/svm/svm.c +++ linux-6.2.0/arch/x86/kvm/svm/svm.c @@ -1486,7 +1486,9 @@ if (sd->current_vmcb != svm->vmcb) { sd->current_vmcb = svm->vmcb; - indirect_branch_prediction_barrier(); + + if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT)) + indirect_branch_prediction_barrier(); } if (kvm_vcpu_apicv_active(vcpu)) avic_vcpu_load(vcpu, cpu); @@ -3945,6 +3947,8 @@ guest_state_enter_irqoff(); + amd_clear_divider(); + if (sev_es_guest(vcpu->kvm)) __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); else diff -u linux-6.2.0/arch/x86/kvm/svm/svm.h linux-6.2.0/arch/x86/kvm/svm/svm.h --- linux-6.2.0/arch/x86/kvm/svm/svm.h +++ linux-6.2.0/arch/x86/kvm/svm/svm.h @@ -196,10 +196,12 @@ /* SEV-ES support */ struct sev_es_save_area *vmsa; struct ghcb *ghcb; + u8 valid_bitmap[16]; struct kvm_host_map ghcb_map; bool received_first_sipi; /* SEV-ES scratch area support */ + u64 sw_scratch; void *ghcb_sa; u32 ghcb_sa_len; bool ghcb_sa_sync; @@ -690,2 +692,26 @@ +#define DEFINE_KVM_GHCB_ACCESSORS(field) \ + static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \ + { \ + return test_bit(GHCB_BITMAP_IDX(field), \ + (unsigned long *)&svm->sev_es.valid_bitmap); \ + } \ + \ + static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \ + { \ + return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \ + } \ + +DEFINE_KVM_GHCB_ACCESSORS(cpl) +DEFINE_KVM_GHCB_ACCESSORS(rax) +DEFINE_KVM_GHCB_ACCESSORS(rcx) +DEFINE_KVM_GHCB_ACCESSORS(rdx) +DEFINE_KVM_GHCB_ACCESSORS(rbx) +DEFINE_KVM_GHCB_ACCESSORS(rsi) +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code) +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1) +DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2) +DEFINE_KVM_GHCB_ACCESSORS(sw_scratch) +DEFINE_KVM_GHCB_ACCESSORS(xcr0) + #endif diff -u linux-6.2.0/arch/x86/kvm/x86.c linux-6.2.0/arch/x86/kvm/x86.c --- linux-6.2.0/arch/x86/kvm/x86.c +++ linux-6.2.0/arch/x86/kvm/x86.c @@ -310,8 +310,6 @@ static struct kmem_cache *x86_emulator_cache; -extern bool gds_ucode_mitigated(void); - /* * When called, it means the previous get/set msr reached an invalid msr. * Return true if we want to ignore/silent this failed msr access. @@ -10550,6 +10548,9 @@ exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED; break; } + + /* Note, VM-Exits that go down the "slow" path are accounted below. 
*/ + ++vcpu->stat.exits; } /* diff -u linux-6.2.0/arch/x86/lib/retpoline.S linux-6.2.0/arch/x86/lib/retpoline.S --- linux-6.2.0/arch/x86/lib/retpoline.S +++ linux-6.2.0/arch/x86/lib/retpoline.S @@ -11,8 +11,9 @@ #include #include #include +#include - .section .text.__x86.indirect_thunk + .section .text..__x86.indirect_thunk .macro POLINE reg @@ -131,36 +132,107 @@ */ #ifdef CONFIG_RETHUNK - .section .text.__x86.return_thunk +/* + * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at + * special addresses: + * + * - srso_alias_untrain_ret() is 2M aligned + * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 + * and 20 in its virtual address are set (while those bits in the + * srso_alias_untrain_ret() function are cleared). + * + * This guarantees that those two addresses will alias in the branch + * target buffer of Zen3/4 generations, leading to any potential + * poisoned entries at that BTB slot to get evicted. + * + * As a result, srso_alias_safe_ret() becomes a safe return. + */ +#ifdef CONFIG_CPU_SRSO + .section .text..__x86.rethunk_untrain + +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ASM_NOP2 + lfence + jmp srso_alias_return_thunk +SYM_FUNC_END(srso_alias_untrain_ret) +__EXPORT_THUNK(srso_alias_untrain_ret) + + .section .text..__x86.rethunk_safe +#else +/* dummy definition for alternatives */ +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_alias_untrain_ret) +#endif + +SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) + lea 8(%_ASM_SP), %_ASM_SP + UNWIND_HINT_FUNC + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_alias_safe_ret) + + .section .text..__x86.return_thunk + +SYM_CODE_START(srso_alias_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_alias_safe_ret + ud2 +SYM_CODE_END(srso_alias_return_thunk) + +/* + * Some generic notes on the untraining sequences: + * + * They are interchangeable when it comes to flushing potentially wrong + * RET predictions from the BTB. + * + * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the + * Retbleed sequence because the return sequence done there + * (srso_safe_ret()) is longer and the return sequence must fully nest + * (end before) the untraining sequence. Therefore, the untraining + * sequence must fully overlap the return sequence. + * + * Regarding alignment - the instructions which need to be untrained, + * must all start at a cacheline boundary for Zen1/2 generations. That + * is, instruction sequences starting at srso_safe_ret() and + * the respective instruction sequences at retbleed_return_thunk() + * must start at a cacheline boundary. + */ /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for + * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. - * 2) The instruction at zen_untrain_ret must contain, and not + * 2) The instruction at retbleed_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread * from re-poisioning the BTB prediction. 
*/ .align 64 - .skip 63, 0xcc -SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc +SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_NOENDBR /* - * As executed from zen_untrain_ret, this is: + * As executed from retbleed_untrain_ret, this is: * * TEST $0xcc, %bl * LFENCE - * JMP __x86_return_thunk + * JMP retbleed_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * __x86_return_thunk + 1 isn't an instruction boundary at the moment. + * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* - * As executed from __x86_return_thunk, this is a plain RET. + * As executed from retbleed_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -172,13 +244,13 @@ * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. If the prediction is - * evicted, __x86_return_thunk will suffer Straight Line Speculation + * evicted, retbleed_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL) +SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__x86_return_thunk) +SYM_CODE_END(retbleed_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -189,11 +261,67 @@ * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp __x86_return_thunk + jmp retbleed_return_thunk int3 -SYM_FUNC_END(zen_untrain_ret) -__EXPORT_THUNK(zen_untrain_ret) +SYM_FUNC_END(retbleed_untrain_ret) +__EXPORT_THUNK(retbleed_untrain_ret) +/* + * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() + * above. On kernel entry, srso_untrain_ret() is executed which is a + * + * movabs $0xccccc30824648d48,%rax + * + * and when the return thunk executes the inner label srso_safe_ret() + * later, it is a stack manipulation and a RET which is mispredicted and + * thus a "safe" one to use. + */ + .align 64 + .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc +SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_NOENDBR + .byte 0x48, 0xb8 + +/* + * This forces the function return instruction to speculate into a trap + * (UD2 in srso_return_thunk() below). This RET will then mispredict + * and execution will continue at the return site read from the top of + * the stack. 
+ */ +SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) + lea 8(%_ASM_SP), %_ASM_SP + ret + int3 + int3 + /* end of movabs */ + lfence + call srso_safe_ret + ud2 +SYM_CODE_END(srso_safe_ret) +SYM_FUNC_END(srso_untrain_ret) +__EXPORT_THUNK(srso_untrain_ret) + +SYM_CODE_START(srso_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_safe_ret + ud2 +SYM_CODE_END(srso_return_thunk) + +SYM_FUNC_START(entry_untrain_ret) + ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ + "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ + "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS +SYM_FUNC_END(entry_untrain_ret) +__EXPORT_THUNK(entry_untrain_ret) + +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) #endif /* CONFIG_RETHUNK */ diff -u linux-6.2.0/block/blk-mq.c linux-6.2.0/block/blk-mq.c --- linux-6.2.0/block/blk-mq.c +++ linux-6.2.0/block/blk-mq.c @@ -717,6 +717,10 @@ blk_crypto_free_request(rq); blk_pm_mark_last_busy(rq); rq->mq_hctx = NULL; + + if (rq->rq_flags & RQF_MQ_INFLIGHT) + __blk_mq_dec_active_requests(hctx); + if (rq->tag != BLK_MQ_NO_TAG) blk_mq_put_tag(hctx->tags, ctx, rq->tag); if (sched_tag != BLK_MQ_NO_TAG) @@ -728,15 +732,11 @@ void blk_mq_free_request(struct request *rq) { struct request_queue *q = rq->q; - struct blk_mq_hw_ctx *hctx = rq->mq_hctx; if ((rq->rq_flags & RQF_ELVPRIV) && q->elevator->type->ops.finish_request) q->elevator->type->ops.finish_request(rq); - if (rq->rq_flags & RQF_MQ_INFLIGHT) - __blk_mq_dec_active_requests(hctx); - if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) laptop_io_completion(q->disk->bdi); diff -u linux-6.2.0/block/fops.c linux-6.2.0/block/fops.c --- linux-6.2.0/block/fops.c +++ linux-6.2.0/block/fops.c @@ -678,6 +678,16 @@ return error; } +static int blkdev_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct inode *bd_inode = bdev_file_inode(file); + + if (bdev_read_only(I_BDEV(bd_inode))) + return generic_file_readonly_mmap(file, vma); + + return generic_file_mmap(file, vma); +} + const struct file_operations def_blk_fops = { .open = blkdev_open, .release = blkdev_close, @@ -685,7 +695,7 @@ .read_iter = blkdev_read_iter, .write_iter = blkdev_write_iter, .iopoll = iocb_bio_iopoll, - .mmap = generic_file_mmap, + .mmap = blkdev_mmap, .fsync = blkdev_fsync, .unlocked_ioctl = blkdev_ioctl, #ifdef CONFIG_COMPAT diff -u linux-6.2.0/crypto/asymmetric_keys/public_key.c linux-6.2.0/crypto/asymmetric_keys/public_key.c --- linux-6.2.0/crypto/asymmetric_keys/public_key.c +++ linux-6.2.0/crypto/asymmetric_keys/public_key.c @@ -380,9 +380,10 @@ struct crypto_wait cwait; struct crypto_akcipher *tfm; struct akcipher_request *req; - struct scatterlist src_sg[2]; + struct scatterlist src_sg; char alg_name[CRYPTO_MAX_ALG_NAME]; - char *key, *ptr; + char *buf, *ptr; + size_t buf_len; int ret; pr_devel("==>%s()\n", __func__); @@ -420,34 +421,37 @@ if (!req) goto error_free_tfm; - key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, - GFP_KERNEL); - if (!key) + buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen, + sig->s_size + sig->digest_size); + + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) goto error_free_req; - memcpy(key, pkey->key, pkey->keylen); - ptr = key + pkey->keylen; + memcpy(buf, pkey->key, pkey->keylen); + ptr = buf + pkey->keylen; ptr = pkey_pack_u32(ptr, pkey->algo); ptr = pkey_pack_u32(ptr, pkey->paramlen); memcpy(ptr, pkey->params, pkey->paramlen); if (pkey->key_is_private) - 
ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen); + ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen); else - ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); + ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen); if (ret) - goto error_free_key; + goto error_free_buf; if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) { ret = cert_sig_digest_update(sig, tfm); if (ret) - goto error_free_key; + goto error_free_buf; } - sg_init_table(src_sg, 2); - sg_set_buf(&src_sg[0], sig->s, sig->s_size); - sg_set_buf(&src_sg[1], sig->digest, sig->digest_size); - akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size, + memcpy(buf, sig->s, sig->s_size); + memcpy(buf + sig->s_size, sig->digest, sig->digest_size); + + sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size); + akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size, sig->digest_size); crypto_init_wait(&cwait); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | @@ -455,8 +459,8 @@ crypto_req_done, &cwait); ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait); -error_free_key: - kfree(key); +error_free_buf: + kfree(buf); error_free_req: akcipher_request_free(req); error_free_tfm: diff -u linux-6.2.0/debian.master/abi/abiname linux-6.2.0/debian.master/abi/abiname --- linux-6.2.0/debian.master/abi/abiname +++ linux-6.2.0/debian.master/abi/abiname @@ -1 +1 @@ -30 +32 diff -u linux-6.2.0/debian.master/abi/arm64/generic-64k.modules linux-6.2.0/debian.master/abi/arm64/generic-64k.modules --- linux-6.2.0/debian.master/abi/arm64/generic-64k.modules +++ linux-6.2.0/debian.master/abi/arm64/generic-64k.modules @@ -5429,7 +5429,6 @@ sgi_w1 sgp30 sgp40 -sh-sci sh_eth sh_mmcif sh_mobile_lcdcfb diff -u linux-6.2.0/debian.master/abi/arm64/generic.modules linux-6.2.0/debian.master/abi/arm64/generic.modules --- linux-6.2.0/debian.master/abi/arm64/generic.modules +++ linux-6.2.0/debian.master/abi/arm64/generic.modules @@ -5430,7 +5430,6 @@ sgi_w1 sgp30 sgp40 -sh-sci sh_eth sh_mmcif sh_mobile_lcdcfb diff -u linux-6.2.0/debian.master/abi/armhf/generic-lpae.modules linux-6.2.0/debian.master/abi/armhf/generic-lpae.modules --- linux-6.2.0/debian.master/abi/armhf/generic-lpae.modules +++ linux-6.2.0/debian.master/abi/armhf/generic-lpae.modules @@ -5229,7 +5229,6 @@ sgi_w1 sgp30 sgp40 -sh-sci sh_eth sh_mmcif sh_mobile_lcdcfb diff -u linux-6.2.0/debian.master/abi/armhf/generic.modules linux-6.2.0/debian.master/abi/armhf/generic.modules --- linux-6.2.0/debian.master/abi/armhf/generic.modules +++ linux-6.2.0/debian.master/abi/armhf/generic.modules @@ -5235,7 +5235,6 @@ sgi_w1 sgp30 sgp40 -sh-sci sh_eth sh_mmcif sh_mobile_lcdcfb diff -u linux-6.2.0/debian.master/abi/version linux-6.2.0/debian.master/abi/version --- linux-6.2.0/debian.master/abi/version +++ linux-6.2.0/debian.master/abi/version @@ -1 +1 @@ -6.2.0-30.30 +6.2.0-32.32 diff -u linux-6.2.0/debian.master/changelog linux-6.2.0/debian.master/changelog --- linux-6.2.0/debian.master/changelog +++ linux-6.2.0/debian.master/changelog @@ -1,3 +1,571 @@ +linux (6.2.0-34.34) lunar; urgency=medium + + * lunar/linux: 6.2.0-34.34 -proposed tracker (LP: #2033779) + + * CVE-2023-20569 + - x86/cpu, kvm: Add support for CPUID_80000021_EAX + - tools headers x86 cpufeatures: Sync with the kernel sources + - x86/alternative: Optimize returns patching + - x86/retbleed: Add __x86_return_thunk alignment checks + - x86/srso: Add a Speculative RAS Overflow mitigation + - x86/srso: Add IBPB_BRTYPE support + - x86/srso: Add SRSO_NO support + - x86/srso: Add IBPB + - 
x86/srso: Add IBPB on VMEXIT + - x86/srso: Fix return thunks in generated code + - x86/srso: Add a forgotten NOENDBR annotation + - x86/srso: Tie SBPB bit setting to microcode patch detection + - Documentation/hw-vuln: Unify filename specification in index + - Documentation/srso: Document IBPB aspect and fix formatting + - x86/srso: Fix build breakage with the LLVM linker + - x86: Move gds_ucode_mitigated() declaration to header + - x86/retpoline: Don't clobber RFLAGS during srso_safe_ret() + - x86/srso: Disable the mitigation on unaffected configurations + - x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG + - x86/retpoline,kprobes: Skip optprobe check for indirect jumps with + retpolines and IBT + - x86/cpu: Fix __x86_return_thunk symbol type + - x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk() + - objtool/x86: Fix SRSO mess + - x86/alternative: Make custom return thunk unconditional + - x86/cpu: Clean up SRSO return thunk mess + - x86/cpu: Rename original retbleed methods + - x86/cpu: Rename srso_(.*)_alias to srso_alias_\1 + - x86/cpu: Cleanup the untrain mess + - x86/srso: Explain the untraining sequences a bit more + - objtool/x86: Fixup frame-pointer vs rethunk + - x86/static_call: Fix __static_call_fixup() + - x86/srso: Correct the mitigation status when SMT is disabled + - Ubuntu: [Config]: enable Speculative Return Stack Overflow mitigation + + * Please enable Renesas RZ platform serial installer (LP: #2022361) + - [Config] enable hihope RZ/G2M serial console + - [Config] Mark sh-sci as built-in + + * dGPU cannot resume because system firmware stuck in IPCS method + (LP: #2021572) + - drm/i915/tc: Abort DP AUX transfer on a disconnected TC port + - drm/i915/tc: switch to intel_de_* register accessors in display code + - drm/i915: Enable a PIPEDMC whenever its corresponding pipe is enabled + - drm/i915/tc: Fix TC port link ref init for DP MST during HW readout + - drm/i915/tc: Fix system resume MST mode restore for DP-alt sinks + - drm/i915/tc: Wait for IOM/FW PHY initialization of legacy TC ports + - drm/i915/tc: Factor out helpers converting HPD mask to TC mode + - drm/i915/tc: Fix target TC mode for a disconnected legacy port + - drm/i915/tc: Fix TC mode for a legacy port if the PHY is not ready + - drm/i915/tc: Fix initial TC mode on disabled legacy ports + - drm/i915/tc: Make the TC mode readout consistent in all PHY states + - drm/i915: Add encoder hook to get the PLL type used by TC ports + - drm/i915/tc: Assume a TC port is legacy if VBT says the port has HDMI + - drm/i915/tc: Factor out a function querying active links on a TC port + - drm/i915/tc: Check the PLL type used by an enabled TC port + - drm/i915/tc: Group the TC PHY setup/query functions per platform + - drm/i915/tc: Use the adlp prefix for ADLP TC PHY functions + - drm/i915/tc: Rename tc_phy_status_complete() to tc_phy_is_ready() + - drm/i915/tc: Use the tc_phy prefix for all TC PHY functions + - drm/i915/tc: Move TC port fields to a new intel_tc_port struct + - drm/i915/tc: Check for TC PHY explicitly in + intel_tc_port_fia_max_lane_count() + - drm/i915/tc: Move the intel_tc_port struct declaration to intel_tc.c + - drm/i915/tc: Add TC PHY hook to get the PHY HPD live status + - drm/i915/tc: Add TC PHY hooks to get the PHY ready/owned state + - drm/i915/tc: Add TC PHY hook to read out the PHY HW state + - drm/i915/tc: Add generic TC PHY connect/disconnect handlers + - drm/i915/tc: Factor out tc_phy_verify_legacy_or_dp_alt_mode() + - drm/i915/tc: Add TC PHY hooks to 
connect/disconnect the PHY + - drm/i915/tc: Fix up the legacy VBT flag only in disconnected mode + - drm/i915/tc: Check TC mode instead of the VBT legacy flag + - drm/i915/tc: Block/unblock TC-cold in the PHY connect/disconnect hooks + - drm/i915/tc: Remove redundant wakeref=0 check from unblock_tc_cold() + - drm/i915/tc: Drop tc_cold_block()/unblock()'s power domain parameter + - drm/i915/tc: Add TC PHY hook to get the TC-cold blocking power domain + - drm/i915/tc: Add asserts in TC PHY hooks that the required power is on + - drm/i915/tc: Add TC PHY hook to init the PHY + - drm/i915/adlp/tc: Use the DE HPD ISR register for hotplug detection + - drm/i915/tc: Get power ref for reading the HPD live status register + - drm/i915/tc: Don't connect the PHY in intel_tc_port_connected() + - drm/i915/adlp/tc: Align the connect/disconnect PHY sequence with bspec + - drm/i915: Move shared DPLL disabling into CRTC disable hook + - drm/i915: Disable DPLLs before disconnecting the TC PHY + - drm/i915: Remove TC PHY disconnect workaround + - drm/i915: Remove the encoder update_prepare()/complete() hooks + - drm/i915/dp_mst: Fix active port PLL selection for secondary MST streams + - drm/i915: Fix PIPEDMC disabling for a bigjoiner configuration + - drm/i915: Add helpers to reference/unreference a DPLL for a CRTC + - drm/i915: Make the CRTC state consistent during sanitize-disabling + - drm/i915: Update connector atomic state before crtc sanitize-disabling + - drm/i915: Separate intel_crtc_disable_noatomic_begin/complete() + - drm/i915: Factor out set_encoder_for_connector() + - drm/i915: Add support for disabling any CRTCs during HW readout/sanitization + - drm/i915/dp: Prevent link training fallback on disconnected port + - drm/i915/dp: Factor out intel_dp_get_active_pipes() + - drm/i915: Factor out a helper for handling atomic modeset locks/state + - drm/i915/tc: Call TypeC port flush_work/cleanup without modeset locks held + - drm/i915/tc: Reset TypeC PHYs left enabled in DP-alt mode after the sink + disconnects + + * amdgpu: Fixes for S0i3 resume on Phoenix (LP: #2033654) + - drm/amd/pm: skip the RLC stop when S0i3 suspend for SMU v13.0.4/11 + - drm/amdgpu: skip fence GFX interrupts disable/enable for S0ix + - drm/amd: flush any delayed gfxoff on suspend entry + + * Fix panel brightness issues on HP laptops (LP: #2032704) + - ACPI: video: Put ACPI video and its child devices into D0 on boot + + * Fix ACPI TAD on some Intel based systems (LP: #2032767) + - ACPI: TAD: Install SystemCMOS address space handler for ACPI000E + + * kdump doesn't work with UEFI secure boot and kernel lockdown enabled on + ARM64 (LP: #2033007) + - [Config]: Enable CONFIG_KEXEC_IMAGE_VERIFY_SIG + + * Request backport of xen timekeeping performance improvements (LP: #2033122) + - x86/xen/time: prefer tsc as clocksource when it is invariant + + * Fix numerous AER related issues (LP: #2033025) + - SAUCE: PCI/AER: Disable AER service during suspend, again + - SAUCE: PCI/DPC: Disable DPC service during suspend, again + + * Enable D3cold at s2idle for Intel DG2 GPU (LP: #2033452) + - drm/i915/dgfx: Enable d3cold at s2idle + + * CVE-2023-4569 + - netfilter: nf_tables: deactivate catchall elements in next generation + + * Fix non-working MT7921e when pre-boot WiFi is enabled (LP: #2026322) + - wifi: mt76: mt7921e: fix init command fail with enabled device + + * Fix unreliable ethernet cable detection on I219 NIC (LP: #2028122) + - e1000e: Use PME poll to circumvent unreliable ACPI wake + + * [SRU][Ubuntu 22.04.1] Unable to interpret 
the frequency values in + cpuinfo_min_freq and cpuino_max_freq sysfs files. (LP: #2030924) + - cpufreq: intel_pstate: Fix scaling for hybrid-capable + + * CVE-2023-40283 + - Bluetooth: L2CAP: Fix use-after-free in l2cap_sock_ready_cb + + * CVE-2023-20588 + - x86/bugs: Increase the x86 bugs vector size to two u32s + - x86/CPU/AMD: Do not leak quotient data after a division by 0 + - x86/CPU/AMD: Fix the DIV(0) initial fix attempt + + * CVE-2023-4194 + - net: tun_chr_open(): set sk_uid from current_fsuid() + - net: tap_open(): set sk_uid from current_fsuid() + + * CVE-2023-4155 + - KVM: SEV: snapshot the GHCB before accessing it + - KVM: SEV: only access GHCB fields once + + * CVE-2023-1206 + - tcp: Reduce chance of collisions in inet6_hashfn(). + + * Lunar update: upstream stable patchset 2023-08-03 (LP: #2029808) + - RDMA/bnxt_re: Fix the page_size used during the MR creation + - phy: amlogic: phy-meson-g12a-mipi-dphy-analog: fix CNTL2_DIF_TX_CTL0 value + - RDMA/efa: Fix unsupported page sizes in device + - RDMA/hns: Fix timeout attr in query qp for HIP08 + - RDMA/hns: Fix base address table allocation + - RDMA/hns: Modify the value of long message loopback slice + - dmaengine: at_xdmac: fix potential Oops in at_xdmac_prep_interleaved() + - RDMA/bnxt_re: Fix a possible memory leak + - RDMA/bnxt_re: Fix return value of bnxt_re_process_raw_qp_pkt_rx + - iommu/rockchip: Fix unwind goto issue + - iommu/amd: Don't block updates to GATag if guest mode is on + - iommu/amd: Handle GALog overflows + - iommu/amd: Fix up merge conflict resolution + - nfsd: make a copy of struct iattr before calling notify_change + - dmaengine: pl330: rename _start to prevent build error + - riscv: Fix unused variable warning when BUILTIN_DTB is set + - net/mlx5: Drain health before unregistering devlink + - net/mlx5: SF, Drain health before removing device + - net/mlx5: fw_tracer, Fix event handling + - net/mlx5e: Don't attach netdev profile while handling internal error + - net: mellanox: mlxbf_gige: Fix skb_panic splat under memory pressure + - netrom: fix info-leak in nr_write_internal() + - af_packet: Fix data-races of pkt_sk(sk)->num. 
+ - tls: improve lockless access safety of tls_err_abort() + - amd-xgbe: fix the false linkup in xgbe_phy_status + - perf ftrace latency: Remove unnecessary "--" from --use-nsec option + - mtd: rawnand: ingenic: fix empty stub helper definitions + - RDMA/irdma: Prevent QP use after free + - RDMA/irdma: Fix Local Invalidate fencing + - af_packet: do not use READ_ONCE() in packet_bind() + - tcp: deny tcp_disconnect() when threads are waiting + - tcp: Return user_mss for TCP_MAXSEG in CLOSE/LISTEN state if user_mss set + - net/smc: Scan from current RMB list when no position specified + - net/smc: Don't use RMBs not mapped to new link in SMCRv2 ADD LINK + - net/sched: sch_ingress: Only create under TC_H_INGRESS + - net/sched: sch_clsact: Only create under TC_H_CLSACT + - net/sched: Reserve TC_H_INGRESS (TC_H_CLSACT) for ingress (clsact) Qdiscs + - net/sched: Prohibit regrafting ingress or clsact Qdiscs + - net: sched: fix NULL pointer dereference in mq_attach + - net/netlink: fix NETLINK_LIST_MEMBERSHIPS length report + - udp6: Fix race condition in udp6_sendmsg & connect + - nfsd: fix double fget() bug in __write_ports_addfd() + - nvme: fix the name of Zone Append for verbose logging + - net/mlx5e: Fix error handling in mlx5e_refresh_tirs + - net/mlx5: Read embedded cpu after init bit cleared + - iommu/mediatek: Flush IOTLB completely only if domain has been attached + - tcp: fix mishandling when the sack compression is deferred. + - net: dsa: mv88e6xxx: Increase wait after reset deactivation + - mtd: rawnand: marvell: ensure timing values are written + - mtd: rawnand: marvell: don't set the NAND frequency select + - rtnetlink: call validate_linkmsg in rtnl_create_link + - mptcp: avoid unneeded __mptcp_nmpc_socket() usage + - mptcp: add annotations around msk->subflow accesses + - mptcp: avoid unneeded address copy + - mptcp: simplify subflow_syn_recv_sock() + - mptcp: consolidate passive msk socket initialization + - mptcp: fix data race around msk->first access + - mptcp: add annotations around sk->sk_shutdown accesses + - drm/amdgpu: release gpu full access after "amdgpu_device_ip_late_init" + - watchdog: menz069_wdt: fix watchdog initialisation + - ALSA: hda: Glenfly: add HD Audio PCI IDs and HDMI Codec Vendor IDs. 
+ - ASoC: Intel: soc-acpi-cht: Add quirk for Nextbook Ares 8A tablet + - drm/amdgpu: Use the default reset when loading or reloading the driver + - mailbox: mailbox-test: Fix potential double-free in + mbox_test_message_write() + - btrfs: abort transaction when sibling keys check fails for leaves + - ARM: 9295/1: unwind:fix unwind abort for uleb128 case + - hwmon: (k10temp) Add PCI ID for family 19, model 78h + - media: rcar-vin: Select correct interrupt mode for V4L2_FIELD_ALTERNATE + - platform/x86: intel_scu_pcidrv: Add back PCI ID for Medfield + - platform/mellanox: fix potential race in mlxbf-tmfifo driver + - drm/amdgpu: set gfx9 onwards APU atomics support to be true + - fbdev: imsttfb: Fix use after free bug in imsttfb_probe + - fbdev: modedb: Add 1920x1080 at 60 Hz video mode + - fbdev: stifb: Fix info entry in sti_struct on error path + - nbd: Fix debugfs_create_dir error checking + - block/rnbd: replace REQ_OP_FLUSH with REQ_OP_WRITE + - nvme-pci: add NVME_QUIRK_BOGUS_NID for HS-SSD-FUTURE 2048G + - nvme-pci: add quirk for missing secondary temperature thresholds + - ASoC: amd: yc: Add DMI entry to support System76 Pangolin 12 + - ASoC: dwc: limit the number of overrun messages + - um: harddog: fix modular build + - xfrm: Check if_id in inbound policy/secpath match + - ASoC: dt-bindings: Adjust #sound-dai-cells on TI's single-DAI codecs + - ALSA: hda/realtek: Add quirks for ASUS GU604V and GU603V + - ASoC: ssm2602: Add workaround for playback distortions + - media: dvb_demux: fix a bug for the continuity counter + - media: dvb-usb: az6027: fix three null-ptr-deref in az6027_i2c_xfer() + - media: dvb-usb-v2: ec168: fix null-ptr-deref in ec168_i2c_xfer() + - media: dvb-usb-v2: ce6230: fix null-ptr-deref in ce6230_i2c_master_xfer() + - media: dvb-usb-v2: rtl28xxu: fix null-ptr-deref in rtl28xxu_i2c_xfer + - media: dvb-usb: digitv: fix null-ptr-deref in digitv_i2c_xfer() + - media: dvb-usb: dw2102: fix uninit-value in su3000_read_mac_address + - media: netup_unidvb: fix irq init by register it at the end of probe + - media: dvb_ca_en50221: fix a size write bug + - media: ttusb-dec: fix memory leak in ttusb_dec_exit_dvb() + - media: mn88443x: fix !CONFIG_OF error by drop of_match_ptr from ID table + - media: dvb-core: Fix use-after-free due on race condition at dvb_net + - media: dvb-core: Fix use-after-free due to race at dvb_register_device() + - media: dvb-core: Fix use-after-free due to race condition at dvb_ca_en50221 + - ASoC: SOF: debug: conditionally bump runtime_pm counter on exceptions + - ASoC: SOF: pcm: fix pm_runtime imbalance in error handling + - ASoC: SOF: sof-client-probes: fix pm_runtime imbalance in error handling + - ASoC: SOF: pm: save io region state in case of errors in resume + - s390/pkey: zeroize key blobs + - s390/topology: honour nr_cpu_ids when adding CPUs + - ACPI: resource: Add IRQ override quirk for LG UltraPC 17U70P + - wifi: rtl8xxxu: fix authentication timeout due to incorrect RCR value + - ARM: dts: stm32: add pin map for CAN controller on stm32f7 + - arm64/mm: mark private VM_FAULT_X defines as vm_fault_t + - arm64: vdso: Pass (void *) to virt_to_page() + - wifi: mac80211: simplify chanctx allocation + - wifi: mac80211: consider reserved chanctx for mindef + - wifi: mac80211: recalc chanctx mindef before assigning + - wifi: iwlwifi: mvm: Add locking to the rate read flow + - scsi: core: Decrease scsi_device's iorequest_cnt if dispatch failed + - wifi: b43: fix incorrect __packed annotation + - netfilter: conntrack: define variables 
exp_nat_nla_policy and any_addr with + CONFIG_NF_NAT + - nvme-multipath: don't call blk_mark_disk_dead in nvme_mpath_remove_disk + - nvme: do not let the user delete a ctrl before a complete initialization + - ALSA: oss: avoid missing-prototype warnings + - drm/msm: Be more shouty if per-process pgtables aren't working + - atm: hide unused procfs functions + - ceph: silence smatch warning in reconnect_caps_cb() + - drm/amdgpu: skip disabling fence driver src_irqs when device is unplugged + - ublk: fix AB-BA lockdep warning + - nvme-pci: Add quirk for Teamgroup MP33 SSD + - block: Deny writable memory mapping if block is read-only + - KVM: arm64: vgic: Fix a circular locking issue + - KVM: arm64: vgic: Wrap vgic_its_create() with config_lock + - KVM: arm64: vgic: Fix locking comment + - media: mediatek: vcodec: Only apply 4K frame sizes on decoder formats + - mailbox: mailbox-test: fix a locking issue in mbox_test_message_write() + - drivers: base: cacheinfo: Fix shared_cpu_map changes in event of CPU hotplug + - media: uvcvideo: Don't expose unsupported formats to userspace + - iio: accel: st_accel: Fix invalid mount_matrix on devices without ACPI _ONT + method + - iio: adc: mxs-lradc: fix the order of two cleanup operations + - HID: google: add jewel USB id + - HID: wacom: avoid integer overflow in wacom_intuos_inout() + - iio: imu: inv_icm42600: fix timestamp reset + - dt-bindings: iio: adc: renesas,rcar-gyroadc: Fix adi,ad7476 compatible value + - iio: light: vcnl4035: fixed chip ID check + - iio: adc: stm32-adc: skip adc-channels setup if none is present + - iio: adc: ad_sigma_delta: Fix IRQ issue by setting IRQ_DISABLE_UNLAZY flag + - iio: dac: mcp4725: Fix i2c_master_send() return value handling + - iio: addac: ad74413: fix resistance input processing + - iio: adc: ad7192: Change "shorted" channels to differential + - iio: adc: stm32-adc: skip adc-diff-channels setup if none is present + - iio: dac: build ad5758 driver when AD5758 is selected + - net: usb: qmi_wwan: Set DTR quirk for BroadMobi BM818 + - dt-bindings: usb: snps,dwc3: Fix "snps,hsphy_interface" type + - usb: cdns3: fix NCM gadget RX speed 20x slow than expection at iMX8QM + - usb: gadget: f_fs: Add unbind event before functionfs_unbind + - md/raid5: fix miscalculation of 'end_sector' in raid5_read_one_chunk() + - misc: fastrpc: return -EPIPE to invocations on device removal + - misc: fastrpc: reject new invocations during device removal + - scsi: stex: Fix gcc 13 warnings + - ata: libata-scsi: Use correct device no in ata_find_dev() + - drm/amdgpu: enable tmz by default for GC 11.0.1 + - drm/amd/pm: reverse mclk and fclk clocks levels for SMU v13.0.4 + - drm/amd/pm: reverse mclk and fclk clocks levels for vangogh + - drm/amd/pm: resolve reboot exception for si oland + - drm/amd/pm: reverse mclk clocks levels for SMU v13.0.5 + - drm/amd/pm: reverse mclk and fclk clocks levels for yellow carp + - drm/amd/pm: reverse mclk and fclk clocks levels for renoir + - mmc: vub300: fix invalid response handling + - mmc: pwrseq: sd8787: Fix WILC CHIP_EN and RESETN toggling order + - tty: serial: fsl_lpuart: use UARTCTRL_TXINV to send break instead of + UARTCTRL_SBK + - btrfs: fix csum_tree_block page iteration to avoid tripping on + -Werror=array-bounds + - phy: qcom-qmp-combo: fix init-count imbalance + - phy: qcom-qmp-pcie-msm8996: fix init-count imbalance + - block: fix revalidate performance regression + - powerpc/iommu: Limit number of TCEs to 512 for H_STUFF_TCE hcall + - iommu/amd: Fix domain flush size when syncing iotlb + - 
tpm, tpm_tis: correct tpm_tis_flags enumeration values + - riscv: perf: Fix callchain parse error with kernel tracepoint events + - io_uring: undeprecate epoll_ctl support + - selinux: don't use make's grouped targets feature yet + - mtdchar: mark bits of ioctl handler noinline + - tracing/timerlat: Always wakeup the timerlat thread + - tracing/histograms: Allow variables to have some modifiers + - tracing/probe: trace_probe_primary_from_call(): checked list_first_entry + - selftests: mptcp: connect: skip if MPTCP is not supported + - selftests: mptcp: pm nl: skip if MPTCP is not supported + - selftests: mptcp: join: skip if MPTCP is not supported + - selftests: mptcp: sockopt: skip if MPTCP is not supported + - selftests: mptcp: userspace pm: skip if MPTCP is not supported + - mptcp: fix connect timeout handling + - mptcp: fix active subflow finalization + - ext4: add EA_INODE checking to ext4_iget() + - ext4: set lockdep subclass for the ea_inode in ext4_xattr_inode_cache_find() + - ext4: disallow ea_inodes with extended attributes + - ext4: add lockdep annotations for i_data_sem for ea_inode's + - fbcon: Fix null-ptr-deref in soft_cursor + - serial: 8250_tegra: Fix an error handling path in tegra_uart_probe() + - serial: cpm_uart: Fix a COMPILE_TEST dependency + - powerpc/xmon: Use KSYM_NAME_LEN in array size + - test_firmware: fix a memory leak with reqs buffer + - test_firmware: fix the memory leak of the allocated firmware buffer + - KVM: arm64: Populate fault info for watchpoint + - KVM: x86: Account fastpath-only VM-Exits in vCPU stats + - ksmbd: fix credit count leakage + - ksmbd: fix UAF issue from opinfo->conn + - ksmbd: fix incorrect AllocationSize set in smb2_get_info + - ksmbd: fix slab-out-of-bounds read in smb2_handle_negotiate + - ksmbd: fix multiple out-of-bounds read during context decoding + - KEYS: asymmetric: Copy sig and digest in public_key_verify_signature() + - fs/ntfs3: Validate MFT flags before replaying logs + - regmap: Account for register length when chunking + - tpm, tpm_tis: Request threaded interrupt handler + - iommu/amd/pgtbl_v2: Fix domain max address + - drm/amd/display: Have Payload Properly Created After Resume + - tls: rx: strp: don't use GFP_KERNEL in softirq context + - selftests: mptcp: diag: skip if MPTCP is not supported + - selftests: mptcp: simult flows: skip if MPTCP is not supported + - selftests: mptcp: join: avoid using 'cmp --bytes' + - ext4: enable the lazy init thread when remounting read/write + - iommu: Make IPMMU_VMSA dependencies more strict + - [Config] updateconfigs for IPMMU_VMSA + - iommu/amd: Add missing domain type checks + - efi: Bump stub image version for macOS HVF compatibility + - rxrpc: Truncate UTS_RELEASE for rxrpc version + - net: renesas: rswitch: Fix return value in error path of xmit + - KVM: arm64: Prevent unconditional donation of unmapped regions from the host + - KVM: arm64: Reload PTE after invoking walker callback on preorder traversal + - iio: ad4130: Make sure clock provider gets removed + - iio: adc: mt6370: Fix ibus and ibat scaling value of some specific vendor ID + chips + - iio: accel: kx022a fix irq getting + - misc: fastrpc: Reassign memory ownership only for remote heap + - module/decompress: Fix error checking on zstd decompression + - dmaengine: at_hdmac: Repair bitfield macros for peripheral ID handling + - dmaengine: at_hdmac: Extend the Flow Controller bitfield to three bits + - test_firmware: prevent race conditions by a correct implementation of + locking + - KVM: arm64: Drop last page ref 
in kvm_pgtable_stage2_free_removed() + - KVM: x86/mmu: Grab memslot for correct address space in NX recovery worker + - Upstream stable to v6.1.33, v6.3.7 + - scsi: megaraid_sas: Add flexible array member for SGLs + - net: sfp: fix state loss when updating state_hw_mask + - spi: mt65xx: make sure operations completed before unloading + - platform/surface: aggregator: Allow completion work-items to be executed in + parallel + - platform/surface: aggregator_tabletsw: Add support for book mode in KIP + subsystem + - spi: qup: Request DMA before enabling clocks + - afs: Fix setting of mtime when creating a file/dir/symlink + - wifi: mt76: mt7615: fix possible race in mt7615_mac_sta_poll + - bpf, sockmap: Avoid potential NULL dereference in + sk_psock_verdict_data_ready() + - neighbour: fix unaligned access to pneigh_entry + - net: dsa: lan9303: allow vid != 0 in port_fdb_{add|del} methods + - net/ipv4: ping_group_range: allow GID from 2147483648 to 4294967294 + - bpf: Fix UAF in task local storage + - bpf: Fix elem_size not being set for inner maps + - net/ipv6: fix bool/int mismatch for skip_notify_on_dev_down + - net/smc: Avoid to access invalid RMBs' MRs in SMCRv1 ADD LINK CONT + - net: enetc: correct the statistics of rx bytes + - net: enetc: correct rx_bytes statistics of XDP + - net/sched: fq_pie: ensure reasonable TCA_FQ_PIE_QUANTUM values + - Bluetooth: hci_sync: add lock to protect HCI_UNREGISTER + - Bluetooth: Fix l2cap_disconnect_req deadlock + - Bluetooth: ISO: don't try to remove CIG if there are bound CIS left + - Bluetooth: L2CAP: Add missing checks for invalid DCID + - wifi: mac80211: use correct iftype HE cap + - wifi: cfg80211: reject bad AP MLD address + - wifi: mac80211: mlme: fix non-inheritence element + - wifi: mac80211: don't translate beacon/presp addrs + - qed/qede: Fix scheduling while atomic + - wifi: cfg80211: fix locking in sched scan stop work + - selftests/bpf: Verify optval=NULL case + - selftests/bpf: Fix sockopt_sk selftest + - netfilter: nft_bitwise: fix register tracking + - netfilter: conntrack: fix NULL pointer dereference in nf_confirm_cthelper + - netfilter: ipset: Add schedule point in call_ad(). + - netfilter: nf_tables: out-of-bound check in chain blob + - ipv6: rpl: Fix Route of Death. 
+ - tcp: gso: really support BIG TCP + - rfs: annotate lockless accesses to sk->sk_rxhash + - rfs: annotate lockless accesses to RFS sock flow table + - net: sched: add rcu annotations around qdisc->qdisc_sleeping + - drm/i915/selftests: Add some missing error propagation + - net: sched: move rtm_tca_policy declaration to include file + - net: sched: act_police: fix sparse errors in tcf_police_dump() + - net: sched: fix possible refcount leak in tc_chain_tmplt_add() + - bpf: Add extra path pointer check to d_path helper + - drm/amdgpu: fix Null pointer dereference error in amdgpu_device_recover_vram + - lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release() + - net: bcmgenet: Fix EEE implementation + - bnxt_en: Don't issue AP reset during ethtool's reset operation + - bnxt_en: Query default VLAN before VNIC setup on a VF + - bnxt_en: Skip firmware fatal error recovery if chip is not accessible + - bnxt_en: Prevent kernel panic when receiving unexpected PHC_UPDATE event + - bnxt_en: Implement .set_port / .unset_port UDP tunnel callbacks + - batman-adv: Broken sync while rescheduling delayed work + - Input: xpad - delete a Razer DeathAdder mouse VID/PID entry + - Input: psmouse - fix OOB access in Elantech protocol + - Input: fix open count when closing inhibited device + - ALSA: hda: Fix kctl->id initialization + - ALSA: ymfpci: Fix kctl->id initialization + - ALSA: gus: Fix kctl->id initialization + - ALSA: cmipci: Fix kctl->id initialization + - ALSA: hda/realtek: Add quirk for Clevo NS50AU + - ALSA: ice1712,ice1724: fix the kcontrol->id initialization + - ALSA: hda/realtek: Add a quirk for HP Slim Desktop S01 + - ALSA: hda/realtek: Add quirks for Asus ROG 2024 laptops using CS35L41 + - drm/i915/gt: Use the correct error value when kernel_context() fails + - drm/amdgpu: fix xclk freq on CHIP_STONEY + - drm/amdgpu: change reserved vram info print + - drm/amd/pm: Fix power context allocation in SMU13 + - drm/amd/display: Reduce sdp bw after urgent to 90% + - wifi: iwlwifi: mvm: Fix -Warray-bounds bug in iwl_mvm_wait_d3_notif() + - can: j1939: j1939_sk_send_loop_abort(): improved error queue handling in + J1939 Socket + - can: j1939: change j1939_netdev_lock type to mutex + - can: j1939: avoid possible use-after-free when j1939_can_rx_register fails + - mptcp: only send RM_ADDR in nl_cmd_remove + - mptcp: add address into userspace pm list + - mptcp: update userspace pm infos + - selftests: mptcp: update userspace pm addr tests + - selftests: mptcp: update userspace pm subflow tests + - ceph: fix use-after-free bug for inodes when flushing capsnaps + - s390/dasd: Use correct lock while counting channel queue length + - Bluetooth: Fix use-after-free in hci_remove_ltk/hci_remove_irk + - Bluetooth: fix debugfs registration + - Bluetooth: hci_qca: fix debugfs registration + - tee: amdtee: Add return_origin to 'struct tee_cmd_load_ta' + - rbd: move RBD_OBJ_FLAG_COPYUP_ENABLED flag setting + - rbd: get snapshot context after exclusive lock is ensured to be held + - virtio_net: use control_buf for coalesce params + - soc: qcom: icc-bwmon: fix incorrect error code passed to dev_err_probe() + - pinctrl: meson-axg: add missing GPIOA_18 gpio group + - usb: usbfs: Enforce page requirements for mmap + - usb: usbfs: Use consistent mmap functions + - mm: page_table_check: Make it dependent on EXCLUSIVE_SYSTEM_RAM + - mm: page_table_check: Ensure user pages are not slab pages + - arm64: dts: qcom: sc8280xp: Flush RSC sleep & wake votes + - ARM: at91: pm: fix imbalanced reference counter for 
ethernet devices + - ARM: dts: at91: sama7g5ek: fix debounce delay property for shdwc + - ASoC: codecs: wsa883x: do not set can_multi_write flag + - ASoC: codecs: wsa881x: do not set can_multi_write flag + - arm64: dts: qcom: sc7180-lite: Fix SDRAM freq for misidentified sc7180-lite + boards + - arm64: dts: imx8qm-mek: correct GPIOs for USDHC2 CD and WP signals + - arm64: dts: imx8-ss-dma: assign default clock rate for lpuarts + - ASoC: mediatek: mt8195-afe-pcm: Convert to platform remove callback + returning void + - ASoC: mediatek: mt8195: fix use-after-free in driver remove path + - ASoC: simple-card-utils: fix PCM constraint error check + - blk-mq: fix blk_mq_hw_ctx active request accounting + - arm64: dts: imx8mn-beacon: Fix SPI CS pinmux + - i2c: mv64xxx: Fix reading invalid status value in atomic mode + - firmware: arm_ffa: Set handle field to zero in memory descriptor + - gpio: sim: fix memory corruption when adding named lines and unnamed hogs + - i2c: sprd: Delete i2c adapter in .remove's error path + - riscv: mm: Ensure prot of VM_WRITE and VM_EXEC must be readable + - eeprom: at24: also select REGMAP + - soundwire: stream: Add missing clear of alloc_slave_rt + - riscv: fix kprobe __user string arg print fault issue + - [Config] updateconfigs for ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + - vduse: avoid empty string for dev name + - vhost: support PACKED when setting-getting vring_base + - vhost_vdpa: support PACKED when setting-getting vring_base + - ksmbd: fix out-of-bound read in deassemble_neg_contexts() + - ksmbd: fix out-of-bound read in parse_lease_state() + - ksmbd: check the validation of pdu_size in ksmbd_conn_handler_loop + - ext4: only check dquot_initialize_needed() when debugging + - wifi: rtw89: correct PS calculation for SUPPORTS_DYNAMIC_PS + - wifi: rtw88: correct PS calculation for SUPPORTS_DYNAMIC_PS + - Bluetooth: Split bt_iso_qos into dedicated structures + - Bluetooth: ISO: consider right CIS when removing CIG at cleanup + - Bluetooth: ISO: Fix CIG auto-allocation to select configurable CIG + - netfilter: nf_tables: Add null check for nla_nest_start_noflag() in + nft_dump_basechain_hook() + - drm/lima: fix sched context destroy + - net: openvswitch: fix upcall counter access before allocation + - bnxt_en: Fix bnxt_hwrm_update_rss_hash_cfg() + - Input: cyttsp5 - fix array length + - soc: qcom: rpmh-rsc: drop redundant unsigned >=0 comparision + - arm64: dts: qcom: sm6375-pdx225: Fix remoteproc firmware paths + - vdpa/mlx5: Fix hang when cvq commands are triggered during device unregister + - ksmbd: fix posix_acls and acls dereferencing possible ERR_PTR() + - Upstream stable to v6.1.34, v6.3.8 + + * CVE-2023-4273 + - exfat: check if filename entries exceeds max filename length + + * CVE-2023-4128 + - net/sched: cls_u32: No longer copy tcf_result on update to avoid use-after- + free + - net/sched: cls_fw: No longer copy tcf_result on update to avoid use-after- + free + - net/sched: cls_route: No longer copy tcf_result on update to avoid use- + after-free + + * CVE-2023-3212 + - gfs2: Don't deref jdesc in evict + + -- Stefan Bader Mon, 04 Sep 2023 11:20:15 +0200 + linux (6.2.0-32.32) lunar; urgency=medium * lunar/linux: 6.2.0-32.32 -proposed tracker (LP: #2031134) diff -u linux-6.2.0/debian.master/config/annotations linux-6.2.0/debian.master/config/annotations --- linux-6.2.0/debian.master/config/annotations +++ linux-6.2.0/debian.master/config/annotations @@ -315,7 +315,7 @@ CONFIG_IPDDP policy<{'amd64': 'n', 'arm64': 'n', 'armhf': 'n', 'ppc64el': 'n', 
'riscv64': 'n', 's390x': '-'}> CONFIG_IPDDP note<'LP: #1559772'> -CONFIG_IPMMU_VMSA policy<{'arm64': 'n', 'armhf': 'n', 'riscv64': 'n'}> +CONFIG_IPMMU_VMSA policy<{'arm64': 'n', 'armhf': 'n', 'riscv64': '-'}> CONFIG_IPMMU_VMSA note<'LP: #1718734'> CONFIG_IPV6 policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 'riscv64': 'y', 's390x': 'y'}> @@ -561,6 +561,15 @@ CONFIG_SERIAL_DEV_CTRL_TTYPORT policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 'riscv64': 'y', 's390x': 'y'}> CONFIG_SERIAL_DEV_CTRL_TTYPORT note<'LP: #1739939'> +CONFIG_SERIAL_SH_SCI policy<{'arm64': 'y', 'armhf': 'y', 'riscv64': 'y'}> +CONFIG_SERIAL_SH_SCI note<'LP: #2022361'> + +CONFIG_SERIAL_SH_SCI_CONSOLE policy<{'arm64': 'y', 'armhf': 'y', 'riscv64': 'y'}> +CONFIG_SERIAL_SH_SCI_CONSOLE note<'LP: #2022361'> + +CONFIG_SERIAL_SH_SCI_EARLYCON policy<{'arm64': 'y', 'armhf': 'y', 'riscv64': 'y'}> +CONFIG_SERIAL_SH_SCI_EARLYCON note<'LP: #2022361'> + CONFIG_SETEND_EMULATION policy<{'arm64': 'y'}> CONFIG_SETEND_EMULATION note<'LP: #1545542'> @@ -1223,7 +1232,7 @@ CONFIG_ARCH_HAS_MMIOWB policy<{'ppc64el': 'y', 'riscv64': 'y'}> CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS policy<{'amd64': 'y', 'arm64': 'y', 's390x': 'y'}> CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG policy<{'amd64': 'y'}> -CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y'}> +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 'riscv64': 'y'}> CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH policy<{'amd64': 'y'}> CONFIG_ARCH_HAS_PHYS_TO_DMA policy<{'ppc64el': 'y'}> CONFIG_ARCH_HAS_PKEYS policy<{'amd64': 'y'}> @@ -3116,6 +3125,7 @@ CONFIG_CPU_PM policy<{'arm64': 'y', 'armhf': 'y', 'riscv64': 'y'}> CONFIG_CPU_RMAP policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 'riscv64': 'y', 's390x': 'y'}> CONFIG_CPU_SPECTRE policy<{'armhf': 'y'}> +CONFIG_CPU_SRSO policy<{'amd64': 'y'}> CONFIG_CPU_SUP_AMD policy<{'amd64': 'y'}> CONFIG_CPU_SUP_CENTAUR policy<{'amd64': 'y'}> CONFIG_CPU_SUP_HYGON policy<{'amd64': 'y'}> @@ -6772,7 +6782,7 @@ CONFIG_KEXEC_CORE policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 'riscv64': 'y', 's390x': 'y'}> CONFIG_KEXEC_ELF policy<{'ppc64el': 'y', 'riscv64': 'y'}> CONFIG_KEXEC_FILE policy<{'amd64': 'y', 'arm64': 'y', 'ppc64el': 'y', 'riscv64': 'y', 's390x': 'y'}> -CONFIG_KEXEC_IMAGE_VERIFY_SIG policy<{'arm64': 'n'}> +CONFIG_KEXEC_IMAGE_VERIFY_SIG policy<{'arm64': 'y'}> CONFIG_KEXEC_JUMP policy<{'amd64': 'y'}> CONFIG_KEXEC_SIG policy<{'amd64': 'y', 'arm64': 'y', 's390x': 'y'}> CONFIG_KEXEC_SIG_FORCE policy<{'amd64': 'n'}> @@ -11492,7 +11502,6 @@ CONFIG_SERIAL_SC16IS7XX_SPI policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 'riscv64': 'y'}> CONFIG_SERIAL_SCCNXP policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 'riscv64': 'y', 's390x': 'n'}> CONFIG_SERIAL_SCCNXP_CONSOLE policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 'riscv64': 'y'}> -CONFIG_SERIAL_SH_SCI policy<{'arm64': 'm', 'armhf': 'm', 'riscv64': 'm'}> CONFIG_SERIAL_SH_SCI_DMA policy<{'arm64': 'y', 'armhf': 'y', 'riscv64': 'y'}> CONFIG_SERIAL_SH_SCI_NR_UARTS policy<{'arm64': '2', 'armhf': '2', 'riscv64': '18'}> CONFIG_SERIAL_SIFIVE policy<{'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 'riscv64': 'y'}> diff -u linux-6.2.0/debian.master/tracking-bug linux-6.2.0/debian.master/tracking-bug --- linux-6.2.0/debian.master/tracking-bug +++ linux-6.2.0/debian.master/tracking-bug @@ -1 +1 @@ -2031134 2023.08.07-3 
+2033779 2023.09.04-1 diff -u linux-6.2.0/debian.master/upstream-stable linux-6.2.0/debian.master/upstream-stable --- linux-6.2.0/debian.master/upstream-stable +++ linux-6.2.0/debian.master/upstream-stable @@ -3,3 +3,3 @@ - linux-6.1.y = v6.1.32 + linux-6.1.y = v6.1.34 linux-6.2.y = v6.2.16 - linux-6.3.y = v6.3.6 + linux-6.3.y = v6.3.8 diff -u linux-6.2.0/debian/changelog linux-6.2.0/debian/changelog --- linux-6.2.0/debian/changelog +++ linux-6.2.0/debian/changelog @@ -1,3 +1,571 @@ +linux (6.2.0-34.34) lunar; urgency=medium + + * lunar/linux: 6.2.0-34.34 -proposed tracker (LP: #2033779) + + * CVE-2023-20569 + - x86/cpu, kvm: Add support for CPUID_80000021_EAX + - tools headers x86 cpufeatures: Sync with the kernel sources + - x86/alternative: Optimize returns patching + - x86/retbleed: Add __x86_return_thunk alignment checks + - x86/srso: Add a Speculative RAS Overflow mitigation + - x86/srso: Add IBPB_BRTYPE support + - x86/srso: Add SRSO_NO support + - x86/srso: Add IBPB + - x86/srso: Add IBPB on VMEXIT + - x86/srso: Fix return thunks in generated code + - x86/srso: Add a forgotten NOENDBR annotation + - x86/srso: Tie SBPB bit setting to microcode patch detection + - Documentation/hw-vuln: Unify filename specification in index + - Documentation/srso: Document IBPB aspect and fix formatting + - x86/srso: Fix build breakage with the LLVM linker + - x86: Move gds_ucode_mitigated() declaration to header + - x86/retpoline: Don't clobber RFLAGS during srso_safe_ret() + - x86/srso: Disable the mitigation on unaffected configurations + - x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG + - x86/retpoline,kprobes: Skip optprobe check for indirect jumps with + retpolines and IBT + - x86/cpu: Fix __x86_return_thunk symbol type + - x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk() + - objtool/x86: Fix SRSO mess + - x86/alternative: Make custom return thunk unconditional + - x86/cpu: Clean up SRSO return thunk mess + - x86/cpu: Rename original retbleed methods + - x86/cpu: Rename srso_(.*)_alias to srso_alias_\1 + - x86/cpu: Cleanup the untrain mess + - x86/srso: Explain the untraining sequences a bit more + - objtool/x86: Fixup frame-pointer vs rethunk + - x86/static_call: Fix __static_call_fixup() + - x86/srso: Correct the mitigation status when SMT is disabled + - Ubuntu: [Config]: enable Speculative Return Stack Overflow mitigation + + * Please enable Renesas RZ platform serial installer (LP: #2022361) + - [Config] enable hihope RZ/G2M serial console + - [Config] Mark sh-sci as built-in + + * dGPU cannot resume because system firmware stuck in IPCS method + (LP: #2021572) + - drm/i915/tc: Abort DP AUX transfer on a disconnected TC port + - drm/i915/tc: switch to intel_de_* register accessors in display code + - drm/i915: Enable a PIPEDMC whenever its corresponding pipe is enabled + - drm/i915/tc: Fix TC port link ref init for DP MST during HW readout + - drm/i915/tc: Fix system resume MST mode restore for DP-alt sinks + - drm/i915/tc: Wait for IOM/FW PHY initialization of legacy TC ports + - drm/i915/tc: Factor out helpers converting HPD mask to TC mode + - drm/i915/tc: Fix target TC mode for a disconnected legacy port + - drm/i915/tc: Fix TC mode for a legacy port if the PHY is not ready + - drm/i915/tc: Fix initial TC mode on disabled legacy ports + - drm/i915/tc: Make the TC mode readout consistent in all PHY states + - drm/i915: Add encoder hook to get the PLL type used by TC ports + - drm/i915/tc: Assume a TC port is legacy if VBT says the port has HDMI + - 
drm/i915/tc: Factor out a function querying active links on a TC port + - drm/i915/tc: Check the PLL type used by an enabled TC port + - drm/i915/tc: Group the TC PHY setup/query functions per platform + - drm/i915/tc: Use the adlp prefix for ADLP TC PHY functions + - drm/i915/tc: Rename tc_phy_status_complete() to tc_phy_is_ready() + - drm/i915/tc: Use the tc_phy prefix for all TC PHY functions + - drm/i915/tc: Move TC port fields to a new intel_tc_port struct + - drm/i915/tc: Check for TC PHY explicitly in + intel_tc_port_fia_max_lane_count() + - drm/i915/tc: Move the intel_tc_port struct declaration to intel_tc.c + - drm/i915/tc: Add TC PHY hook to get the PHY HPD live status + - drm/i915/tc: Add TC PHY hooks to get the PHY ready/owned state + - drm/i915/tc: Add TC PHY hook to read out the PHY HW state + - drm/i915/tc: Add generic TC PHY connect/disconnect handlers + - drm/i915/tc: Factor out tc_phy_verify_legacy_or_dp_alt_mode() + - drm/i915/tc: Add TC PHY hooks to connect/disconnect the PHY + - drm/i915/tc: Fix up the legacy VBT flag only in disconnected mode + - drm/i915/tc: Check TC mode instead of the VBT legacy flag + - drm/i915/tc: Block/unblock TC-cold in the PHY connect/disconnect hooks + - drm/i915/tc: Remove redundant wakeref=0 check from unblock_tc_cold() + - drm/i915/tc: Drop tc_cold_block()/unblock()'s power domain parameter + - drm/i915/tc: Add TC PHY hook to get the TC-cold blocking power domain + - drm/i915/tc: Add asserts in TC PHY hooks that the required power is on + - drm/i915/tc: Add TC PHY hook to init the PHY + - drm/i915/adlp/tc: Use the DE HPD ISR register for hotplug detection + - drm/i915/tc: Get power ref for reading the HPD live status register + - drm/i915/tc: Don't connect the PHY in intel_tc_port_connected() + - drm/i915/adlp/tc: Align the connect/disconnect PHY sequence with bspec + - drm/i915: Move shared DPLL disabling into CRTC disable hook + - drm/i915: Disable DPLLs before disconnecting the TC PHY + - drm/i915: Remove TC PHY disconnect workaround + - drm/i915: Remove the encoder update_prepare()/complete() hooks + - drm/i915/dp_mst: Fix active port PLL selection for secondary MST streams + - drm/i915: Fix PIPEDMC disabling for a bigjoiner configuration + - drm/i915: Add helpers to reference/unreference a DPLL for a CRTC + - drm/i915: Make the CRTC state consistent during sanitize-disabling + - drm/i915: Update connector atomic state before crtc sanitize-disabling + - drm/i915: Separate intel_crtc_disable_noatomic_begin/complete() + - drm/i915: Factor out set_encoder_for_connector() + - drm/i915: Add support for disabling any CRTCs during HW readout/sanitization + - drm/i915/dp: Prevent link training fallback on disconnected port + - drm/i915/dp: Factor out intel_dp_get_active_pipes() + - drm/i915: Factor out a helper for handling atomic modeset locks/state + - drm/i915/tc: Call TypeC port flush_work/cleanup without modeset locks held + - drm/i915/tc: Reset TypeC PHYs left enabled in DP-alt mode after the sink + disconnects + + * amdgpu: Fixes for S0i3 resume on Phoenix (LP: #2033654) + - drm/amd/pm: skip the RLC stop when S0i3 suspend for SMU v13.0.4/11 + - drm/amdgpu: skip fence GFX interrupts disable/enable for S0ix + - drm/amd: flush any delayed gfxoff on suspend entry + + * Fix panel brightness issues on HP laptops (LP: #2032704) + - ACPI: video: Put ACPI video and its child devices into D0 on boot + + * Fix ACPI TAD on some Intel based systems (LP: #2032767) + - ACPI: TAD: Install SystemCMOS address space handler for ACPI000E + + * kdump 
doesn't work with UEFI secure boot and kernel lockdown enabled on + ARM64 (LP: #2033007) + - [Config]: Enable CONFIG_KEXEC_IMAGE_VERIFY_SIG + + * Request backport of xen timekeeping performance improvements (LP: #2033122) + - x86/xen/time: prefer tsc as clocksource when it is invariant + + * Fix numerous AER related issues (LP: #2033025) + - SAUCE: PCI/AER: Disable AER service during suspend, again + - SAUCE: PCI/DPC: Disable DPC service during suspend, again + + * Enable D3cold at s2idle for Intel DG2 GPU (LP: #2033452) + - drm/i915/dgfx: Enable d3cold at s2idle + + * CVE-2023-4569 + - netfilter: nf_tables: deactivate catchall elements in next generation + + * Fix non-working MT7921e when pre-boot WiFi is enabled (LP: #2026322) + - wifi: mt76: mt7921e: fix init command fail with enabled device + + * Fix unreliable ethernet cable detection on I219 NIC (LP: #2028122) + - e1000e: Use PME poll to circumvent unreliable ACPI wake + + * [SRU][Ubuntu 22.04.1] Unable to interpret the frequency values in + cpuinfo_min_freq and cpuino_max_freq sysfs files. (LP: #2030924) + - cpufreq: intel_pstate: Fix scaling for hybrid-capable + + * CVE-2023-40283 + - Bluetooth: L2CAP: Fix use-after-free in l2cap_sock_ready_cb + + * CVE-2023-20588 + - x86/bugs: Increase the x86 bugs vector size to two u32s + - x86/CPU/AMD: Do not leak quotient data after a division by 0 + - x86/CPU/AMD: Fix the DIV(0) initial fix attempt + + * CVE-2023-4194 + - net: tun_chr_open(): set sk_uid from current_fsuid() + - net: tap_open(): set sk_uid from current_fsuid() + + * CVE-2023-4155 + - KVM: SEV: snapshot the GHCB before accessing it + - KVM: SEV: only access GHCB fields once + + * CVE-2023-1206 + - tcp: Reduce chance of collisions in inet6_hashfn(). + + * Lunar update: upstream stable patchset 2023-08-03 (LP: #2029808) + - RDMA/bnxt_re: Fix the page_size used during the MR creation + - phy: amlogic: phy-meson-g12a-mipi-dphy-analog: fix CNTL2_DIF_TX_CTL0 value + - RDMA/efa: Fix unsupported page sizes in device + - RDMA/hns: Fix timeout attr in query qp for HIP08 + - RDMA/hns: Fix base address table allocation + - RDMA/hns: Modify the value of long message loopback slice + - dmaengine: at_xdmac: fix potential Oops in at_xdmac_prep_interleaved() + - RDMA/bnxt_re: Fix a possible memory leak + - RDMA/bnxt_re: Fix return value of bnxt_re_process_raw_qp_pkt_rx + - iommu/rockchip: Fix unwind goto issue + - iommu/amd: Don't block updates to GATag if guest mode is on + - iommu/amd: Handle GALog overflows + - iommu/amd: Fix up merge conflict resolution + - nfsd: make a copy of struct iattr before calling notify_change + - dmaengine: pl330: rename _start to prevent build error + - riscv: Fix unused variable warning when BUILTIN_DTB is set + - net/mlx5: Drain health before unregistering devlink + - net/mlx5: SF, Drain health before removing device + - net/mlx5: fw_tracer, Fix event handling + - net/mlx5e: Don't attach netdev profile while handling internal error + - net: mellanox: mlxbf_gige: Fix skb_panic splat under memory pressure + - netrom: fix info-leak in nr_write_internal() + - af_packet: Fix data-races of pkt_sk(sk)->num. 
+ - tls: improve lockless access safety of tls_err_abort() + - amd-xgbe: fix the false linkup in xgbe_phy_status + - perf ftrace latency: Remove unnecessary "--" from --use-nsec option + - mtd: rawnand: ingenic: fix empty stub helper definitions + - RDMA/irdma: Prevent QP use after free + - RDMA/irdma: Fix Local Invalidate fencing + - af_packet: do not use READ_ONCE() in packet_bind() + - tcp: deny tcp_disconnect() when threads are waiting + - tcp: Return user_mss for TCP_MAXSEG in CLOSE/LISTEN state if user_mss set + - net/smc: Scan from current RMB list when no position specified + - net/smc: Don't use RMBs not mapped to new link in SMCRv2 ADD LINK + - net/sched: sch_ingress: Only create under TC_H_INGRESS + - net/sched: sch_clsact: Only create under TC_H_CLSACT + - net/sched: Reserve TC_H_INGRESS (TC_H_CLSACT) for ingress (clsact) Qdiscs + - net/sched: Prohibit regrafting ingress or clsact Qdiscs + - net: sched: fix NULL pointer dereference in mq_attach + - net/netlink: fix NETLINK_LIST_MEMBERSHIPS length report + - udp6: Fix race condition in udp6_sendmsg & connect + - nfsd: fix double fget() bug in __write_ports_addfd() + - nvme: fix the name of Zone Append for verbose logging + - net/mlx5e: Fix error handling in mlx5e_refresh_tirs + - net/mlx5: Read embedded cpu after init bit cleared + - iommu/mediatek: Flush IOTLB completely only if domain has been attached + - tcp: fix mishandling when the sack compression is deferred. + - net: dsa: mv88e6xxx: Increase wait after reset deactivation + - mtd: rawnand: marvell: ensure timing values are written + - mtd: rawnand: marvell: don't set the NAND frequency select + - rtnetlink: call validate_linkmsg in rtnl_create_link + - mptcp: avoid unneeded __mptcp_nmpc_socket() usage + - mptcp: add annotations around msk->subflow accesses + - mptcp: avoid unneeded address copy + - mptcp: simplify subflow_syn_recv_sock() + - mptcp: consolidate passive msk socket initialization + - mptcp: fix data race around msk->first access + - mptcp: add annotations around sk->sk_shutdown accesses + - drm/amdgpu: release gpu full access after "amdgpu_device_ip_late_init" + - watchdog: menz069_wdt: fix watchdog initialisation + - ALSA: hda: Glenfly: add HD Audio PCI IDs and HDMI Codec Vendor IDs. 
+ - ASoC: Intel: soc-acpi-cht: Add quirk for Nextbook Ares 8A tablet + - drm/amdgpu: Use the default reset when loading or reloading the driver + - mailbox: mailbox-test: Fix potential double-free in + mbox_test_message_write() + - btrfs: abort transaction when sibling keys check fails for leaves + - ARM: 9295/1: unwind:fix unwind abort for uleb128 case + - hwmon: (k10temp) Add PCI ID for family 19, model 78h + - media: rcar-vin: Select correct interrupt mode for V4L2_FIELD_ALTERNATE + - platform/x86: intel_scu_pcidrv: Add back PCI ID for Medfield + - platform/mellanox: fix potential race in mlxbf-tmfifo driver + - drm/amdgpu: set gfx9 onwards APU atomics support to be true + - fbdev: imsttfb: Fix use after free bug in imsttfb_probe + - fbdev: modedb: Add 1920x1080 at 60 Hz video mode + - fbdev: stifb: Fix info entry in sti_struct on error path + - nbd: Fix debugfs_create_dir error checking + - block/rnbd: replace REQ_OP_FLUSH with REQ_OP_WRITE + - nvme-pci: add NVME_QUIRK_BOGUS_NID for HS-SSD-FUTURE 2048G + - nvme-pci: add quirk for missing secondary temperature thresholds + - ASoC: amd: yc: Add DMI entry to support System76 Pangolin 12 + - ASoC: dwc: limit the number of overrun messages + - um: harddog: fix modular build + - xfrm: Check if_id in inbound policy/secpath match + - ASoC: dt-bindings: Adjust #sound-dai-cells on TI's single-DAI codecs + - ALSA: hda/realtek: Add quirks for ASUS GU604V and GU603V + - ASoC: ssm2602: Add workaround for playback distortions + - media: dvb_demux: fix a bug for the continuity counter + - media: dvb-usb: az6027: fix three null-ptr-deref in az6027_i2c_xfer() + - media: dvb-usb-v2: ec168: fix null-ptr-deref in ec168_i2c_xfer() + - media: dvb-usb-v2: ce6230: fix null-ptr-deref in ce6230_i2c_master_xfer() + - media: dvb-usb-v2: rtl28xxu: fix null-ptr-deref in rtl28xxu_i2c_xfer + - media: dvb-usb: digitv: fix null-ptr-deref in digitv_i2c_xfer() + - media: dvb-usb: dw2102: fix uninit-value in su3000_read_mac_address + - media: netup_unidvb: fix irq init by register it at the end of probe + - media: dvb_ca_en50221: fix a size write bug + - media: ttusb-dec: fix memory leak in ttusb_dec_exit_dvb() + - media: mn88443x: fix !CONFIG_OF error by drop of_match_ptr from ID table + - media: dvb-core: Fix use-after-free due on race condition at dvb_net + - media: dvb-core: Fix use-after-free due to race at dvb_register_device() + - media: dvb-core: Fix use-after-free due to race condition at dvb_ca_en50221 + - ASoC: SOF: debug: conditionally bump runtime_pm counter on exceptions + - ASoC: SOF: pcm: fix pm_runtime imbalance in error handling + - ASoC: SOF: sof-client-probes: fix pm_runtime imbalance in error handling + - ASoC: SOF: pm: save io region state in case of errors in resume + - s390/pkey: zeroize key blobs + - s390/topology: honour nr_cpu_ids when adding CPUs + - ACPI: resource: Add IRQ override quirk for LG UltraPC 17U70P + - wifi: rtl8xxxu: fix authentication timeout due to incorrect RCR value + - ARM: dts: stm32: add pin map for CAN controller on stm32f7 + - arm64/mm: mark private VM_FAULT_X defines as vm_fault_t + - arm64: vdso: Pass (void *) to virt_to_page() + - wifi: mac80211: simplify chanctx allocation + - wifi: mac80211: consider reserved chanctx for mindef + - wifi: mac80211: recalc chanctx mindef before assigning + - wifi: iwlwifi: mvm: Add locking to the rate read flow + - scsi: core: Decrease scsi_device's iorequest_cnt if dispatch failed + - wifi: b43: fix incorrect __packed annotation + - netfilter: conntrack: define variables 
exp_nat_nla_policy and any_addr with + CONFIG_NF_NAT + - nvme-multipath: don't call blk_mark_disk_dead in nvme_mpath_remove_disk + - nvme: do not let the user delete a ctrl before a complete initialization + - ALSA: oss: avoid missing-prototype warnings + - drm/msm: Be more shouty if per-process pgtables aren't working + - atm: hide unused procfs functions + - ceph: silence smatch warning in reconnect_caps_cb() + - drm/amdgpu: skip disabling fence driver src_irqs when device is unplugged + - ublk: fix AB-BA lockdep warning + - nvme-pci: Add quirk for Teamgroup MP33 SSD + - block: Deny writable memory mapping if block is read-only + - KVM: arm64: vgic: Fix a circular locking issue + - KVM: arm64: vgic: Wrap vgic_its_create() with config_lock + - KVM: arm64: vgic: Fix locking comment + - media: mediatek: vcodec: Only apply 4K frame sizes on decoder formats + - mailbox: mailbox-test: fix a locking issue in mbox_test_message_write() + - drivers: base: cacheinfo: Fix shared_cpu_map changes in event of CPU hotplug + - media: uvcvideo: Don't expose unsupported formats to userspace + - iio: accel: st_accel: Fix invalid mount_matrix on devices without ACPI _ONT + method + - iio: adc: mxs-lradc: fix the order of two cleanup operations + - HID: google: add jewel USB id + - HID: wacom: avoid integer overflow in wacom_intuos_inout() + - iio: imu: inv_icm42600: fix timestamp reset + - dt-bindings: iio: adc: renesas,rcar-gyroadc: Fix adi,ad7476 compatible value + - iio: light: vcnl4035: fixed chip ID check + - iio: adc: stm32-adc: skip adc-channels setup if none is present + - iio: adc: ad_sigma_delta: Fix IRQ issue by setting IRQ_DISABLE_UNLAZY flag + - iio: dac: mcp4725: Fix i2c_master_send() return value handling + - iio: addac: ad74413: fix resistance input processing + - iio: adc: ad7192: Change "shorted" channels to differential + - iio: adc: stm32-adc: skip adc-diff-channels setup if none is present + - iio: dac: build ad5758 driver when AD5758 is selected + - net: usb: qmi_wwan: Set DTR quirk for BroadMobi BM818 + - dt-bindings: usb: snps,dwc3: Fix "snps,hsphy_interface" type + - usb: cdns3: fix NCM gadget RX speed 20x slow than expection at iMX8QM + - usb: gadget: f_fs: Add unbind event before functionfs_unbind + - md/raid5: fix miscalculation of 'end_sector' in raid5_read_one_chunk() + - misc: fastrpc: return -EPIPE to invocations on device removal + - misc: fastrpc: reject new invocations during device removal + - scsi: stex: Fix gcc 13 warnings + - ata: libata-scsi: Use correct device no in ata_find_dev() + - drm/amdgpu: enable tmz by default for GC 11.0.1 + - drm/amd/pm: reverse mclk and fclk clocks levels for SMU v13.0.4 + - drm/amd/pm: reverse mclk and fclk clocks levels for vangogh + - drm/amd/pm: resolve reboot exception for si oland + - drm/amd/pm: reverse mclk clocks levels for SMU v13.0.5 + - drm/amd/pm: reverse mclk and fclk clocks levels for yellow carp + - drm/amd/pm: reverse mclk and fclk clocks levels for renoir + - mmc: vub300: fix invalid response handling + - mmc: pwrseq: sd8787: Fix WILC CHIP_EN and RESETN toggling order + - tty: serial: fsl_lpuart: use UARTCTRL_TXINV to send break instead of + UARTCTRL_SBK + - btrfs: fix csum_tree_block page iteration to avoid tripping on + -Werror=array-bounds + - phy: qcom-qmp-combo: fix init-count imbalance + - phy: qcom-qmp-pcie-msm8996: fix init-count imbalance + - block: fix revalidate performance regression + - powerpc/iommu: Limit number of TCEs to 512 for H_STUFF_TCE hcall + - iommu/amd: Fix domain flush size when syncing iotlb + - 
tpm, tpm_tis: correct tpm_tis_flags enumeration values + - riscv: perf: Fix callchain parse error with kernel tracepoint events + - io_uring: undeprecate epoll_ctl support + - selinux: don't use make's grouped targets feature yet + - mtdchar: mark bits of ioctl handler noinline + - tracing/timerlat: Always wakeup the timerlat thread + - tracing/histograms: Allow variables to have some modifiers + - tracing/probe: trace_probe_primary_from_call(): checked list_first_entry + - selftests: mptcp: connect: skip if MPTCP is not supported + - selftests: mptcp: pm nl: skip if MPTCP is not supported + - selftests: mptcp: join: skip if MPTCP is not supported + - selftests: mptcp: sockopt: skip if MPTCP is not supported + - selftests: mptcp: userspace pm: skip if MPTCP is not supported + - mptcp: fix connect timeout handling + - mptcp: fix active subflow finalization + - ext4: add EA_INODE checking to ext4_iget() + - ext4: set lockdep subclass for the ea_inode in ext4_xattr_inode_cache_find() + - ext4: disallow ea_inodes with extended attributes + - ext4: add lockdep annotations for i_data_sem for ea_inode's + - fbcon: Fix null-ptr-deref in soft_cursor + - serial: 8250_tegra: Fix an error handling path in tegra_uart_probe() + - serial: cpm_uart: Fix a COMPILE_TEST dependency + - powerpc/xmon: Use KSYM_NAME_LEN in array size + - test_firmware: fix a memory leak with reqs buffer + - test_firmware: fix the memory leak of the allocated firmware buffer + - KVM: arm64: Populate fault info for watchpoint + - KVM: x86: Account fastpath-only VM-Exits in vCPU stats + - ksmbd: fix credit count leakage + - ksmbd: fix UAF issue from opinfo->conn + - ksmbd: fix incorrect AllocationSize set in smb2_get_info + - ksmbd: fix slab-out-of-bounds read in smb2_handle_negotiate + - ksmbd: fix multiple out-of-bounds read during context decoding + - KEYS: asymmetric: Copy sig and digest in public_key_verify_signature() + - fs/ntfs3: Validate MFT flags before replaying logs + - regmap: Account for register length when chunking + - tpm, tpm_tis: Request threaded interrupt handler + - iommu/amd/pgtbl_v2: Fix domain max address + - drm/amd/display: Have Payload Properly Created After Resume + - tls: rx: strp: don't use GFP_KERNEL in softirq context + - selftests: mptcp: diag: skip if MPTCP is not supported + - selftests: mptcp: simult flows: skip if MPTCP is not supported + - selftests: mptcp: join: avoid using 'cmp --bytes' + - ext4: enable the lazy init thread when remounting read/write + - iommu: Make IPMMU_VMSA dependencies more strict + - [Config] updateconfigs for IPMMU_VMSA + - iommu/amd: Add missing domain type checks + - efi: Bump stub image version for macOS HVF compatibility + - rxrpc: Truncate UTS_RELEASE for rxrpc version + - net: renesas: rswitch: Fix return value in error path of xmit + - KVM: arm64: Prevent unconditional donation of unmapped regions from the host + - KVM: arm64: Reload PTE after invoking walker callback on preorder traversal + - iio: ad4130: Make sure clock provider gets removed + - iio: adc: mt6370: Fix ibus and ibat scaling value of some specific vendor ID + chips + - iio: accel: kx022a fix irq getting + - misc: fastrpc: Reassign memory ownership only for remote heap + - module/decompress: Fix error checking on zstd decompression + - dmaengine: at_hdmac: Repair bitfield macros for peripheral ID handling + - dmaengine: at_hdmac: Extend the Flow Controller bitfield to three bits + - test_firmware: prevent race conditions by a correct implementation of + locking + - KVM: arm64: Drop last page ref 
in kvm_pgtable_stage2_free_removed() + - KVM: x86/mmu: Grab memslot for correct address space in NX recovery worker + - Upstream stable to v6.1.33, v6.3.7 + - scsi: megaraid_sas: Add flexible array member for SGLs + - net: sfp: fix state loss when updating state_hw_mask + - spi: mt65xx: make sure operations completed before unloading + - platform/surface: aggregator: Allow completion work-items to be executed in + parallel + - platform/surface: aggregator_tabletsw: Add support for book mode in KIP + subsystem + - spi: qup: Request DMA before enabling clocks + - afs: Fix setting of mtime when creating a file/dir/symlink + - wifi: mt76: mt7615: fix possible race in mt7615_mac_sta_poll + - bpf, sockmap: Avoid potential NULL dereference in + sk_psock_verdict_data_ready() + - neighbour: fix unaligned access to pneigh_entry + - net: dsa: lan9303: allow vid != 0 in port_fdb_{add|del} methods + - net/ipv4: ping_group_range: allow GID from 2147483648 to 4294967294 + - bpf: Fix UAF in task local storage + - bpf: Fix elem_size not being set for inner maps + - net/ipv6: fix bool/int mismatch for skip_notify_on_dev_down + - net/smc: Avoid to access invalid RMBs' MRs in SMCRv1 ADD LINK CONT + - net: enetc: correct the statistics of rx bytes + - net: enetc: correct rx_bytes statistics of XDP + - net/sched: fq_pie: ensure reasonable TCA_FQ_PIE_QUANTUM values + - Bluetooth: hci_sync: add lock to protect HCI_UNREGISTER + - Bluetooth: Fix l2cap_disconnect_req deadlock + - Bluetooth: ISO: don't try to remove CIG if there are bound CIS left + - Bluetooth: L2CAP: Add missing checks for invalid DCID + - wifi: mac80211: use correct iftype HE cap + - wifi: cfg80211: reject bad AP MLD address + - wifi: mac80211: mlme: fix non-inheritence element + - wifi: mac80211: don't translate beacon/presp addrs + - qed/qede: Fix scheduling while atomic + - wifi: cfg80211: fix locking in sched scan stop work + - selftests/bpf: Verify optval=NULL case + - selftests/bpf: Fix sockopt_sk selftest + - netfilter: nft_bitwise: fix register tracking + - netfilter: conntrack: fix NULL pointer dereference in nf_confirm_cthelper + - netfilter: ipset: Add schedule point in call_ad(). + - netfilter: nf_tables: out-of-bound check in chain blob + - ipv6: rpl: Fix Route of Death. 
+ - tcp: gso: really support BIG TCP + - rfs: annotate lockless accesses to sk->sk_rxhash + - rfs: annotate lockless accesses to RFS sock flow table + - net: sched: add rcu annotations around qdisc->qdisc_sleeping + - drm/i915/selftests: Add some missing error propagation + - net: sched: move rtm_tca_policy declaration to include file + - net: sched: act_police: fix sparse errors in tcf_police_dump() + - net: sched: fix possible refcount leak in tc_chain_tmplt_add() + - bpf: Add extra path pointer check to d_path helper + - drm/amdgpu: fix Null pointer dereference error in amdgpu_device_recover_vram + - lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release() + - net: bcmgenet: Fix EEE implementation + - bnxt_en: Don't issue AP reset during ethtool's reset operation + - bnxt_en: Query default VLAN before VNIC setup on a VF + - bnxt_en: Skip firmware fatal error recovery if chip is not accessible + - bnxt_en: Prevent kernel panic when receiving unexpected PHC_UPDATE event + - bnxt_en: Implement .set_port / .unset_port UDP tunnel callbacks + - batman-adv: Broken sync while rescheduling delayed work + - Input: xpad - delete a Razer DeathAdder mouse VID/PID entry + - Input: psmouse - fix OOB access in Elantech protocol + - Input: fix open count when closing inhibited device + - ALSA: hda: Fix kctl->id initialization + - ALSA: ymfpci: Fix kctl->id initialization + - ALSA: gus: Fix kctl->id initialization + - ALSA: cmipci: Fix kctl->id initialization + - ALSA: hda/realtek: Add quirk for Clevo NS50AU + - ALSA: ice1712,ice1724: fix the kcontrol->id initialization + - ALSA: hda/realtek: Add a quirk for HP Slim Desktop S01 + - ALSA: hda/realtek: Add quirks for Asus ROG 2024 laptops using CS35L41 + - drm/i915/gt: Use the correct error value when kernel_context() fails + - drm/amdgpu: fix xclk freq on CHIP_STONEY + - drm/amdgpu: change reserved vram info print + - drm/amd/pm: Fix power context allocation in SMU13 + - drm/amd/display: Reduce sdp bw after urgent to 90% + - wifi: iwlwifi: mvm: Fix -Warray-bounds bug in iwl_mvm_wait_d3_notif() + - can: j1939: j1939_sk_send_loop_abort(): improved error queue handling in + J1939 Socket + - can: j1939: change j1939_netdev_lock type to mutex + - can: j1939: avoid possible use-after-free when j1939_can_rx_register fails + - mptcp: only send RM_ADDR in nl_cmd_remove + - mptcp: add address into userspace pm list + - mptcp: update userspace pm infos + - selftests: mptcp: update userspace pm addr tests + - selftests: mptcp: update userspace pm subflow tests + - ceph: fix use-after-free bug for inodes when flushing capsnaps + - s390/dasd: Use correct lock while counting channel queue length + - Bluetooth: Fix use-after-free in hci_remove_ltk/hci_remove_irk + - Bluetooth: fix debugfs registration + - Bluetooth: hci_qca: fix debugfs registration + - tee: amdtee: Add return_origin to 'struct tee_cmd_load_ta' + - rbd: move RBD_OBJ_FLAG_COPYUP_ENABLED flag setting + - rbd: get snapshot context after exclusive lock is ensured to be held + - virtio_net: use control_buf for coalesce params + - soc: qcom: icc-bwmon: fix incorrect error code passed to dev_err_probe() + - pinctrl: meson-axg: add missing GPIOA_18 gpio group + - usb: usbfs: Enforce page requirements for mmap + - usb: usbfs: Use consistent mmap functions + - mm: page_table_check: Make it dependent on EXCLUSIVE_SYSTEM_RAM + - mm: page_table_check: Ensure user pages are not slab pages + - arm64: dts: qcom: sc8280xp: Flush RSC sleep & wake votes + - ARM: at91: pm: fix imbalanced reference counter for 
ethernet devices + - ARM: dts: at91: sama7g5ek: fix debounce delay property for shdwc + - ASoC: codecs: wsa883x: do not set can_multi_write flag + - ASoC: codecs: wsa881x: do not set can_multi_write flag + - arm64: dts: qcom: sc7180-lite: Fix SDRAM freq for misidentified sc7180-lite + boards + - arm64: dts: imx8qm-mek: correct GPIOs for USDHC2 CD and WP signals + - arm64: dts: imx8-ss-dma: assign default clock rate for lpuarts + - ASoC: mediatek: mt8195-afe-pcm: Convert to platform remove callback + returning void + - ASoC: mediatek: mt8195: fix use-after-free in driver remove path + - ASoC: simple-card-utils: fix PCM constraint error check + - blk-mq: fix blk_mq_hw_ctx active request accounting + - arm64: dts: imx8mn-beacon: Fix SPI CS pinmux + - i2c: mv64xxx: Fix reading invalid status value in atomic mode + - firmware: arm_ffa: Set handle field to zero in memory descriptor + - gpio: sim: fix memory corruption when adding named lines and unnamed hogs + - i2c: sprd: Delete i2c adapter in .remove's error path + - riscv: mm: Ensure prot of VM_WRITE and VM_EXEC must be readable + - eeprom: at24: also select REGMAP + - soundwire: stream: Add missing clear of alloc_slave_rt + - riscv: fix kprobe __user string arg print fault issue + - [Config] updateconfigs for ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + - vduse: avoid empty string for dev name + - vhost: support PACKED when setting-getting vring_base + - vhost_vdpa: support PACKED when setting-getting vring_base + - ksmbd: fix out-of-bound read in deassemble_neg_contexts() + - ksmbd: fix out-of-bound read in parse_lease_state() + - ksmbd: check the validation of pdu_size in ksmbd_conn_handler_loop + - ext4: only check dquot_initialize_needed() when debugging + - wifi: rtw89: correct PS calculation for SUPPORTS_DYNAMIC_PS + - wifi: rtw88: correct PS calculation for SUPPORTS_DYNAMIC_PS + - Bluetooth: Split bt_iso_qos into dedicated structures + - Bluetooth: ISO: consider right CIS when removing CIG at cleanup + - Bluetooth: ISO: Fix CIG auto-allocation to select configurable CIG + - netfilter: nf_tables: Add null check for nla_nest_start_noflag() in + nft_dump_basechain_hook() + - drm/lima: fix sched context destroy + - net: openvswitch: fix upcall counter access before allocation + - bnxt_en: Fix bnxt_hwrm_update_rss_hash_cfg() + - Input: cyttsp5 - fix array length + - soc: qcom: rpmh-rsc: drop redundant unsigned >=0 comparision + - arm64: dts: qcom: sm6375-pdx225: Fix remoteproc firmware paths + - vdpa/mlx5: Fix hang when cvq commands are triggered during device unregister + - ksmbd: fix posix_acls and acls dereferencing possible ERR_PTR() + - Upstream stable to v6.1.34, v6.3.8 + + * CVE-2023-4273 + - exfat: check if filename entries exceeds max filename length + + * CVE-2023-4128 + - net/sched: cls_u32: No longer copy tcf_result on update to avoid use-after- + free + - net/sched: cls_fw: No longer copy tcf_result on update to avoid use-after- + free + - net/sched: cls_route: No longer copy tcf_result on update to avoid use- + after-free + + * CVE-2023-3212 + - gfs2: Don't deref jdesc in evict + + -- Stefan Bader Mon, 04 Sep 2023 11:20:15 +0200 + linux (6.2.0-32.32) lunar; urgency=medium * lunar/linux: 6.2.0-32.32 -proposed tracker (LP: #2031134) diff -u linux-6.2.0/debian/control linux-6.2.0/debian/control --- linux-6.2.0/debian/control +++ linux-6.2.0/debian/control @@ -93,7 +93,7 @@ you do not want this package. Install the appropriate linux-headers package instead. 
-Package: linux-headers-6.2.0-32 +Package: linux-headers-6.2.0-34 Build-Profiles: Architecture: all Multi-Arch: foreign @@ -103,7 +103,7 @@ Description: Header files related to Linux kernel version 6.2.0 This package provides kernel header files for version 6.2.0, for sites that want the latest kernel headers. Please read - /usr/share/doc/linux-headers-6.2.0-32/debian.README.gz for details + /usr/share/doc/linux-headers-6.2.0-34/debian.README.gz for details Package: linux-tools-common Build-Profiles: @@ -118,18 +118,18 @@ version locked tools (such as perf and x86_energy_perf_policy) for version 6.2.0. -Package: linux-tools-6.2.0-32 +Package: linux-tools-6.2.0-34 Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: devel Priority: optional Depends: ${misc:Depends}, ${shlibs:Depends}, linux-tools-common -Description: Linux kernel version specific tools for version 6.2.0-32 +Description: Linux kernel version specific tools for version 6.2.0-34 This package provides the architecture dependant parts for kernel version locked tools (such as perf and x86_energy_perf_policy) for - version 6.2.0-32 on + version 6.2.0-34 on 64 bit x86. - You probably want to install linux-tools-6.2.0-32-. + You probably want to install linux-tools-6.2.0-34-. Package: linux-cloud-tools-common Build-Profiles: @@ -142,17 +142,17 @@ This package provides the architecture independent parts for kernel version locked tools for cloud tools for version 6.2.0. -Package: linux-cloud-tools-6.2.0-32 +Package: linux-cloud-tools-6.2.0-34 Build-Profiles: Architecture: amd64 armhf Section: devel Priority: optional Depends: ${misc:Depends}, ${shlibs:Depends}, linux-cloud-tools-common -Description: Linux kernel version specific cloud tools for version 6.2.0-32 +Description: Linux kernel version specific cloud tools for version 6.2.0-34 This package provides the architecture dependant parts for kernel - version locked tools for cloud tools for version 6.2.0-32 on + version locked tools for cloud tools for version 6.2.0-34 on 64 bit x86. - You probably want to install linux-cloud-tools-6.2.0-32-. + You probably want to install linux-cloud-tools-6.2.0-34-. Package: linux-tools-host Build-Profiles: @@ -192,17 +192,17 @@ contained in each file. 
-Package: linux-image-unsigned-6.2.0-32-generic +Package: linux-image-unsigned-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: kernel Priority: optional Provides: linux-image, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules, virtualbox-guest-modules [amd64], ${linux:rprovides} -Depends: ${misc:Depends}, ${shlibs:Depends}, kmod, linux-base (>= 4.5ubuntu1~16.04.1), linux-modules-6.2.0-32-generic +Depends: ${misc:Depends}, ${shlibs:Depends}, kmod, linux-base (>= 4.5ubuntu1~16.04.1), linux-modules-6.2.0-34-generic Recommends: grub-pc [amd64] | grub-efi-amd64 [amd64] | grub-efi-ia32 [amd64] | grub [amd64] | lilo [amd64] | flash-kernel [armhf arm64] | grub-efi-arm64 [arm64] | grub-efi-arm [armhf] | grub-ieee1275 [ppc64el], initramfs-tools | linux-initramfs-tool Breaks: flash-kernel (<< 3.90ubuntu2) [arm64 armhf], s390-tools (<< 2.3.0-0ubuntu3) [s390x] -Conflicts: linux-image-6.2.0-32-generic -Suggests: fdutils, linux-doc | linux-source-6.2.0, linux-tools, linux-headers-6.2.0-32-generic, linux-modules-extra-6.2.0-32-generic +Conflicts: linux-image-6.2.0-34-generic +Suggests: fdutils, linux-doc | linux-source-6.2.0, linux-tools, linux-headers-6.2.0-34-generic, linux-modules-extra-6.2.0-34-generic Description: Linux kernel image for version 6.2.0 on 64 bit x86 SMP This package contains the unsigned Linux kernel image for version 6.2.0 on 64 bit x86 SMP. @@ -215,7 +215,7 @@ the linux-generic meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. -Package: linux-modules-6.2.0-32-generic +Package: linux-modules-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: kernel @@ -235,12 +235,12 @@ the linux-generic meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. -Package: linux-modules-extra-6.2.0-32-generic +Package: linux-modules-extra-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: kernel Priority: optional -Depends: ${misc:Depends}, ${shlibs:Depends}, linux-modules-6.2.0-32-generic, wireless-regdb +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-modules-6.2.0-34-generic, wireless-regdb Description: Linux kernel extra modules for version 6.2.0 on 64 bit x86 SMP This package contains the Linux kernel extra modules for version 6.2.0 on 64 bit x86 SMP. @@ -257,21 +257,21 @@ the linux-generic meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. -Package: linux-headers-6.2.0-32-generic +Package: linux-headers-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: devel Priority: optional -Depends: ${misc:Depends}, linux-headers-6.2.0-32, ${shlibs:Depends} +Depends: ${misc:Depends}, linux-headers-6.2.0-34, ${shlibs:Depends} Provides: linux-headers, linux-headers-3.0 Description: Linux kernel headers for version 6.2.0 on 64 bit x86 SMP This package provides kernel header files for version 6.2.0 on 64 bit x86 SMP. . This is for sites that want the latest kernel headers. Please read - /usr/share/doc/linux-headers-6.2.0-32/debian.README.gz for details. + /usr/share/doc/linux-headers-6.2.0-34/debian.README.gz for details. 
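Note: the renamed linux-headers-6.2.0-34-generic package described above is what out-of-tree module builds compile against via kbuild. A minimal sketch of such a module follows, assuming those headers are installed on the target system; the hello.c file name, module name, and log strings are illustrative and are not part of this package set or of the diff itself.

    /* hello.c - minimal out-of-tree module sketch (illustrative names). */
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    MODULE_LICENSE("GPL");
    MODULE_DESCRIPTION("Minimal out-of-tree module sketch");

    static int __init hello_init(void)
    {
            pr_info("hello: module loaded\n");
            return 0;
    }

    static void __exit hello_exit(void)
    {
            pr_info("hello: module unloaded\n");
    }

    module_init(hello_init);
    module_exit(hello_exit);

With a Kbuild file containing "obj-m := hello.o", the standard external-module invocation "make -C /lib/modules/6.2.0-34-generic/build M=$PWD modules" would build it against these headers; the path shown is the usual location provided by the headers package and is an assumption based on the package names above.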
-Package: linux-lib-rust-6.2.0-32-generic +Package: linux-lib-rust-6.2.0-34-generic Build-Profiles: Architecture: amd64 Multi-Arch: foreign @@ -282,7 +282,7 @@ This package provides kernel library files for version 6.2.0, that allow to compile out-of-tree kernel modules written in Rust. -Package: linux-image-unsigned-6.2.0-32-generic-dbgsym +Package: linux-image-unsigned-6.2.0-34-generic-dbgsym Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: devel @@ -299,31 +299,31 @@ is uncompressed, and unstripped. This package also includes the unstripped modules. -Package: linux-tools-6.2.0-32-generic +Package: linux-tools-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: devel Priority: optional -Depends: ${misc:Depends}, linux-tools-6.2.0-32 -Description: Linux kernel version specific tools for version 6.2.0-32 +Depends: ${misc:Depends}, linux-tools-6.2.0-34 +Description: Linux kernel version specific tools for version 6.2.0-34 This package provides the architecture dependant parts for kernel version locked tools (such as perf and x86_energy_perf_policy) for - version 6.2.0-32 on + version 6.2.0-34 on 64 bit x86. -Package: linux-cloud-tools-6.2.0-32-generic +Package: linux-cloud-tools-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: devel Priority: optional -Depends: ${misc:Depends}, linux-cloud-tools-6.2.0-32 -Description: Linux kernel version specific cloud tools for version 6.2.0-32 +Depends: ${misc:Depends}, linux-cloud-tools-6.2.0-34 +Description: Linux kernel version specific cloud tools for version 6.2.0-34 This package provides the architecture dependant parts for kernel - version locked tools for cloud for version 6.2.0-32 on + version locked tools for cloud for version 6.2.0-34 on 64 bit x86. -Package: linux-buildinfo-6.2.0-32-generic +Package: linux-buildinfo-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: kernel @@ -337,18 +337,18 @@ You likely do not want to install this package. -Package: linux-modules-ipu6-6.2.0-32-generic +Package: linux-modules-ipu6-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: kernel Priority: optional Depends: ${misc:Depends}, - linux-image-6.2.0-32-generic | linux-image-unsigned-6.2.0-32-generic, + linux-image-6.2.0-34-generic | linux-image-unsigned-6.2.0-34-generic, Built-Using: ${linux:BuiltUsing} -Description: Linux kernel ipu6 modules for version 6.2.0-32 +Description: Linux kernel ipu6 modules for version 6.2.0-34 This package provides the Linux kernel ipu6 modules for version - 6.2.0-32. + 6.2.0-34. . You likely do not want to install this package directly. Instead, install the one of the linux-modules-ipu6-generic* meta-packages, @@ -356,18 +356,18 @@ also installed. -Package: linux-modules-ivsc-6.2.0-32-generic +Package: linux-modules-ivsc-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: kernel Priority: optional Depends: ${misc:Depends}, - linux-image-6.2.0-32-generic | linux-image-unsigned-6.2.0-32-generic, + linux-image-6.2.0-34-generic | linux-image-unsigned-6.2.0-34-generic, Built-Using: ${linux:BuiltUsing} -Description: Linux kernel ivsc modules for version 6.2.0-32 +Description: Linux kernel ivsc modules for version 6.2.0-34 This package provides the Linux kernel ivsc modules for version - 6.2.0-32. + 6.2.0-34. . You likely do not want to install this package directly. 
Instead, install the one of the linux-modules-ivsc-generic* meta-packages, @@ -375,18 +375,18 @@ also installed. -Package: linux-modules-iwlwifi-6.2.0-32-generic +Package: linux-modules-iwlwifi-6.2.0-34-generic Build-Profiles: Architecture: amd64 armhf arm64 ppc64el s390x Section: kernel Priority: optional Depends: ${misc:Depends}, - linux-image-6.2.0-32-generic | linux-image-unsigned-6.2.0-32-generic, + linux-image-6.2.0-34-generic | linux-image-unsigned-6.2.0-34-generic, Built-Using: ${linux:BuiltUsing} -Description: Linux kernel iwlwifi modules for version 6.2.0-32 +Description: Linux kernel iwlwifi modules for version 6.2.0-34 This package provides the Linux kernel iwlwifi modules for version - 6.2.0-32. + 6.2.0-34. . You likely do not want to install this package directly. Instead, install the one of the linux-modules-iwlwifi-generic* meta-packages, @@ -394,17 +394,17 @@ also installed. -Package: linux-image-unsigned-6.2.0-32-generic-64k +Package: linux-image-unsigned-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: kernel Priority: optional Provides: linux-image, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules, ${linux:rprovides} -Depends: ${misc:Depends}, ${shlibs:Depends}, kmod, linux-base (>= 4.5ubuntu1~16.04.1), linux-modules-6.2.0-32-generic-64k +Depends: ${misc:Depends}, ${shlibs:Depends}, kmod, linux-base (>= 4.5ubuntu1~16.04.1), linux-modules-6.2.0-34-generic-64k Recommends: grub-efi-arm64 [arm64] | flash-kernel [arm64], initramfs-tools | linux-initramfs-tool Breaks: flash-kernel (<< 3.90ubuntu2) [arm64 armhf], s390-tools (<< 2.3.0-0ubuntu3) [s390x] -Conflicts: linux-image-6.2.0-32-generic-64k -Suggests: fdutils, linux-doc | linux-source-6.2.0, linux-tools, linux-headers-6.2.0-32-generic-64k, linux-modules-extra-6.2.0-32-generic-64k +Conflicts: linux-image-6.2.0-34-generic-64k +Suggests: fdutils, linux-doc | linux-source-6.2.0, linux-tools, linux-headers-6.2.0-34-generic-64k, linux-modules-extra-6.2.0-34-generic-64k Description: Linux kernel image for version 6.2.0 on 64 bit x86 SMP This package contains the unsigned Linux kernel image for version 6.2.0 on 64 bit x86 SMP. @@ -417,7 +417,7 @@ the linux-generic-64k meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. -Package: linux-modules-6.2.0-32-generic-64k +Package: linux-modules-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: kernel @@ -437,12 +437,12 @@ the linux-generic-64k meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. -Package: linux-modules-extra-6.2.0-32-generic-64k +Package: linux-modules-extra-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: kernel Priority: optional -Depends: ${misc:Depends}, ${shlibs:Depends}, linux-modules-6.2.0-32-generic-64k, wireless-regdb +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-modules-6.2.0-34-generic-64k, wireless-regdb Description: Linux kernel extra modules for version 6.2.0 on 64 bit x86 SMP This package contains the Linux kernel extra modules for version 6.2.0 on 64 bit x86 SMP. @@ -459,21 +459,21 @@ the linux-generic-64k meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. 
-Package: linux-headers-6.2.0-32-generic-64k +Package: linux-headers-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: devel Priority: optional -Depends: ${misc:Depends}, linux-headers-6.2.0-32, ${shlibs:Depends} +Depends: ${misc:Depends}, linux-headers-6.2.0-34, ${shlibs:Depends} Provides: linux-headers, linux-headers-3.0 Description: Linux kernel headers for version 6.2.0 on 64 bit x86 SMP This package provides kernel header files for version 6.2.0 on 64 bit x86 SMP. . This is for sites that want the latest kernel headers. Please read - /usr/share/doc/linux-headers-6.2.0-32/debian.README.gz for details. + /usr/share/doc/linux-headers-6.2.0-34/debian.README.gz for details. -Package: linux-lib-rust-6.2.0-32-generic-64k +Package: linux-lib-rust-6.2.0-34-generic-64k Build-Profiles: Architecture: amd64 Multi-Arch: foreign @@ -484,7 +484,7 @@ This package provides kernel library files for version 6.2.0, that allow to compile out-of-tree kernel modules written in Rust. -Package: linux-image-unsigned-6.2.0-32-generic-64k-dbgsym +Package: linux-image-unsigned-6.2.0-34-generic-64k-dbgsym Build-Profiles: Architecture: arm64 Section: devel @@ -501,31 +501,31 @@ is uncompressed, and unstripped. This package also includes the unstripped modules. -Package: linux-tools-6.2.0-32-generic-64k +Package: linux-tools-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: devel Priority: optional -Depends: ${misc:Depends}, linux-tools-6.2.0-32 -Description: Linux kernel version specific tools for version 6.2.0-32 +Depends: ${misc:Depends}, linux-tools-6.2.0-34 +Description: Linux kernel version specific tools for version 6.2.0-34 This package provides the architecture dependant parts for kernel version locked tools (such as perf and x86_energy_perf_policy) for - version 6.2.0-32 on + version 6.2.0-34 on 64 bit x86. -Package: linux-cloud-tools-6.2.0-32-generic-64k +Package: linux-cloud-tools-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: devel Priority: optional -Depends: ${misc:Depends}, linux-cloud-tools-6.2.0-32 -Description: Linux kernel version specific cloud tools for version 6.2.0-32 +Depends: ${misc:Depends}, linux-cloud-tools-6.2.0-34 +Description: Linux kernel version specific cloud tools for version 6.2.0-34 This package provides the architecture dependant parts for kernel - version locked tools for cloud for version 6.2.0-32 on + version locked tools for cloud for version 6.2.0-34 on 64 bit x86. -Package: linux-buildinfo-6.2.0-32-generic-64k +Package: linux-buildinfo-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: kernel @@ -539,18 +539,18 @@ You likely do not want to install this package. -Package: linux-modules-ipu6-6.2.0-32-generic-64k +Package: linux-modules-ipu6-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: kernel Priority: optional Depends: ${misc:Depends}, - linux-image-6.2.0-32-generic-64k | linux-image-unsigned-6.2.0-32-generic-64k, + linux-image-6.2.0-34-generic-64k | linux-image-unsigned-6.2.0-34-generic-64k, Built-Using: ${linux:BuiltUsing} -Description: Linux kernel ipu6 modules for version 6.2.0-32 +Description: Linux kernel ipu6 modules for version 6.2.0-34 This package provides the Linux kernel ipu6 modules for version - 6.2.0-32. + 6.2.0-34. . You likely do not want to install this package directly. Instead, install the one of the linux-modules-ipu6-generic-64k* meta-packages, @@ -558,18 +558,18 @@ also installed. 
-Package: linux-modules-ivsc-6.2.0-32-generic-64k +Package: linux-modules-ivsc-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: kernel Priority: optional Depends: ${misc:Depends}, - linux-image-6.2.0-32-generic-64k | linux-image-unsigned-6.2.0-32-generic-64k, + linux-image-6.2.0-34-generic-64k | linux-image-unsigned-6.2.0-34-generic-64k, Built-Using: ${linux:BuiltUsing} -Description: Linux kernel ivsc modules for version 6.2.0-32 +Description: Linux kernel ivsc modules for version 6.2.0-34 This package provides the Linux kernel ivsc modules for version - 6.2.0-32. + 6.2.0-34. . You likely do not want to install this package directly. Instead, install the one of the linux-modules-ivsc-generic-64k* meta-packages, @@ -577,18 +577,18 @@ also installed. -Package: linux-modules-iwlwifi-6.2.0-32-generic-64k +Package: linux-modules-iwlwifi-6.2.0-34-generic-64k Build-Profiles: Architecture: arm64 Section: kernel Priority: optional Depends: ${misc:Depends}, - linux-image-6.2.0-32-generic-64k | linux-image-unsigned-6.2.0-32-generic-64k, + linux-image-6.2.0-34-generic-64k | linux-image-unsigned-6.2.0-34-generic-64k, Built-Using: ${linux:BuiltUsing} -Description: Linux kernel iwlwifi modules for version 6.2.0-32 +Description: Linux kernel iwlwifi modules for version 6.2.0-34 This package provides the Linux kernel iwlwifi modules for version - 6.2.0-32. + 6.2.0-34. . You likely do not want to install this package directly. Instead, install the one of the linux-modules-iwlwifi-generic-64k* meta-packages, @@ -596,17 +596,17 @@ also installed. -Package: linux-image-unsigned-6.2.0-32-generic-lpae +Package: linux-image-unsigned-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: kernel Priority: optional Provides: linux-image, fuse-module, kvm-api-4, redhat-cluster-modules, ivtv-modules, ${linux:rprovides} -Depends: ${misc:Depends}, ${shlibs:Depends}, kmod, linux-base (>= 4.5ubuntu1~16.04.1), linux-modules-6.2.0-32-generic-lpae +Depends: ${misc:Depends}, ${shlibs:Depends}, kmod, linux-base (>= 4.5ubuntu1~16.04.1), linux-modules-6.2.0-34-generic-lpae Recommends: flash-kernel [armhf] | grub-efi-arm [armhf], initramfs-tools | linux-initramfs-tool Breaks: flash-kernel (<< 3.90ubuntu2) [arm64 armhf], s390-tools (<< 2.3.0-0ubuntu3) [s390x] -Conflicts: linux-image-6.2.0-32-generic-lpae -Suggests: fdutils, linux-doc | linux-source-6.2.0, linux-tools, linux-headers-6.2.0-32-generic-lpae, linux-modules-extra-6.2.0-32-generic-lpae +Conflicts: linux-image-6.2.0-34-generic-lpae +Suggests: fdutils, linux-doc | linux-source-6.2.0, linux-tools, linux-headers-6.2.0-34-generic-lpae, linux-modules-extra-6.2.0-34-generic-lpae Description: Linux kernel image for version 6.2.0 on 64 bit x86 SMP This package contains the unsigned Linux kernel image for version 6.2.0 on 64 bit x86 SMP. @@ -619,7 +619,7 @@ the linux-generic-lpae meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. -Package: linux-modules-6.2.0-32-generic-lpae +Package: linux-modules-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: kernel @@ -639,12 +639,12 @@ the linux-generic-lpae meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. 
-Package: linux-modules-extra-6.2.0-32-generic-lpae +Package: linux-modules-extra-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: kernel Priority: optional -Depends: ${misc:Depends}, ${shlibs:Depends}, linux-modules-6.2.0-32-generic-lpae, wireless-regdb +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-modules-6.2.0-34-generic-lpae, wireless-regdb Description: Linux kernel extra modules for version 6.2.0 on 64 bit x86 SMP This package contains the Linux kernel extra modules for version 6.2.0 on 64 bit x86 SMP. @@ -661,21 +661,21 @@ the linux-generic-lpae meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. -Package: linux-headers-6.2.0-32-generic-lpae +Package: linux-headers-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: devel Priority: optional -Depends: ${misc:Depends}, linux-headers-6.2.0-32, ${shlibs:Depends} +Depends: ${misc:Depends}, linux-headers-6.2.0-34, ${shlibs:Depends} Provides: linux-headers, linux-headers-3.0 Description: Linux kernel headers for version 6.2.0 on 64 bit x86 SMP This package provides kernel header files for version 6.2.0 on 64 bit x86 SMP. . This is for sites that want the latest kernel headers. Please read - /usr/share/doc/linux-headers-6.2.0-32/debian.README.gz for details. + /usr/share/doc/linux-headers-6.2.0-34/debian.README.gz for details. -Package: linux-lib-rust-6.2.0-32-generic-lpae +Package: linux-lib-rust-6.2.0-34-generic-lpae Build-Profiles: Architecture: amd64 Multi-Arch: foreign @@ -686,7 +686,7 @@ This package provides kernel library files for version 6.2.0, that allow to compile out-of-tree kernel modules written in Rust. -Package: linux-image-unsigned-6.2.0-32-generic-lpae-dbgsym +Package: linux-image-unsigned-6.2.0-34-generic-lpae-dbgsym Build-Profiles: Architecture: armhf Section: devel @@ -703,31 +703,31 @@ is uncompressed, and unstripped. This package also includes the unstripped modules. -Package: linux-tools-6.2.0-32-generic-lpae +Package: linux-tools-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: devel Priority: optional -Depends: ${misc:Depends}, linux-tools-6.2.0-32 -Description: Linux kernel version specific tools for version 6.2.0-32 +Depends: ${misc:Depends}, linux-tools-6.2.0-34 +Description: Linux kernel version specific tools for version 6.2.0-34 This package provides the architecture dependant parts for kernel version locked tools (such as perf and x86_energy_perf_policy) for - version 6.2.0-32 on + version 6.2.0-34 on 64 bit x86. -Package: linux-cloud-tools-6.2.0-32-generic-lpae +Package: linux-cloud-tools-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: devel Priority: optional -Depends: ${misc:Depends}, linux-cloud-tools-6.2.0-32 -Description: Linux kernel version specific cloud tools for version 6.2.0-32 +Depends: ${misc:Depends}, linux-cloud-tools-6.2.0-34 +Description: Linux kernel version specific cloud tools for version 6.2.0-34 This package provides the architecture dependant parts for kernel - version locked tools for cloud for version 6.2.0-32 on + version locked tools for cloud for version 6.2.0-34 on 64 bit x86. -Package: linux-buildinfo-6.2.0-32-generic-lpae +Package: linux-buildinfo-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: kernel @@ -741,18 +741,18 @@ You likely do not want to install this package. 
-Package: linux-modules-ipu6-6.2.0-32-generic-lpae +Package: linux-modules-ipu6-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: kernel Priority: optional Depends: ${misc:Depends}, - linux-image-6.2.0-32-generic-lpae | linux-image-unsigned-6.2.0-32-generic-lpae, + linux-image-6.2.0-34-generic-lpae | linux-image-unsigned-6.2.0-34-generic-lpae, Built-Using: ${linux:BuiltUsing} -Description: Linux kernel ipu6 modules for version 6.2.0-32 +Description: Linux kernel ipu6 modules for version 6.2.0-34 This package provides the Linux kernel ipu6 modules for version - 6.2.0-32. + 6.2.0-34. . You likely do not want to install this package directly. Instead, install the one of the linux-modules-ipu6-generic-lpae* meta-packages, @@ -760,18 +760,18 @@ also installed. -Package: linux-modules-ivsc-6.2.0-32-generic-lpae +Package: linux-modules-ivsc-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: kernel Priority: optional Depends: ${misc:Depends}, - linux-image-6.2.0-32-generic-lpae | linux-image-unsigned-6.2.0-32-generic-lpae, + linux-image-6.2.0-34-generic-lpae | linux-image-unsigned-6.2.0-34-generic-lpae, Built-Using: ${linux:BuiltUsing} -Description: Linux kernel ivsc modules for version 6.2.0-32 +Description: Linux kernel ivsc modules for version 6.2.0-34 This package provides the Linux kernel ivsc modules for version - 6.2.0-32. + 6.2.0-34. . You likely do not want to install this package directly. Instead, install the one of the linux-modules-ivsc-generic-lpae* meta-packages, @@ -779,18 +779,18 @@ also installed. -Package: linux-modules-iwlwifi-6.2.0-32-generic-lpae +Package: linux-modules-iwlwifi-6.2.0-34-generic-lpae Build-Profiles: Architecture: armhf Section: kernel Priority: optional Depends: ${misc:Depends}, - linux-image-6.2.0-32-generic-lpae | linux-image-unsigned-6.2.0-32-generic-lpae, + linux-image-6.2.0-34-generic-lpae | linux-image-unsigned-6.2.0-34-generic-lpae, Built-Using: ${linux:BuiltUsing} -Description: Linux kernel iwlwifi modules for version 6.2.0-32 +Description: Linux kernel iwlwifi modules for version 6.2.0-34 This package provides the Linux kernel iwlwifi modules for version - 6.2.0-32. + 6.2.0-34. . You likely do not want to install this package directly. Instead, install the one of the linux-modules-iwlwifi-generic-lpae* meta-packages, diff -u linux-6.2.0/drivers/acpi/acpi_video.c linux-6.2.0/drivers/acpi/acpi_video.c --- linux-6.2.0/drivers/acpi/acpi_video.c +++ linux-6.2.0/drivers/acpi/acpi_video.c @@ -2036,6 +2036,12 @@ if (error) goto err_put_video; + /* + * HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0 + * evaluated to have functional panel brightness control. + */ + acpi_device_fix_up_power_extended(device); + pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n", ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), video->flags.multihead ? 
"yes" : "no", diff -u linux-6.2.0/drivers/acpi/resource.c linux-6.2.0/drivers/acpi/resource.c --- linux-6.2.0/drivers/acpi/resource.c +++ linux-6.2.0/drivers/acpi/resource.c @@ -502,6 +502,17 @@ { } }; +static const struct dmi_system_id lg_laptop[] = { + { + .ident = "LG Electronics 17U70P", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), + DMI_MATCH(DMI_BOARD_NAME, "17U70P"), + }, + }, + { } +}; + struct irq_override_cmp { const struct dmi_system_id *system; unsigned char irq; @@ -518,6 +529,7 @@ { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true }, { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true }, + { lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false }, }; static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, diff -u linux-6.2.0/drivers/ata/libata-scsi.c linux-6.2.0/drivers/ata/libata-scsi.c --- linux-6.2.0/drivers/ata/libata-scsi.c +++ linux-6.2.0/drivers/ata/libata-scsi.c @@ -2715,18 +2715,36 @@ return 0; } -static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) +static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno) { - if (!sata_pmp_attached(ap)) { - if (likely(devno >= 0 && - devno < ata_link_max_devices(&ap->link))) + /* + * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case), + * or 2 (IDE master + slave case). However, the former case includes + * libsas hosted devices which are numbered per scsi host, leading + * to devno potentially being larger than 0 but with each struct + * ata_device having its own struct ata_port and struct ata_link. + * To accommodate these, ignore devno and always use device number 0. + */ + if (likely(!sata_pmp_attached(ap))) { + int link_max_devices = ata_link_max_devices(&ap->link); + + if (link_max_devices == 1) + return &ap->link.device[0]; + + if (devno < link_max_devices) return &ap->link.device[devno]; - } else { - if (likely(devno >= 0 && - devno < ap->nr_pmp_links)) - return &ap->pmp_link[devno].device[0]; + + return NULL; } + /* + * For PMP-attached devices, the device number corresponds to C + * (channel) of SCSI [H:C:I:L], indicating the port pmp link + * for the device. + */ + if (devno < ap->nr_pmp_links) + return &ap->pmp_link[devno].device[0]; + return NULL; } diff -u linux-6.2.0/drivers/base/cacheinfo.c linux-6.2.0/drivers/base/cacheinfo.c --- linux-6.2.0/drivers/base/cacheinfo.c +++ linux-6.2.0/drivers/base/cacheinfo.c @@ -285,6 +285,16 @@ continue;/* skip if itself or no cacheinfo */ for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) { sib_leaf = per_cpu_cacheinfo_idx(i, sib_index); + + /* + * Comparing cache IDs only makes sense if the leaves + * belong to the same cache level of same type. Skip + * the check if level and type do not match. + */ + if (sib_leaf->level != this_leaf->level || + sib_leaf->type != this_leaf->type) + continue; + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_set_cpu(i, &this_leaf->shared_cpu_map); @@ -316,6 +326,16 @@ for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) { sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index); + + /* + * Comparing cache IDs only makes sense if the leaves + * belong to the same cache level of same type. Skip + * the check if level and type do not match. 
+ */ + if (sib_leaf->level != this_leaf->level || + sib_leaf->type != this_leaf->type) + continue; + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); diff -u linux-6.2.0/drivers/base/cpu.c linux-6.2.0/drivers/base/cpu.c --- linux-6.2.0/drivers/base/cpu.c +++ linux-6.2.0/drivers/base/cpu.c @@ -577,6 +577,12 @@ return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + ssize_t __weak cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) { @@ -594,6 +600,7 @@ static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); +static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { @@ -608,6 +615,7 @@ &dev_attr_srbds.attr, &dev_attr_mmio_stale_data.attr, &dev_attr_retbleed.attr, + &dev_attr_spec_rstack_overflow.attr, &dev_attr_gather_data_sampling.attr, NULL }; diff -u linux-6.2.0/drivers/base/regmap/regmap.c linux-6.2.0/drivers/base/regmap/regmap.c --- linux-6.2.0/drivers/base/regmap/regmap.c +++ linux-6.2.0/drivers/base/regmap/regmap.c @@ -2064,6 +2064,8 @@ size_t val_count = val_len / val_bytes; size_t chunk_count, chunk_bytes; size_t chunk_regs = val_count; + size_t max_data = map->max_raw_write - map->format.reg_bytes - + map->format.pad_bytes; int ret, i; if (!val_count) @@ -2071,8 +2073,8 @@ if (map->use_single_write) chunk_regs = 1; - else if (map->max_raw_write && val_len > map->max_raw_write) - chunk_regs = map->max_raw_write / val_bytes; + else if (map->max_raw_write && val_len > max_data) + chunk_regs = max_data / val_bytes; chunk_count = val_count / chunk_regs; chunk_bytes = chunk_regs * val_bytes; diff -u linux-6.2.0/drivers/block/nbd.c linux-6.2.0/drivers/block/nbd.c --- linux-6.2.0/drivers/block/nbd.c +++ linux-6.2.0/drivers/block/nbd.c @@ -1666,7 +1666,7 @@ return -EIO; dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); - if (!dir) { + if (IS_ERR(dir)) { dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", nbd_name(nbd)); return -EIO; @@ -1692,7 +1692,7 @@ struct dentry *dbg_dir; dbg_dir = debugfs_create_dir("nbd", NULL); - if (!dbg_dir) + if (IS_ERR(dbg_dir)) return -EIO; nbd_dbg_dir = dbg_dir; diff -u linux-6.2.0/drivers/block/rbd.c linux-6.2.0/drivers/block/rbd.c --- linux-6.2.0/drivers/block/rbd.c +++ linux-6.2.0/drivers/block/rbd.c @@ -1334,14 +1334,30 @@ /* * Must be called after rbd_obj_calc_img_extents(). 
*/ -static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req) +static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req) { - if (!obj_req->num_img_extents || - (rbd_obj_is_entire(obj_req) && - !obj_req->img_request->snapc->num_snaps)) - return false; + rbd_assert(obj_req->img_request->snapc); - return true; + if (obj_req->img_request->op_type == OBJ_OP_DISCARD) { + dout("%s %p objno %llu discard\n", __func__, obj_req, + obj_req->ex.oe_objno); + return; + } + + if (!obj_req->num_img_extents) { + dout("%s %p objno %llu not overlapping\n", __func__, obj_req, + obj_req->ex.oe_objno); + return; + } + + if (rbd_obj_is_entire(obj_req) && + !obj_req->img_request->snapc->num_snaps) { + dout("%s %p objno %llu entire\n", __func__, obj_req, + obj_req->ex.oe_objno); + return; + } + + obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; } static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req) @@ -1442,6 +1458,7 @@ static struct ceph_osd_request * rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops) { + rbd_assert(obj_req->img_request->snapc); return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc, num_ops); } @@ -1578,15 +1595,18 @@ mutex_init(&img_request->state_mutex); } +/* + * Only snap_id is captured here, for reads. For writes, snapshot + * context is captured in rbd_img_object_requests() after exclusive + * lock is ensured to be held. + */ static void rbd_img_capture_header(struct rbd_img_request *img_req) { struct rbd_device *rbd_dev = img_req->rbd_dev; lockdep_assert_held(&rbd_dev->header_rwsem); - if (rbd_img_is_write(img_req)) - img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); - else + if (!rbd_img_is_write(img_req)) img_req->snap_id = rbd_dev->spec->snap_id; if (rbd_dev_parent_get(rbd_dev)) @@ -2233,9 +2253,6 @@ if (ret) return ret; - if (rbd_obj_copyup_enabled(obj_req)) - obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; - obj_req->write_state = RBD_OBJ_WRITE_START; return 0; } @@ -2341,8 +2358,6 @@ if (ret) return ret; - if (rbd_obj_copyup_enabled(obj_req)) - obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED; if (!obj_req->num_img_extents) { obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT; if (rbd_obj_is_entire(obj_req)) @@ -3287,6 +3302,7 @@ case RBD_OBJ_WRITE_START: rbd_assert(!*result); + rbd_obj_set_copyup_enabled(obj_req); if (rbd_obj_write_is_noop(obj_req)) return true; @@ -3473,9 +3489,19 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req) { + struct rbd_device *rbd_dev = img_req->rbd_dev; struct rbd_obj_request *obj_req; rbd_assert(!img_req->pending.result && !img_req->pending.num_pending); + rbd_assert(!need_exclusive_lock(img_req) || + __rbd_is_lock_owner(rbd_dev)); + + if (rbd_img_is_write(img_req)) { + rbd_assert(!img_req->snapc); + down_read(&rbd_dev->header_rwsem); + img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); + up_read(&rbd_dev->header_rwsem); + } for_each_obj_request(img_req, obj_req) { int result = 0; @@ -3493,7 +3519,6 @@ static bool rbd_img_advance(struct rbd_img_request *img_req, int *result) { - struct rbd_device *rbd_dev = img_req->rbd_dev; int ret; again: @@ -3514,9 +3539,6 @@ if (*result) return true; - rbd_assert(!need_exclusive_lock(img_req) || - __rbd_is_lock_owner(rbd_dev)); - rbd_img_object_requests(img_req); if (!img_req->pending.num_pending) { *result = img_req->pending.result; @@ -3978,6 +4000,10 @@ { int ret; + ret = rbd_dev_refresh(rbd_dev); + if (ret) + return ret; + if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) { ret = 
rbd_object_map_open(rbd_dev); if (ret) diff -u linux-6.2.0/drivers/block/ublk_drv.c linux-6.2.0/drivers/block/ublk_drv.c --- linux-6.2.0/drivers/block/ublk_drv.c +++ linux-6.2.0/drivers/block/ublk_drv.c @@ -1045,6 +1045,11 @@ return ubq->nr_io_ready == ubq->q_depth; } +static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags) +{ + io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags); +} + static void ublk_cancel_queue(struct ublk_queue *ubq) { int i; @@ -1056,8 +1061,8 @@ struct ublk_io *io = &ubq->ios[i]; if (io->flags & UBLK_IO_FLAG_ACTIVE) - io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, - IO_URING_F_UNLOCKED); + io_uring_cmd_complete_in_task(io->cmd, + ublk_cmd_cancel_cb); } /* all io commands are canceled */ diff -u linux-6.2.0/drivers/bluetooth/hci_qca.c linux-6.2.0/drivers/bluetooth/hci_qca.c --- linux-6.2.0/drivers/bluetooth/hci_qca.c +++ linux-6.2.0/drivers/bluetooth/hci_qca.c @@ -78,7 +78,8 @@ QCA_HW_ERROR_EVENT, QCA_SSR_TRIGGERED, QCA_BT_OFF, - QCA_ROM_FW + QCA_ROM_FW, + QCA_DEBUGFS_CREATED, }; enum qca_capabilities { @@ -635,6 +636,9 @@ if (!hdev->debugfs) return; + if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) + return; + ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); /* read only */ diff -u linux-6.2.0/drivers/char/tpm/tpm_tis_core.c linux-6.2.0/drivers/char/tpm/tpm_tis_core.c --- linux-6.2.0/drivers/char/tpm/tpm_tis_core.c +++ linux-6.2.0/drivers/char/tpm/tpm_tis_core.c @@ -805,8 +805,11 @@ int rc; u32 int_status; - if (devm_request_irq(chip->dev.parent, irq, tis_int_handler, flags, - dev_name(&chip->dev), chip) != 0) { + + rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL, + tis_int_handler, IRQF_ONESHOT | flags, + dev_name(&chip->dev), chip); + if (rc) { dev_info(&chip->dev, "Unable to request irq: %d for probe\n", irq); return -1; diff -u linux-6.2.0/drivers/char/tpm/tpm_tis_core.h linux-6.2.0/drivers/char/tpm/tpm_tis_core.h --- linux-6.2.0/drivers/char/tpm/tpm_tis_core.h +++ linux-6.2.0/drivers/char/tpm/tpm_tis_core.h @@ -84,10 +84,10 @@ #define ILB_REMAP_SIZE 0x100 enum tpm_tis_flags { - TPM_TIS_ITPM_WORKAROUND = BIT(0), - TPM_TIS_INVALID_STATUS = BIT(1), - TPM_TIS_DEFAULT_CANCELLATION = BIT(2), - TPM_TIS_IRQ_TESTED = BIT(3), + TPM_TIS_ITPM_WORKAROUND = 0, + TPM_TIS_INVALID_STATUS = 1, + TPM_TIS_DEFAULT_CANCELLATION = 2, + TPM_TIS_IRQ_TESTED = 3, }; struct tpm_tis_data { diff -u linux-6.2.0/drivers/cpufreq/intel_pstate.c linux-6.2.0/drivers/cpufreq/intel_pstate.c --- linux-6.2.0/drivers/cpufreq/intel_pstate.c +++ linux-6.2.0/drivers/cpufreq/intel_pstate.c @@ -302,6 +302,13 @@ static struct cpufreq_driver *intel_pstate_driver __read_mostly; +#define HYBRID_SCALING_FACTOR 78741 + +static inline int core_get_scaling(void) +{ + return 100000; +} + #ifdef CONFIG_ACPI static bool acpi_ppc; #endif @@ -400,6 +407,26 @@ return cppc_perf.nominal_perf; } + +static int intel_pstate_cppc_get_scaling(int cpu) +{ + struct cppc_perf_caps cppc_perf; + int ret; + + ret = cppc_get_perf_caps(cpu, &cppc_perf); + + /* + * If the nominal frequency and the nominal performance are not + * zero and the ratio between them is not 100, return the hybrid + * scaling factor. 
+ */ + if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq && + cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq) + return HYBRID_SCALING_FACTOR; + + return core_get_scaling(); +} + #else /* CONFIG_ACPI_CPPC_LIB */ static inline void intel_pstate_set_itmt_prio(int cpu) { @@ -506,6 +533,11 @@ { return -ENOTSUPP; } + +static int intel_pstate_cppc_get_scaling(int cpu) +{ + return core_get_scaling(); +} #endif /* CONFIG_ACPI_CPPC_LIB */ /** @@ -1906,11 +1938,6 @@ return ret; } -static inline int core_get_scaling(void) -{ - return 100000; -} - static u64 core_get_val(struct cpudata *cpudata, int pstate) { u64 val; @@ -1947,16 +1974,28 @@ *cpu_type = get_this_hybrid_cpu_type(); } -static int hybrid_get_cpu_scaling(int cpu) +static int hwp_get_cpu_scaling(int cpu) { u8 cpu_type = 0; smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1); /* P-cores have a smaller perf level-to-freqency scaling factor. */ if (cpu_type == 0x40) - return 78741; + return HYBRID_SCALING_FACTOR; - return core_get_scaling(); + /* Use default core scaling for E-cores */ + if (cpu_type == 0x20) + return core_get_scaling(); + + /* + * If reached here, this system is either non-hybrid (like Tiger + * Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with + * no E cores (in which case CPUID for hybrid support is 0). + * + * The CPPC nominal_frequency field is 0 for non-hybrid systems, + * so the default core scaling will be used for them. + */ + return intel_pstate_cppc_get_scaling(cpu); } static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) @@ -3413,8 +3452,7 @@ if (!default_driver) default_driver = &intel_pstate; - if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) - pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling; + pstate_funcs.get_cpu_scaling = hwp_get_cpu_scaling; goto hwp_cpu_matched; } diff -u linux-6.2.0/drivers/dma/at_xdmac.c linux-6.2.0/drivers/dma/at_xdmac.c --- linux-6.2.0/drivers/dma/at_xdmac.c +++ linux-6.2.0/drivers/dma/at_xdmac.c @@ -1026,6 +1026,8 @@ NULL, src_addr, dst_addr, xt, xt->sgl); + if (!first) + return NULL; /* Length of the block is (BLEN+1) microblocks. 
*/ for (i = 0; i < xt->numf - 1; i++) @@ -1056,8 +1058,9 @@ src_addr, dst_addr, xt, chunk); if (!desc) { - list_splice_tail_init(&first->descs_list, - &atchan->free_descs_list); + if (first) + list_splice_tail_init(&first->descs_list, + &atchan->free_descs_list); return NULL; } diff -u linux-6.2.0/drivers/firmware/arm_ffa/driver.c linux-6.2.0/drivers/firmware/arm_ffa/driver.c --- linux-6.2.0/drivers/firmware/arm_ffa/driver.c +++ linux-6.2.0/drivers/firmware/arm_ffa/driver.c @@ -424,6 +424,7 @@ ep_mem_access->flag = 0; ep_mem_access->reserved = 0; } + mem_region->handle = 0; mem_region->reserved_0 = 0; mem_region->reserved_1 = 0; mem_region->ep_count = args->nattrs; diff -u linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c --- linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2542,8 +2542,6 @@ amdgpu_fru_get_product_info(adev); init_failed: - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_release_full_gpu(adev, true); return r; } @@ -3586,6 +3584,7 @@ int r, i; bool px = false; u32 max_MBps; + int tmp; adev->shutdown = false; adev->flags = flags; @@ -3764,6 +3763,12 @@ adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *) adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags == (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64); + /* APUs w/ gfx9 onwards doesn't reply on PCIe atomics, rather it is a + * internal path natively support atomics, set have_atomics_support to true. + */ + else if ((adev->flags & AMD_IS_APU) && + (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) + adev->have_atomics_support = true; else adev->have_atomics_support = !pci_enable_atomic_ops_to_root(adev->pdev, @@ -3807,7 +3812,13 @@ } } } else { + tmp = amdgpu_reset_method; + /* It should do a default reset when loading or reloading the driver, + * regardless of the module parameter reset_method. + */ + amdgpu_reset_method = AMD_RESET_METHOD_NONE; r = amdgpu_asic_reset(adev); + amdgpu_reset_method = tmp; if (r) { dev_err(adev->dev, "asic reset on init failed\n"); goto failed; @@ -3867,18 +3878,6 @@ r = amdgpu_device_ip_init(adev); if (r) { - /* failed in exclusive mode due to timeout */ - if (amdgpu_sriov_vf(adev) && - !amdgpu_sriov_runtime(adev) && - amdgpu_virt_mmio_blocked(adev) && - !amdgpu_virt_wait_reset(adev)) { - dev_err(adev->dev, "VF exclusive mode timeout\n"); - /* Don't send request since VF is inactive. */ - adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; - adev->virt.ops = NULL; - r = -EAGAIN; - goto release_ras_con; - } dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); goto release_ras_con; @@ -3950,8 +3949,10 @@ msecs_to_jiffies(AMDGPU_RESUME_MS)); } - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_release_full_gpu(adev, true); flush_delayed_work(&adev->delayed_init_work); + } r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes); if (r) @@ -3991,6 +3992,20 @@ return 0; release_ras_con: + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_release_full_gpu(adev, true); + + /* failed in exclusive mode due to timeout */ + if (amdgpu_sriov_vf(adev) && + !amdgpu_sriov_runtime(adev) && + amdgpu_virt_mmio_blocked(adev) && + !amdgpu_virt_wait_reset(adev)) { + dev_err(adev->dev, "VF exclusive mode timeout\n"); + /* Don't send request since VF is inactive. 
*/ + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + adev->virt.ops = NULL; + r = -EAGAIN; + } amdgpu_release_ras_context(adev); failed: @@ -4206,6 +4221,7 @@ drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); cancel_delayed_work_sync(&adev->delayed_init_work); + flush_delayed_work(&adev->gfx.gfx_off_delay_work); amdgpu_ras_suspend(adev); diff -u linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c --- linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -556,6 +556,41 @@ } /** + * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether + * fence driver interrupts need to be restored. + * + * @ring: ring that to be checked + * + * Interrupts for rings that belong to GFX IP don't need to be restored + * when the target power state is s0ix. + * + * Return true if need to restore interrupts, false otherwise. + */ +static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + bool is_gfx_power_domain = false; + + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_SDMA: + /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */ + if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) + is_gfx_power_domain = true; + break; + case AMDGPU_RING_TYPE_GFX: + case AMDGPU_RING_TYPE_COMPUTE: + case AMDGPU_RING_TYPE_KIQ: + case AMDGPU_RING_TYPE_MES: + is_gfx_power_domain = true; + break; + default: + break; + } + + return !(adev->in_s0ix && is_gfx_power_domain); +} + +/** * amdgpu_fence_driver_hw_fini - tear down the fence driver * for all possible rings. * @@ -582,7 +617,9 @@ if (r) amdgpu_fence_driver_force_completion(ring); - if (ring->fence_drv.irq_src) + if (!drm_dev_is_unplugged(adev_to_drm(adev)) && + ring->fence_drv.irq_src && + amdgpu_fence_need_ring_interrupt_restore(ring)) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); @@ -657,7 +694,8 @@ continue; /* enable the interrupt */ - if (ring->fence_drv.irq_src) + if (ring->fence_drv.irq_src && + amdgpu_fence_need_ring_interrupt_restore(ring)) amdgpu_irq_get(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); } diff -u linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c --- linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -585,15 +585,8 @@ if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) { - /* If going to s2idle, no need to wait */ - if (adev->in_s0ix) { - if (!amdgpu_dpm_set_powergating_by_smu(adev, - AMD_IP_BLOCK_TYPE_GFX, true)) - adev->gfx.gfx_off_state = true; - } else { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay); - } } } else { if (adev->gfx.gfx_off_req_count == 0) { diff -u linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c --- linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -79,9 +79,10 @@ static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); + struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo; struct amdgpu_bo_vm *vmbo; + bo = shadow_bo->parent; vmbo = to_amdgpu_bo_vm(bo); /* in case amdgpu_device_recover_vram got NULL 
of bo->parent */ if (!list_empty(&vmbo->shadow_list)) { @@ -694,11 +695,6 @@ return r; *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); - INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list); - /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list - * is initialized. - */ - bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy; return r; } @@ -715,6 +711,8 @@ mutex_lock(&adev->shadow_list_lock); list_add_tail(&vmbo->shadow_list, &adev->shadow_list); + vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo); + vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy; mutex_unlock(&adev->shadow_list_lock); } diff -u linux-6.2.0/drivers/gpu/drm/amd/amdgpu/vi.c linux-6.2.0/drivers/gpu/drm/amd/amdgpu/vi.c --- linux-6.2.0/drivers/gpu/drm/amd/amdgpu/vi.c +++ linux-6.2.0/drivers/gpu/drm/amd/amdgpu/vi.c @@ -542,8 +542,15 @@ u32 reference_clock = adev->clock.spll.reference_freq; u32 tmp; - if (adev->flags & AMD_IS_APU) - return reference_clock; + if (adev->flags & AMD_IS_APU) { + switch (adev->asic_type) { + case CHIP_STONEY: + /* vbios says 48Mhz, but the actual freq is 100Mhz */ + return 10000; + default: + return reference_clock; + } + } tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK)) diff -u linux-6.2.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c linux-6.2.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c --- linux-6.2.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ linux-6.2.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2771,7 +2771,7 @@ * this is the case when traversing through already created * MST connectors, should be skipped */ - if (aconnector->dc_link->type == dc_connection_mst_branch) + if (aconnector && aconnector->mst_port) continue; mutex_lock(&aconnector->hpd_lock); @@ -6602,7 +6602,7 @@ int clock, bpp = 0; bool is_y420 = false; - if (!aconnector->port || !aconnector->dc_sink) + if (!aconnector->port) return 0; mst_port = aconnector->port; diff -u linux-6.2.0/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c linux-6.2.0/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c --- linux-6.2.0/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ linux-6.2.0/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -137,7 +137,7 @@ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_sdp_bw_after_urgent = 100.0, + .pct_ideal_sdp_bw_after_urgent = 90.0, .pct_ideal_fabric_bw_after_urgent = 67.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0, .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented diff -u linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c --- linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1531,9 +1531,9 @@ /* * For SMU 13.0.4/11, PMFW will handle the features disablement properly - * for gpu reset case. Driver involvement is unnecessary. + * for gpu reset and S0i3 cases. Driver involvement is unnecessary. 
*/ - if (amdgpu_in_reset(adev)) { + if (amdgpu_in_reset(adev) || adev->in_s0ix) { switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 4): case IP_VERSION(13, 0, 11): diff -u linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c --- linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -582,11 +582,11 @@ if (smu_power->power_context || smu_power->power_context_size != 0) return -EINVAL; - smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context), + smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context), GFP_KERNEL); if (!smu_power->power_context) return -ENOMEM; - smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context); + smu_power->power_context_size = sizeof(struct smu_13_0_power_context); return 0; } diff -u linux-6.2.0/drivers/gpu/drm/i915/display/intel_display.c linux-6.2.0/drivers/gpu/drm/i915/display/intel_display.c --- linux-6.2.0/drivers/gpu/drm/i915/display/intel_display.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_display.c @@ -330,7 +330,7 @@ return crtc_state->master_transcoder != INVALID_TRANSCODER; } -static bool +bool is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) { return crtc_state->sync_mode_slaves_mask != 0; @@ -1467,36 +1467,11 @@ intel_frontbuffer_flip(dev_priv, fb_bits); } -/* - * intel_connector_primary_encoder - get the primary encoder for a connector - * @connector: connector for which to return the encoder - * - * Returns the primary encoder for a connector. There is a 1:1 mapping from - * all connectors to their encoder, except for DP-MST connectors which have - * both a virtual and a primary encoder. These DP-MST primary encoders can be - * pointed to by as many DP-MST connectors as there are pipes. - */ -static struct intel_encoder * -intel_connector_primary_encoder(struct intel_connector *connector) -{ - struct intel_encoder *encoder; - - if (connector->mst_port) - return &dp_to_dig_port(connector->mst_port)->base; - - encoder = intel_attached_encoder(connector); - drm_WARN_ON(connector->base.dev, !encoder); - - return encoder; -} - static void intel_encoders_update_prepare(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; - struct drm_connector_state *new_conn_state; - struct drm_connector *connector; int i; /* @@ -1512,57 +1487,6 @@ new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; } } - - if (!state->modeset) - return; - - for_each_new_connector_in_state(&state->base, connector, new_conn_state, - i) { - struct intel_connector *intel_connector; - struct intel_encoder *encoder; - struct intel_crtc *crtc; - - if (!intel_connector_needs_modeset(state, connector)) - continue; - - intel_connector = to_intel_connector(connector); - encoder = intel_connector_primary_encoder(intel_connector); - if (!encoder->update_prepare) - continue; - - crtc = new_conn_state->crtc ? 
- to_intel_crtc(new_conn_state->crtc) : NULL; - encoder->update_prepare(state, encoder, crtc); - } -} - -static void intel_encoders_update_complete(struct intel_atomic_state *state) -{ - struct drm_connector_state *new_conn_state; - struct drm_connector *connector; - int i; - - if (!state->modeset) - return; - - for_each_new_connector_in_state(&state->base, connector, new_conn_state, - i) { - struct intel_connector *intel_connector; - struct intel_encoder *encoder; - struct intel_crtc *crtc; - - if (!intel_connector_needs_modeset(state, connector)) - continue; - - intel_connector = to_intel_connector(connector); - encoder = intel_connector_primary_encoder(intel_connector); - if (!encoder->update_complete) - continue; - - crtc = new_conn_state->crtc ? - to_intel_crtc(new_conn_state->crtc) : NULL; - encoder->update_complete(state, encoder, crtc); - } } static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, @@ -1924,6 +1848,8 @@ if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; + intel_dmc_enable_pipe(dev_priv, crtc->pipe); + if (!new_crtc_state->bigjoiner_pipes) { intel_encoders_pre_pll_enable(state, crtc); @@ -2052,6 +1978,8 @@ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); + + intel_disable_shared_dpll(old_crtc_state); } static void hsw_crtc_disable(struct intel_atomic_state *state, @@ -2059,6 +1987,7 @@ { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); /* * FIXME collapse everything to one hook. @@ -2068,6 +1997,20 @@ intel_encoders_disable(state, crtc); intel_encoders_post_disable(state, crtc); } + + intel_disable_shared_dpll(old_crtc_state); + + if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) { + struct intel_crtc *slave_crtc; + + intel_encoders_post_pll_disable(state, crtc); + + intel_dmc_disable_pipe(i915, crtc->pipe); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, + intel_crtc_bigjoiner_slave_pipes(old_crtc_state)) + intel_dmc_disable_pipe(i915, slave_crtc->pipe); + } } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -7155,7 +7098,6 @@ dev_priv->display.funcs.display->crtc_disable(state, crtc); crtc->active = false; intel_fbc_disable(crtc); - intel_disable_shared_dpll(old_crtc_state); if (!new_crtc_state->hw.active) intel_initial_watermarks(state, crtc); @@ -7554,8 +7496,6 @@ /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ dev_priv->display.funcs.display->commit_modeset_enables(state); - intel_encoders_update_complete(state); - if (state->modeset) intel_set_cdclk_post_plane_update(state); diff -u linux-6.2.0/drivers/gpu/drm/i915/display/intel_display.h linux-6.2.0/drivers/gpu/drm/i915/display/intel_display.h --- linux-6.2.0/drivers/gpu/drm/i915/display/intel_display.h +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_display.h @@ -255,13 +255,6 @@ I915_MAX_TC_PORTS }; -enum tc_port_mode { - TC_PORT_DISCONNECTED, - TC_PORT_TBT_ALT, - TC_PORT_DP_ALT, - TC_PORT_LEGACY, -}; - enum aux_ch { AUX_CH_A, AUX_CH_B, @@ -523,6 +516,7 @@ bool bigjoiner); enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); bool is_trans_port_sync_mode(const struct intel_crtc_state *state); +bool is_trans_port_sync_master(const struct intel_crtc_state *state); bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state); bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state); u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state); diff -u linux-6.2.0/drivers/gpu/drm/i915/display/intel_display_types.h linux-6.2.0/drivers/gpu/drm/i915/display/intel_display_types.h --- linux-6.2.0/drivers/gpu/drm/i915/display/intel_display_types.h +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_display_types.h @@ -59,6 +59,7 @@ struct intel_ddi_buf_trans; struct intel_fbc; struct intel_connector; +struct intel_tc_port; /* * Display related stuff @@ -168,9 +169,6 @@ int (*compute_config_late)(struct intel_encoder *, struct intel_crtc_state *, struct drm_connector_state *); - void (*update_prepare)(struct intel_atomic_state *, - struct intel_encoder *, - struct intel_crtc *); void (*pre_pll_enable)(struct intel_atomic_state *, struct intel_encoder *, const struct intel_crtc_state *, @@ -183,9 +181,6 @@ struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); - void (*update_complete)(struct intel_atomic_state *, - struct intel_encoder *, - struct intel_crtc *); void (*disable)(struct intel_atomic_state *, struct intel_encoder *, const struct intel_crtc_state *, @@ -237,14 +232,26 @@ * Called during system suspend after all pending requests for the * encoder are flushed (for example for DP AUX transactions) and * device interrupts are disabled. + * All modeset locks are held while the hook is called. */ void (*suspend)(struct intel_encoder *); /* + * Called without the modeset locks held after the suspend() hook for + * all encoders have been called. + */ + void (*suspend_complete)(struct intel_encoder *encoder); + /* * Called during system reboot/shutdown after all the * encoders have been disabled and suspended. + * All modeset locks are held while the hook is called. */ void (*shutdown)(struct intel_encoder *encoder); /* + * Called without the modeset locks held after the shutdown() hook for + * all encoders have been called. + */ + void (*shutdown_complete)(struct intel_encoder *encoder); + /* * Enable/disable the clock to the port. */ void (*enable_clock)(struct intel_encoder *encoder, @@ -254,6 +261,11 @@ * Returns whether the port clock is enabled or not. */ bool (*is_clock_enabled)(struct intel_encoder *encoder); + /* + * Returns the PLL type the port uses. 
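For illustration, a caller resolves the PLL type through this new hook rather than poking DPLL state directly; the consumer used later in this diff is intel_ddi_port_pll_type(), whose body is not shown here. A minimal wrapper along those lines (the name intel_port_pll_type() is made up for this sketch) could look like:

	enum icl_port_dpll_id
	intel_port_pll_type(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state)
	{
		/* Sketch only: treat a missing hook as the default (TBT) PLL. */
		if (!encoder->port_pll_type)
			return ICL_PORT_DPLL_DEFAULT;

		return encoder->port_pll_type(encoder, crtc_state);
	}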
+ */ + enum icl_port_dpll_id (*port_pll_type)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int *n_entries); @@ -1777,16 +1789,7 @@ intel_wakeref_t ddi_io_wakeref; intel_wakeref_t aux_wakeref; - struct mutex tc_lock; /* protects the TypeC port mode */ - intel_wakeref_t tc_lock_wakeref; - enum intel_display_power_domain tc_lock_power_domain; - struct delayed_work tc_disconnect_phy_work; - int tc_link_refcount; - bool tc_legacy_port:1; - char tc_port_name[8]; - enum tc_port_mode tc_mode; - enum phy_fia tc_phy_fia; - u8 tc_phy_fia_idx; + struct intel_tc_port *tc; /* protects num_hdcp_streams reference count, hdcp_port_data and hdcp_auth_status */ struct mutex hdcp_mutex; diff -u linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp.c linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp.c --- linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp.c @@ -3975,9 +3975,9 @@ return false; } -static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, - struct drm_modeset_acquire_ctx *ctx, - u8 *pipe_mask) +int intel_dp_get_active_pipes(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx, + u8 *pipe_mask) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_connector_list_iter conn_iter; @@ -3986,9 +3986,6 @@ *pipe_mask = 0; - if (!intel_dp_needs_link_retrain(intel_dp)) - return 0; - drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { struct drm_connector_state *conn_state = @@ -4022,9 +4019,6 @@ } drm_connector_list_iter_end(&conn_iter); - if (!intel_dp_needs_link_retrain(intel_dp)) - *pipe_mask = 0; - return ret; } @@ -4053,13 +4047,19 @@ if (ret) return ret; - ret = intel_dp_prep_link_retrain(intel_dp, ctx, &pipe_mask); + if (!intel_dp_needs_link_retrain(intel_dp)) + return 0; + + ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); if (ret) return ret; if (pipe_mask == 0) return 0; + if (!intel_dp_needs_link_retrain(intel_dp)) + return 0; + drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", encoder->base.base.id, encoder->base.name); diff -u linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp.h linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp.h --- linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp.h +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp.h @@ -42,6 +42,9 @@ int link_rate, int lane_count); int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int link_rate, u8 lane_count); +int intel_dp_get_active_pipes(struct intel_dp *intel_dp, + struct drm_modeset_acquire_ctx *ctx, + u8 *pipe_mask); int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx); void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode); diff -u linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp_aux.c linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp_aux.c --- linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -234,8 +234,19 @@ for (i = 0; i < ARRAY_SIZE(ch_data); i++) ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); - if (is_tc_port) + if (is_tc_port) { intel_tc_port_lock(dig_port); + /* + * Abort transfers on a disconnected port as required by + * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX + * timeouts that would otherwise happen. 
+ * TODO: abort the transfer on non-TC ports as well. + */ + if (!intel_tc_port_connected_locked(&dig_port->base)) { + ret = -ENXIO; + goto out_unlock; + } + } aux_domain = intel_aux_power_domain(dig_port); @@ -399,7 +410,7 @@ intel_pps_unlock(intel_dp, pps_wakeref); intel_display_power_put_async(i915, aux_domain, aux_wakeref); - +out_unlock: if (is_tc_port) intel_tc_port_unlock(dig_port); diff -u linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp_mst.c linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp_mst.c --- linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -457,6 +457,20 @@ intel_dp->active_mst_links); } +static void intel_mst_post_pll_disable_dp(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); + struct intel_digital_port *dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &dig_port->dp; + + if (intel_dp->active_mst_links == 0 && + dig_port->base.post_pll_disable) + dig_port->base.post_pll_disable(state, encoder, old_crtc_state, old_conn_state); +} + static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config, @@ -469,6 +483,13 @@ if (intel_dp->active_mst_links == 0) dig_port->base.pre_pll_enable(state, &dig_port->base, pipe_config, NULL); + else + /* + * The port PLL state needs to get updated for secondary + * streams as for the primary stream. + */ + intel_ddi_update_active_dpll(state, &dig_port->base, + to_intel_crtc(pipe_config->uapi.crtc)); } static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, @@ -934,6 +955,7 @@ intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; + intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp; intel_encoder->update_pipe = intel_ddi_update_pipe; intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; intel_encoder->pre_enable = intel_mst_pre_enable_dp; interdiff impossible; taking evasive action reverted: --- linux-6.2.0/drivers/gpu/drm/i915/display/intel_tc.c +++ linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_tc.c @@ -436,9 +436,9 @@ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, + "Port %s: PHY in TCCOLD, assume safe mode\n", - "Port %s: PHY in TCCOLD, assume not owned\n", dig_port->tc_port_name); + return true; - return false; } return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_tc.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_tc.c @@ -5,14 +5,66 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "intel_atomic.h" +#include "intel_ddi.h" +#include "intel_de.h" #include "intel_display.h" #include "intel_display_power_map.h" #include "intel_display_types.h" #include "intel_dkl_phy_regs.h" +#include "intel_dp.h" #include "intel_dp_mst.h" #include "intel_mg_phy_regs.h" +#include "intel_modeset_lock.h" #include "intel_tc.h" +enum tc_port_mode { + TC_PORT_DISCONNECTED, + TC_PORT_TBT_ALT, + TC_PORT_DP_ALT, + TC_PORT_LEGACY, +}; + +struct intel_tc_port; + +struct intel_tc_phy_ops { + enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc); + u32 (*hpd_live_status)(struct intel_tc_port *tc); + 
bool (*is_ready)(struct intel_tc_port *tc); + bool (*is_owned)(struct intel_tc_port *tc); + void (*get_hw_state)(struct intel_tc_port *tc); + bool (*connect)(struct intel_tc_port *tc, int required_lanes); + void (*disconnect)(struct intel_tc_port *tc); + void (*init)(struct intel_tc_port *tc); +}; + +struct intel_tc_port { + struct intel_digital_port *dig_port; + + const struct intel_tc_phy_ops *phy_ops; + + struct mutex lock; /* protects the TypeC port mode */ + intel_wakeref_t lock_wakeref; +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + enum intel_display_power_domain lock_power_domain; +#endif + struct delayed_work disconnect_phy_work; + struct delayed_work link_reset_work; + int link_refcount; + bool legacy_port:1; + char port_name[8]; + enum tc_port_mode mode; + enum tc_port_mode init_mode; + enum phy_fia phy_fia; + u8 phy_fia_idx; +}; + +static enum intel_display_power_domain +tc_phy_cold_off_domain(struct intel_tc_port *); +static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc); +static bool tc_phy_is_ready(struct intel_tc_port *tc); +static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc); + static const char *tc_port_mode_name(enum tc_port_mode mode) { static const char * const names[] = { @@ -28,13 +80,24 @@ return names[mode]; } +static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port) +{ + return dig_port->tc; +} + +static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc) +{ + return to_i915(tc->dig_port->base.base.dev); +} + static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + struct intel_tc_port *tc = to_tc_port(dig_port); - return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode; + return intel_phy_is_tc(i915, phy) && tc->mode == mode; } bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port) @@ -52,113 +115,178 @@ return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY); } +/* + * The display power domains used for TC ports depending on the + * platform and TC mode (legacy, DP-alt, TBT): + * + * POWER_DOMAIN_DISPLAY_CORE: + * -------------------------- + * ADLP/all modes: + * - TCSS/IOM access for PHY ready state. + * ADLP+/all modes: + * - DE/north-,south-HPD ISR access for HPD live state. + * + * POWER_DOMAIN_PORT_DDI_LANES_: + * ----------------------------------- + * ICL+/all modes: + * - DE/DDI_BUF access for port enabled state. + * ADLP/all modes: + * - DE/DDI_BUF access for PHY owned state. + * + * POWER_DOMAIN_AUX_USBC: + * ------------------------------------- + * ICL/legacy mode: + * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state + * - TCSS/PHY: block TC-cold power state for using the PHY AUX and + * main lanes. + * ADLP/legacy, DP-alt modes: + * - TCSS/PHY: block TC-cold power state for using the PHY AUX and + * main lanes. + * + * POWER_DOMAIN_TC_COLD_OFF: + * ------------------------- + * TGL/legacy, DP-alt modes: + * - TCSS/IOM,FIA access for PHY ready, owned and HPD live state + * - TCSS/PHY: block TC-cold power state for using the PHY AUX and + * main lanes. + * + * ICL, TGL, ADLP/TBT mode: + * - TCSS/IOM,FIA access for HPD live state + * - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN) + * AUX and main lanes. 
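To make the table above concrete: a FIA live-status read, for example, holds the mode-specific TC-cold blocking domain for the duration of the access. A minimal sketch using the helpers introduced in this file (read_fia_live_status() is a made-up name; the real readers below follow the same pattern):

	static u32 read_fia_live_status(struct intel_tc_port *tc)
	{
		struct drm_i915_private *i915 = tc_to_i915(tc);
		intel_wakeref_t wakeref;
		u32 val;

		/* Hold the TC-cold blocking domain chosen per the table above. */
		with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
			val = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));

		return val;
	}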
+ */ bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_tc_port *tc = to_tc_port(dig_port); - return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) || - IS_ALDERLAKE_P(i915); + return tc_phy_cold_off_domain(tc) == + intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); } -static enum intel_display_power_domain -tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode) +static intel_wakeref_t +__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct drm_i915_private *i915 = tc_to_i915(tc); - if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port)) - return POWER_DOMAIN_TC_COLD_OFF; + *domain = tc_phy_cold_off_domain(tc); - return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); + return intel_display_power_get(i915, *domain); } static intel_wakeref_t -tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode, - enum intel_display_power_domain *domain) +tc_cold_block(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - - *domain = tc_cold_get_power_domain(dig_port, mode); + enum intel_display_power_domain domain; + intel_wakeref_t wakeref; - return intel_display_power_get(i915, *domain); + wakeref = __tc_cold_block(tc, &domain); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + tc->lock_power_domain = domain; +#endif + return wakeref; } -static intel_wakeref_t -tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain) +static void +__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain, + intel_wakeref_t wakeref) { - return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain); + struct drm_i915_private *i915 = tc_to_i915(tc); + + intel_display_power_put(i915, domain, wakeref); } static void -tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain, - intel_wakeref_t wakeref) +tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc); - /* - * wakeref == -1, means some error happened saving save_depot_stack but - * power should still be put down and 0 is a invalid save_depot_stack - * id so can be used to skip it for non TC legacy ports. 
- */ - if (wakeref == 0) - return; +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain); +#endif + __tc_cold_unblock(tc, domain, wakeref); +} - intel_display_power_put(i915, domain, wakeref); +static void +assert_display_core_power_enabled(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + + drm_WARN_ON(&i915->drm, + !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE)); } static void -assert_tc_cold_blocked(struct intel_digital_port *dig_port) +assert_tc_cold_blocked(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct drm_i915_private *i915 = tc_to_i915(tc); bool enabled; enabled = intel_display_power_is_enabled(i915, - tc_cold_get_power_domain(dig_port, - dig_port->tc_mode)); + tc_phy_cold_off_domain(tc)); drm_WARN_ON(&i915->drm, !enabled); } +static enum intel_display_power_domain +tc_port_power_domain(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); + + return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1; +} + +static void +assert_tc_port_power_enabled(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + + drm_WARN_ON(&i915->drm, + !intel_display_power_is_enabled(i915, tc_port_power_domain(tc))); +} + u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_uncore *uncore = &i915->uncore; + struct intel_tc_port *tc = to_tc_port(dig_port); u32 lane_mask; - lane_mask = intel_uncore_read(uncore, - PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); + lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff); - assert_tc_cold_blocked(dig_port); + assert_tc_cold_blocked(tc); - lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx); - return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); + lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx); + return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx); } u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_uncore *uncore = &i915->uncore; + struct intel_tc_port *tc = to_tc_port(dig_port); u32 pin_mask; - pin_mask = intel_uncore_read(uncore, - PORT_TX_DFLEXPA1(dig_port->tc_phy_fia)); + pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia)); drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff); - assert_tc_cold_blocked(dig_port); + assert_tc_cold_blocked(tc); - return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >> - DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); + return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >> + DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx); } int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_tc_port *tc = to_tc_port(dig_port); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); intel_wakeref_t wakeref; u32 lane_mask; - if (dig_port->tc_mode != TC_PORT_DP_ALT) + if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT) return 4; - assert_tc_cold_blocked(dig_port); + assert_tc_cold_blocked(tc); lane_mask = 0; with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) @@ -185,48 +313,51 @@ int 
required_lanes) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_tc_port *tc = to_tc_port(dig_port); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - struct intel_uncore *uncore = &i915->uncore; u32 val; drm_WARN_ON(&i915->drm, - lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); + lane_reversal && tc->mode != TC_PORT_LEGACY); - assert_tc_cold_blocked(dig_port); + assert_tc_cold_blocked(tc); - val = intel_uncore_read(uncore, - PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia)); - val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx); + val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia)); + val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx); switch (required_lanes) { case 1: val |= lane_reversal ? - DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) : - DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx); + DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) : + DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx); break; case 2: val |= lane_reversal ? - DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) : - DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx); + DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) : + DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx); break; case 4: - val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx); + val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx); break; default: MISSING_CASE(required_lanes); } - intel_uncore_write(uncore, - PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val); + intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val); } -static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, +static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc, u32 live_status_mask) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct drm_i915_private *i915 = tc_to_i915(tc); u32 valid_hpd_mask; - if (dig_port->tc_legacy_port) + drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); + + if (hweight32(live_status_mask) != 1) + return; + + if (tc->legacy_port) valid_hpd_mask = BIT(TC_PORT_LEGACY); else valid_hpd_mask = BIT(TC_PORT_DP_ALT) | @@ -238,83 +369,79 @@ /* If live status mismatches the VBT flag, trust the live status. */ drm_dbg_kms(&i915->drm, "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n", - dig_port->tc_port_name, live_status_mask, valid_hpd_mask); + tc->port_name, live_status_mask, valid_hpd_mask); - dig_port->tc_legacy_port = !dig_port->tc_legacy_port; + tc->legacy_port = !tc->legacy_port; } -static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port) +static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_uncore *uncore = &i915->uncore; - u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; - u32 mask = 0; - u32 val; - - val = intel_uncore_read(uncore, - PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); + struct drm_i915_private *i915 = tc_to_i915(tc); + enum port port = tc->dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(i915, port); - if (val == 0xffffffff) { - drm_dbg_kms(&i915->drm, - "Port %s: PHY in TCCOLD, nothing connected\n", - dig_port->tc_port_name); - return mask; + /* + * Each Modular FIA instance houses 2 TC ports. In SOC that has more + * than two TC ports, there are multiple instances of Modular FIA. 
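A worked instance of the mapping implemented just below, assuming (as elsewhere in i915) that FIA1 and TC_PORT_1 are the zeroth values of their enums:

	tc_port = 0 (TC_PORT_1): phy_fia = 0 / 2 = 0 (FIA1), phy_fia_idx = 0 % 2 = 0
	tc_port = 3 (TC_PORT_4): phy_fia = 3 / 2 = 1 (FIA2), phy_fia_idx = 3 % 2 = 1

On a SoC without modular FIA everything stays on FIA1 and the index is simply the TC port number.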
+ */ + if (modular_fia) { + tc->phy_fia = tc_port / 2; + tc->phy_fia_idx = tc_port % 2; + } else { + tc->phy_fia = FIA1; + tc->phy_fia_idx = tc_port; } +} - if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx)) - mask |= BIT(TC_PORT_TBT_ALT); - if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx)) - mask |= BIT(TC_PORT_DP_ALT); - - if (intel_uncore_read(uncore, SDEISR) & isr_bit) - mask |= BIT(TC_PORT_LEGACY); +/* + * ICL TC PHY handlers + * ------------------- + */ +static enum intel_display_power_domain +icl_tc_phy_cold_off_domain(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; - /* The sink can be connected only in a single mode. */ - if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1)) - tc_port_fixup_legacy_flag(dig_port, mask); + if (tc->legacy_port) + return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); - return mask; + return POWER_DOMAIN_TC_COLD_OFF; } -static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port) +static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin]; - struct intel_uncore *uncore = &i915->uncore; - u32 val, mask = 0; + intel_wakeref_t wakeref; + u32 fia_isr; + u32 pch_isr; + u32 mask = 0; - /* - * On ADL-P HW/FW will wake from TCCOLD to complete the read access of - * registers in IOM. Note that this doesn't apply to PHY and FIA - * registers. - */ - val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port)); - if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT) - mask |= BIT(TC_PORT_DP_ALT); - if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT) + with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) { + fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia)); + pch_isr = intel_de_read(i915, SDEISR); + } + + if (fia_isr == 0xffffffff) { + drm_dbg_kms(&i915->drm, + "Port %s: PHY in TCCOLD, nothing connected\n", + tc->port_name); + return mask; + } + + if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx)) mask |= BIT(TC_PORT_TBT_ALT); + if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx)) + mask |= BIT(TC_PORT_DP_ALT); - if (intel_uncore_read(uncore, SDEISR) & isr_bit) + if (pch_isr & isr_bit) mask |= BIT(TC_PORT_LEGACY); - /* The sink can be connected only in a single mode. */ - if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1)) - tc_port_fixup_legacy_flag(dig_port, mask); - return mask; } -static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - - if (IS_ALDERLAKE_P(i915)) - return adl_tc_port_live_status_mask(dig_port); - - return icl_tc_port_live_status_mask(dig_port); -} - /* * Return the PHY status complete flag indicating that display can acquire the * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink @@ -323,146 +450,80 @@ * owned by the TBT subsystem and so switching the ownership to display is not * required. 
*/ -static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) +static bool icl_tc_phy_is_ready(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_uncore *uncore = &i915->uncore; + struct drm_i915_private *i915 = tc_to_i915(tc); u32 val; - val = intel_uncore_read(uncore, - PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia)); - if (val == 0xffffffff) { - drm_dbg_kms(&i915->drm, - "Port %s: PHY in TCCOLD, assuming not complete\n", - dig_port->tc_port_name); - return false; - } - - return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx); -} - -/* - * Return the PHY status complete flag indicating that display can acquire the - * PHY ownership. The IOM firmware sets this flag when it's ready to switch - * the ownership to display, regardless of what sink is connected (TBT-alt, - * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT - * subsystem and so switching the ownership to display is not required. - */ -static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); - struct intel_uncore *uncore = &i915->uncore; - u32 val; + assert_tc_cold_blocked(tc); - val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port)); + val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, - "Port %s: PHY in TCCOLD, assuming not complete\n", - dig_port->tc_port_name); + "Port %s: PHY in TCCOLD, assuming not ready\n", + tc->port_name); return false; } - return val & TCSS_DDI_STATUS_READY; + return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx); } -static bool tc_phy_status_complete(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - - if (IS_ALDERLAKE_P(i915)) - return adl_tc_phy_status_complete(dig_port); - - return icl_tc_phy_status_complete(dig_port); -} - -static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port, +static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc, bool take) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_uncore *uncore = &i915->uncore; + struct drm_i915_private *i915 = tc_to_i915(tc); u32 val; - val = intel_uncore_read(uncore, - PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); + assert_tc_cold_blocked(tc); + + val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, can't %s ownership\n", - dig_port->tc_port_name, take ? "take" : "release"); + tc->port_name, take ? "take" : "release"); return false; } - val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); if (take) - val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); + val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); - intel_uncore_write(uncore, - PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val); + intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val); return true; } -static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port, - bool take) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_uncore *uncore = &i915->uncore; - enum port port = dig_port->base.port; - - intel_uncore_rmw(uncore, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP, - take ? 
DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0); - - return true; -} - -static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - - if (IS_ALDERLAKE_P(i915)) - return adl_tc_phy_take_ownership(dig_port, take); - - return icl_tc_phy_take_ownership(dig_port, take); -} - -static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port) +static bool icl_tc_phy_is_owned(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_uncore *uncore = &i915->uncore; + struct drm_i915_private *i915 = tc_to_i915(tc); u32 val; - val = intel_uncore_read(uncore, - PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); + assert_tc_cold_blocked(tc); + + val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, - "Port %s: PHY in TCCOLD, assume safe mode\n", - dig_port->tc_port_name); - return true; + "Port %s: PHY in TCCOLD, assume not owned\n", + tc->port_name); + return false; } - return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); + return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx); } -static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port) +static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_uncore *uncore = &i915->uncore; - enum port port = dig_port->base.port; - u32 val; - - val = intel_uncore_read(uncore, DDI_BUF_CTL(port)); - return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP; -} + enum intel_display_power_domain domain; + intel_wakeref_t tc_cold_wref; -static bool tc_phy_is_owned(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + tc_cold_wref = __tc_cold_block(tc, &domain); - if (IS_ALDERLAKE_P(i915)) - return adl_tc_phy_is_owned(dig_port); + tc->mode = tc_phy_get_current_mode(tc); + if (tc->mode != TC_PORT_DISCONNECTED) + tc->lock_wakeref = tc_cold_block(tc); - return icl_tc_phy_is_owned(dig_port); + __tc_cold_unblock(tc, domain, tc_cold_wref); } /* @@ -476,151 +537,590 @@ * connect and disconnect to cleanly transfer ownership with the controller and * set the type-C power state. 
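The per-platform handlers in this file are collected into the icl_tc_phy_ops, tgl_tc_phy_ops and adlp_tc_phy_ops tables defined further down. The code that points tc->phy_ops at one of them is not part of this hunk; presumably the port-init path selects it roughly as follows (an assumption for orientation, not the actual change):

	/* Assumed vtable selection; the real assignment is outside this hunk. */
	if (IS_ALDERLAKE_P(i915))
		tc->phy_ops = &adlp_tc_phy_ops;
	else if (DISPLAY_VER(i915) >= 12)
		tc->phy_ops = &tgl_tc_phy_ops;
	else
		tc->phy_ops = &icl_tc_phy_ops;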
*/ -static void icl_tc_phy_connect(struct intel_digital_port *dig_port, - int required_lanes) +static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc, + int required_lanes) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - u32 live_status_mask; + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; int max_lanes; - if (!tc_phy_status_complete(dig_port)) { - drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n", - dig_port->tc_port_name); - goto out_set_tbt_alt_mode; - } - - live_status_mask = tc_port_live_status_mask(dig_port); - if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY))) && - !dig_port->tc_legacy_port) { - drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n", - dig_port->tc_port_name, live_status_mask); - goto out_set_tbt_alt_mode; - } - - if (!tc_phy_take_ownership(dig_port, true) && - !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port)) - goto out_set_tbt_alt_mode; - max_lanes = intel_tc_port_fia_max_lane_count(dig_port); - if (dig_port->tc_legacy_port) { + if (tc->mode == TC_PORT_LEGACY) { drm_WARN_ON(&i915->drm, max_lanes != 4); - dig_port->tc_mode = TC_PORT_LEGACY; - - return; + return true; } + drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT); + /* * Now we have to re-check the live state, in case the port recently * became disconnected. Not necessary for legacy mode. */ - if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) { + if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) { drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n", - dig_port->tc_port_name); - goto out_release_phy; + tc->port_name); + return false; } if (max_lanes < required_lanes) { drm_dbg_kms(&i915->drm, "Port %s: PHY max lanes %d < required lanes %d\n", - dig_port->tc_port_name, + tc->port_name, max_lanes, required_lanes); - goto out_release_phy; + return false; } - dig_port->tc_mode = TC_PORT_DP_ALT; + return true; +} + +static bool icl_tc_phy_connect(struct intel_tc_port *tc, + int required_lanes) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + + tc->lock_wakeref = tc_cold_block(tc); + + if (tc->mode == TC_PORT_TBT_ALT) + return true; + + if ((!tc_phy_is_ready(tc) || + !icl_tc_phy_take_ownership(tc, true)) && + !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { + drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n", + tc->port_name, + str_yes_no(tc_phy_is_ready(tc))); + goto out_unblock_tc_cold; + } - return; + + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) + goto out_release_phy; + + return true; out_release_phy: - tc_phy_take_ownership(dig_port, false); -out_set_tbt_alt_mode: - dig_port->tc_mode = TC_PORT_TBT_ALT; + icl_tc_phy_take_ownership(tc, false); +out_unblock_tc_cold: + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); + + return false; } /* * See the comment at the connect function. This implements the Disconnect * Flow. 
*/ -static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) +static void icl_tc_phy_disconnect(struct intel_tc_port *tc) { - switch (dig_port->tc_mode) { + switch (tc->mode) { case TC_PORT_LEGACY: case TC_PORT_DP_ALT: - tc_phy_take_ownership(dig_port, false); + icl_tc_phy_take_ownership(tc, false); fallthrough; case TC_PORT_TBT_ALT: - dig_port->tc_mode = TC_PORT_DISCONNECTED; - fallthrough; - case TC_PORT_DISCONNECTED: + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); break; default: - MISSING_CASE(dig_port->tc_mode); + MISSING_CASE(tc->mode); } } -static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port) +static void icl_tc_phy_init(struct intel_tc_port *tc) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + tc_phy_load_fia_params(tc, false); +} + +static const struct intel_tc_phy_ops icl_tc_phy_ops = { + .cold_off_domain = icl_tc_phy_cold_off_domain, + .hpd_live_status = icl_tc_phy_hpd_live_status, + .is_ready = icl_tc_phy_is_ready, + .is_owned = icl_tc_phy_is_owned, + .get_hw_state = icl_tc_phy_get_hw_state, + .connect = icl_tc_phy_connect, + .disconnect = icl_tc_phy_disconnect, + .init = icl_tc_phy_init, +}; + +/* + * TGL TC PHY handlers + * ------------------- + */ +static enum intel_display_power_domain +tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc) +{ + return POWER_DOMAIN_TC_COLD_OFF; +} + +static void tgl_tc_phy_init(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + intel_wakeref_t wakeref; + u32 val; + + with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) + val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1)); + + drm_WARN_ON(&i915->drm, val == 0xffffffff); + + tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK); +} + +static const struct intel_tc_phy_ops tgl_tc_phy_ops = { + .cold_off_domain = tgl_tc_phy_cold_off_domain, + .hpd_live_status = icl_tc_phy_hpd_live_status, + .is_ready = icl_tc_phy_is_ready, + .is_owned = icl_tc_phy_is_owned, + .get_hw_state = icl_tc_phy_get_hw_state, + .connect = icl_tc_phy_connect, + .disconnect = icl_tc_phy_disconnect, + .init = tgl_tc_phy_init, +}; - if (!tc_phy_status_complete(dig_port)) { - drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n", - dig_port->tc_port_name); - return dig_port->tc_mode == TC_PORT_TBT_ALT; +/* + * ADLP TC PHY handlers + * -------------------- + */ +static enum intel_display_power_domain +adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; + + if (tc->mode != TC_PORT_TBT_ALT) + return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); + + return POWER_DOMAIN_TC_COLD_OFF; +} + +static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; + enum hpd_pin hpd_pin = dig_port->base.hpd_pin; + u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin]; + u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin]; + intel_wakeref_t wakeref; + u32 cpu_isr; + u32 pch_isr; + u32 mask = 0; + + with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) { + cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR); + pch_isr = intel_de_read(i915, SDEISR); } - /* On ADL-P the PHY complete flag is set in TBT mode as well. 
*/ - if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT) - return true; + if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK)) + mask |= BIT(TC_PORT_DP_ALT); + if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK)) + mask |= BIT(TC_PORT_TBT_ALT); + + if (pch_isr & pch_isr_bit) + mask |= BIT(TC_PORT_LEGACY); + + return mask; +} + +/* + * Return the PHY status complete flag indicating that display can acquire the + * PHY ownership. The IOM firmware sets this flag when it's ready to switch + * the ownership to display, regardless of what sink is connected (TBT-alt, + * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT + * subsystem and so switching the ownership to display is not required. + */ +static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port); + u32 val; - if (!tc_phy_is_owned(dig_port)) { - drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n", - dig_port->tc_port_name); + assert_display_core_power_enabled(tc); + val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port)); + if (val == 0xffffffff) { + drm_dbg_kms(&i915->drm, + "Port %s: PHY in TCCOLD, assuming not ready\n", + tc->port_name); return false; } - return dig_port->tc_mode == TC_PORT_DP_ALT || - dig_port->tc_mode == TC_PORT_LEGACY; + return val & TCSS_DDI_STATUS_READY; +} + +static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc, + bool take) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum port port = tc->dig_port->base.port; + + assert_tc_port_power_enabled(tc); + + intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP, + take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0); + + return true; +} + +static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum port port = tc->dig_port->base.port; + u32 val; + + assert_tc_port_power_enabled(tc); + + val = intel_de_read(i915, DDI_BUF_CTL(port)); + return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP; +} + +static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum intel_display_power_domain port_power_domain = + tc_port_power_domain(tc); + intel_wakeref_t port_wakeref; + + port_wakeref = intel_display_power_get(i915, port_power_domain); + + tc->mode = tc_phy_get_current_mode(tc); + if (tc->mode != TC_PORT_DISCONNECTED) + tc->lock_wakeref = tc_cold_block(tc); + + intel_display_power_put(i915, port_power_domain, port_wakeref); +} + +static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum intel_display_power_domain port_power_domain = + tc_port_power_domain(tc); + intel_wakeref_t port_wakeref; + + if (tc->mode == TC_PORT_TBT_ALT) { + tc->lock_wakeref = tc_cold_block(tc); + return true; + } + + port_wakeref = intel_display_power_get(i915, port_power_domain); + + if (!adlp_tc_phy_take_ownership(tc, true) && + !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { + drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n", + tc->port_name); + goto out_put_port_power; + } + + if (!tc_phy_is_ready(tc) && + !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) { + drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n", + tc->port_name); + goto out_release_phy; + } + + tc->lock_wakeref = tc_cold_block(tc); + + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) + goto out_unblock_tc_cold; + + 
intel_display_power_put(i915, port_power_domain, port_wakeref); + + return true; + +out_unblock_tc_cold: + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); +out_release_phy: + adlp_tc_phy_take_ownership(tc, false); +out_put_port_power: + intel_display_power_put(i915, port_power_domain, port_wakeref); + + return false; +} + +static void adlp_tc_phy_disconnect(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum intel_display_power_domain port_power_domain = + tc_port_power_domain(tc); + intel_wakeref_t port_wakeref; + + port_wakeref = intel_display_power_get(i915, port_power_domain); + + tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref)); + + switch (tc->mode) { + case TC_PORT_LEGACY: + case TC_PORT_DP_ALT: + adlp_tc_phy_take_ownership(tc, false); + fallthrough; + case TC_PORT_TBT_ALT: + break; + default: + MISSING_CASE(tc->mode); + } + + intel_display_power_put(i915, port_power_domain, port_wakeref); +} + +static void adlp_tc_phy_init(struct intel_tc_port *tc) +{ + tc_phy_load_fia_params(tc, true); +} + +static const struct intel_tc_phy_ops adlp_tc_phy_ops = { + .cold_off_domain = adlp_tc_phy_cold_off_domain, + .hpd_live_status = adlp_tc_phy_hpd_live_status, + .is_ready = adlp_tc_phy_is_ready, + .is_owned = adlp_tc_phy_is_owned, + .get_hw_state = adlp_tc_phy_get_hw_state, + .connect = adlp_tc_phy_connect, + .disconnect = adlp_tc_phy_disconnect, + .init = adlp_tc_phy_init, +}; + +/* + * Generic TC PHY handlers + * ----------------------- + */ +static enum intel_display_power_domain +tc_phy_cold_off_domain(struct intel_tc_port *tc) +{ + return tc->phy_ops->cold_off_domain(tc); +} + +static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + u32 mask; + + mask = tc->phy_ops->hpd_live_status(tc); + + /* The sink can be connected only in a single mode. */ + drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1); + + return mask; +} + +static bool tc_phy_is_ready(struct intel_tc_port *tc) +{ + return tc->phy_ops->is_ready(tc); +} + +static bool tc_phy_is_owned(struct intel_tc_port *tc) +{ + return tc->phy_ops->is_owned(tc); +} + +static void tc_phy_get_hw_state(struct intel_tc_port *tc) +{ + tc->phy_ops->get_hw_state(tc); +} + +static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, + bool phy_is_ready, bool phy_is_owned) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + + drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready); + + return phy_is_ready && phy_is_owned; +} + +static bool tc_phy_is_connected(struct intel_tc_port *tc, + enum icl_port_dpll_id port_pll_type) +{ + struct intel_encoder *encoder = &tc->dig_port->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + bool phy_is_ready = tc_phy_is_ready(tc); + bool phy_is_owned = tc_phy_is_owned(tc); + bool is_connected; + + if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) + is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY; + else + is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; + + drm_dbg_kms(&i915->drm, + "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n", + tc->port_name, + str_yes_no(is_connected), + str_yes_no(phy_is_ready), + str_yes_no(phy_is_owned), + port_pll_type == ICL_PORT_DPLL_DEFAULT ? 
"tbt" : "non-tbt"); + + return is_connected; +} + +static void tc_phy_wait_for_ready(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + + if (wait_for(tc_phy_is_ready(tc), 100)) + drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n", + tc->port_name); } static enum tc_port_mode -intel_tc_port_get_current_mode(struct intel_digital_port *dig_port) +hpd_mask_to_tc_mode(u32 live_status_mask) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - u32 live_status_mask = tc_port_live_status_mask(dig_port); - enum tc_port_mode mode; + if (live_status_mask) + return fls(live_status_mask) - 1; + + return TC_PORT_DISCONNECTED; +} + +static enum tc_port_mode +tc_phy_hpd_live_mode(struct intel_tc_port *tc) +{ + u32 live_status_mask = tc_phy_hpd_live_status(tc); + + return hpd_mask_to_tc_mode(live_status_mask); +} + +static enum tc_port_mode +get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc, + enum tc_port_mode live_mode) +{ + switch (live_mode) { + case TC_PORT_LEGACY: + case TC_PORT_DP_ALT: + return live_mode; + default: + MISSING_CASE(live_mode); + fallthrough; + case TC_PORT_TBT_ALT: + case TC_PORT_DISCONNECTED: + if (tc->legacy_port) + return TC_PORT_LEGACY; + else + return TC_PORT_DP_ALT; + } +} - if (!tc_phy_is_owned(dig_port) || - drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port))) +static enum tc_port_mode +get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc, + enum tc_port_mode live_mode) +{ + switch (live_mode) { + case TC_PORT_LEGACY: + return TC_PORT_DISCONNECTED; + case TC_PORT_DP_ALT: + case TC_PORT_TBT_ALT: return TC_PORT_TBT_ALT; + default: + MISSING_CASE(live_mode); + fallthrough; + case TC_PORT_DISCONNECTED: + if (tc->legacy_port) + return TC_PORT_DISCONNECTED; + else + return TC_PORT_TBT_ALT; + } +} + +static enum tc_port_mode +tc_phy_get_current_mode(struct intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc); + bool phy_is_ready; + bool phy_is_owned; + enum tc_port_mode mode; - mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT; - if (live_status_mask) { - enum tc_port_mode live_mode = fls(live_status_mask) - 1; + /* + * For legacy ports the IOM firmware initializes the PHY during boot-up + * and system resume whether or not a sink is connected. Wait here for + * the initialization to get ready. 
+ */ + if (tc->legacy_port) + tc_phy_wait_for_ready(tc); - if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT)) - mode = live_mode; + phy_is_ready = tc_phy_is_ready(tc); + phy_is_owned = tc_phy_is_owned(tc); + + if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { + mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); + } else { + drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT); + mode = get_tc_mode_in_phy_owned_state(tc, live_mode); } + drm_dbg_kms(&i915->drm, + "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n", + tc->port_name, + tc_port_mode_name(mode), + str_yes_no(phy_is_ready), + str_yes_no(phy_is_owned), + tc_port_mode_name(live_mode)); + return mode; } +static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc) +{ + if (tc->legacy_port) + return TC_PORT_LEGACY; + + return TC_PORT_TBT_ALT; +} + static enum tc_port_mode -intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) +hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask) { - u32 live_status_mask = tc_port_live_status_mask(dig_port); + enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask); - if (live_status_mask) - return fls(live_status_mask) - 1; + if (mode != TC_PORT_DISCONNECTED) + return mode; - return TC_PORT_TBT_ALT; + return default_tc_mode(tc); +} + +static enum tc_port_mode +tc_phy_get_target_mode(struct intel_tc_port *tc) +{ + u32 live_status_mask = tc_phy_hpd_live_status(tc); + + return hpd_mask_to_target_mode(tc, live_status_mask); } -static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, +static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + u32 live_status_mask = tc_phy_hpd_live_status(tc); + bool connected; + + tc_port_fixup_legacy_flag(tc, live_status_mask); + + tc->mode = hpd_mask_to_target_mode(tc, live_status_mask); + + connected = tc->phy_ops->connect(tc, required_lanes); + if (!connected && tc->mode != default_tc_mode(tc)) { + tc->mode = default_tc_mode(tc); + connected = tc->phy_ops->connect(tc, required_lanes); + } + + drm_WARN_ON(&i915->drm, !connected); +} + +static void tc_phy_disconnect(struct intel_tc_port *tc) +{ + if (tc->mode != TC_PORT_DISCONNECTED) { + tc->phy_ops->disconnect(tc); + tc->mode = TC_PORT_DISCONNECTED; + } +} + +static void tc_phy_init(struct intel_tc_port *tc) +{ + mutex_lock(&tc->lock); + tc->phy_ops->init(tc); + mutex_unlock(&tc->lock); +} + +static void intel_tc_port_reset_mode(struct intel_tc_port *tc, int required_lanes, bool force_disconnect) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port_mode old_tc_mode = dig_port->tc_mode; + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; + enum tc_port_mode old_tc_mode = tc->mode; intel_display_power_flush_work(i915); if (!intel_tc_cold_requires_aux_pw(dig_port)) { @@ -632,58 +1132,48 @@ drm_WARN_ON(&i915->drm, aux_powered); } - icl_tc_phy_disconnect(dig_port); + tc_phy_disconnect(tc); if (!force_disconnect) - icl_tc_phy_connect(dig_port, required_lanes); + tc_phy_connect(tc, required_lanes); drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n", - dig_port->tc_port_name, + tc->port_name, tc_port_mode_name(old_tc_mode), - tc_port_mode_name(dig_port->tc_mode)); + tc_port_mode_name(tc->mode)); } -static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port) +static bool intel_tc_port_needs_reset(struct intel_tc_port *tc) { - return 
intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode; + return tc_phy_get_target_mode(tc) != tc->mode; } -static void intel_tc_port_update_mode(struct intel_digital_port *dig_port, +static void intel_tc_port_update_mode(struct intel_tc_port *tc, int required_lanes, bool force_disconnect) { - enum intel_display_power_domain domain; - intel_wakeref_t wref; - bool needs_reset = force_disconnect; - - if (!needs_reset) { - /* Get power domain required to check the hotplug live status. */ - wref = tc_cold_block(dig_port, &domain); - needs_reset = intel_tc_port_needs_reset(dig_port); - tc_cold_unblock(dig_port, domain, wref); - } - - if (!needs_reset) - return; - - /* Get power domain required for resetting the mode. */ - wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain); - - intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect); + if (force_disconnect || + intel_tc_port_needs_reset(tc)) + intel_tc_port_reset_mode(tc, required_lanes, force_disconnect); +} - /* Get power domain matching the new mode after reset. */ - tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain, - fetch_and_zero(&dig_port->tc_lock_wakeref)); - if (dig_port->tc_mode != TC_PORT_DISCONNECTED) - dig_port->tc_lock_wakeref = tc_cold_block(dig_port, - &dig_port->tc_lock_power_domain); +static void __intel_tc_port_get_link(struct intel_tc_port *tc) +{ + tc->link_refcount++; +} - tc_cold_unblock(dig_port, domain, wref); +static void __intel_tc_port_put_link(struct intel_tc_port *tc) +{ + tc->link_refcount--; } -static void -intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port, - int refcount) +static bool tc_port_is_enabled(struct intel_tc_port *tc) { - dig_port->tc_link_refcount = refcount; + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; + + assert_tc_port_power_enabled(tc); + + return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) & + DDI_BUF_CTL_ENABLE; } /** @@ -696,85 +1186,119 @@ void intel_tc_port_init_mode(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - intel_wakeref_t tc_cold_wref; - enum intel_display_power_domain domain; + struct intel_tc_port *tc = to_tc_port(dig_port); + bool update_mode = false; + + mutex_lock(&tc->lock); - mutex_lock(&dig_port->tc_lock); + drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED); + drm_WARN_ON(&i915->drm, tc->lock_wakeref); + drm_WARN_ON(&i915->drm, tc->link_refcount); - drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED); - drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref); - drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount); + tc_phy_get_hw_state(tc); + /* + * Save the initial mode for the state check in + * intel_tc_port_sanitize_mode(). + */ + tc->init_mode = tc->mode; - tc_cold_wref = tc_cold_block(dig_port, &domain); + /* + * The PHY needs to be connected for AUX to work during HW readout and + * MST topology resume, but the PHY mode can only be changed if the + * port is disabled. + * + * An exception is the case where BIOS leaves the PHY incorrectly + * disconnected on an enabled legacy port. Work around that by + * connecting the PHY even though the port is enabled. This doesn't + * cause a problem as the PHY ownership state is ignored by the + * IOM/TCSS firmware (only display can own the PHY in that case). 
+ */ + if (!tc_port_is_enabled(tc)) { + update_mode = true; + } else if (tc->mode == TC_PORT_DISCONNECTED) { + drm_WARN_ON(&i915->drm, !tc->legacy_port); + drm_err(&i915->drm, + "Port %s: PHY disconnected on enabled port, connecting it\n", + tc->port_name); + update_mode = true; + } + + if (update_mode) + intel_tc_port_update_mode(tc, 1, false); + + /* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */ + __intel_tc_port_get_link(tc); + + mutex_unlock(&tc->lock); +} - dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); - /* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */ - intel_tc_port_link_init_refcount(dig_port, 1); - dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain); +static bool tc_port_has_active_links(struct intel_tc_port *tc, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; + enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT; + int active_links = 0; - tc_cold_unblock(dig_port, domain, tc_cold_wref); + if (dig_port->dp.is_mst) { + /* TODO: get the PLL type for MST, once HW readout is done for it. */ + active_links = intel_dp_mst_encoder_active_links(dig_port); + } else if (crtc_state && crtc_state->hw.active) { + pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state); + active_links = 1; + } - drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n", - dig_port->tc_port_name, - tc_port_mode_name(dig_port->tc_mode)); + if (active_links && !tc_phy_is_connected(tc, pll_type)) + drm_err(&i915->drm, + "Port %s: PHY disconnected with %d active link(s)\n", + tc->port_name, active_links); - mutex_unlock(&dig_port->tc_lock); + return active_links; } /** * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode * @dig_port: digital port + * @crtc_state: atomic state of CRTC connected to @dig_port * * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver * loading and system resume: * If the encoder is enabled keep the TypeC mode/PHY connected state locked until * the encoder is disabled. * If the encoder is disabled make sure the PHY is disconnected. + * @crtc_state is valid if @dig_port is enabled, NULL otherwise. */ -void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port) +void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - struct intel_encoder *encoder = &dig_port->base; - int active_links = 0; + struct intel_tc_port *tc = to_tc_port(dig_port); - mutex_lock(&dig_port->tc_lock); + mutex_lock(&tc->lock); - if (dig_port->dp.is_mst) - active_links = intel_dp_mst_encoder_active_links(dig_port); - else if (encoder->base.crtc) - active_links = to_intel_crtc(encoder->base.crtc)->active; - - drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1); - intel_tc_port_link_init_refcount(dig_port, active_links); - - if (active_links) { - if (!icl_tc_phy_is_connected(dig_port)) - drm_dbg_kms(&i915->drm, - "Port %s: PHY disconnected with %d active link(s)\n", - dig_port->tc_port_name, active_links); - } else { + drm_WARN_ON(&i915->drm, tc->link_refcount != 1); + if (!tc_port_has_active_links(tc, crtc_state)) { /* * TBT-alt is the default mode in any case the PHY ownership is not * held (regardless of the sink's connected live state), so * we'll just switch to disconnected mode from it here without * a note. 
*/ - if (dig_port->tc_mode != TC_PORT_TBT_ALT) + if (tc->init_mode != TC_PORT_TBT_ALT && + tc->init_mode != TC_PORT_DISCONNECTED) drm_dbg_kms(&i915->drm, "Port %s: PHY left in %s mode on disabled port, disconnecting it\n", - dig_port->tc_port_name, - tc_port_mode_name(dig_port->tc_mode)); - icl_tc_phy_disconnect(dig_port); - - tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain, - fetch_and_zero(&dig_port->tc_lock_wakeref)); + tc->port_name, + tc_port_mode_name(tc->init_mode)); + tc_phy_disconnect(tc); + __intel_tc_port_put_link(tc); } drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n", - dig_port->tc_port_name, - tc_port_mode_name(dig_port->tc_mode)); + tc->port_name, + tc_port_mode_name(tc->mode)); - mutex_unlock(&dig_port->tc_lock); + mutex_unlock(&tc->lock); } /* @@ -787,42 +1311,186 @@ * connected ports are usable, and avoids exposing to the users objects they * can't really use. */ +bool intel_tc_port_connected_locked(struct intel_encoder *encoder) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_tc_port *tc = to_tc_port(dig_port); + u32 mask = ~0; + + drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port)); + + if (tc->mode != TC_PORT_DISCONNECTED) + mask = BIT(tc->mode); + + return tc_phy_hpd_live_status(tc) & mask; +} + bool intel_tc_port_connected(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct intel_tc_port *tc = to_tc_port(dig_port); bool is_connected; - intel_tc_port_lock(dig_port); + mutex_lock(&tc->lock); + is_connected = intel_tc_port_connected_locked(encoder); + mutex_unlock(&tc->lock); - is_connected = tc_port_live_status_mask(dig_port) & - BIT(dig_port->tc_mode); + return is_connected; +} - intel_tc_port_unlock(dig_port); +static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc) +{ + bool ret; - return is_connected; + mutex_lock(&tc->lock); + + ret = tc->link_refcount && + tc->mode == TC_PORT_DP_ALT && + intel_tc_port_needs_reset(tc); + + mutex_unlock(&tc->lock); + + return ret; } -static void __intel_tc_port_lock(struct intel_digital_port *dig_port, - int required_lanes) +bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + + if (!intel_phy_is_tc(i915, phy)) + return false; + + return __intel_tc_port_link_needs_reset(to_tc_port(dig_port)); +} + +static int reset_link_commit(struct intel_tc_port *tc, + struct intel_atomic_state *state, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + struct intel_digital_port *dig_port = tc->dig_port; + struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base); + struct intel_crtc *crtc; + u8 pipe_mask; + int ret; + + ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx); + if (ret) + return ret; + + ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask); + if (ret) + return ret; + + if (!pipe_mask) + return 0; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) { + struct intel_crtc_state *crtc_state; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + crtc_state->uapi.connectors_changed = true; + } + + if (!__intel_tc_port_link_needs_reset(tc)) + return 0; + + return drm_atomic_commit(&state->base); +} + +static int reset_link(struct 
intel_tc_port *tc) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_state *_state; + struct intel_atomic_state *state; + int ret; + + _state = drm_atomic_state_alloc(&i915->drm); + if (!_state) + return -ENOMEM; + + state = to_intel_atomic_state(_state); + + intel_modeset_lock_ctx_retry(&ctx, state, 0, ret) + ret = reset_link_commit(tc, state, &ctx); + + drm_atomic_state_put(&state->base); + + return ret; +} + +static void intel_tc_port_link_reset_work(struct work_struct *work) +{ + struct intel_tc_port *tc = + container_of(work, struct intel_tc_port, link_reset_work.work); + struct drm_i915_private *i915 = tc_to_i915(tc); + int ret; + + if (!__intel_tc_port_link_needs_reset(tc)) + return; + + mutex_lock(&i915->drm.mode_config.mutex); + + drm_dbg_kms(&i915->drm, + "Port %s: TypeC DP-alt sink disconnected, resetting link\n", + tc->port_name); + ret = reset_link(tc); + drm_WARN_ON(&i915->drm, ret); + + mutex_unlock(&i915->drm.mode_config.mutex); +} + +bool intel_tc_port_link_reset(struct intel_digital_port *dig_port) +{ + if (!intel_tc_port_link_needs_reset(dig_port)) + return false; + + queue_delayed_work(system_unbound_wq, + &to_tc_port(dig_port)->link_reset_work, + msecs_to_jiffies(2000)); + + return true; +} + +void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + struct intel_tc_port *tc = to_tc_port(dig_port); + + if (!intel_phy_is_tc(i915, phy)) + return; + + cancel_delayed_work(&tc->link_reset_work); +} + +static void __intel_tc_port_lock(struct intel_tc_port *tc, + int required_lanes) +{ + struct drm_i915_private *i915 = tc_to_i915(tc); - mutex_lock(&dig_port->tc_lock); + mutex_lock(&tc->lock); - cancel_delayed_work(&dig_port->tc_disconnect_phy_work); + cancel_delayed_work(&tc->disconnect_phy_work); - if (!dig_port->tc_link_refcount) - intel_tc_port_update_mode(dig_port, required_lanes, + if (!tc->link_refcount) + intel_tc_port_update_mode(tc, required_lanes, false); - drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED); - drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT && - !tc_phy_is_owned(dig_port)); + drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED); + drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT && + !tc_phy_is_owned(tc)); } void intel_tc_port_lock(struct intel_digital_port *dig_port) { - __intel_tc_port_lock(dig_port, 1); + __intel_tc_port_lock(to_tc_port(dig_port), 1); } /** @@ -835,15 +1503,15 @@ */ static void intel_tc_port_disconnect_phy_work(struct work_struct *work) { - struct intel_digital_port *dig_port = - container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work); + struct intel_tc_port *tc = + container_of(work, struct intel_tc_port, disconnect_phy_work.work); - mutex_lock(&dig_port->tc_lock); + mutex_lock(&tc->lock); - if (!dig_port->tc_link_refcount) - intel_tc_port_update_mode(dig_port, 1, true); + if (!tc->link_refcount) + intel_tc_port_update_mode(tc, 1, true); - mutex_unlock(&dig_port->tc_lock); + mutex_unlock(&tc->lock); } /** @@ -852,107 +1520,111 @@ * * Flush the delayed work disconnecting an idle PHY. 
*/ -void intel_tc_port_flush_work(struct intel_digital_port *dig_port) +static void intel_tc_port_flush_work(struct intel_digital_port *dig_port) { - flush_delayed_work(&dig_port->tc_disconnect_phy_work); + flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work); +} + +void intel_tc_port_suspend(struct intel_digital_port *dig_port) +{ + struct intel_tc_port *tc = to_tc_port(dig_port); + + cancel_delayed_work_sync(&tc->link_reset_work); + intel_tc_port_flush_work(dig_port); } void intel_tc_port_unlock(struct intel_digital_port *dig_port) { - if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED) - queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work, + struct intel_tc_port *tc = to_tc_port(dig_port); + + if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED) + queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work, msecs_to_jiffies(1000)); - mutex_unlock(&dig_port->tc_lock); + mutex_unlock(&tc->lock); } bool intel_tc_port_ref_held(struct intel_digital_port *dig_port) { - return mutex_is_locked(&dig_port->tc_lock) || - dig_port->tc_link_refcount; + struct intel_tc_port *tc = to_tc_port(dig_port); + + return mutex_is_locked(&tc->lock) || + tc->link_refcount; } void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes) { - __intel_tc_port_lock(dig_port, required_lanes); - dig_port->tc_link_refcount++; + struct intel_tc_port *tc = to_tc_port(dig_port); + + __intel_tc_port_lock(tc, required_lanes); + __intel_tc_port_get_link(tc); intel_tc_port_unlock(dig_port); } void intel_tc_port_put_link(struct intel_digital_port *dig_port) { + struct intel_tc_port *tc = to_tc_port(dig_port); + intel_tc_port_lock(dig_port); - --dig_port->tc_link_refcount; + __intel_tc_port_put_link(tc); intel_tc_port_unlock(dig_port); /* - * Disconnecting the PHY after the PHY's PLL gets disabled may - * hang the system on ADL-P, so disconnect the PHY here synchronously. - * TODO: remove this once the root cause of the ordering requirement - * is found/fixed. + * The firmware will not update the HPD status of other TypeC ports + * that are active in DP-alt mode with their sink disconnected, until + * this port is disabled and its PHY gets disconnected. Make sure this + * happens in a timely manner by disconnecting the PHY synchronously. */ intel_tc_port_flush_work(dig_port); } -static bool -tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port) -{ - enum intel_display_power_domain domain; - intel_wakeref_t wakeref; - u32 val; - - if (!INTEL_INFO(i915)->display.has_modular_fia) - return false; - - mutex_lock(&dig_port->tc_lock); - wakeref = tc_cold_block(dig_port, &domain); - val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1)); - tc_cold_unblock(dig_port, domain, wakeref); - mutex_unlock(&dig_port->tc_lock); - - drm_WARN_ON(&i915->drm, val == 0xffffffff); - - return val & MODULAR_FIA_MASK; -} - -static void -tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port) -{ - enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(i915, port); - - /* - * Each Modular FIA instance houses 2 TC ports. In SOC that has more - * than two TC ports, there are multiple instances of Modular FIA. 
- */ - if (tc_has_modular_fia(i915, dig_port)) { - dig_port->tc_phy_fia = tc_port / 2; - dig_port->tc_phy_fia_idx = tc_port % 2; - } else { - dig_port->tc_phy_fia = FIA1; - dig_port->tc_phy_fia_idx = tc_port; - } -} - -void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) +int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_tc_port *tc; enum port port = dig_port->base.port; enum tc_port tc_port = intel_port_to_tc(i915, port); if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE)) - return; + return -EINVAL; - snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name), + tc = kzalloc(sizeof(*tc), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + dig_port->tc = tc; + tc->dig_port = dig_port; + + if (DISPLAY_VER(i915) >= 13) + tc->phy_ops = &adlp_tc_phy_ops; + else if (DISPLAY_VER(i915) >= 12) + tc->phy_ops = &tgl_tc_phy_ops; + else + tc->phy_ops = &icl_tc_phy_ops; + + snprintf(tc->port_name, sizeof(tc->port_name), "%c/TC#%d", port_name(port), tc_port + 1); - mutex_init(&dig_port->tc_lock); - INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work); - dig_port->tc_legacy_port = is_legacy; - dig_port->tc_mode = TC_PORT_DISCONNECTED; - dig_port->tc_link_refcount = 0; - tc_port_load_fia_params(i915, dig_port); + mutex_init(&tc->lock); + /* TODO: Combine the two works */ + INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work); + INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work); + tc->legacy_port = is_legacy; + tc->mode = TC_PORT_DISCONNECTED; + tc->link_refcount = 0; + + tc_phy_init(tc); intel_tc_port_init_mode(dig_port); + + return 0; +} + +void intel_tc_port_cleanup(struct intel_digital_port *dig_port) +{ + intel_tc_port_suspend(dig_port); + + kfree(dig_port->tc); + dig_port->tc = NULL; } diff -u linux-6.2.0/drivers/gpu/drm/i915/i915_pci.c linux-6.2.0/drivers/gpu/drm/i915/i915_pci.c --- linux-6.2.0/drivers/gpu/drm/i915/i915_pci.c +++ linux-6.2.0/drivers/gpu/drm/i915/i915_pci.c @@ -895,7 +895,6 @@ static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), - .display.has_modular_fia = 1, .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; @@ -995,7 +994,6 @@ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), .display.has_cdclk_crawl = 1, - .display.has_modular_fia = 1, .display.has_psr_hw_tracking = 0, .__runtime.platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), @@ -1143,7 +1141,6 @@ .__runtime.graphics.ip.rel = 70, .__runtime.media.ip.ver = 13, PLATFORM(INTEL_METEORLAKE), - .display.has_modular_fia = 1, .extra_gt_list = xelpmp_extra_gt, .has_flat_ccs = 0, .has_gmd_id = 1, diff -u linux-6.2.0/drivers/hid/hid-ids.h linux-6.2.0/drivers/hid/hid-ids.h --- linux-6.2.0/drivers/hid/hid-ids.h +++ linux-6.2.0/drivers/hid/hid-ids.h @@ -523,6 +523,7 @@ #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044 #define USB_DEVICE_ID_GOOGLE_DON 0x5050 #define USB_DEVICE_ID_GOOGLE_EEL 0x5057 +#define USB_DEVICE_ID_GOOGLE_JEWEL 0x5061 #define USB_VENDOR_ID_GOTOP 0x08f2 #define USB_DEVICE_ID_SUPER_Q2 0x007f diff -u linux-6.2.0/drivers/hid/wacom_wac.c linux-6.2.0/drivers/hid/wacom_wac.c --- linux-6.2.0/drivers/hid/wacom_wac.c +++ linux-6.2.0/drivers/hid/wacom_wac.c @@ -826,7 +826,7 @@ /* Enter report */ if ((data[1] & 0xfc) == 0xc0) { /* serial number of the tool */ - 
wacom->serial[idx] = ((data[3] & 0x0f) << 28) + + wacom->serial[idx] = ((__u64)(data[3] & 0x0f) << 28) + (data[4] << 20) + (data[5] << 12) + (data[6] << 4) + (data[7] >> 4); diff -u linux-6.2.0/drivers/hwmon/k10temp.c linux-6.2.0/drivers/hwmon/k10temp.c --- linux-6.2.0/drivers/hwmon/k10temp.c +++ linux-6.2.0/drivers/hwmon/k10temp.c @@ -507,6 +507,7 @@ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} }; diff -u linux-6.2.0/drivers/iio/accel/kionix-kx022a.c linux-6.2.0/drivers/iio/accel/kionix-kx022a.c --- linux-6.2.0/drivers/iio/accel/kionix-kx022a.c +++ linux-6.2.0/drivers/iio/accel/kionix-kx022a.c @@ -1049,7 +1049,7 @@ data->ien_reg = KX022A_REG_INC4; } else { irq = fwnode_irq_get_byname(fwnode, "INT2"); - if (irq <= 0) + if (irq < 0) return dev_err_probe(dev, irq, "No suitable IRQ\n"); data->inc_reg = KX022A_REG_INC5; diff -u linux-6.2.0/drivers/input/joystick/xpad.c linux-6.2.0/drivers/input/joystick/xpad.c --- linux-6.2.0/drivers/input/joystick/xpad.c +++ linux-6.2.0/drivers/input/joystick/xpad.c @@ -282,7 +282,6 @@ { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, - { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, diff -u linux-6.2.0/drivers/input/touchscreen/cyttsp5.c linux-6.2.0/drivers/input/touchscreen/cyttsp5.c --- linux-6.2.0/drivers/input/touchscreen/cyttsp5.c +++ linux-6.2.0/drivers/input/touchscreen/cyttsp5.c @@ -560,7 +560,7 @@ static int cyttsp5_hid_output_bl_launch_app(struct cyttsp5 *ts) { int rc; - u8 cmd[HID_OUTPUT_BL_LAUNCH_APP]; + u8 cmd[HID_OUTPUT_BL_LAUNCH_APP_SIZE]; u16 crc; put_unaligned_le16(HID_OUTPUT_BL_LAUNCH_APP_SIZE, cmd); diff -u linux-6.2.0/drivers/iommu/amd/init.c linux-6.2.0/drivers/iommu/amd/init.c --- linux-6.2.0/drivers/iommu/amd/init.c +++ linux-6.2.0/drivers/iommu/amd/init.c @@ -752,6 +752,30 @@ } /* + * This function restarts event logging in case the IOMMU experienced + * an GA log overflow. + */ +void amd_iommu_restart_ga_log(struct amd_iommu *iommu) +{ + u32 status; + + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + if (status & MMIO_STATUS_GALOG_RUN_MASK) + return; + + pr_info_ratelimited("IOMMU GA Log restarting\n"); + + iommu_feature_disable(iommu, CONTROL_GALOG_EN); + iommu_feature_disable(iommu, CONTROL_GAINT_EN); + + writel(MMIO_STATUS_GALOG_OVERFLOW_MASK, + iommu->mmio_base + MMIO_STATUS_OFFSET); + + iommu_feature_enable(iommu, CONTROL_GAINT_EN); + iommu_feature_enable(iommu, CONTROL_GALOG_EN); +} + +/* * This function resets the command buffer if the IOMMU stopped fetching * commands from it. 
*/ diff -u linux-6.2.0/drivers/iommu/amd/iommu.c linux-6.2.0/drivers/iommu/amd/iommu.c --- linux-6.2.0/drivers/iommu/amd/iommu.c +++ linux-6.2.0/drivers/iommu/amd/iommu.c @@ -835,6 +835,7 @@ (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \ MMIO_STATUS_EVT_INT_MASK | \ MMIO_STATUS_PPR_INT_MASK | \ + MMIO_STATUS_GALOG_OVERFLOW_MASK | \ MMIO_STATUS_GALOG_INT_MASK) irqreturn_t amd_iommu_int_thread(int irq, void *data) @@ -858,10 +859,16 @@ } #ifdef CONFIG_IRQ_REMAP - if (status & MMIO_STATUS_GALOG_INT_MASK) { + if (status & (MMIO_STATUS_GALOG_INT_MASK | + MMIO_STATUS_GALOG_OVERFLOW_MASK)) { pr_devel("Processing IOMMU GA Log\n"); iommu_poll_ga_log(iommu); } + + if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) { + pr_info_ratelimited("IOMMU GA Log overflow\n"); + amd_iommu_restart_ga_log(iommu); + } #endif if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) { @@ -2048,7 +2055,7 @@ { struct io_pgtable_ops *pgtbl_ops; struct protection_domain *domain; - int pgtable = amd_iommu_pgtable; + int pgtable; int mode = DEFAULT_PGTABLE_LEVEL; int ret; @@ -2065,6 +2072,10 @@ mode = PAGE_MODE_NONE; } else if (type == IOMMU_DOMAIN_UNMANAGED) { pgtable = AMD_IOMMU_V1; + } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) { + pgtable = amd_iommu_pgtable; + } else { + return NULL; } switch (pgtable) { @@ -2093,6 +2104,15 @@ return NULL; } +static inline u64 dma_max_address(void) +{ + if (amd_iommu_pgtable == AMD_IOMMU_V1) + return ~0ULL; + + /* V2 with 4 level page table */ + return ((1ULL << PM_LEVEL_SHIFT(PAGE_MODE_4_LEVEL)) - 1); +} + static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) { struct protection_domain *domain; @@ -2109,7 +2129,7 @@ return NULL; domain->domain.geometry.aperture_start = 0; - domain->domain.geometry.aperture_end = ~0ULL; + domain->domain.geometry.aperture_end = dma_max_address(); domain->domain.geometry.force_aperture = true; return &domain->domain; @@ -2389,7 +2409,7 @@ unsigned long flags; spin_lock_irqsave(&dom->lock, flags); - domain_flush_pages(dom, gather->start, gather->end - gather->start, 1); + domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1); amd_iommu_domain_flush_complete(dom); spin_unlock_irqrestore(&dom->lock, flags); } @@ -3496,8 +3516,7 @@ struct irte_ga *entry = (struct irte_ga *) ir_data->entry; u64 valid; - if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || - !entry || entry->lo.fields_vapic.guest_mode) + if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry) return 0; valid = entry->lo.fields_vapic.valid; diff -u linux-6.2.0/drivers/iommu/mtk_iommu.c linux-6.2.0/drivers/iommu/mtk_iommu.c --- linux-6.2.0/drivers/iommu/mtk_iommu.c +++ linux-6.2.0/drivers/iommu/mtk_iommu.c @@ -746,7 +746,8 @@ { struct mtk_iommu_domain *dom = to_mtk_domain(domain); - mtk_iommu_tlb_flush_all(dom->bank->parent_data); + if (dom->bank) + mtk_iommu_tlb_flush_all(dom->bank->parent_data); } static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, diff -u linux-6.2.0/drivers/md/raid5.c linux-6.2.0/drivers/md/raid5.c --- linux-6.2.0/drivers/md/raid5.c +++ linux-6.2.0/drivers/md/raid5.c @@ -5516,7 +5516,7 @@ sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0, &dd_idx, NULL); - end_sector = bio_end_sector(raid_bio); + end_sector = sector + bio_sectors(raid_bio); rcu_read_lock(); if (r5c_big_stripe_cached(conf, sector)) diff -u linux-6.2.0/drivers/media/pci/netup_unidvb/netup_unidvb_core.c linux-6.2.0/drivers/media/pci/netup_unidvb/netup_unidvb_core.c --- linux-6.2.0/drivers/media/pci/netup_unidvb/netup_unidvb_core.c +++ 
linux-6.2.0/drivers/media/pci/netup_unidvb/netup_unidvb_core.c @@ -887,12 +887,7 @@ ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0), ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1), pci_dev->irq); - if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED, - "netup_unidvb", pci_dev) < 0) { - dev_err(&pci_dev->dev, - "%s(): can't get IRQ %d\n", __func__, pci_dev->irq); - goto irq_request_err; - } + ndev->dma_size = 2 * 188 * NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT; ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev, @@ -933,6 +928,14 @@ dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n"); goto dma_setup_err; } + + if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED, + "netup_unidvb", pci_dev) < 0) { + dev_err(&pci_dev->dev, + "%s(): can't get IRQ %d\n", __func__, pci_dev->irq); + goto dma_setup_err; + } + dev_info(&pci_dev->dev, "netup_unidvb: device has been initialized\n"); return 0; @@ -951,8 +954,6 @@ dma_free_coherent(&pci_dev->dev, ndev->dma_size, ndev->dma_virt, ndev->dma_phys); dma_alloc_err: - free_irq(pci_dev->irq, pci_dev); -irq_request_err: iounmap(ndev->lmmio1); pci_bar1_error: iounmap(ndev->lmmio0); diff -u linux-6.2.0/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c linux-6.2.0/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c --- linux-6.2.0/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c +++ linux-6.2.0/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c @@ -584,6 +584,9 @@ if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED)) { for (i = 0; i < num_supported_formats; i++) { + if (mtk_video_formats[i].type != MTK_FMT_DEC) + continue; + mtk_video_formats[i].frmsize.max_width = VCODEC_DEC_4K_CODED_WIDTH; mtk_video_formats[i].frmsize.max_height = diff -u linux-6.2.0/drivers/media/usb/uvc/uvc_driver.c linux-6.2.0/drivers/media/usb/uvc/uvc_driver.c --- linux-6.2.0/drivers/media/usb/uvc/uvc_driver.c +++ linux-6.2.0/drivers/media/usb/uvc/uvc_driver.c @@ -251,14 +251,17 @@ /* Find the format descriptor from its GUID. */ fmtdesc = uvc_format_by_guid(&buffer[5]); - if (fmtdesc != NULL) { - format->fcc = fmtdesc->fcc; - } else { + if (!fmtdesc) { + /* + * Unknown video formats are not fatal errors, the + * caller will skip this descriptor. + */ dev_info(&streaming->intf->dev, "Unknown video format %pUl\n", &buffer[5]); - format->fcc = 0; + return 0; } + format->fcc = fmtdesc->fcc; format->bpp = buffer[21]; /* @@ -675,7 +678,7 @@ interval = (u32 *)&frame[nframes]; streaming->format = format; - streaming->nformats = nformats; + streaming->nformats = 0; /* Parse the format descriptors. 
*/ while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) { @@ -689,7 +692,10 @@ &interval, buffer, buflen); if (ret < 0) goto error; + if (!ret) + break; + streaming->nformats++; frame += format->nframes; format++; diff -u linux-6.2.0/drivers/misc/fastrpc.c linux-6.2.0/drivers/misc/fastrpc.c --- linux-6.2.0/drivers/misc/fastrpc.c +++ linux-6.2.0/drivers/misc/fastrpc.c @@ -1892,7 +1892,7 @@ req.vaddrout = rsp_msg.vaddr; /* Add memory to static PD pool, protection thru hypervisor */ - if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { + if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) { struct qcom_scm_vmperm perm; perm.vmid = QCOM_SCM_VMID_HLOS; @@ -2337,8 +2337,10 @@ struct fastrpc_invoke_ctx *ctx; spin_lock(&user->lock); - list_for_each_entry(ctx, &user->pending, node) + list_for_each_entry(ctx, &user->pending, node) { + ctx->retval = -EPIPE; complete(&ctx->work); + } spin_unlock(&user->lock); } @@ -2349,7 +2351,9 @@ struct fastrpc_user *user; unsigned long flags; + /* No invocations past this point */ spin_lock_irqsave(&cctx->lock, flags); + cctx->rpdev = NULL; list_for_each_entry(user, &cctx->users, user) fastrpc_notify_users(user); spin_unlock_irqrestore(&cctx->lock, flags); @@ -2368,7 +2372,6 @@ of_platform_depopulate(&rpdev->dev); - cctx->rpdev = NULL; fastrpc_channel_ctx_put(cctx); } diff -u linux-6.2.0/drivers/net/dsa/mv88e6xxx/chip.c linux-6.2.0/drivers/net/dsa/mv88e6xxx/chip.c --- linux-6.2.0/drivers/net/dsa/mv88e6xxx/chip.c +++ linux-6.2.0/drivers/net/dsa/mv88e6xxx/chip.c @@ -7069,7 +7069,7 @@ goto out; } if (chip->reset) - usleep_range(1000, 2000); + usleep_range(10000, 20000); /* Detect if the device is configured in single chip addressing mode, * otherwise continue with address specific smi init/detection. 
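Aside (not part of the patch): two of the hunks above share the same idiom of giving hardware a bounded amount of time to become ready instead of assuming it already is — the tc_phy_wait_for_ready() helper added to the i915 TypeC code, and the longer post-reset delay before single-chip addressing detection in the mv88e6xxx probe. The sketch below is a minimal, userspace-only analogue of that bounded-poll pattern; it is illustrative only, and the helper name, timeout and poll interval are assumptions rather than anything taken from the kernel sources.

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <time.h>

/*
 * Illustrative userspace sketch of a bounded "wait for ready" poll.
 * Not kernel code; wait_until_ready(), timeout_ms and step_ms are
 * assumed names/parameters chosen for the example.
 */
static bool wait_until_ready(bool (*is_ready)(void *ctx), void *ctx,
                             unsigned int timeout_ms, unsigned int step_ms)
{
        struct timespec step = {
                .tv_sec = step_ms / 1000,
                .tv_nsec = (long)(step_ms % 1000) * 1000000L,
        };
        unsigned int waited_ms = 0;

        for (;;) {
                if (is_ready(ctx))
                        return true;    /* condition met within the budget */
                if (waited_ms >= timeout_ms)
                        return false;   /* caller logs/handles the timeout */
                nanosleep(&step, NULL); /* back off before re-polling */
                waited_ms += step_ms;
        }
}

A caller would pass its hardware-specific readiness check (for example a status-register read) as is_ready() and report an error on a false return, mirroring how the i915 helper above logs a timeout when the PHY never signals ready.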
diff -u linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt.c linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt.c --- linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2392,6 +2392,9 @@ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u64 ns; + if (!ptp) + goto async_event_process_exit; + spin_lock_bh(&ptp->ptp_lock); bnxt_ptp_update_current_time(bp); ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << @@ -4790,6 +4793,9 @@ if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) continue; + if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && + !bp->ptp_cfg) + continue; __set_bit(bnxt_async_events_arr[i], async_events_bmap); } if (bmap && bmap_size) { @@ -5377,6 +5383,7 @@ if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) return; + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); /* all contexts configured to same hash_type, zero always exists */ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); resp = hwrm_req_hold(bp, req); @@ -8839,6 +8846,9 @@ goto err_out; } + if (BNXT_VF(bp)) + bnxt_hwrm_func_qcfg(bp); + rc = bnxt_setup_vnic(bp, 0); if (rc) goto err_out; @@ -11625,6 +11635,7 @@ static void bnxt_fw_health_check(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; + struct pci_dev *pdev = bp->pdev; u32 val; if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) @@ -11638,7 +11649,7 @@ } val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); - if (val == fw_health->last_fw_heartbeat) { + if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) { fw_health->arrests++; goto fw_reset; } @@ -11646,7 +11657,7 @@ fw_health->last_fw_heartbeat = val; val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); - if (val != fw_health->last_fw_reset_cnt) { + if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) { fw_health->discoveries++; goto fw_reset; } @@ -13052,26 +13063,37 @@ #endif /* CONFIG_RFS_ACCEL */ -static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table) +static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) { struct bnxt *bp = netdev_priv(netdev); - struct udp_tunnel_info ti; unsigned int cmd; - udp_tunnel_nic_get_port(netdev, table, 0, &ti); - if (ti.type == UDP_TUNNEL_TYPE_VXLAN) + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; else cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; - if (ti.port) - return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd); + return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd); +} + +static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) +{ + struct bnxt *bp = netdev_priv(netdev); + unsigned int cmd; + + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; + else + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); } static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { - .sync_table = bnxt_udp_tunnel_sync, + .set_port = bnxt_udp_tunnel_set_port, + .unset_port = bnxt_udp_tunnel_unset_port, .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | UDP_TUNNEL_NIC_INFO_OPEN_ONLY, .tables = { diff -u linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c --- 
linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -3831,7 +3831,7 @@ } } - if (req & BNXT_FW_RESET_AP) { + if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) { /* This feature is not supported in older firmware versions */ if (bp->hwrm_spec_code >= 0x10803) { if (!bnxt_firmware_reset_ap(dev)) { diff -u linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c --- linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ linux-6.2.0/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -946,6 +946,7 @@ bnxt_ptp_timecounter_init(bp, true); bnxt_ptp_adjfine_rtc(bp, 0); } + bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true); ptp->ptp_info = bnxt_ptp_caps; if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) { diff -u linux-6.2.0/drivers/net/ethernet/broadcom/genet/bcmgenet.c linux-6.2.0/drivers/net/ethernet/broadcom/genet/bcmgenet.c --- linux-6.2.0/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ linux-6.2.0/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1272,7 +1272,8 @@ } } -static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable) +void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, + bool tx_lpi_enabled) { struct bcmgenet_priv *priv = netdev_priv(dev); u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL; @@ -1292,7 +1293,7 @@ /* Enable EEE and switch to a 27Mhz clock automatically */ reg = bcmgenet_readl(priv->base + off); - if (enable) + if (tx_lpi_enabled) reg |= TBUF_EEE_EN | TBUF_PM_EN; else reg &= ~(TBUF_EEE_EN | TBUF_PM_EN); @@ -1313,6 +1314,7 @@ priv->eee.eee_enabled = enable; priv->eee.eee_active = enable; + priv->eee.tx_lpi_enabled = tx_lpi_enabled; } static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) @@ -1328,6 +1330,7 @@ e->eee_enabled = p->eee_enabled; e->eee_active = p->eee_active; + e->tx_lpi_enabled = p->tx_lpi_enabled; e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); return phy_ethtool_get_eee(dev->phydev, e); @@ -1337,7 +1340,6 @@ { struct bcmgenet_priv *priv = netdev_priv(dev); struct ethtool_eee *p = &priv->eee; - int ret = 0; if (GENET_IS_V1(priv)) return -EOPNOTSUPP; @@ -1348,16 +1350,11 @@ p->eee_enabled = e->eee_enabled; if (!p->eee_enabled) { - bcmgenet_eee_enable_set(dev, false); + bcmgenet_eee_enable_set(dev, false, false); } else { - ret = phy_init_eee(dev->phydev, false); - if (ret) { - netif_err(priv, hw, dev, "EEE initialization failed\n"); - return ret; - } - + p->eee_active = phy_init_eee(dev->phydev, false) >= 0; bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); - bcmgenet_eee_enable_set(dev, true); + bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled); } return phy_ethtool_set_eee(dev->phydev, e); @@ -4279,9 +4276,6 @@ if (!device_may_wakeup(d)) phy_resume(dev->phydev); - if (priv->eee.eee_enabled) - bcmgenet_eee_enable_set(dev, true); - bcmgenet_netif_start(dev); netif_device_attach(dev); diff -u linux-6.2.0/drivers/net/ethernet/broadcom/genet/bcmmii.c linux-6.2.0/drivers/net/ethernet/broadcom/genet/bcmmii.c --- linux-6.2.0/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ linux-6.2.0/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -87,6 +87,11 @@ reg |= CMD_TX_EN | CMD_RX_EN; } bcmgenet_umac_writel(priv, reg, UMAC_CMD); + + priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0; + bcmgenet_eee_enable_set(dev, + priv->eee.eee_enabled && priv->eee.eee_active, + priv->eee.tx_lpi_enabled); } /* setup netdev link state when PHY link 
status change and diff -u linux-6.2.0/drivers/net/ethernet/intel/e1000e/netdev.c linux-6.2.0/drivers/net/ethernet/intel/e1000e/netdev.c --- linux-6.2.0/drivers/net/ethernet/intel/e1000e/netdev.c +++ linux-6.2.0/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7022,6 +7022,8 @@ struct e1000_adapter *adapter = netdev_priv(netdev); int rc; + pdev->pme_poll = true; + rc = __e1000_resume(pdev); if (rc) return rc; @@ -7686,7 +7688,7 @@ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE); - if (pci_dev_run_wake(pdev) && hw->mac.type != e1000_pch_cnp) + if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); return 0; diff -u linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c --- linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -483,7 +483,7 @@ (u64)timestamp_low; break; default: - if (tracer_event->event_id >= tracer->str_db.first_string_trace || + if (tracer_event->event_id >= tracer->str_db.first_string_trace && tracer_event->event_id <= tracer->str_db.first_string_trace + tracer->str_db.num_string_trace) { tracer_event->type = TRACER_EVENT_TYPE_STRING; diff -u linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/en_main.c linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/en_main.c --- linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5754,8 +5754,8 @@ } static int -mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, - const struct mlx5e_profile *new_profile, void *new_ppriv) +mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, + const struct mlx5e_profile *new_profile, void *new_ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@ -5771,6 +5771,25 @@ err = new_profile->init(priv->mdev, priv->netdev); if (err) goto priv_cleanup; + + return 0; + +priv_cleanup: + mlx5e_priv_cleanup(priv); + return err; +} + +static int +mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev, + const struct mlx5e_profile *new_profile, void *new_ppriv) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv); + if (err) + return err; + err = mlx5e_attach_netdev(priv); if (err) goto profile_cleanup; @@ -5778,7 +5797,6 @@ profile_cleanup: new_profile->cleanup(priv); -priv_cleanup: mlx5e_priv_cleanup(priv); return err; } @@ -5797,6 +5815,12 @@ priv->profile->cleanup(priv); mlx5e_priv_cleanup(priv); + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { + mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv); + set_bit(MLX5E_STATE_DESTROYING, &priv->state); + return -EIO; + } + err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv); if (err) { /* roll back to original profile */ netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err); @@ -5856,8 +5880,11 @@ struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - if (!netif_device_present(netdev)) + if (!netif_device_present(netdev)) { + if (test_bit(MLX5E_STATE_DESTROYING, &priv->state)) + mlx5e_destroy_mdev_resources(mdev); return -ENODEV; + } mlx5e_detach_netdev(priv); mlx5e_destroy_mdev_resources(mdev); diff -u linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/main.c linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/main.c --- 
linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -895,7 +895,6 @@ } mlx5_pci_vsc_init(dev); - dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); return 0; err_clr_master: @@ -1129,6 +1128,7 @@ goto err_cmd_cleanup; } + dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); mlx5_start_health_poll(dev); @@ -1769,14 +1769,15 @@ struct devlink *devlink = priv_to_devlink(dev); set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); - /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain - * fw_reset before unregistering the devlink. + /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using + * devlink notify APIs. + * Hence, we must drain them before unregistering the devlink. */ mlx5_drain_fw_reset(dev); + mlx5_drain_health_wq(dev); devlink_unregister(devlink); mlx5_sriov_disable(pdev); mlx5_crdump_disable(dev); - mlx5_drain_health_wq(dev); mlx5_uninit_one(dev); mlx5_pci_close(dev); mlx5_mdev_uninit(dev); diff -u linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c --- linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -63,6 +63,7 @@ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); struct devlink *devlink = priv_to_devlink(sf_dev->mdev); + mlx5_drain_health_wq(sf_dev->mdev); devlink_unregister(devlink); mlx5_uninit_one(sf_dev->mdev); iounmap(sf_dev->mdev->iseg); diff -u linux-6.2.0/drivers/net/ethernet/qlogic/qede/qede_main.c linux-6.2.0/drivers/net/ethernet/qlogic/qede/qede_main.c --- linux-6.2.0/drivers/net/ethernet/qlogic/qede/qede_main.c +++ linux-6.2.0/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -308,6 +308,8 @@ edev->ops->get_vport_stats(edev->cdev, &stats); + spin_lock(&edev->stats_lock); + p_common->no_buff_discards = stats.common.no_buff_discards; p_common->packet_too_big_discard = stats.common.packet_too_big_discard; p_common->ttl0_discard = stats.common.ttl0_discard; @@ -405,6 +407,8 @@ p_ah->tx_1519_to_max_byte_packets = stats.ah.tx_1519_to_max_byte_packets; } + + spin_unlock(&edev->stats_lock); } static void qede_get_stats64(struct net_device *dev, @@ -413,9 +417,10 @@ struct qede_dev *edev = netdev_priv(dev); struct qede_stats_common *p_common; - qede_fill_by_demand_stats(edev); p_common = &edev->stats.common; + spin_lock(&edev->stats_lock); + stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + p_common->rx_bcast_pkts; stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + @@ -435,6 +440,8 @@ stats->collisions = edev->stats.bb.tx_total_collisions; stats->rx_crc_errors = p_common->rx_crc_errors; stats->rx_frame_errors = p_common->rx_align_errors; + + spin_unlock(&edev->stats_lock); } #ifdef CONFIG_QED_SRIOV @@ -1061,6 +1068,23 @@ rtnl_unlock(); } +static void qede_periodic_task(struct work_struct *work) +{ + struct qede_dev *edev = container_of(work, struct qede_dev, + periodic_task.work); + + qede_fill_by_demand_stats(edev); + schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks); +} + +static void qede_init_periodic_task(struct qede_dev *edev) +{ + INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task); + spin_lock_init(&edev->stats_lock); + edev->stats_coal_usecs = USEC_PER_SEC; + edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC); +} + static void qede_sp_task(struct work_struct 
*work) { struct qede_dev *edev = container_of(work, struct qede_dev, @@ -1080,6 +1104,7 @@ */ if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { + cancel_delayed_work_sync(&edev->periodic_task); #ifdef CONFIG_QED_SRIOV /* SRIOV must be disabled outside the lock to avoid a deadlock. * The recovery of the active VFs is currently not supported. @@ -1270,6 +1295,7 @@ */ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); mutex_init(&edev->qede_lock); + qede_init_periodic_task(edev); rc = register_netdev(edev->ndev); if (rc) { @@ -1294,6 +1320,11 @@ edev->rx_copybreak = QEDE_RX_HDR_SIZE; qede_log_probe(edev); + + /* retain user config (for example - after recovery) */ + if (edev->stats_coal_usecs) + schedule_delayed_work(&edev->periodic_task, 0); + return 0; err4: @@ -1362,6 +1393,7 @@ unregister_netdev(ndev); cancel_delayed_work_sync(&edev->sp_task); + cancel_delayed_work_sync(&edev->periodic_task); edev->ops->common->set_power_state(cdev, PCI_D0); diff -u linux-6.2.0/drivers/net/ethernet/renesas/rswitch.c linux-6.2.0/drivers/net/ethernet/renesas/rswitch.c --- linux-6.2.0/drivers/net/ethernet/renesas/rswitch.c +++ linux-6.2.0/drivers/net/ethernet/renesas/rswitch.c @@ -1407,7 +1407,7 @@ if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) { netif_stop_subqueue(ndev, 0); - return ret; + return NETDEV_TX_BUSY; } if (skb_put_padto(skb, ETH_ZLEN)) diff -u linux-6.2.0/drivers/net/phy/sfp.c linux-6.2.0/drivers/net/phy/sfp.c --- linux-6.2.0/drivers/net/phy/sfp.c +++ linux-6.2.0/drivers/net/phy/sfp.c @@ -2198,6 +2198,11 @@ break; } + /* Force a poll to re-read the hardware signal state after + * sfp_sm_mod_probe() changed state_hw_mask. + */ + mod_delayed_work(system_wq, &sfp->poll, 1); + err = sfp_hwmon_insert(sfp); if (err) dev_warn(sfp->dev, "hwmon probe failed: %pe\n", diff -u linux-6.2.0/drivers/net/tap.c linux-6.2.0/drivers/net/tap.c --- linux-6.2.0/drivers/net/tap.c +++ linux-6.2.0/drivers/net/tap.c @@ -533,7 +533,7 @@ q->sock.state = SS_CONNECTED; q->sock.file = file; q->sock.ops = &tap_socket_ops; - sock_init_data_uid(&q->sock, &q->sk, inode->i_uid); + sock_init_data_uid(&q->sock, &q->sk, current_fsuid()); q->sk.sk_write_space = tap_sock_write_space; q->sk.sk_destruct = tap_sock_destruct; q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; diff -u linux-6.2.0/drivers/net/tun.c linux-6.2.0/drivers/net/tun.c --- linux-6.2.0/drivers/net/tun.c +++ linux-6.2.0/drivers/net/tun.c @@ -3463,7 +3463,7 @@ tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; - sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid); + sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; diff -u linux-6.2.0/drivers/net/usb/qmi_wwan.c linux-6.2.0/drivers/net/usb/qmi_wwan.c --- linux-6.2.0/drivers/net/usb/qmi_wwan.c +++ linux-6.2.0/drivers/net/usb/qmi_wwan.c @@ -1325,7 +1325,7 @@ {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ - {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ + {QMI_QUIRK_SET_DTR(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ diff -u linux-6.2.0/drivers/net/virtio_net.c linux-6.2.0/drivers/net/virtio_net.c --- linux-6.2.0/drivers/net/virtio_net.c +++ 
linux-6.2.0/drivers/net/virtio_net.c @@ -204,6 +204,8 @@ __virtio16 vid; __virtio64 offloads; struct virtio_net_ctrl_rss rss; + struct virtio_net_ctrl_coal_tx coal_tx; + struct virtio_net_ctrl_coal_rx coal_rx; }; struct virtnet_info { @@ -2790,12 +2792,10 @@ struct ethtool_coalesce *ec) { struct scatterlist sgs_tx, sgs_rx; - struct virtio_net_ctrl_coal_tx coal_tx; - struct virtio_net_ctrl_coal_rx coal_rx; - coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); - coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); - sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx)); + vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); + vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames); + sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_TX_SET, @@ -2806,9 +2806,9 @@ vi->tx_usecs = ec->tx_coalesce_usecs; vi->tx_max_packets = ec->tx_max_coalesced_frames; - coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); - coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); - sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx)); + vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); + vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); + sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, VIRTIO_NET_CTRL_NOTF_COAL_RX_SET, diff -u linux-6.2.0/drivers/net/wireless/intel/iwlwifi/mvm/d3.c linux-6.2.0/drivers/net/wireless/intel/iwlwifi/mvm/d3.c --- linux-6.2.0/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ linux-6.2.0/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2722,17 +2722,13 @@ if (wowlan_info_ver < 2) { struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data; - notif = kmemdup(notif_v1, - offsetofend(struct iwl_wowlan_info_notif, - received_beacons), - GFP_ATOMIC); - + notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC); if (!notif) return false; notif->tid_tear_down = notif_v1->tid_tear_down; notif->station_id = notif_v1->station_id; - + memset_after(notif, 0, station_id); } else { notif = (void *)pkt->data; } diff -u linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7615/mac.c linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7615/mac.c --- linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -919,7 +919,10 @@ msta = list_first_entry(&sta_poll_list, struct mt7615_sta, poll_list); + + spin_lock_bh(&dev->sta_poll_lock); list_del_init(&msta->poll_list); + spin_unlock_bh(&dev->sta_poll_lock); addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4; diff -u linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/dma.c linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/dma.c --- linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/dma.c +++ linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/dma.c @@ -231,10 +231,6 @@ if (ret) return ret; - ret = mt7921_wfsys_reset(dev); - if (ret) - return ret; - /* init tx queue */ ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0, MT7921_TX_RING_SIZE, diff -u linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c --- linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -477,12 +477,6 @@ { int ret; - ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); - if (ret 
&& mt76_is_mmio(&dev->mt76)) { - dev_dbg(dev->mt76.dev, "Firmware is already download\n"); - goto fw_loaded; - } - ret = mt76_connac2_load_patch(&dev->mt76, mt7921_patch_name(dev)); if (ret) return ret; @@ -505,8 +499,6 @@ return -EIO; } -fw_loaded: - #ifdef CONFIG_PM dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support; #endif /* CONFIG_PM */ diff -u linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/pci.c linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/pci.c --- linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ linux-6.2.0/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -343,6 +343,10 @@ bus_ops->rmw = mt7921_rmw; dev->mt76.bus = bus_ops; + ret = mt7921e_mcu_fw_pmctrl(dev); + if (ret) + goto err_free_dev; + ret = __mt7921e_mcu_drv_pmctrl(dev); if (ret) goto err_free_dev; @@ -351,6 +355,10 @@ (mt7921_l1_rr(dev, MT_HW_REV) & 0xff); dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + ret = mt7921_wfsys_reset(dev); + if (ret) + goto err_free_dev; + mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); diff -u linux-6.2.0/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c linux-6.2.0/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c --- linux-6.2.0/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ linux-6.2.0/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -3993,6 +3993,7 @@ RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL | RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC; rtl8xxxu_write32(priv, REG_RCR, val32); + priv->regrcr = val32; if (priv->rtl_chip == RTL8188F) { /* Accept all data frames */ @@ -6042,7 +6043,7 @@ unsigned int *total_flags, u64 multicast) { struct rtl8xxxu_priv *priv = hw->priv; - u32 rcr = rtl8xxxu_read32(priv, REG_RCR); + u32 rcr = priv->regrcr; dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n", __func__, changed_flags, *total_flags); @@ -6088,6 +6089,7 @@ */ rtl8xxxu_write32(priv, REG_RCR, rcr); + priv->regrcr = rcr; *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL | diff -u linux-6.2.0/drivers/net/wireless/realtek/rtw88/mac80211.c linux-6.2.0/drivers/net/wireless/realtek/rtw88/mac80211.c --- linux-6.2.0/drivers/net/wireless/realtek/rtw88/mac80211.c +++ linux-6.2.0/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -88,15 +88,6 @@ } } - if (changed & IEEE80211_CONF_CHANGE_PS) { - if (hw->conf.flags & IEEE80211_CONF_PS) { - rtwdev->ps_enabled = true; - } else { - rtwdev->ps_enabled = false; - rtw_leave_lps(rtwdev); - } - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) rtw_set_channel(rtwdev); @@ -206,6 +197,7 @@ rtwvif->bcn_ctrl = bcn_ctrl; config |= PORT_SET_BCN_CTRL; rtw_vif_port_config(rtwdev, rtwvif, config); + rtw_recalc_lps(rtwdev, vif); mutex_unlock(&rtwdev->mutex); @@ -236,6 +228,7 @@ rtwvif->bcn_ctrl = 0; config |= PORT_SET_BCN_CTRL; rtw_vif_port_config(rtwdev, rtwvif, config); + rtw_recalc_lps(rtwdev, NULL); mutex_unlock(&rtwdev->mutex); } @@ -428,6 +421,9 @@ if (changed & BSS_CHANGED_ERP_SLOT) rtw_conf_tx(rtwdev, rtwvif); + if (changed & BSS_CHANGED_PS) + rtw_recalc_lps(rtwdev, NULL); + rtw_vif_port_config(rtwdev, rtwvif, config); mutex_unlock(&rtwdev->mutex); diff -u linux-6.2.0/drivers/net/wireless/realtek/rtw88/main.c linux-6.2.0/drivers/net/wireless/realtek/rtw88/main.c --- linux-6.2.0/drivers/net/wireless/realtek/rtw88/main.c +++ linux-6.2.0/drivers/net/wireless/realtek/rtw88/main.c @@ -250,8 +250,8 @@ * more than two stations associated to the AP, then we can not enter * lps, 
because fw does not handle the overlapped beacon interval * - * mac80211 should iterate vifs and determine if driver can enter - * ps by passing IEEE80211_CONF_PS to us, all we need to do is to + * rtw_recalc_lps() iterate vifs and determine if driver can enter + * ps by vif->type and vif->cfg.ps, all we need to do here is to * get that vif and check if device is having traffic more than the * threshold. */ diff -u linux-6.2.0/drivers/net/wireless/realtek/rtw88/ps.c linux-6.2.0/drivers/net/wireless/realtek/rtw88/ps.c --- linux-6.2.0/drivers/net/wireless/realtek/rtw88/ps.c +++ linux-6.2.0/drivers/net/wireless/realtek/rtw88/ps.c @@ -301,0 +302,43 @@ + +struct rtw_vif_recalc_lps_iter_data { + struct rtw_dev *rtwdev; + struct ieee80211_vif *found_vif; + int count; +}; + +static void __rtw_vif_recalc_lps(struct rtw_vif_recalc_lps_iter_data *data, + struct ieee80211_vif *vif) +{ + if (data->count < 0) + return; + + if (vif->type != NL80211_IFTYPE_STATION) { + data->count = -1; + return; + } + + data->count++; + data->found_vif = vif; +} + +static void rtw_vif_recalc_lps_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + __rtw_vif_recalc_lps(data, vif); +} + +void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif) +{ + struct rtw_vif_recalc_lps_iter_data data = { .rtwdev = rtwdev }; + + if (new_vif) + __rtw_vif_recalc_lps(&data, new_vif); + rtw_iterate_vifs(rtwdev, rtw_vif_recalc_lps_iter, &data); + + if (data.count == 1 && data.found_vif->cfg.ps) { + rtwdev->ps_enabled = true; + } else { + rtwdev->ps_enabled = false; + rtw_leave_lps(rtwdev); + } +} diff -u linux-6.2.0/drivers/nvme/host/core.c linux-6.2.0/drivers/nvme/host/core.c --- linux-6.2.0/drivers/nvme/host/core.c +++ linux-6.2.0/drivers/nvme/host/core.c @@ -3574,6 +3574,9 @@ { struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags)) + return -EBUSY; + if (device_remove_file_self(dev, attr)) nvme_delete_ctrl_sync(ctrl); return count; @@ -5034,7 +5037,7 @@ * that were missed. We identify persistent discovery controllers by * checking that they started once before, hence are reconnecting back. 
*/ - if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && + if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && nvme_discovery_ctrl(ctrl)) nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); @@ -5045,6 +5048,7 @@ } nvme_change_uevent(ctrl, "NVME_EVENT=connected"); + set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags); } EXPORT_SYMBOL_GPL(nvme_start_ctrl); diff -u linux-6.2.0/drivers/nvme/host/multipath.c linux-6.2.0/drivers/nvme/host/multipath.c --- linux-6.2.0/drivers/nvme/host/multipath.c +++ linux-6.2.0/drivers/nvme/host/multipath.c @@ -884,7 +884,6 @@ { if (!head->disk) return; - blk_mark_disk_dead(head->disk); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); diff -u linux-6.2.0/drivers/nvme/host/pci.c linux-6.2.0/drivers/nvme/host/pci.c --- linux-6.2.0/drivers/nvme/host/pci.c +++ linux-6.2.0/drivers/nvme/host/pci.c @@ -3459,6 +3459,8 @@ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x2646, 0x5013), /* Kingston KC3000, Kingston FURY Renegade */ + .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, }, { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ @@ -3499,4 +3501,8 @@ { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, diff -u linux-6.2.0/drivers/pci/pcie/dpc.c linux-6.2.0/drivers/pci/pcie/dpc.c --- linux-6.2.0/drivers/pci/pcie/dpc.c +++ linux-6.2.0/drivers/pci/pcie/dpc.c @@ -348,13 +348,34 @@ } } +static void dpc_enable(struct pcie_device *dev) +{ + struct pci_dev *pdev = dev->port; + u16 ctl; + + pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); + + ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN; + pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl); +} + +static void dpc_disable(struct pcie_device *dev) +{ + struct pci_dev *pdev = dev->port; + u16 ctl; + + pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); + ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN); + pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl); +} + #define FLAG(x, y) (((x) & (y)) ? 
'+' : '-') static int dpc_probe(struct pcie_device *dev) { struct pci_dev *pdev = dev->port; struct device *device = &dev->device; int status; - u16 ctl, cap; + u16 cap; if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native) return -ENOTSUPP; @@ -369,10 +390,7 @@ } pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap); - pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); - - ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN; - pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl); + dpc_enable(dev); pci_info(pdev, "enabled with IRQ %d\n", dev->irq); pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", @@ -385,14 +403,21 @@ return status; } -static void dpc_remove(struct pcie_device *dev) +static int dpc_suspend(struct pcie_device *dev) { - struct pci_dev *pdev = dev->port; - u16 ctl; + dpc_disable(dev); + return 0; +} - pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl); - ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN); - pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl); +static int dpc_resume(struct pcie_device *dev) +{ + dpc_enable(dev); + return 0; +} + +static void dpc_remove(struct pcie_device *dev) +{ + dpc_disable(dev); } static struct pcie_port_service_driver dpcdriver = { @@ -400,6 +425,8 @@ .port_type = PCIE_ANY_PORT, .service = PCIE_PORT_SERVICE_DPC, .probe = dpc_probe, + .suspend = dpc_suspend, + .resume = dpc_resume, .remove = dpc_remove, }; diff -u linux-6.2.0/drivers/scsi/megaraid/megaraid_sas_fusion.c linux-6.2.0/drivers/scsi/megaraid/megaraid_sas_fusion.c --- linux-6.2.0/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ linux-6.2.0/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3323,7 +3323,7 @@ /* copy the io request frame as well as 8 SGEs data for r1 command*/ memcpy(r1_cmd->io_request, cmd->io_request, (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST))); - memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL, + memcpy(r1_cmd->io_request->SGLs, cmd->io_request->SGLs, (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION))); /*sense buffer is different for r1 command*/ r1_cmd->io_request->SenseBufferLowAddress = diff -u linux-6.2.0/drivers/soc/qcom/rpmh-rsc.c linux-6.2.0/drivers/soc/qcom/rpmh-rsc.c --- linux-6.2.0/drivers/soc/qcom/rpmh-rsc.c +++ linux-6.2.0/drivers/soc/qcom/rpmh-rsc.c @@ -1073,7 +1073,7 @@ drv->ver.minor = rsc_id & (MINOR_VER_MASK << MINOR_VER_SHIFT); drv->ver.minor >>= MINOR_VER_SHIFT; - if (drv->ver.major == 3 && drv->ver.minor >= 0) + if (drv->ver.major == 3) drv->regs = rpmh_rsc_reg_offset_ver_3_0; else drv->regs = rpmh_rsc_reg_offset_ver_2_7; diff -u linux-6.2.0/drivers/spi/spi-qup.c linux-6.2.0/drivers/spi/spi-qup.c --- linux-6.2.0/drivers/spi/spi-qup.c +++ linux-6.2.0/drivers/spi/spi-qup.c @@ -1029,23 +1029,8 @@ return -ENXIO; } - ret = clk_prepare_enable(cclk); - if (ret) { - dev_err(dev, "cannot enable core clock\n"); - return ret; - } - - ret = clk_prepare_enable(iclk); - if (ret) { - clk_disable_unprepare(cclk); - dev_err(dev, "cannot enable iface clock\n"); - return ret; - } - master = spi_alloc_master(dev, sizeof(struct spi_qup)); if (!master) { - clk_disable_unprepare(cclk); - clk_disable_unprepare(iclk); dev_err(dev, "cannot allocate master\n"); return -ENOMEM; } @@ -1093,6 +1078,19 @@ spin_lock_init(&controller->lock); init_completion(&controller->done); + ret = clk_prepare_enable(cclk); + if (ret) { + dev_err(dev, "cannot enable core clock\n"); + goto 
error_dma; + } + + ret = clk_prepare_enable(iclk); + if (ret) { + clk_disable_unprepare(cclk); + dev_err(dev, "cannot enable iface clock\n"); + goto error_dma; + } + iomode = readl_relaxed(base + QUP_IO_M_MODES); size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode); @@ -1122,7 +1120,7 @@ ret = spi_qup_set_state(controller, QUP_STATE_RESET); if (ret) { dev_err(dev, "cannot set RESET state\n"); - goto error_dma; + goto error_clk; } writel_relaxed(0, base + QUP_OPERATIONAL); @@ -1146,7 +1144,7 @@ ret = devm_request_irq(dev, irq, spi_qup_qup_irq, IRQF_TRIGGER_HIGH, pdev->name, controller); if (ret) - goto error_dma; + goto error_clk; pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(dev); @@ -1161,11 +1159,12 @@ disable_pm: pm_runtime_disable(&pdev->dev); +error_clk: + clk_disable_unprepare(cclk); + clk_disable_unprepare(iclk); error_dma: spi_qup_release_dma(master); error: - clk_disable_unprepare(cclk); - clk_disable_unprepare(iclk); spi_master_put(master); return ret; } diff -u linux-6.2.0/drivers/tty/serial/Kconfig linux-6.2.0/drivers/tty/serial/Kconfig --- linux-6.2.0/drivers/tty/serial/Kconfig +++ linux-6.2.0/drivers/tty/serial/Kconfig @@ -769,7 +769,7 @@ config SERIAL_CPM tristate "CPM SCC/SMC serial port support" - depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST) + depends on CPM2 || CPM1 select SERIAL_CORE help This driver supports the SCC and SMC serial ports on Motorola diff -u linux-6.2.0/drivers/tty/serial/fsl_lpuart.c linux-6.2.0/drivers/tty/serial/fsl_lpuart.c --- linux-6.2.0/drivers/tty/serial/fsl_lpuart.c +++ linux-6.2.0/drivers/tty/serial/fsl_lpuart.c @@ -1469,34 +1469,36 @@ static void lpuart32_break_ctl(struct uart_port *port, int break_state) { - unsigned long temp, modem; - struct tty_struct *tty; - unsigned int cflag = 0; + unsigned long temp; - tty = tty_port_tty_get(&port->state->port); - if (tty) { - cflag = tty->termios.c_cflag; - tty_kref_put(tty); - } - - temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK; - modem = lpuart32_read(port, UARTMODIR); + temp = lpuart32_read(port, UARTCTRL); + /* + * LPUART IP now has two known bugs, one is CTS has higher priority than the + * break signal, which causes the break signal sending through UARTCTRL_SBK + * may impacted by the CTS input if the HW flow control is enabled. It + * exists on all platforms we support in this driver. + * Another bug is i.MX8QM LPUART may have an additional break character + * being sent after SBK was cleared. + * To avoid above two bugs, we use Transmit Data Inversion function to send + * the break signal instead of UARTCTRL_SBK. + */ if (break_state != 0) { - temp |= UARTCTRL_SBK; /* - * LPUART CTS has higher priority than SBK, need to disable CTS before - * asserting SBK to avoid any interference if flow control is enabled. + * Disable the transmitter to prevent any data from being sent out + * during break, then invert the TX line to send break. */ - if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE) - lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); + temp &= ~UARTCTRL_TE; + lpuart32_write(port, temp, UARTCTRL); + temp |= UARTCTRL_TXINV; + lpuart32_write(port, temp, UARTCTRL); } else { - /* Re-enable the CTS when break off. */ - if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE)) - lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR); + /* Disable the TXINV to turn off break and re-enable transmitter. 
*/ + temp &= ~UARTCTRL_TXINV; + lpuart32_write(port, temp, UARTCTRL); + temp |= UARTCTRL_TE; + lpuart32_write(port, temp, UARTCTRL); } - - lpuart32_write(port, temp, UARTCTRL); } static void lpuart_setup_watermark(struct lpuart_port *sport) diff -u linux-6.2.0/drivers/vdpa/mlx5/net/mlx5_vnet.c linux-6.2.0/drivers/vdpa/mlx5/net/mlx5_vnet.c --- linux-6.2.0/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ linux-6.2.0/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -3212,10 +3212,10 @@ ndev->nb_registered = false; mlx5_notifier_unregister(mvdev->mdev, &ndev->nb); } + _vdpa_unregister_device(dev); wq = mvdev->wq; mvdev->wq = NULL; destroy_workqueue(wq); - _vdpa_unregister_device(dev); mgtdev->ndev = NULL; } diff -u linux-6.2.0/drivers/vhost/vdpa.c linux-6.2.0/drivers/vhost/vdpa.c --- linux-6.2.0/drivers/vhost/vdpa.c +++ linux-6.2.0/drivers/vhost/vdpa.c @@ -549,7 +549,14 @@ if (r) return r; - vq->last_avail_idx = vq_state.split.avail_index; + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { + vq->last_avail_idx = vq_state.packed.last_avail_idx | + (vq_state.packed.last_avail_counter << 15); + vq->last_used_idx = vq_state.packed.last_used_idx | + (vq_state.packed.last_used_counter << 15); + } else { + vq->last_avail_idx = vq_state.split.avail_index; + } break; } @@ -567,9 +574,15 @@ break; case VHOST_SET_VRING_BASE: - vq_state.split.avail_index = vq->last_avail_idx; - if (ops->set_vq_state(vdpa, idx, &vq_state)) - r = -EINVAL; + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { + vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff; + vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000); + vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff; + vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000); + } else { + vq_state.split.avail_index = vq->last_avail_idx; + } + r = ops->set_vq_state(vdpa, idx, &vq_state); break; case VHOST_SET_VRING_CALL: diff -u linux-6.2.0/drivers/video/fbdev/stifb.c linux-6.2.0/drivers/video/fbdev/stifb.c --- linux-6.2.0/drivers/video/fbdev/stifb.c +++ linux-6.2.0/drivers/video/fbdev/stifb.c @@ -1413,6 +1413,7 @@ iounmap(info->screen_base); out_err0: kfree(fb); + sti->info = NULL; return -ENXIO; } diff -u linux-6.2.0/fs/afs/dir.c linux-6.2.0/fs/afs/dir.c --- linux-6.2.0/fs/afs/dir.c +++ linux-6.2.0/fs/afs/dir.c @@ -1358,6 +1358,7 @@ op->dentry = dentry; op->create.mode = S_IFDIR | mode; op->create.reason = afs_edit_dir_for_mkdir; + op->mtime = current_time(dir); op->ops = &afs_mkdir_operation; return afs_do_sync_operation(op); } @@ -1661,6 +1662,7 @@ op->dentry = dentry; op->create.mode = S_IFREG | mode; op->create.reason = afs_edit_dir_for_create; + op->mtime = current_time(dir); op->ops = &afs_create_operation; return afs_do_sync_operation(op); @@ -1796,6 +1798,7 @@ op->ops = &afs_symlink_operation; op->create.reason = afs_edit_dir_for_symlink; op->create.symlink = content; + op->mtime = current_time(dir); return afs_do_sync_operation(op); error: diff -u linux-6.2.0/fs/btrfs/ctree.c linux-6.2.0/fs/btrfs/ctree.c --- linux-6.2.0/fs/btrfs/ctree.c +++ linux-6.2.0/fs/btrfs/ctree.c @@ -3226,6 +3226,7 @@ if (check_sibling_keys(left, right)) { ret = -EUCLEAN; + btrfs_abort_transaction(trans, ret); btrfs_tree_unlock(right); free_extent_buffer(right); return ret; @@ -3447,6 +3448,7 @@ if (check_sibling_keys(left, right)) { ret = -EUCLEAN; + btrfs_abort_transaction(trans, ret); goto out; } return __push_leaf_left(path, min_data_size, diff -u linux-6.2.0/fs/btrfs/disk-io.c linux-6.2.0/fs/btrfs/disk-io.c --- linux-6.2.0/fs/btrfs/disk-io.c +++ 
linux-6.2.0/fs/btrfs/disk-io.c @@ -113,7 +113,7 @@ crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE, first_page_part - BTRFS_CSUM_SIZE); - for (i = 1; i < num_pages; i++) { + for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) { kaddr = page_address(buf->pages[i]); crypto_shash_update(shash, kaddr, PAGE_SIZE); } diff -u linux-6.2.0/fs/ceph/caps.c linux-6.2.0/fs/ceph/caps.c --- linux-6.2.0/fs/ceph/caps.c +++ linux-6.2.0/fs/ceph/caps.c @@ -1626,6 +1626,7 @@ struct inode *inode = &ci->netfs.inode; struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; struct ceph_mds_session *session = NULL; + bool need_put = false; int mds; dout("ceph_flush_snaps %p\n", inode); @@ -1670,8 +1671,13 @@ ceph_put_mds_session(session); /* we flushed them all; remove this inode from the queue */ spin_lock(&mdsc->snap_flush_lock); + if (!list_empty(&ci->i_snap_flush_item)) + need_put = true; list_del_init(&ci->i_snap_flush_item); spin_unlock(&mdsc->snap_flush_lock); + + if (need_put) + iput(inode); } /* diff -u linux-6.2.0/fs/ceph/mds_client.c linux-6.2.0/fs/ceph/mds_client.c --- linux-6.2.0/fs/ceph/mds_client.c +++ linux-6.2.0/fs/ceph/mds_client.c @@ -3938,7 +3938,7 @@ struct dentry *dentry; struct ceph_cap *cap; char *path; - int pathlen = 0, err = 0; + int pathlen = 0, err; u64 pathbase; u64 snap_follows; @@ -3961,6 +3961,7 @@ cap = __get_cap_for_mds(ci, mds); if (!cap) { spin_unlock(&ci->i_ceph_lock); + err = 0; goto out_err; } dout(" adding %p ino %llx.%llx cap %p %lld %s\n", diff -u linux-6.2.0/fs/ceph/snap.c linux-6.2.0/fs/ceph/snap.c --- linux-6.2.0/fs/ceph/snap.c +++ linux-6.2.0/fs/ceph/snap.c @@ -693,8 +693,10 @@ capsnap->size); spin_lock(&mdsc->snap_flush_lock); - if (list_empty(&ci->i_snap_flush_item)) + if (list_empty(&ci->i_snap_flush_item)) { + ihold(inode); list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); + } spin_unlock(&mdsc->snap_flush_lock); return 1; /* caller may want to ceph_flush_snaps */ } diff -u linux-6.2.0/fs/exfat/dir.c linux-6.2.0/fs/exfat/dir.c --- linux-6.2.0/fs/exfat/dir.c +++ linux-6.2.0/fs/exfat/dir.c @@ -34,6 +34,7 @@ { int i; struct exfat_entry_set_cache es; + unsigned int uni_len = 0, len; if (exfat_get_dentry_set(&es, sb, p_dir, entry, ES_ALL_ENTRIES)) return; @@ -51,7 +52,10 @@ if (exfat_get_entry_type(ep) != TYPE_EXTEND) break; - exfat_extract_uni_name(ep, uniname); + len = exfat_extract_uni_name(ep, uniname); + uni_len += len; + if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH) + break; uniname += EXFAT_FILE_NAME_LEN; } @@ -1044,7 +1048,8 @@ if (entry_type == TYPE_EXTEND) { unsigned short entry_uniname[16], unichar; - if (step != DIRENT_STEP_NAME) { + if (step != DIRENT_STEP_NAME || + name_len >= MAX_NAME_LENGTH) { step = DIRENT_STEP_FILE; continue; } diff -u linux-6.2.0/fs/ext4/ext4.h linux-6.2.0/fs/ext4/ext4.h --- linux-6.2.0/fs/ext4/ext4.h +++ linux-6.2.0/fs/ext4/ext4.h @@ -1007,11 +1007,13 @@ * where the second inode has larger inode number * than the first * I_DATA_SEM_QUOTA - Used for quota inodes only + * I_DATA_SEM_EA - Used for ea_inodes only */ enum { I_DATA_SEM_NORMAL = 0, I_DATA_SEM_OTHER, I_DATA_SEM_QUOTA, + I_DATA_SEM_EA }; @@ -2992,7 +2994,8 @@ EXT4_IGET_NORMAL = 0, EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */ EXT4_IGET_HANDLE = 0x0002, /* Inode # is from a handle */ - EXT4_IGET_BAD = 0x0004 /* Allow to iget a bad inode */ + EXT4_IGET_BAD = 0x0004, /* Allow to iget a bad inode */ + EXT4_IGET_EA_INODE = 0x0008 /* Inode should contain an EA value */ } ext4_iget_flags; extern struct inode 
*__ext4_iget(struct super_block *sb, unsigned long ino, diff -u linux-6.2.0/fs/ext4/inode.c linux-6.2.0/fs/ext4/inode.c --- linux-6.2.0/fs/ext4/inode.c +++ linux-6.2.0/fs/ext4/inode.c @@ -4835,6 +4835,24 @@ inode_set_iversion_queried(inode, val); } +static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags) + +{ + if (flags & EXT4_IGET_EA_INODE) { + if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) + return "missing EA_INODE flag"; + if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) || + EXT4_I(inode)->i_file_acl) + return "ea_inode with extended attributes"; + } else { + if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) + return "unexpected EA_INODE flag"; + } + if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) + return "unexpected bad inode w/o EXT4_IGET_BAD"; + return NULL; +} + struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ext4_iget_flags flags, const char *function, unsigned int line) @@ -4844,6 +4862,7 @@ struct ext4_inode_info *ei; struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct inode *inode; + const char *err_str; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; loff_t size; @@ -4871,8 +4890,14 @@ inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); - if (!(inode->i_state & I_NEW)) + if (!(inode->i_state & I_NEW)) { + if ((err_str = check_igot_inode(inode, flags)) != NULL) { + ext4_error_inode(inode, function, line, 0, err_str); + iput(inode); + return ERR_PTR(-EFSCORRUPTED); + } return inode; + } ei = EXT4_I(inode); iloc.bh = NULL; @@ -5140,10 +5165,9 @@ if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb)) ext4_error_inode(inode, function, line, 0, "casefold flag without casefold feature"); - if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) { - ext4_error_inode(inode, function, line, 0, - "bad inode without EXT4_IGET_BAD flag"); - ret = -EUCLEAN; + if ((err_str = check_igot_inode(inode, flags)) != NULL) { + ext4_error_inode(inode, function, line, 0, err_str); + ret = -EFSCORRUPTED; goto bad_inode; } diff -u linux-6.2.0/fs/ext4/super.c linux-6.2.0/fs/ext4/super.c --- linux-6.2.0/fs/ext4/super.c +++ linux-6.2.0/fs/ext4/super.c @@ -6541,18 +6541,6 @@ } /* - * Reinitialize lazy itable initialization thread based on - * current settings - */ - if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE)) - ext4_unregister_li_request(sb); - else { - ext4_group_t first_not_zeroed; - first_not_zeroed = ext4_has_uninit_itable(sb); - ext4_register_li_request(sb, first_not_zeroed); - } - - /* * Handle creation of system zone data early because it can fail. * Releasing of existing data is done when we are sure remount will * succeed. 
@@ -6586,6 +6574,18 @@ if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks) ext4_release_system_zone(sb); + /* + * Reinitialize lazy itable initialization thread based on + * current settings + */ + if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE)) + ext4_unregister_li_request(sb); + else { + ext4_group_t first_not_zeroed; + first_not_zeroed = ext4_has_uninit_itable(sb); + ext4_register_li_request(sb, first_not_zeroed); + } + if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb)) ext4_stop_mmpd(sbi); diff -u linux-6.2.0/fs/ext4/xattr.c linux-6.2.0/fs/ext4/xattr.c --- linux-6.2.0/fs/ext4/xattr.c +++ linux-6.2.0/fs/ext4/xattr.c @@ -125,7 +125,11 @@ #ifdef CONFIG_LOCKDEP void ext4_xattr_inode_set_class(struct inode *ea_inode) { + struct ext4_inode_info *ei = EXT4_I(ea_inode); + lockdep_set_subclass(&ea_inode->i_rwsem, 1); + (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */ + lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA); } #endif @@ -399,7 +403,7 @@ return -EFSCORRUPTED; } - inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL); + inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_EA_INODE); if (IS_ERR(inode)) { err = PTR_ERR(inode); ext4_error(parent->i_sb, @@ -407,23 +411,6 @@ err); return err; } - - if (is_bad_inode(inode)) { - ext4_error(parent->i_sb, - "error while reading EA inode %lu is_bad_inode", - ea_ino); - err = -EIO; - goto error; - } - - if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { - ext4_error(parent->i_sb, - "EA inode %lu does not have EXT4_EA_INODE_FL flag", - ea_ino); - err = -EINVAL; - goto error; - } - ext4_xattr_inode_set_class(inode); /* @@ -444,9 +431,6 @@ *ea_inode = inode; return 0; -error: - iput(inode); - return err; } /* Remove entry from mbcache when EA inode is getting evicted */ @@ -1523,11 +1507,11 @@ while (ce) { ea_inode = ext4_iget(inode->i_sb, ce->e_value, - EXT4_IGET_NORMAL); - if (!IS_ERR(ea_inode) && - !is_bad_inode(ea_inode) && - (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) && - i_size_read(ea_inode) == value_len && + EXT4_IGET_EA_INODE); + if (IS_ERR(ea_inode)) + goto next_entry; + ext4_xattr_inode_set_class(ea_inode); + if (i_size_read(ea_inode) == value_len && !ext4_xattr_inode_read(ea_inode, ea_data, value_len) && !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data, value_len) && @@ -1537,9 +1521,8 @@ kvfree(ea_data); return ea_inode; } - - if (!IS_ERR(ea_inode)) - iput(ea_inode); + iput(ea_inode); + next_entry: ce = mb_cache_entry_find_next(ea_inode_cache, ce); } kvfree(ea_data); @@ -2040,8 +2023,9 @@ else { u32 ref; +#ifdef EXT4_XATTR_DEBUG WARN_ON_ONCE(dquot_initialize_needed(inode)); - +#endif /* The old block is released after updating the inode. */ error = dquot_alloc_block(inode, @@ -2104,8 +2088,9 @@ /* We need to allocate a new block */ ext4_fsblk_t goal, block; +#ifdef EXT4_XATTR_DEBUG WARN_ON_ONCE(dquot_initialize_needed(inode)); - +#endif goal = ext4_group_first_block_no(sb, EXT4_I(inode)->i_block_group); block = ext4_new_meta_blocks(handle, inode, goal, 0, diff -u linux-6.2.0/fs/gfs2/super.c linux-6.2.0/fs/gfs2/super.c --- linux-6.2.0/fs/gfs2/super.c +++ linux-6.2.0/fs/gfs2/super.c @@ -1393,6 +1393,14 @@ if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr) goto out; + /* + * In case of an incomplete mount, gfs2_evict_inode() may be called for + * system files without having an active journal to write to. In that + * case, skip the filesystem evict. 
+ */ + if (!sdp->sd_jdesc) + goto out; + gfs2_holder_mark_uninitialized(&gh); ret = evict_should_delete(inode, &gh); if (ret == SHOULD_DEFER_EVICTION) diff -u linux-6.2.0/fs/ksmbd/connection.c linux-6.2.0/fs/ksmbd/connection.c --- linux-6.2.0/fs/ksmbd/connection.c +++ linux-6.2.0/fs/ksmbd/connection.c @@ -296,6 +296,9 @@ return true; } +#define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr)) +#define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4) + /** * ksmbd_conn_handler_loop() - session thread to listen on new smb requests * @p: connection instance @@ -352,6 +355,9 @@ if (pdu_size > MAX_STREAM_PROT_LEN) break; + if (pdu_size < SMB1_MIN_SUPPORTED_HEADER_SIZE) + break; + /* 4 for rfc1002 length field */ /* 1 for implied bcc[0] */ size = pdu_size + 4 + 1; @@ -379,6 +385,12 @@ continue; } + if (((struct smb2_hdr *)smb2_get_msg(conn->request_buf))->ProtocolId == + SMB2_PROTO_NUMBER) { + if (pdu_size < SMB2_MIN_SUPPORTED_HEADER_SIZE) + break; + } + if (!default_conn_ops.process_fn) { pr_err("No connection request callback\n"); break; diff -u linux-6.2.0/fs/ksmbd/oplock.c linux-6.2.0/fs/ksmbd/oplock.c --- linux-6.2.0/fs/ksmbd/oplock.c +++ linux-6.2.0/fs/ksmbd/oplock.c @@ -157,13 +157,42 @@ rcu_read_lock(); opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info, op_entry); - if (opinfo && !atomic_inc_not_zero(&opinfo->refcount)) - opinfo = NULL; + if (opinfo) { + if (!atomic_inc_not_zero(&opinfo->refcount)) + opinfo = NULL; + else { + atomic_inc(&opinfo->conn->r_count); + if (ksmbd_conn_releasing(opinfo->conn)) { + atomic_dec(&opinfo->conn->r_count); + atomic_dec(&opinfo->refcount); + opinfo = NULL; + } + } + } + rcu_read_unlock(); return opinfo; } +static void opinfo_conn_put(struct oplock_info *opinfo) +{ + struct ksmbd_conn *conn; + + if (!opinfo) + return; + + conn = opinfo->conn; + /* + * Checking waitqueue to dropping pending requests on + * disconnection. waitqueue_active is safe because it + * uses atomic operation for condition. + */ + if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) + wake_up(&conn->r_count_q); + opinfo_put(opinfo); +} + void opinfo_put(struct oplock_info *opinfo) { if (!atomic_dec_and_test(&opinfo->refcount)) @@ -666,13 +695,6 @@ out: ksmbd_free_work_struct(work); - /* - * Checking waitqueue to dropping pending requests on - * disconnection. waitqueue_active is safe because it - * uses atomic operation for condition. - */ - if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) - wake_up(&conn->r_count_q); } /** @@ -706,7 +728,6 @@ work->conn = conn; work->sess = opinfo->sess; - atomic_inc(&conn->r_count); if (opinfo->op_state == OPLOCK_ACK_WAIT) { INIT_WORK(&work->work, __smb2_oplock_break_noti); ksmbd_queue_work(work); @@ -776,13 +797,6 @@ out: ksmbd_free_work_struct(work); - /* - * Checking waitqueue to dropping pending requests on - * disconnection. waitqueue_active is safe because it - * uses atomic operation for condition. 
- */ - if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) - wake_up(&conn->r_count_q); } /** @@ -822,7 +836,6 @@ work->conn = conn; work->sess = opinfo->sess; - atomic_inc(&conn->r_count); if (opinfo->op_state == OPLOCK_ACK_WAIT) { list_for_each_safe(tmp, t, &opinfo->interim_list) { struct ksmbd_work *in_work; @@ -1144,8 +1157,10 @@ } prev_opinfo = opinfo_get_list(ci); if (!prev_opinfo || - (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) + (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) { + opinfo_conn_put(prev_opinfo); goto set_lev; + } prev_op_has_lease = prev_opinfo->is_lease; if (prev_op_has_lease) prev_op_state = prev_opinfo->o_lease->state; @@ -1153,19 +1168,19 @@ if (share_ret < 0 && prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) { err = share_ret; - opinfo_put(prev_opinfo); + opinfo_conn_put(prev_opinfo); goto err_out; } if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH && prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) { - opinfo_put(prev_opinfo); + opinfo_conn_put(prev_opinfo); goto op_break_not_needed; } list_add(&work->interim_entry, &prev_opinfo->interim_list); err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II); - opinfo_put(prev_opinfo); + opinfo_conn_put(prev_opinfo); if (err == -ENOENT) goto set_lev; /* Check all oplock was freed by close */ @@ -1228,14 +1243,14 @@ return; if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH && brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) { - opinfo_put(brk_opinfo); + opinfo_conn_put(brk_opinfo); return; } brk_opinfo->open_trunc = is_trunc; list_add(&work->interim_entry, &brk_opinfo->interim_list); oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II); - opinfo_put(brk_opinfo); + opinfo_conn_put(brk_opinfo); } /** @@ -1263,6 +1278,13 @@ list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) { if (!atomic_inc_not_zero(&brk_op->refcount)) continue; + + atomic_inc(&brk_op->conn->r_count); + if (ksmbd_conn_releasing(brk_op->conn)) { + atomic_dec(&brk_op->conn->r_count); + continue; + } + rcu_read_unlock(); if (brk_op->is_lease && (brk_op->o_lease->state & (~(SMB2_LEASE_READ_CACHING_LE | @@ -1292,7 +1314,7 @@ brk_op->open_trunc = is_trunc; oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE); next: - opinfo_put(brk_op); + opinfo_conn_put(brk_op); rcu_read_lock(); } rcu_read_unlock(); @@ -1393,56 +1415,38 @@ */ struct lease_ctx_info *parse_lease_state(void *open_req) { - char *data_offset; struct create_context *cc; - unsigned int next = 0; - char *name; - bool found = false; struct smb2_create_req *req = (struct smb2_create_req *)open_req; - struct lease_ctx_info *lreq = kzalloc(sizeof(struct lease_ctx_info), - GFP_KERNEL); + struct lease_ctx_info *lreq; + + cc = smb2_find_context_vals(req, SMB2_CREATE_REQUEST_LEASE, 4); + if (IS_ERR_OR_NULL(cc)) + return NULL; + + lreq = kzalloc(sizeof(struct lease_ctx_info), GFP_KERNEL); if (!lreq) return NULL; - data_offset = (char *)req + le32_to_cpu(req->CreateContextsOffset); - cc = (struct create_context *)data_offset; - do { - cc = (struct create_context *)((char *)cc + next); - name = le16_to_cpu(cc->NameOffset) + (char *)cc; - if (le16_to_cpu(cc->NameLength) != 4 || - strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) { - next = le32_to_cpu(cc->Next); - continue; - } - found = true; - break; - } while (next != 0); - - if (found) { - if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) { - struct create_lease_v2 *lc = (struct create_lease_v2 *)cc; - - memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); - lreq->req_state = 
lc->lcontext.LeaseState; - lreq->flags = lc->lcontext.LeaseFlags; - lreq->duration = lc->lcontext.LeaseDuration; - memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey, - SMB2_LEASE_KEY_SIZE); - lreq->version = 2; - } else { - struct create_lease *lc = (struct create_lease *)cc; - - memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); - lreq->req_state = lc->lcontext.LeaseState; - lreq->flags = lc->lcontext.LeaseFlags; - lreq->duration = lc->lcontext.LeaseDuration; - lreq->version = 1; - } - return lreq; - } + if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) { + struct create_lease_v2 *lc = (struct create_lease_v2 *)cc; - kfree(lreq); - return NULL; + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); + lreq->req_state = lc->lcontext.LeaseState; + lreq->flags = lc->lcontext.LeaseFlags; + lreq->duration = lc->lcontext.LeaseDuration; + memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey, + SMB2_LEASE_KEY_SIZE); + lreq->version = 2; + } else { + struct create_lease *lc = (struct create_lease *)cc; + + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); + lreq->req_state = lc->lcontext.LeaseState; + lreq->flags = lc->lcontext.LeaseFlags; + lreq->duration = lc->lcontext.LeaseDuration; + lreq->version = 1; + } + return lreq; } /** diff -u linux-6.2.0/fs/ksmbd/smb2pdu.c linux-6.2.0/fs/ksmbd/smb2pdu.c --- linux-6.2.0/fs/ksmbd/smb2pdu.c +++ linux-6.2.0/fs/ksmbd/smb2pdu.c @@ -326,13 +326,9 @@ if (hdr->Command == SMB2_NEGOTIATE) aux_max = 1; else - aux_max = conn->vals->max_credits - credit_charge; + aux_max = conn->vals->max_credits - conn->total_credits; credits_granted = min_t(unsigned short, credits_requested, aux_max); - if (conn->vals->max_credits - conn->total_credits < credits_granted) - credits_granted = conn->vals->max_credits - - conn->total_credits; - conn->total_credits += credits_granted; work->credits_granted += credits_granted; @@ -865,13 +861,14 @@ static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn, struct smb2_preauth_neg_context *pneg_ctxt, - int len_of_ctxts) + int ctxt_len) { /* * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt, * which may not be present. Only check for used HashAlgorithms[1]. 
*/ - if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN) + if (ctxt_len < + sizeof(struct smb2_neg_context) + MIN_PREAUTH_CTXT_DATA_LEN) return STATUS_INVALID_PARAMETER; if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512) @@ -883,15 +880,23 @@ static void decode_encrypt_ctxt(struct ksmbd_conn *conn, struct smb2_encryption_neg_context *pneg_ctxt, - int len_of_ctxts) + int ctxt_len) { - int cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount); - int i, cphs_size = cph_cnt * sizeof(__le16); + int cph_cnt; + int i, cphs_size; + + if (sizeof(struct smb2_encryption_neg_context) > ctxt_len) { + pr_err("Invalid SMB2_ENCRYPTION_CAPABILITIES context size\n"); + return; + } conn->cipher_type = 0; + cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount); + cphs_size = cph_cnt * sizeof(__le16); + if (sizeof(struct smb2_encryption_neg_context) + cphs_size > - len_of_ctxts) { + ctxt_len) { pr_err("Invalid cipher count(%d)\n", cph_cnt); return; } @@ -939,15 +944,22 @@ static void decode_sign_cap_ctxt(struct ksmbd_conn *conn, struct smb2_signing_capabilities *pneg_ctxt, - int len_of_ctxts) + int ctxt_len) { - int sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount); - int i, sign_alos_size = sign_algo_cnt * sizeof(__le16); + int sign_algo_cnt; + int i, sign_alos_size; + + if (sizeof(struct smb2_signing_capabilities) > ctxt_len) { + pr_err("Invalid SMB2_SIGNING_CAPABILITIES context length\n"); + return; + } conn->signing_negotiated = false; + sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount); + sign_alos_size = sign_algo_cnt * sizeof(__le16); if (sizeof(struct smb2_signing_capabilities) + sign_alos_size > - len_of_ctxts) { + ctxt_len) { pr_err("Invalid signing algorithm count(%d)\n", sign_algo_cnt); return; } @@ -967,13 +979,13 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn, struct smb2_negotiate_req *req, - int len_of_smb) + unsigned int len_of_smb) { /* +4 is to account for the RFC1001 len field */ struct smb2_neg_context *pctx = (struct smb2_neg_context *)req; int i = 0, len_of_ctxts; - int offset = le32_to_cpu(req->NegotiateContextOffset); - int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount); + unsigned int offset = le32_to_cpu(req->NegotiateContextOffset); + unsigned int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount); __le32 status = STATUS_INVALID_PARAMETER; ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt); @@ -985,18 +997,16 @@ len_of_ctxts = len_of_smb - offset; while (i++ < neg_ctxt_cnt) { - int clen; - - /* check that offset is not beyond end of SMB */ - if (len_of_ctxts == 0) - break; + int clen, ctxt_len; - if (len_of_ctxts < sizeof(struct smb2_neg_context)) + if (len_of_ctxts < (int)sizeof(struct smb2_neg_context)) break; pctx = (struct smb2_neg_context *)((char *)pctx + offset); clen = le16_to_cpu(pctx->DataLength); - if (clen + sizeof(struct smb2_neg_context) > len_of_ctxts) + ctxt_len = clen + sizeof(struct smb2_neg_context); + + if (ctxt_len > len_of_ctxts) break; if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) { @@ -1007,7 +1017,7 @@ status = decode_preauth_ctxt(conn, (struct smb2_preauth_neg_context *)pctx, - len_of_ctxts); + ctxt_len); if (status != STATUS_SUCCESS) break; } else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) { @@ -1018,7 +1028,7 @@ decode_encrypt_ctxt(conn, (struct smb2_encryption_neg_context *)pctx, - len_of_ctxts); + ctxt_len); } else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) { ksmbd_debug(SMB, "deassemble SMB2_COMPRESSION_CAPABILITIES context\n"); @@ -1037,15 +1047,15 @@ 
} else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) { ksmbd_debug(SMB, "deassemble SMB2_SIGNING_CAPABILITIES context\n"); + decode_sign_cap_ctxt(conn, (struct smb2_signing_capabilities *)pctx, - len_of_ctxts); + ctxt_len); } /* offsets must be 8 byte aligned */ - clen = (clen + 7) & ~0x7; - offset = clen + sizeof(struct smb2_neg_context); - len_of_ctxts -= clen + sizeof(struct smb2_neg_context); + offset = (ctxt_len + 7) & ~0x7; + len_of_ctxts -= offset; } return status; } @@ -1073,16 +1083,16 @@ return rc; } - if (req->DialectCount == 0) { - pr_err("malformed packet\n"); + smb2_buf_len = get_rfc1002_len(work->request_buf); + smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects); + if (smb2_neg_size > smb2_buf_len) { rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; } - smb2_buf_len = get_rfc1002_len(work->request_buf); - smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects); - if (smb2_neg_size > smb2_buf_len) { + if (req->DialectCount == 0) { + pr_err("malformed packet\n"); rsp->hdr.Status = STATUS_INVALID_PARAMETER; rc = -EINVAL; goto err_out; @@ -4373,21 +4383,6 @@ return 0; } -static unsigned long long get_allocation_size(struct inode *inode, - struct kstat *stat) -{ - unsigned long long alloc_size = 0; - - if (!S_ISDIR(stat->mode)) { - if ((inode->i_blocks << 9) <= stat->size) - alloc_size = stat->size; - else - alloc_size = inode->i_blocks << 9; - } - - return alloc_size; -} - static void get_file_standard_info(struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org) { @@ -4402,7 +4397,7 @@ sinfo = (struct smb2_file_standard_info *)rsp->Buffer; delete_pending = ksmbd_inode_pending_delete(fp); - sinfo->AllocationSize = cpu_to_le64(get_allocation_size(inode, &stat)); + sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9); sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending); sinfo->DeletePending = delete_pending; @@ -4467,7 +4462,7 @@ file_info->Attributes = fp->f_ci->m_fattr; file_info->Pad1 = 0; file_info->AllocationSize = - cpu_to_le64(get_allocation_size(inode, &stat)); + cpu_to_le64(inode->i_blocks << 9); file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); file_info->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending); @@ -4656,7 +4651,7 @@ file_info->ChangeTime = cpu_to_le64(time); file_info->Attributes = fp->f_ci->m_fattr; file_info->AllocationSize = - cpu_to_le64(get_allocation_size(inode, &stat)); + cpu_to_le64(inode->i_blocks << 9); file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size); file_info->Reserved = cpu_to_le32(0); rsp->OutputBufferLength = diff -u linux-6.2.0/fs/nfsd/vfs.c linux-6.2.0/fs/nfsd/vfs.c --- linux-6.2.0/fs/nfsd/vfs.c +++ linux-6.2.0/fs/nfsd/vfs.c @@ -532,7 +532,15 @@ inode_lock(inode); for (retries = 1;;) { - host_err = __nfsd_setattr(dentry, iap); + struct iattr attrs; + + /* + * notify_change() can alter its iattr argument, making + * @iap unsuitable for submission multiple times. Make a + * copy for every loop iteration. + */ + attrs = *iap; + host_err = __nfsd_setattr(dentry, &attrs); if (host_err != -EAGAIN || !retries--) break; if (!nfsd_wait_for_delegreturn(rqstp, inode)) diff -u linux-6.2.0/fs/ntfs3/inode.c linux-6.2.0/fs/ntfs3/inode.c --- linux-6.2.0/fs/ntfs3/inode.c +++ linux-6.2.0/fs/ntfs3/inode.c @@ -100,6 +100,12 @@ /* Record should contain $I30 root. 
*/ is_dir = rec->flags & RECORD_FLAG_DIR; + /* MFT_REC_MFT is not a dir */ + if (is_dir && ino == MFT_REC_MFT) { + err = -EINVAL; + goto out; + } + inode->i_generation = le16_to_cpu(rec->seq); /* Enumerate all struct Attributes MFT. */ diff -u linux-6.2.0/include/acpi/acpi_bus.h linux-6.2.0/include/acpi/acpi_bus.h --- linux-6.2.0/include/acpi/acpi_bus.h +++ linux-6.2.0/include/acpi/acpi_bus.h @@ -642,6 +642,8 @@ #ifdef CONFIG_X86 bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status); bool acpi_quirk_skip_acpi_ac_and_battery(void); +int acpi_install_cmos_rtc_space_handler(acpi_handle handle); +void acpi_remove_cmos_rtc_space_handler(acpi_handle handle); #else static inline bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *status) @@ -652,6 +654,13 @@ { return false; } +static inline int acpi_install_cmos_rtc_space_handler(acpi_handle handle) +{ + return 1; +} +static inline void acpi_remove_cmos_rtc_space_handler(acpi_handle handle) +{ +} #endif #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS) diff -u linux-6.2.0/include/linux/cpu.h linux-6.2.0/include/linux/cpu.h --- linux-6.2.0/include/linux/cpu.h +++ linux-6.2.0/include/linux/cpu.h @@ -70,6 +70,8 @@ char *buf); extern ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff -u linux-6.2.0/include/linux/netdevice.h linux-6.2.0/include/linux/netdevice.h --- linux-6.2.0/include/linux/netdevice.h +++ linux-6.2.0/include/linux/netdevice.h @@ -610,7 +610,7 @@ netdevice_tracker dev_tracker; struct Qdisc __rcu *qdisc; - struct Qdisc *qdisc_sleeping; + struct Qdisc __rcu *qdisc_sleeping; #ifdef CONFIG_SYSFS struct kobject kobj; #endif @@ -758,8 +758,11 @@ /* We only give a hint, preemption can change CPU under us */ val |= raw_smp_processor_id(); - if (table->ents[index] != val) - table->ents[index] = val; + /* The following WRITE_ONCE() is paired with the READ_ONCE() + * here, and another one in get_rps_cpu(). + */ + if (READ_ONCE(table->ents[index]) != val) + WRITE_ONCE(table->ents[index], val); } } diff -u linux-6.2.0/include/net/bluetooth/hci.h linux-6.2.0/include/net/bluetooth/hci.h --- linux-6.2.0/include/net/bluetooth/hci.h +++ linux-6.2.0/include/net/bluetooth/hci.h @@ -350,6 +350,7 @@ enum { HCI_SETUP, HCI_CONFIG, + HCI_DEBUGFS_CREATED, HCI_AUTO_OFF, HCI_RFKILLED, HCI_MGMT, diff -u linux-6.2.0/include/net/bluetooth/hci_core.h linux-6.2.0/include/net/bluetooth/hci_core.h --- linux-6.2.0/include/net/bluetooth/hci_core.h +++ linux-6.2.0/include/net/bluetooth/hci_core.h @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 
+ Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky @@ -513,6 +514,7 @@ struct work_struct cmd_sync_work; struct list_head cmd_sync_work_list; struct mutex cmd_sync_work_lock; + struct mutex unregister_lock; struct work_struct cmd_sync_cancel_work; struct work_struct reenable_adv_work; @@ -1091,7 +1093,7 @@ if (bacmp(&c->dst, ba) || c->type != ISO_LINK) continue; - if (c->iso_qos.big == big && c->iso_qos.bis == bis) { + if (c->iso_qos.bcast.big == big && c->iso_qos.bcast.bis == bis) { rcu_read_unlock(); return c; } @@ -1200,7 +1202,7 @@ if (c->type != ISO_LINK) continue; - if (handle == c->iso_qos.cig) { + if (handle == c->iso_qos.ucast.cig) { rcu_read_unlock(); return c; } @@ -1223,7 +1225,7 @@ if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK) continue; - if (handle == c->iso_qos.big) { + if (handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } @@ -1332,7 +1334,7 @@ __u8 dst_type, struct bt_iso_qos *qos, __u8 data_len, __u8 *data); int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, - __u8 sid); + __u8 sid, struct bt_iso_qos *qos); int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, __u16 sync_handle, __u8 num_bis, __u8 bis[]); int hci_conn_check_link_mode(struct hci_conn *conn); diff -u linux-6.2.0/include/net/sock.h linux-6.2.0/include/net/sock.h --- linux-6.2.0/include/net/sock.h +++ linux-6.2.0/include/net/sock.h @@ -336,6 +336,7 @@ * @sk_cgrp_data: cgroup data for this cgroup * @sk_memcg: this socket's memory cgroup association * @sk_write_pending: a write to stream socket waits to start + * @sk_wait_pending: number of threads blocked on this socket * @sk_state_change: callback to indicate change in the state of the sock * @sk_data_ready: callback to indicate there is data to be processed * @sk_write_space: callback to indicate there is bf sending space available @@ -428,6 +429,7 @@ unsigned int sk_napi_id; #endif int sk_rcvbuf; + int sk_wait_pending; struct sk_filter __rcu *sk_filter; union { @@ -1150,8 +1152,12 @@ * OR an additional socket flag * [1] : sk_state and sk_prot are in the same cache line. */ - if (sk->sk_state == TCP_ESTABLISHED) - sock_rps_record_flow_hash(sk->sk_rxhash); + if (sk->sk_state == TCP_ESTABLISHED) { + /* This READ_ONCE() is paired with the WRITE_ONCE() + * from sock_rps_save_rxhash() and sock_rps_reset_rxhash(). + */ + sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash)); + } } #endif } @@ -1160,20 +1166,25 @@ const struct sk_buff *skb) { #ifdef CONFIG_RPS - if (unlikely(sk->sk_rxhash != skb->hash)) - sk->sk_rxhash = skb->hash; + /* The following WRITE_ONCE() is paired with the READ_ONCE() + * here, and another one in sock_rps_record_flow(). 
+ */ + if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash)) + WRITE_ONCE(sk->sk_rxhash, skb->hash); #endif } static inline void sock_rps_reset_rxhash(struct sock *sk) { #ifdef CONFIG_RPS - sk->sk_rxhash = 0; + /* Paired with READ_ONCE() in sock_rps_record_flow() */ + WRITE_ONCE(sk->sk_rxhash, 0); #endif } #define sk_wait_event(__sk, __timeo, __condition, __wait) \ ({ int __rc; \ + __sk->sk_wait_pending++; \ release_sock(__sk); \ __rc = __condition; \ if (!__rc) { \ @@ -1183,6 +1194,7 @@ } \ sched_annotate_sleep(); \ lock_sock(__sk); \ + __sk->sk_wait_pending--; \ __rc = __condition; \ __rc; \ }) diff -u linux-6.2.0/include/net/tcp.h linux-6.2.0/include/net/tcp.h --- linux-6.2.0/include/net/tcp.h +++ linux-6.2.0/include/net/tcp.h @@ -632,6 +632,7 @@ void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); +void tcp_sack_compress_send_ack(struct sock *sk); /* tcp_timer.c */ void tcp_init_xmit_timers(struct sock *); diff -u linux-6.2.0/kernel/fork.c linux-6.2.0/kernel/fork.c --- linux-6.2.0/kernel/fork.c +++ linux-6.2.0/kernel/fork.c @@ -564,6 +564,7 @@ arch_release_task_struct(tsk); if (tsk->flags & PF_KTHREAD) free_kthread_struct(tsk); + bpf_task_storage_free(tsk); free_task_struct(tsk); } EXPORT_SYMBOL(free_task); @@ -858,7 +859,6 @@ cgroup_free(tsk); task_numa_free(tsk, true); security_task_free(tsk); - bpf_task_storage_free(tsk); exit_creds(tsk); delayacct_tsk_free(tsk); put_signal_struct(tsk->signal); diff -u linux-6.2.0/kernel/module/decompress.c linux-6.2.0/kernel/module/decompress.c --- linux-6.2.0/kernel/module/decompress.c +++ linux-6.2.0/kernel/module/decompress.c @@ -257,7 +257,7 @@ do { struct page *page = module_get_next_page(info); - if (!IS_ERR(page)) { + if (IS_ERR(page)) { retval = PTR_ERR(page); goto out; } diff -u linux-6.2.0/kernel/trace/trace_events_hist.c linux-6.2.0/kernel/trace/trace_events_hist.c --- linux-6.2.0/kernel/trace/trace_events_hist.c +++ linux-6.2.0/kernel/trace/trace_events_hist.c @@ -4202,13 +4202,19 @@ goto out; } - /* Some types cannot be a value */ - if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT | - HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 | - HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET | - HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) { - hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str)); - ret = -EINVAL; + /* values and variables should not have some modifiers */ + if (hist_field->flags & HIST_FIELD_FL_VAR) { + /* Variable */ + if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT | + HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2)) + goto err; + } else { + /* Value */ + if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT | + HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 | + HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET | + HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) + goto err; } hist_data->fields[val_idx] = hist_field; @@ -4220,6 +4226,9 @@ ret = -EINVAL; out: return ret; + err: + hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str)); + return -EINVAL; } static int create_val_field(struct hist_trigger_data *hist_data, diff -u linux-6.2.0/kernel/trace/trace_osnoise.c linux-6.2.0/kernel/trace/trace_osnoise.c --- linux-6.2.0/kernel/trace/trace_osnoise.c +++ linux-6.2.0/kernel/trace/trace_osnoise.c @@ -1652,6 +1652,8 @@ osnoise_stop_tracing(); notify_new_max_latency(diff); + wake_up_process(tlat->kthread); + return HRTIMER_NORESTART; } } diff -u linux-6.2.0/lib/cpu_rmap.c 
linux-6.2.0/lib/cpu_rmap.c --- linux-6.2.0/lib/cpu_rmap.c +++ linux-6.2.0/lib/cpu_rmap.c @@ -268,8 +268,8 @@ struct irq_glue *glue = container_of(ref, struct irq_glue, notify.kref); - cpu_rmap_put(glue->rmap); glue->rmap->obj[glue->index] = NULL; + cpu_rmap_put(glue->rmap); kfree(glue); } diff -u linux-6.2.0/net/bluetooth/hci_conn.c linux-6.2.0/net/bluetooth/hci_conn.c --- linux-6.2.0/net/bluetooth/hci_conn.c +++ linux-6.2.0/net/bluetooth/hci_conn.c @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. + Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky @@ -795,8 +796,8 @@ if (bacmp(&conn->dst, BDADDR_ANY)) return; - if (d->big != conn->iso_qos.big || d->bis == BT_ISO_QOS_BIS_UNSET || - d->bis != conn->iso_qos.bis) + if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET || + d->bis != conn->iso_qos.bcast.bis) return; d->count++; @@ -916,10 +917,10 @@ if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags)) return; - hci_le_terminate_big(hdev, conn->iso_qos.big, - conn->iso_qos.bis); + hci_le_terminate_big(hdev, conn->iso_qos.bcast.big, + conn->iso_qos.bcast.bis); } else { - hci_le_big_terminate(hdev, conn->iso_qos.big, + hci_le_big_terminate(hdev, conn->iso_qos.bcast.big, conn->sync_handle); } } @@ -942,8 +943,8 @@ { struct iso_list_data *d = data; - /* Ignore broadcast */ - if (!bacmp(&conn->dst, BDADDR_ANY)) + /* Ignore broadcast or if CIG don't match */ + if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig) return; d->count++; @@ -958,17 +959,22 @@ struct hci_dev *hdev = conn->hdev; struct iso_list_data d; + if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET) + return; + memset(&d, 0, sizeof(d)); - d.cig = conn->iso_qos.cig; + d.cig = conn->iso_qos.ucast.cig; /* Check if ISO connection is a CIS and remove CIG if there are * no other connections using it. */ + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d); + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d); hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d); if (d.count) return; - hci_le_remove_cig(hdev, conn->iso_qos.cig); + hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig); } struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, @@ -1409,7 +1415,7 @@ struct iso_list_data data; /* Allocate a BIG if not set */ - if (qos->big == BT_ISO_QOS_BIG_UNSET) { + if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) { for (data.big = 0x00; data.big < 0xef; data.big++) { data.count = 0; data.bis = 0xff; @@ -1424,7 +1430,7 @@ return -EADDRNOTAVAIL; /* Update BIG */ - qos->big = data.big; + qos->bcast.big = data.big; } return 0; @@ -1435,7 +1441,7 @@ struct iso_list_data data; /* Allocate BIS if not set */ - if (qos->bis == BT_ISO_QOS_BIS_UNSET) { + if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) { /* Find an unused adv set to advertise BIS, skip instance 0x00 * since it is reserved as general purpose set. 
*/ @@ -1453,7 +1459,7 @@ return -EADDRNOTAVAIL; /* Update BIS */ - qos->bis = data.bis; + qos->bcast.bis = data.bis; } return 0; @@ -1482,8 +1488,8 @@ if (err) return ERR_PTR(err); - data.big = qos->big; - data.bis = qos->bis; + data.big = qos->bcast.big; + data.bis = qos->bcast.bis; data.count = 0; /* Check if there is already a matching BIG/BIS */ @@ -1491,7 +1497,7 @@ if (data.count) return ERR_PTR(-EADDRINUSE); - conn = hci_conn_hash_lookup_bis(hdev, dst, qos->big, qos->bis); + conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big, qos->bcast.bis); if (conn) return ERR_PTR(-EADDRINUSE); @@ -1646,13 +1652,13 @@ { struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis]; - cis->cis_id = qos->cis; - cis->c_sdu = cpu_to_le16(qos->out.sdu); - cis->p_sdu = cpu_to_le16(qos->in.sdu); - cis->c_phy = qos->out.phy ? qos->out.phy : qos->in.phy; - cis->p_phy = qos->in.phy ? qos->in.phy : qos->out.phy; - cis->c_rtn = qos->out.rtn; - cis->p_rtn = qos->in.rtn; + cis->cis_id = qos->ucast.cis; + cis->c_sdu = cpu_to_le16(qos->ucast.out.sdu); + cis->p_sdu = cpu_to_le16(qos->ucast.in.sdu); + cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy; + cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy : qos->ucast.out.phy; + cis->c_rtn = qos->ucast.out.rtn; + cis->p_rtn = qos->ucast.in.rtn; d->pdu.cp.num_cis++; } @@ -1665,8 +1671,8 @@ if (!bacmp(&conn->dst, BDADDR_ANY)) return; - if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET || - d->cis != conn->iso_qos.cis) + if (d->cig != conn->iso_qos.ucast.cig || d->cis == BT_ISO_QOS_CIS_UNSET || + d->cis != conn->iso_qos.ucast.cis) return; d->count++; @@ -1685,17 +1691,18 @@ memset(&cp, 0, sizeof(cp)); - cp.handle = qos->big; - cp.adv_handle = qos->bis; + cp.handle = qos->bcast.big; + cp.adv_handle = qos->bcast.bis; cp.num_bis = 0x01; - hci_cpu_to_le24(qos->out.interval, cp.bis.sdu_interval); - cp.bis.sdu = cpu_to_le16(qos->out.sdu); - cp.bis.latency = cpu_to_le16(qos->out.latency); - cp.bis.rtn = qos->out.rtn; - cp.bis.phy = qos->out.phy; - cp.bis.packing = qos->packing; - cp.bis.framing = qos->framing; - cp.bis.encryption = 0x00; + hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval); + cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu); + cp.bis.latency = cpu_to_le16(qos->bcast.out.latency); + cp.bis.rtn = qos->bcast.out.rtn; + cp.bis.phy = qos->bcast.out.phy; + cp.bis.packing = qos->bcast.packing; + cp.bis.framing = qos->bcast.framing; + cp.bis.encryption = qos->bcast.encryption; + memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode)); memset(&cp.bis.bcode, 0, sizeof(cp.bis.bcode)); return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp); @@ -1708,43 +1715,42 @@ memset(&data, 0, sizeof(data)); - /* Allocate a CIG if not set */ - if (qos->cig == BT_ISO_QOS_CIG_UNSET) { - for (data.cig = 0x00; data.cig < 0xff; data.cig++) { + /* Allocate first still reconfigurable CIG if not set */ + if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) { + for (data.cig = 0x00; data.cig < 0xf0; data.cig++) { data.count = 0; - data.cis = 0xff; - hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, - BT_BOUND, &data); + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, + BT_CONNECT, &data); if (data.count) continue; - hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &data); if (!data.count) break; } - if (data.cig == 0xff) + if (data.cig == 0xf0) return false; /* Update CIG */ - qos->cig = data.cig; + qos->ucast.cig = data.cig; } - data.pdu.cp.cig_id = qos->cig; - 
hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval); - hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval); - data.pdu.cp.sca = qos->sca; - data.pdu.cp.packing = qos->packing; - data.pdu.cp.framing = qos->framing; - data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency); - data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency); + data.pdu.cp.cig_id = qos->ucast.cig; + hci_cpu_to_le24(qos->ucast.out.interval, data.pdu.cp.c_interval); + hci_cpu_to_le24(qos->ucast.in.interval, data.pdu.cp.p_interval); + data.pdu.cp.sca = qos->ucast.sca; + data.pdu.cp.packing = qos->ucast.packing; + data.pdu.cp.framing = qos->ucast.framing; + data.pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency); + data.pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency); - if (qos->cis != BT_ISO_QOS_CIS_UNSET) { + if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) { data.count = 0; - data.cig = qos->cig; - data.cis = qos->cis; + data.cig = qos->ucast.cig; + data.cis = qos->ucast.cis; hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND, &data); @@ -1755,7 +1761,7 @@ } /* Reprogram all CIS(s) with the same CIG */ - for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11; + for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0x11; data.cis++) { data.count = 0; @@ -1765,14 +1771,14 @@ continue; /* Allocate a CIS if not set */ - if (qos->cis == BT_ISO_QOS_CIS_UNSET) { + if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) { /* Update CIS */ - qos->cis = data.cis; + qos->ucast.cis = data.cis; cis_add(&data, qos); } } - if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis) + if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis) return false; if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS, @@ -1807,32 +1813,32 @@ return cis; /* Update LINK PHYs according to QoS preference */ - cis->le_tx_phy = qos->out.phy; - cis->le_rx_phy = qos->in.phy; + cis->le_tx_phy = qos->ucast.out.phy; + cis->le_rx_phy = qos->ucast.in.phy; /* If output interval is not set use the input interval as it cannot be * 0x000000. */ - if (!qos->out.interval) - qos->out.interval = qos->in.interval; + if (!qos->ucast.out.interval) + qos->ucast.out.interval = qos->ucast.in.interval; /* If input interval is not set use the output interval as it cannot be * 0x000000. */ - if (!qos->in.interval) - qos->in.interval = qos->out.interval; + if (!qos->ucast.in.interval) + qos->ucast.in.interval = qos->ucast.out.interval; /* If output latency is not set use the input latency as it cannot be * 0x0000. */ - if (!qos->out.latency) - qos->out.latency = qos->in.latency; + if (!qos->ucast.out.latency) + qos->ucast.out.latency = qos->ucast.in.latency; /* If input latency is not set use the output latency as it cannot be * 0x0000. 
*/ - if (!qos->in.latency) - qos->in.latency = qos->out.latency; + if (!qos->ucast.in.latency) + qos->ucast.in.latency = qos->ucast.out.latency; if (!hci_le_set_cig_params(cis, qos)) { hci_conn_drop(cis); @@ -1852,7 +1858,7 @@ memset(&cmd, 0, sizeof(cmd)); - if (conn->iso_qos.out.sdu) { + if (conn->iso_qos.ucast.out.sdu) { cmd.handle = cpu_to_le16(conn->handle); cmd.direction = 0x00; /* Input (Host to Controller) */ cmd.path = 0x00; /* HCI path if enabled */ @@ -1863,7 +1869,7 @@ return false; } - if (conn->iso_qos.in.sdu) { + if (conn->iso_qos.ucast.in.sdu) { cmd.handle = cpu_to_le16(conn->handle); cmd.direction = 0x01; /* Output (Controller to Host) */ cmd.path = 0x00; /* HCI path if enabled */ @@ -1890,7 +1896,7 @@ cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle); cmd.cis[0].cis_handle = cpu_to_le16(conn->handle); cmd.cp.num_cis++; - cig = conn->iso_qos.cig; + cig = conn->iso_qos.ucast.cig; hci_dev_lock(hdev); @@ -1900,7 +1906,7 @@ struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis]; if (conn == data || conn->type != ISO_LINK || - conn->state == BT_CONNECTED || conn->iso_qos.cig != cig) + conn->state == BT_CONNECTED || conn->iso_qos.ucast.cig != cig) continue; /* Check if all CIS(s) belonging to a CIG are ready */ @@ -2000,8 +2006,8 @@ struct bt_iso_qos *qos) { /* Update LINK PHYs according to QoS preference */ - conn->le_tx_phy = qos->out.phy; - conn->le_tx_phy = qos->out.phy; + conn->le_tx_phy = qos->bcast.out.phy; + conn->le_tx_phy = qos->bcast.out.phy; conn->iso_qos = *qos; conn->state = BT_BOUND; } @@ -2014,16 +2020,16 @@ u32 flags = 0; int err; - if (qos->out.phy == 0x02) + if (qos->bcast.out.phy == 0x02) flags |= MGMT_ADV_FLAG_SEC_2M; /* Align intervals */ - interval = qos->out.interval / 1250; + interval = qos->bcast.out.interval / 1250; - if (qos->bis) - sync_interval = qos->sync_interval * 1600; + if (qos->bcast.bis) + sync_interval = qos->bcast.sync_interval * 1600; - err = hci_start_per_adv_sync(hdev, qos->bis, conn->le_per_adv_data_len, + err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len, conn->le_per_adv_data, flags, interval, interval, sync_interval); if (err) @@ -2060,7 +2066,7 @@ } int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, - __u8 sid) + __u8 sid, struct bt_iso_qos *qos) { struct hci_cp_le_pa_create_sync *cp; @@ -2073,9 +2079,13 @@ return -ENOMEM; } + cp->options = qos->bcast.options; cp->sid = sid; cp->addr_type = dst_type; bacpy(&cp->addr, dst); + cp->skip = cpu_to_le16(qos->bcast.skip); + cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); + cp->sync_cte_type = qos->bcast.sync_cte_type; /* Queue start pa_create_sync and scan */ return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete); @@ -2098,8 +2108,12 @@ return err; memset(&pdu, 0, sizeof(pdu)); - pdu.cp.handle = qos->big; + pdu.cp.handle = qos->bcast.big; pdu.cp.sync_handle = cpu_to_le16(sync_handle); + pdu.cp.encryption = qos->bcast.encryption; + memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode)); + pdu.cp.mse = qos->bcast.mse; + pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout); pdu.cp.num_bis = num_bis; memcpy(pdu.bis, bis, num_bis); @@ -2149,7 +2163,7 @@ return ERR_PTR(err); } - hci_iso_qos_setup(hdev, conn, &qos->out, + hci_iso_qos_setup(hdev, conn, &qos->bcast.out, conn->le_tx_phy ? conn->le_tx_phy : hdev->le_tx_def_phys); @@ -2175,9 +2189,9 @@ if (IS_ERR(le)) return le; - hci_iso_qos_setup(hdev, le, &qos->out, + hci_iso_qos_setup(hdev, le, &qos->ucast.out, le->le_tx_phy ? 
le->le_tx_phy : hdev->le_tx_def_phys); - hci_iso_qos_setup(hdev, le, &qos->in, + hci_iso_qos_setup(hdev, le, &qos->ucast.in, le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys); cis = hci_bind_cis(hdev, dst, dst_type, qos); diff -u linux-6.2.0/net/bluetooth/hci_core.c linux-6.2.0/net/bluetooth/hci_core.c --- linux-6.2.0/net/bluetooth/hci_core.c +++ linux-6.2.0/net/bluetooth/hci_core.c @@ -1416,10 +1416,10 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { - struct smp_ltk *k; + struct smp_ltk *k, *tmp; int removed = 0; - list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) continue; @@ -1435,9 +1435,9 @@ void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { - struct smp_irk *k; + struct smp_irk *k, *tmp; - list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) continue; @@ -2685,7 +2685,9 @@ { BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + mutex_lock(&hdev->unregister_lock); hci_dev_set_flag(hdev, HCI_UNREGISTER); + mutex_unlock(&hdev->unregister_lock); write_lock(&hci_dev_list_lock); list_del(&hdev->list); diff -u linux-6.2.0/net/bluetooth/hci_event.c linux-6.2.0/net/bluetooth/hci_event.c --- linux-6.2.0/net/bluetooth/hci_event.c +++ linux-6.2.0/net/bluetooth/hci_event.c @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. + Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky @@ -3833,7 +3834,7 @@ rcu_read_lock(); list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { - if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id || + if (conn->type != ISO_LINK || conn->iso_qos.ucast.cig != rp->cig_id || conn->state == BT_CONNECTED) continue; @@ -3890,7 +3891,7 @@ /* Input (Host to Controller) */ case 0x00: /* Only confirm connection if output only */ - if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu) + if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu) hci_connect_cfm(conn, rp->status); break; /* Output (Controller to Host) */ @@ -6818,15 +6819,15 @@ memset(&interval, 0, sizeof(interval)); memcpy(&interval, ev->c_latency, sizeof(ev->c_latency)); - conn->iso_qos.in.interval = le32_to_cpu(interval); + conn->iso_qos.ucast.in.interval = le32_to_cpu(interval); memcpy(&interval, ev->p_latency, sizeof(ev->p_latency)); - conn->iso_qos.out.interval = le32_to_cpu(interval); - conn->iso_qos.in.latency = le16_to_cpu(ev->interval); - conn->iso_qos.out.latency = le16_to_cpu(ev->interval); - conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu); - conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu); - conn->iso_qos.in.phy = ev->c_phy; - conn->iso_qos.out.phy = ev->p_phy; + conn->iso_qos.ucast.out.interval = le32_to_cpu(interval); + conn->iso_qos.ucast.in.latency = le16_to_cpu(ev->interval); + conn->iso_qos.ucast.out.latency = le16_to_cpu(ev->interval); + conn->iso_qos.ucast.in.sdu = le16_to_cpu(ev->c_mtu); + conn->iso_qos.ucast.out.sdu = le16_to_cpu(ev->p_mtu); + conn->iso_qos.ucast.in.phy = ev->c_phy; + conn->iso_qos.ucast.out.phy = ev->p_phy; } if (!ev->status) { @@ -6900,8 +6901,8 @@ cis->handle = cis_handle; } - cis->iso_qos.cig = ev->cig_id; - cis->iso_qos.cis = ev->cis_id; + cis->iso_qos.ucast.cig = ev->cig_id; + cis->iso_qos.ucast.cis = ev->cis_id; if (!(flags 
& HCI_PROTO_DEFER)) { hci_le_accept_cis(hdev, ev->cis_handle); @@ -6988,13 +6989,13 @@ bis->handle = handle; } - bis->iso_qos.big = ev->handle; + bis->iso_qos.bcast.big = ev->handle; memset(&interval, 0, sizeof(interval)); memcpy(&interval, ev->latency, sizeof(ev->latency)); - bis->iso_qos.in.interval = le32_to_cpu(interval); + bis->iso_qos.bcast.in.interval = le32_to_cpu(interval); /* Convert ISO Interval (1.25 ms slots) to latency (ms) */ - bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100; - bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu); + bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100; + bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu); hci_iso_setup_path(bis); } diff -u linux-6.2.0/net/bluetooth/hci_sync.c linux-6.2.0/net/bluetooth/hci_sync.c --- linux-6.2.0/net/bluetooth/hci_sync.c +++ linux-6.2.0/net/bluetooth/hci_sync.c @@ -629,6 +629,7 @@ INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); INIT_LIST_HEAD(&hdev->cmd_sync_work_list); mutex_init(&hdev->cmd_sync_work_lock); + mutex_init(&hdev->unregister_lock); INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); INIT_WORK(&hdev->reenable_adv_work, reenable_adv); @@ -688,14 +689,19 @@ void *data, hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry; + int err = 0; - if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) - return -ENODEV; + mutex_lock(&hdev->unregister_lock); + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + err = -ENODEV; + goto unlock; + } entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - + if (!entry) { + err = -ENOMEM; + goto unlock; + } entry->func = func; entry->data = data; entry->destroy = destroy; @@ -706,7 +712,9 @@ queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); - return 0; +unlock: + mutex_unlock(&hdev->unregister_lock); + return err; } EXPORT_SYMBOL(hci_cmd_sync_queue); @@ -4502,6 +4510,9 @@ !hci_dev_test_flag(hdev, HCI_CONFIG)) return 0; + if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) + return 0; + hci_debugfs_create_common(hdev); if (lmp_bredr_capable(hdev)) diff -u linux-6.2.0/net/bluetooth/iso.c linux-6.2.0/net/bluetooth/iso.c --- linux-6.2.0/net/bluetooth/iso.c +++ linux-6.2.0/net/bluetooth/iso.c @@ -3,6 +3,7 @@ * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2022 Intel Corporation + * Copyright 2023 NXP */ #include @@ -59,11 +60,17 @@ __u16 sync_handle; __u32 flags; struct bt_iso_qos qos; + bool qos_user_set; __u8 base_len; __u8 base[BASE_MAX_LENGTH]; struct iso_conn *conn; }; +static struct bt_iso_qos default_qos; + +static bool check_ucast_qos(struct bt_iso_qos *qos); +static bool check_bcast_qos(struct bt_iso_qos *qos); + /* ---- ISO timers ---- */ #define ISO_CONN_TIMEOUT (HZ * 40) #define ISO_DISCONN_TIMEOUT (HZ * 2) @@ -264,8 +271,15 @@ goto unlock; } + /* Fail if user set invalid QoS */ + if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { + iso_pi(sk)->qos = default_qos; + err = -EINVAL; + goto unlock; + } + /* Fail if out PHYs are marked as disabled */ - if (!iso_pi(sk)->qos.out.phy) { + if (!iso_pi(sk)->qos.bcast.out.phy) { err = -EINVAL; goto unlock; } @@ -336,8 +350,15 @@ goto unlock; } + /* Fail if user set invalid QoS */ + if (iso_pi(sk)->qos_user_set && !check_ucast_qos(&iso_pi(sk)->qos)) { + iso_pi(sk)->qos = default_qos; + err = -EINVAL; + goto unlock; + } + /* Fail if either PHYs are marked as disabled */ - if (!iso_pi(sk)->qos.in.phy && !iso_pi(sk)->qos.out.phy) { + if (!iso_pi(sk)->qos.ucast.in.phy && !iso_pi(sk)->qos.ucast.out.phy) 
{ err = -EINVAL; goto unlock; } @@ -417,7 +438,7 @@ BT_DBG("sk %p len %d", sk, skb->len); - if (skb->len > qos->out.sdu) + if (skb->len > qos->ucast.out.sdu) return -EMSGSIZE; len = skb->len; @@ -680,13 +701,23 @@ } static struct bt_iso_qos default_qos = { - .cig = BT_ISO_QOS_CIG_UNSET, - .cis = BT_ISO_QOS_CIS_UNSET, - .sca = 0x00, - .packing = 0x00, - .framing = 0x00, - .in = DEFAULT_IO_QOS, - .out = DEFAULT_IO_QOS, + .bcast = { + .big = BT_ISO_QOS_BIG_UNSET, + .bis = BT_ISO_QOS_BIS_UNSET, + .sync_interval = 0x00, + .packing = 0x00, + .framing = 0x00, + .in = DEFAULT_IO_QOS, + .out = DEFAULT_IO_QOS, + .encryption = 0x00, + .bcode = {0x00}, + .options = 0x00, + .skip = 0x0000, + .sync_timeout = 0x4000, + .sync_cte_type = 0x00, + .mse = 0x00, + .timeout = 0x4000, + }, }; static struct sock *iso_sock_alloc(struct net *net, struct socket *sock, @@ -893,9 +924,15 @@ if (!hdev) return -EHOSTUNREACH; + /* Fail if user set invalid QoS */ + if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { + iso_pi(sk)->qos = default_qos; + return -EINVAL; + } + err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), - iso_pi(sk)->bc_sid); + iso_pi(sk)->bc_sid, &iso_pi(sk)->qos); hci_dev_put(hdev); @@ -1154,21 +1191,62 @@ return true; } -static bool check_qos(struct bt_iso_qos *qos) +static bool check_ucast_qos(struct bt_iso_qos *qos) +{ + if (qos->ucast.sca > 0x07) + return false; + + if (qos->ucast.packing > 0x01) + return false; + + if (qos->ucast.framing > 0x01) + return false; + + if (!check_io_qos(&qos->ucast.in)) + return false; + + if (!check_io_qos(&qos->ucast.out)) + return false; + + return true; +} + +static bool check_bcast_qos(struct bt_iso_qos *qos) { - if (qos->sca > 0x07) + if (qos->bcast.sync_interval > 0x07) + return false; + + if (qos->bcast.packing > 0x01) + return false; + + if (qos->bcast.framing > 0x01) + return false; + + if (!check_io_qos(&qos->bcast.in)) + return false; + + if (!check_io_qos(&qos->bcast.out)) + return false; + + if (qos->bcast.encryption > 0x01) return false; - if (qos->packing > 0x01) + if (qos->bcast.options > 0x07) return false; - if (qos->framing > 0x01) + if (qos->bcast.skip > 0x01f3) return false; - if (!check_io_qos(&qos->in)) + if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000) return false; - if (!check_io_qos(&qos->out)) + if (qos->bcast.sync_cte_type > 0x1f) + return false; + + if (qos->bcast.mse > 0x1f) + return false; + + if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000) return false; return true; @@ -1179,7 +1257,7 @@ { struct sock *sk = sock->sk; int len, err = 0; - struct bt_iso_qos qos; + struct bt_iso_qos qos = default_qos; u32 opt; BT_DBG("sk %p", sk); @@ -1212,24 +1290,19 @@ } len = min_t(unsigned int, sizeof(qos), optlen); - if (len != sizeof(qos)) { - err = -EINVAL; - break; - } - - memset(&qos, 0, sizeof(qos)); if (copy_from_sockptr(&qos, optval, len)) { err = -EFAULT; break; } - if (!check_qos(&qos)) { + if (len == sizeof(qos.ucast) && !check_ucast_qos(&qos)) { err = -EINVAL; break; } iso_pi(sk)->qos = qos; + iso_pi(sk)->qos_user_set = true; break; @@ -1419,7 +1492,7 @@ { struct hci_evt_le_big_sync_estabilished *ev = data; - return ev->handle == iso_pi(sk)->qos.big; + return ev->handle == iso_pi(sk)->qos.bcast.big; } static void iso_conn_ready(struct iso_conn *conn) diff -u linux-6.2.0/net/bluetooth/l2cap_core.c linux-6.2.0/net/bluetooth/l2cap_core.c --- linux-6.2.0/net/bluetooth/l2cap_core.c +++ linux-6.2.0/net/bluetooth/l2cap_core.c @@ -4307,6 +4307,10 @@ 
result = __le16_to_cpu(rsp->result); status = __le16_to_cpu(rsp->status); + if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START || + dcid > L2CAP_CID_DYN_END)) + return -EPROTO; + BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status); @@ -4338,6 +4342,11 @@ switch (result) { case L2CAP_CR_SUCCESS: + if (__l2cap_get_chan_by_dcid(conn, dcid)) { + err = -EBADSLT; + break; + } + l2cap_state_change(chan, BT_CONFIG); chan->ident = 0; chan->dcid = dcid; @@ -4664,7 +4673,9 @@ chan->ops->set_shutdown(chan); + l2cap_chan_unlock(chan); mutex_lock(&conn->chan_lock); + l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNRESET); mutex_unlock(&conn->chan_lock); @@ -4703,7 +4714,9 @@ return 0; } + l2cap_chan_unlock(chan); mutex_lock(&conn->chan_lock); + l2cap_chan_lock(chan); l2cap_chan_del(chan, 0); mutex_unlock(&conn->chan_lock); diff -u linux-6.2.0/net/bluetooth/l2cap_sock.c linux-6.2.0/net/bluetooth/l2cap_sock.c --- linux-6.2.0/net/bluetooth/l2cap_sock.c +++ linux-6.2.0/net/bluetooth/l2cap_sock.c @@ -46,6 +46,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent); static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern); +static void l2cap_sock_cleanup_listen(struct sock *parent); bool l2cap_is_socket(struct socket *sock) { @@ -1415,6 +1416,7 @@ if (!sk) return 0; + l2cap_sock_cleanup_listen(sk); bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, SHUT_RDWR); diff -u linux-6.2.0/net/can/j1939/socket.c linux-6.2.0/net/can/j1939/socket.c --- linux-6.2.0/net/can/j1939/socket.c +++ linux-6.2.0/net/can/j1939/socket.c @@ -1088,6 +1088,11 @@ void j1939_sk_send_loop_abort(struct sock *sk, int err) { + struct j1939_sock *jsk = j1939_sk(sk); + + if (jsk->state & J1939_SOCK_ERRQUEUE) + return; + sk->sk_err = err; sk_error_report(sk); diff -u linux-6.2.0/net/core/dev.c linux-6.2.0/net/core/dev.c --- linux-6.2.0/net/core/dev.c +++ linux-6.2.0/net/core/dev.c @@ -4468,8 +4468,10 @@ u32 next_cpu; u32 ident; - /* First check into global flow table if there is a match */ - ident = sock_flow_table->ents[hash & sock_flow_table->mask]; + /* First check into global flow table if there is a match. + * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow(). 
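/*
 * The two l2cap_core.c hunks above fix a lock-ordering problem: the
 * disconnect handlers used to take conn->chan_lock while already holding
 * the channel lock, the reverse of the order used elsewhere, so they now
 * drop the channel lock, take the connection lock, and re-take the channel
 * lock.  A minimal sketch of that drop-and-retake pattern with illustrative
 * names (not the real l2cap API):
 */
#include <linux/list.h>
#include <linux/mutex.h>

struct parent {
	struct mutex list_lock;		/* outer lock: protects the child list */
	struct list_head children;
};

struct child {
	struct mutex lock;		/* inner lock: per-child state */
	struct list_head node;
};

/* Caller holds c->lock; convention says p->list_lock must be taken first. */
static void remove_child(struct parent *p, struct child *c)
{
	mutex_unlock(&c->lock);		/* drop the inner lock ...             */
	mutex_lock(&p->list_lock);	/* ... take the outer lock first ...   */
	mutex_lock(&c->lock);		/* ... then re-take the inner lock     */

	/* revalidate c here: its state may have changed while unlocked */
	list_del(&c->node);

	mutex_unlock(&p->list_lock);
	/* return with c->lock still held, as the caller expects */
}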
+ */ + ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]); if ((ident ^ hash) & ~rps_cpu_mask) goto try_rps; @@ -10498,7 +10500,7 @@ return NULL; netdev_init_one_queue(dev, queue, NULL); RCU_INIT_POINTER(queue->qdisc, &noop_qdisc); - queue->qdisc_sleeping = &noop_qdisc; + RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc); rcu_assign_pointer(dev->ingress_queue, queue); #endif return queue; diff -u linux-6.2.0/net/core/rtnetlink.c linux-6.2.0/net/core/rtnetlink.c --- linux-6.2.0/net/core/rtnetlink.c +++ linux-6.2.0/net/core/rtnetlink.c @@ -3253,6 +3253,7 @@ struct net_device *dev; unsigned int num_tx_queues = 1; unsigned int num_rx_queues = 1; + int err; if (tb[IFLA_NUM_TX_QUEUES]) num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); @@ -3288,13 +3289,18 @@ if (!dev) return ERR_PTR(-ENOMEM); + err = validate_linkmsg(dev, tb, extack); + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + } + dev_net_set(dev, net); dev->rtnl_link_ops = ops; dev->rtnl_link_state = RTNL_LINK_INITIALIZING; if (tb[IFLA_MTU]) { u32 mtu = nla_get_u32(tb[IFLA_MTU]); - int err; err = dev_validate_mtu(dev, mtu, extack); if (err) { diff -u linux-6.2.0/net/core/skmsg.c linux-6.2.0/net/core/skmsg.c --- linux-6.2.0/net/core/skmsg.c +++ linux-6.2.0/net/core/skmsg.c @@ -1205,7 +1205,8 @@ rcu_read_lock(); psock = sk_psock(sk); - psock->saved_data_ready(sk); + if (psock) + psock->saved_data_ready(sk); rcu_read_unlock(); } } diff -u linux-6.2.0/net/core/sock.c linux-6.2.0/net/core/sock.c --- linux-6.2.0/net/core/sock.c +++ linux-6.2.0/net/core/sock.c @@ -2392,7 +2392,6 @@ { u32 max_segs = 1; - sk_dst_set(sk, dst); sk->sk_route_caps = dst->dev->features; if (sk_is_tcp(sk)) sk->sk_route_caps |= NETIF_F_GSO; @@ -2414,6 +2413,7 @@ } } sk->sk_gso_max_segs = max_segs; + sk_dst_set(sk, dst); } EXPORT_SYMBOL_GPL(sk_setup_caps); diff -u linux-6.2.0/net/ipv4/af_inet.c linux-6.2.0/net/ipv4/af_inet.c --- linux-6.2.0/net/ipv4/af_inet.c +++ linux-6.2.0/net/ipv4/af_inet.c @@ -589,6 +589,7 @@ add_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending += writebias; + sk->sk_wait_pending++; /* Basic assumption: if someone sets sk->sk_err, he _must_ * change state of the socket from TCP_SYN_*. 
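/*
 * The sk_setup_caps() hunk above moves sk_dst_set() from the top of the
 * function to the end, presumably so that anything which finds the newly
 * installed dst also sees sk_route_caps and the GSO limits already
 * computed.  A generic sketch of that publish-last idea for an
 * RCU-protected pointer (names and fields are illustrative):
 */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cfg {
	int a, b;
};

struct obj {
	struct cfg __rcu *cfg;
};

static int obj_set_cfg(struct obj *o, int a, int b)
{
	struct cfg *c = kmalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return -ENOMEM;

	/* fully initialise the object first ... */
	c->a = a;
	c->b = b;

	/* ... then publish it: rcu_assign_pointer() orders the stores above
	 * before the pointer becomes visible to rcu_dereference() readers.
	 * (Replacing an existing cfg would also need kfree_rcu(); omitted.)
	 */
	rcu_assign_pointer(o->cfg, c);
	return 0;
}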
@@ -604,6 +605,7 @@ } remove_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending -= writebias; + sk->sk_wait_pending--; return timeo; } diff -u linux-6.2.0/net/ipv4/inet_connection_sock.c linux-6.2.0/net/ipv4/inet_connection_sock.c --- linux-6.2.0/net/ipv4/inet_connection_sock.c +++ linux-6.2.0/net/ipv4/inet_connection_sock.c @@ -1143,6 +1143,7 @@ if (newsk) { struct inet_connection_sock *newicsk = inet_csk(newsk); + newsk->sk_wait_pending = 0; inet_sk_set_state(newsk, TCP_SYN_RECV); newicsk->icsk_bind_hash = NULL; newicsk->icsk_bind2_hash = NULL; diff -u linux-6.2.0/net/ipv4/sysctl_net_ipv4.c linux-6.2.0/net/ipv4/sysctl_net_ipv4.c --- linux-6.2.0/net/ipv4/sysctl_net_ipv4.c +++ linux-6.2.0/net/ipv4/sysctl_net_ipv4.c @@ -34,8 +34,8 @@ static int ip_ttl_max = 255; static int tcp_syn_retries_min = 1; static int tcp_syn_retries_max = MAX_TCP_SYNCNT; -static int ip_ping_group_range_min[] = { 0, 0 }; -static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; +static unsigned long ip_ping_group_range_min[] = { 0, 0 }; +static unsigned long ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; static u32 u32_max_div_HZ = UINT_MAX / HZ; static int one_day_secs = 24 * 3600; static u32 fib_multipath_hash_fields_all_mask __maybe_unused = @@ -165,7 +165,7 @@ { struct user_namespace *user_ns = current_user_ns(); int ret; - gid_t urange[2]; + unsigned long urange[2]; kgid_t low, high; struct ctl_table tmp = { .data = &urange, @@ -178,7 +178,7 @@ inet_get_ping_group_range_table(table, &low, &high); urange[0] = from_kgid_munged(user_ns, low); urange[1] = from_kgid_munged(user_ns, high); - ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); + ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && ret == 0) { low = make_kgid(user_ns, urange[0]); diff -u linux-6.2.0/net/ipv4/tcp.c linux-6.2.0/net/ipv4/tcp.c --- linux-6.2.0/net/ipv4/tcp.c +++ linux-6.2.0/net/ipv4/tcp.c @@ -3079,6 +3079,12 @@ int old_state = sk->sk_state; u32 seq; + /* Deny disconnect if other threads are blocked in sk_wait_event() + * or inet_wait_for_connect(). 
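/*
 * The af_inet.c hunk above counts sleepers in sk->sk_wait_pending around
 * the connect wait, and the tcp_disconnect() hunk it pairs with refuses a
 * disconnect while that count is non-zero.  A minimal sketch of the
 * pattern with a plain counter (illustrative names; the real code uses the
 * sk_wait_pending field added here, updated under the socket lock):
 */
#include <linux/errno.h>

struct waitable {
	int wait_pending;		/* updated only under the owner lock */
};

static long waitable_sleep(struct waitable *w, long timeo)
{
	w->wait_pending++;
	/* ... release the owner lock, sleep (sk_wait_event-style) ... */
	/* ... re-acquire the owner lock ... */
	w->wait_pending--;
	return timeo;
}

static int waitable_reset(struct waitable *w)
{
	/* deny a destructive reset while another thread is still blocked */
	if (w->wait_pending)
		return -EBUSY;

	/* ... safe to reinitialise the object here ... */
	return 0;
}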
+ */ + if (sk->sk_wait_pending) + return -EBUSY; + if (old_state != TCP_CLOSE) tcp_set_state(sk, TCP_CLOSE); @@ -4070,7 +4076,8 @@ switch (optname) { case TCP_MAXSEG: val = tp->mss_cache; - if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) + if (tp->rx_opt.user_mss && + ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) val = tp->rx_opt.user_mss; if (tp->repair) val = tp->rx_opt.mss_clamp; diff -u linux-6.2.0/net/ipv4/tcp_input.c linux-6.2.0/net/ipv4/tcp_input.c --- linux-6.2.0/net/ipv4/tcp_input.c +++ linux-6.2.0/net/ipv4/tcp_input.c @@ -4530,7 +4530,7 @@ } } -static void tcp_sack_compress_send_ack(struct sock *sk) +void tcp_sack_compress_send_ack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); diff -u linux-6.2.0/net/mac80211/ieee80211_i.h linux-6.2.0/net/mac80211/ieee80211_i.h --- linux-6.2.0/net/mac80211/ieee80211_i.h +++ linux-6.2.0/net/mac80211/ieee80211_i.h @@ -2495,7 +2495,8 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx); void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx); + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for); bool ieee80211_is_radar_required(struct ieee80211_local *local); void ieee80211_dfs_cac_timer(unsigned long data); diff -u linux-6.2.0/net/mac80211/rx.c linux-6.2.0/net/mac80211/rx.c --- linux-6.2.0/net/mac80211/rx.c +++ linux-6.2.0/net/mac80211/rx.c @@ -4836,7 +4836,9 @@ } if (unlikely(rx->sta && rx->sta->sta.mlo) && - is_unicast_ether_addr(hdr->addr1)) { + is_unicast_ether_addr(hdr->addr1) && + !ieee80211_is_probe_resp(hdr->frame_control) && + !ieee80211_is_beacon(hdr->frame_control)) { /* translate to MLD addresses */ if (ether_addr_equal(link->conf->addr, hdr->addr1)) ether_addr_copy(hdr->addr1, rx->sdata->vif.addr); diff -u linux-6.2.0/net/mac80211/util.c linux-6.2.0/net/mac80211/util.c --- linux-6.2.0/net/mac80211/util.c +++ linux-6.2.0/net/mac80211/util.c @@ -3020,7 +3020,7 @@ chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); - ieee80211_recalc_chanctx_min_def(local, chanctx); + ieee80211_recalc_chanctx_min_def(local, chanctx, NULL); } unlock: mutex_unlock(&local->chanctx_mtx); diff -u linux-6.2.0/net/mptcp/pm_netlink.c linux-6.2.0/net/mptcp/pm_netlink.c --- linux-6.2.0/net/mptcp/pm_netlink.c +++ linux-6.2.0/net/mptcp/pm_netlink.c @@ -1554,6 +1554,24 @@ return ret; } +void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list) +{ + struct mptcp_rm_list alist = { .nr = 0 }; + struct mptcp_pm_addr_entry *entry; + + list_for_each_entry(entry, rm_list, list) { + remove_anno_list_by_saddr(msk, &entry->addr); + if (alist.nr < MPTCP_RM_IDS_MAX) + alist.ids[alist.nr++] = entry->addr.id; + } + + if (alist.nr) { + spin_lock_bh(&msk->pm.lock); + mptcp_pm_remove_addr(msk, &alist); + spin_unlock_bh(&msk->pm.lock); + } +} + void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, struct list_head *rm_list) { diff -u linux-6.2.0/net/mptcp/protocol.c linux-6.2.0/net/mptcp/protocol.c --- linux-6.2.0/net/mptcp/protocol.c +++ linux-6.2.0/net/mptcp/protocol.c @@ -102,8 +102,8 @@ if (err) return err; - msk->first = ssock->sk; - msk->subflow = ssock; + WRITE_ONCE(msk->first, ssock->sk); + WRITE_ONCE(msk->subflow, ssock); subflow = mptcp_subflow_ctx(ssock->sk); list_add(&subflow->node, &msk->conn_list); sock_hold(ssock->sk); @@ -590,7 +590,7 @@ WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); WRITE_ONCE(msk->rcv_data_fin, 0); - sk->sk_shutdown |= RCV_SHUTDOWN; + WRITE_ONCE(sk->sk_shutdown, 
sk->sk_shutdown | RCV_SHUTDOWN); smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ switch (sk->sk_state) { @@ -812,6 +812,13 @@ mptcp_data_unlock(sk); } +static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) +{ + mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); + WRITE_ONCE(msk->allow_infinite_fallback, false); + mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); +} + static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) { struct sock *sk = (struct sock *)msk; @@ -826,6 +833,7 @@ mptcp_sock_graft(ssk, sk->sk_socket); mptcp_sockopt_sync_locked(msk, ssk); + mptcp_subflow_joined(msk, ssk); return true; } @@ -897,7 +905,7 @@ /* hopefully temporary hack: propagate shutdown status * to msk, when all subflows agree on it */ - sk->sk_shutdown |= RCV_SHUTDOWN; + WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN); smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ sk->sk_data_ready(sk); @@ -1672,7 +1680,6 @@ lock_sock(ssk); msg->msg_flags |= MSG_DONTWAIT; - msk->connect_flags = O_NONBLOCK; msk->fastopening = 1; ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL); msk->fastopening = 0; @@ -2255,7 +2262,7 @@ { if (msk->subflow) { iput(SOCK_INODE(msk->subflow)); - msk->subflow = NULL; + WRITE_ONCE(msk->subflow, NULL); } } @@ -2392,7 +2399,7 @@ sock_put(ssk); if (ssk == msk->first) - msk->first = NULL; + WRITE_ONCE(msk->first, NULL); out: if (ssk == msk->last_snd) @@ -2499,7 +2506,7 @@ } inet_sk_state_store(sk, TCP_CLOSE); - sk->sk_shutdown = SHUTDOWN_MASK; + WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); @@ -2693,7 +2700,7 @@ WRITE_ONCE(msk->rmem_released, 0); msk->timer_ival = TCP_RTO_MIN; - msk->first = NULL; + WRITE_ONCE(msk->first, NULL); inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); WRITE_ONCE(msk->allow_infinite_fallback, true); @@ -2930,7 +2937,7 @@ bool do_cancel_work = false; int subflows_alive = 0; - sk->sk_shutdown = SHUTDOWN_MASK; + WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) { inet_sk_state_store(sk, TCP_CLOSE); @@ -3006,7 +3013,7 @@ sock_put(sk); } -void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) +static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) { #if IS_ENABLED(CONFIG_MPTCP_IPV6) const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); @@ -3068,7 +3075,7 @@ mptcp_pm_data_reset(msk); mptcp_ca_reset(sk); - sk->sk_shutdown = 0; + WRITE_ONCE(sk->sk_shutdown, 0); sk_error_report(sk); return 0; } @@ -3082,9 +3089,10 @@ } #endif -struct sock *mptcp_sk_clone(const struct sock *sk, - const struct mptcp_options_received *mp_opt, - struct request_sock *req) +struct sock *mptcp_sk_clone_init(const struct sock *sk, + const struct mptcp_options_received *mp_opt, + struct sock *ssk, + struct request_sock *req) { struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); @@ -3103,7 +3111,7 @@ msk = mptcp_sk(nsk); msk->local_key = subflow_req->local_key; msk->token = subflow_req->token; - msk->subflow = NULL; + WRITE_ONCE(msk->subflow, NULL); msk->in_accept_queue = 1; WRITE_ONCE(msk->fully_established, false); if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) @@ -3116,10 +3124,30 @@ msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; sock_reset_flag(nsk, SOCK_RCU_FREE); - /* will 
be fully established after successful MPC subflow creation */ - inet_sk_state_store(nsk, TCP_SYN_RECV); - security_inet_csk_clone(nsk, req); + + /* this can't race with mptcp_close(), as the msk is + * not yet exposted to user-space + */ + inet_sk_state_store(nsk, TCP_ESTABLISHED); + + /* The msk maintain a ref to each subflow in the connections list */ + WRITE_ONCE(msk->first, ssk); + list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list); + sock_hold(ssk); + + /* new mpc subflow takes ownership of the newly + * created mptcp socket + */ + mptcp_token_accept(subflow_req, msk); + + /* set msk addresses early to ensure mptcp_pm_get_local_id() + * uses the correct data + */ + mptcp_copy_inaddrs(nsk, ssk); + mptcp_propagate_sndbuf(nsk, ssk); + + mptcp_rcv_space_init(msk, ssk); bh_unlock_sock(nsk); /* note: the newly allocated socket refcount is 2 now */ @@ -3151,7 +3179,7 @@ struct socket *listener; struct sock *newsk; - listener = __mptcp_nmpc_socket(msk); + listener = READ_ONCE(msk->subflow); if (WARN_ON_ONCE(!listener)) { *err = -EINVAL; return NULL; @@ -3371,7 +3399,7 @@ struct mptcp_sock *msk = mptcp_sk(sk); struct socket *ssock; - ssock = __mptcp_nmpc_socket(msk); + ssock = msk->subflow; pr_debug("msk=%p, subflow=%p", msk, ssock); if (WARN_ON_ONCE(!ssock)) return -EINVAL; @@ -3431,14 +3459,16 @@ return false; } - if (!list_empty(&subflow->node)) - goto out; + /* active subflow, already present inside the conn_list */ + if (!list_empty(&subflow->node)) { + mptcp_subflow_joined(msk, ssk); + return true; + } if (!mptcp_pm_allow_new_subflow(msk)) goto err_prohibited; - /* active connections are already on conn_list. - * If we can't acquire msk socket lock here, let the release callback + /* If we can't acquire msk socket lock here, let the release callback * handle it */ mptcp_data_lock(parent); @@ -3461,11 +3491,6 @@ return false; } - subflow->map_seq = READ_ONCE(msk->ack_seq); - WRITE_ONCE(msk->allow_infinite_fallback, false); - -out: - mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); return true; } @@ -3583,9 +3608,9 @@ * acquired the subflow socket lock, too. */ if (msk->fastopening) - err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1); + err = __inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK, 1); else - err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags); + err = inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK); inet_sk(sk)->defer_connect = inet_sk(ssock->sk)->defer_connect; /* on successful connect, the msk state will be moved to established by @@ -3598,12 +3623,10 @@ mptcp_copy_inaddrs(sk, ssock->sk); - /* unblocking connect, mptcp-level inet_stream_connect will error out - * without changing the socket state, update it here. 
+ /* silence EINPROGRESS and let the caller inet_stream_connect + * handle the connection in progress */ - if (err == -EINPROGRESS) - sk->sk_socket->state = ssock->state; - return err; + return 0; } static struct proto mptcp_prot = { @@ -3662,18 +3685,6 @@ return err; } -static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, - int addr_len, int flags) -{ - int ret; - - lock_sock(sock->sk); - mptcp_sk(sock->sk)->connect_flags = flags; - ret = __inet_stream_connect(sock, uaddr, addr_len, flags, 0); - release_sock(sock->sk); - return ret; -} - static int mptcp_listen(struct socket *sock, int backlog) { struct mptcp_sock *msk = mptcp_sk(sock->sk); @@ -3714,7 +3725,10 @@ pr_debug("msk=%p", msk); - ssock = __mptcp_nmpc_socket(msk); + /* Buggy applications can call accept on socket states other then LISTEN + * but no need to allocate the first subflow just to error out. + */ + ssock = READ_ONCE(msk->subflow); if (!ssock) return -EINVAL; @@ -3760,9 +3774,6 @@ { struct sock *sk = (struct sock *)msk; - if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN)) - return EPOLLOUT | EPOLLWRNORM; - if (sk_stream_is_writeable(sk)) return EPOLLOUT | EPOLLWRNORM; @@ -3780,6 +3791,7 @@ struct sock *sk = sock->sk; struct mptcp_sock *msk; __poll_t mask = 0; + u8 shutdown; int state; msk = mptcp_sk(sk); @@ -3788,23 +3800,30 @@ state = inet_sk_state_load(sk); pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags); if (state == TCP_LISTEN) { - if (WARN_ON_ONCE(!msk->subflow || !msk->subflow->sk)) + struct socket *ssock = READ_ONCE(msk->subflow); + + if (WARN_ON_ONCE(!ssock || !ssock->sk)) return 0; - return inet_csk_listen_poll(msk->subflow->sk); + return inet_csk_listen_poll(ssock->sk); } + shutdown = READ_ONCE(sk->sk_shutdown); + if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) + mask |= EPOLLHUP; + if (shutdown & RCV_SHUTDOWN) + mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; + if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { mask |= mptcp_check_readable(msk); - mask |= mptcp_check_writeable(msk); + if (shutdown & SEND_SHUTDOWN) + mask |= EPOLLOUT | EPOLLWRNORM; + else + mask |= mptcp_check_writeable(msk); } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) { /* cf tcp_poll() note about TFO */ mask |= EPOLLOUT | EPOLLWRNORM; } - if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) - mask |= EPOLLHUP; - if (sk->sk_shutdown & RCV_SHUTDOWN) - mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */ smp_rmb(); @@ -3819,7 +3838,7 @@ .owner = THIS_MODULE, .release = inet_release, .bind = mptcp_bind, - .connect = mptcp_stream_connect, + .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = mptcp_stream_accept, .getname = inet_getname, @@ -3914,7 +3933,7 @@ .owner = THIS_MODULE, .release = inet6_release, .bind = mptcp_bind, - .connect = mptcp_stream_connect, + .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = mptcp_stream_accept, .getname = inet6_getname, diff -u linux-6.2.0/net/mptcp/protocol.h linux-6.2.0/net/mptcp/protocol.h --- linux-6.2.0/net/mptcp/protocol.h +++ linux-6.2.0/net/mptcp/protocol.h @@ -297,7 +297,6 @@ nodelay:1, fastopening:1, in_accept_queue:1; - int connect_flags; struct work_struct work; struct sk_buff *ooo_last_skb; struct rb_root out_of_order_queue; @@ -306,7 +305,11 @@ struct list_head rtx_queue; struct mptcp_data_frag *first_pending; struct list_head join_list; - struct socket *subflow; /* outgoing connect/listener/!mp_capable */ + struct socket 
*subflow; /* outgoing connect/listener/!mp_capable + * The mptcp ops can safely dereference, using suitable + * ONCE annotation, the subflow outside the socket + * lock as such sock is freed after close(). + */ struct sock *first; struct mptcp_pm_data pm; struct { @@ -616,7 +619,6 @@ int mptcp_allow_join_id0(const struct net *net); unsigned int mptcp_stale_loss_cnt(const struct net *net); int mptcp_get_pm_type(const struct net *net); -void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk); void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow, const struct mptcp_options_received *mp_opt); bool __mptcp_retransmit_pending_data(struct sock *sk); @@ -686,9 +688,10 @@ int __init mptcp_proto_v6_init(void); #endif -struct sock *mptcp_sk_clone(const struct sock *sk, - const struct mptcp_options_received *mp_opt, - struct request_sock *req); +struct sock *mptcp_sk_clone_init(const struct sock *sk, + const struct mptcp_options_received *mp_opt, + struct sock *ssk, + struct request_sock *req); void mptcp_get_options(const struct sk_buff *skb, struct mptcp_options_received *mp_opt); @@ -832,6 +835,7 @@ bool echo); int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); +void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list); void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk, struct list_head *rm_list); diff -u linux-6.2.0/net/mptcp/subflow.c linux-6.2.0/net/mptcp/subflow.c --- linux-6.2.0/net/mptcp/subflow.c +++ linux-6.2.0/net/mptcp/subflow.c @@ -694,14 +694,6 @@ return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN); } -static void mptcp_force_close(struct sock *sk) -{ - /* the msk is not yet exposed to user-space, and refcount is 2 */ - inet_sk_state_store(sk, TCP_CLOSE); - sk_common_release(sk); - sock_put(sk); -} - static void subflow_ulp_fallback(struct sock *sk, struct mptcp_subflow_context *old_ctx) { @@ -756,7 +748,6 @@ struct mptcp_subflow_request_sock *subflow_req; struct mptcp_options_received mp_opt; bool fallback, fallback_is_fatal; - struct sock *new_msk = NULL; struct mptcp_sock *owner; struct sock *child; @@ -785,14 +776,9 @@ * options. 
*/ mptcp_get_options(skb, &mp_opt); - if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) { + if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) fallback = true; - goto create_child; - } - new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req); - if (!new_msk) - fallback = true; } else if (subflow_req->mp_join) { mptcp_get_options(skb, &mp_opt); if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) || @@ -821,47 +807,19 @@ subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP); goto dispose_child; } - - if (new_msk) - mptcp_copy_inaddrs(new_msk, child); - mptcp_subflow_drop_ctx(child); - goto out; + goto fallback; } /* ssk inherits options of listener sk */ ctx->setsockopt_seq = listener->setsockopt_seq; if (ctx->mp_capable) { - owner = mptcp_sk(new_msk); - - /* this can't race with mptcp_close(), as the msk is - * not yet exposted to user-space - */ - inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED); - - /* record the newly created socket as the first msk - * subflow, but don't link it yet into conn_list - */ - WRITE_ONCE(owner->first, child); + ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req); + if (!ctx->conn) + goto fallback; - /* new mpc subflow takes ownership of the newly - * created mptcp socket - */ - mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq; + owner = mptcp_sk(ctx->conn); mptcp_pm_new_connection(owner, child, 1); - mptcp_token_accept(subflow_req, owner); - ctx->conn = new_msk; - new_msk = NULL; - - /* set msk addresses early to ensure mptcp_pm_get_local_id() - * uses the correct data - */ - mptcp_copy_inaddrs(ctx->conn, child); - mptcp_propagate_sndbuf(ctx->conn, child); - - mptcp_rcv_space_init(owner, child); - list_add(&ctx->node, &owner->conn_list); - sock_hold(child); /* with OoO packets we can reach here without ingress * mpc option @@ -901,11 +859,6 @@ } } -out: - /* dispose of the left over mptcp master, if any */ - if (unlikely(new_msk)) - mptcp_force_close(new_msk); - /* check for expected invariant - should never trigger, just help * catching eariler subtle bugs */ @@ -923,6 +876,10 @@ /* The last child reference will be released by the caller */ return child; + +fallback: + mptcp_subflow_drop_ctx(child); + return child; } static struct inet_connection_sock_af_ops subflow_specific __ro_after_init; diff -u linux-6.2.0/net/netfilter/nf_conntrack_core.c linux-6.2.0/net/netfilter/nf_conntrack_core.c --- linux-6.2.0/net/netfilter/nf_conntrack_core.c +++ linux-6.2.0/net/netfilter/nf_conntrack_core.c @@ -2273,6 +2273,9 @@ return 0; helper = rcu_dereference(help->helper); + if (!helper) + return 0; + if (!(helper->flags & NF_CT_HELPER_F_USERSPACE)) return 0; diff -u linux-6.2.0/net/netfilter/nf_conntrack_netlink.c linux-6.2.0/net/netfilter/nf_conntrack_netlink.c --- linux-6.2.0/net/netfilter/nf_conntrack_netlink.c +++ linux-6.2.0/net/netfilter/nf_conntrack_netlink.c @@ -2982,7 +2982,9 @@ return -1; } +#if IS_ENABLED(CONFIG_NF_NAT) static const union nf_inet_addr any_addr; +#endif static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp) { @@ -3466,10 +3468,12 @@ return 0; } +#if IS_ENABLED(CONFIG_NF_NAT) static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = { [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 }, [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED }, }; +#endif static int ctnetlink_parse_expect_nat(const struct nlattr *attr, diff -u linux-6.2.0/net/netfilter/nf_tables_api.c linux-6.2.0/net/netfilter/nf_tables_api.c --- linux-6.2.0/net/netfilter/nf_tables_api.c +++ linux-6.2.0/net/netfilter/nf_tables_api.c @@ -1652,6 +1652,8 @@ if 
(nft_base_chain_netdev(family, ops->hooknum)) { nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS); + if (!nest_devs) + goto nla_put_failure; list_for_each_entry(hook, &basechain->hook_list, list) { if (!first) first = hook; @@ -6776,6 +6778,7 @@ ret = __nft_set_catchall_flush(ctx, set, &elem); if (ret < 0) break; + nft_set_elem_change_active(ctx->net, set, ext); } return ret; @@ -8861,7 +8864,7 @@ continue; } - if (WARN_ON_ONCE(data + expr->ops->size > data_boundary)) + if (WARN_ON_ONCE(data + size + expr->ops->size > data_boundary)) return -ENOMEM; memcpy(data + size, expr, expr->ops->size); diff -u linux-6.2.0/net/netlink/af_netlink.c linux-6.2.0/net/netlink/af_netlink.c --- linux-6.2.0/net/netlink/af_netlink.c +++ linux-6.2.0/net/netlink/af_netlink.c @@ -1779,7 +1779,7 @@ break; } } - if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen)) + if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen)) err = -EFAULT; netlink_unlock_table(); return err; diff -u linux-6.2.0/net/packet/af_packet.c linux-6.2.0/net/packet/af_packet.c --- linux-6.2.0/net/packet/af_packet.c +++ linux-6.2.0/net/packet/af_packet.c @@ -3194,6 +3194,9 @@ lock_sock(sk); spin_lock(&po->bind_lock); + if (!proto) + proto = po->num; + rcu_read_lock(); if (po->fanout) { @@ -3292,7 +3295,7 @@ memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min)); name[sizeof(uaddr->sa_data_min)] = 0; - return packet_do_bind(sk, name, 0, pkt_sk(sk)->num); + return packet_do_bind(sk, name, 0, 0); } static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) @@ -3309,8 +3312,7 @@ if (sll->sll_family != AF_PACKET) return -EINVAL; - return packet_do_bind(sk, NULL, sll->sll_ifindex, - sll->sll_protocol ? : pkt_sk(sk)->num); + return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol); } static struct proto packet_proto = { diff -u linux-6.2.0/net/packet/diag.c linux-6.2.0/net/packet/diag.c --- linux-6.2.0/net/packet/diag.c +++ linux-6.2.0/net/packet/diag.c @@ -143,7 +143,7 @@ rp = nlmsg_data(nlh); rp->pdiag_family = AF_PACKET; rp->pdiag_type = sk->sk_type; - rp->pdiag_num = ntohs(po->num); + rp->pdiag_num = ntohs(READ_ONCE(po->num)); rp->pdiag_ino = sk_ino; sock_diag_save_cookie(sk, rp->pdiag_cookie); diff -u linux-6.2.0/net/rxrpc/af_rxrpc.c linux-6.2.0/net/rxrpc/af_rxrpc.c --- linux-6.2.0/net/rxrpc/af_rxrpc.c +++ linux-6.2.0/net/rxrpc/af_rxrpc.c @@ -967,6 +967,7 @@ BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb)); ret = -ENOMEM; + rxrpc_gen_version_string(); rxrpc_call_jar = kmem_cache_create( "rxrpc_call_jar", sizeof(struct rxrpc_call), 0, SLAB_HWCACHE_ALIGN, NULL); diff -u linux-6.2.0/net/rxrpc/ar-internal.h linux-6.2.0/net/rxrpc/ar-internal.h --- linux-6.2.0/net/rxrpc/ar-internal.h +++ linux-6.2.0/net/rxrpc/ar-internal.h @@ -1064,6 +1064,7 @@ /* * local_event.c */ +void rxrpc_gen_version_string(void); void rxrpc_send_version_request(struct rxrpc_local *local, struct rxrpc_host_header *hdr, struct sk_buff *skb); diff -u linux-6.2.0/net/sched/cls_api.c linux-6.2.0/net/sched/cls_api.c --- linux-6.2.0/net/sched/cls_api.c +++ linux-6.2.0/net/sched/cls_api.c @@ -42,8 +42,6 @@ #include #include -extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; - /* The list of all installed classifier types */ static LIST_HEAD(tcf_proto_base); @@ -2790,6 +2788,7 @@ return PTR_ERR(ops); if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); + module_put(ops->owner); return 
-EOPNOTSUPP; } diff -u linux-6.2.0/net/sched/cls_fw.c linux-6.2.0/net/sched/cls_fw.c --- linux-6.2.0/net/sched/cls_fw.c +++ linux-6.2.0/net/sched/cls_fw.c @@ -267,7 +267,6 @@ return -ENOBUFS; fnew->id = f->id; - fnew->res = f->res; fnew->ifindex = f->ifindex; fnew->tp = f->tp; diff -u linux-6.2.0/net/sched/cls_u32.c linux-6.2.0/net/sched/cls_u32.c --- linux-6.2.0/net/sched/cls_u32.c +++ linux-6.2.0/net/sched/cls_u32.c @@ -815,7 +815,6 @@ new->ifindex = n->ifindex; new->fshift = n->fshift; - new->res = n->res; new->flags = n->flags; RCU_INIT_POINTER(new->ht_down, ht); diff -u linux-6.2.0/net/smc/smc_llc.c linux-6.2.0/net/smc/smc_llc.c --- linux-6.2.0/net/smc/smc_llc.c +++ linux-6.2.0/net/smc/smc_llc.c @@ -578,7 +578,10 @@ { struct smc_buf_desc *buf_next; - if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { + if (!buf_pos) + return _smc_llc_get_next_rmb(lgr, buf_lst); + + if (list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) { (*buf_lst)++; return _smc_llc_get_next_rmb(lgr, buf_lst); } @@ -614,6 +617,8 @@ goto out; buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst); for (i = 0; i < ext->num_rkeys; i++) { + while (buf_pos && !(buf_pos)->used) + buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); if (!buf_pos) break; rmb = buf_pos; @@ -623,8 +628,6 @@ cpu_to_be64((uintptr_t)rmb->cpu_addr) : cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl)); buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); - while (buf_pos && !(buf_pos)->used) - buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos); } len += i * sizeof(ext->rt[0]); out: @@ -848,6 +851,8 @@ addc_llc->num_rkeys = *num_rkeys_todo; n = *num_rkeys_todo; for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) { + while (*buf_pos && !(*buf_pos)->used) + *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); if (!*buf_pos) { addc_llc->num_rkeys = addc_llc->num_rkeys - *num_rkeys_todo; @@ -864,8 +869,6 @@ (*num_rkeys_todo)--; *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); - while (*buf_pos && !(*buf_pos)->used) - *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos); } addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT; addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont); diff -u linux-6.2.0/net/sunrpc/svcsock.c linux-6.2.0/net/sunrpc/svcsock.c --- linux-6.2.0/net/sunrpc/svcsock.c +++ linux-6.2.0/net/sunrpc/svcsock.c @@ -1338,25 +1338,10 @@ return svsk; } -bool svc_alien_sock(struct net *net, int fd) -{ - int err; - struct socket *sock = sockfd_lookup(fd, &err); - bool ret = false; - - if (!sock) - goto out; - if (sock_net(sock->sk) != net) - ret = true; - sockfd_put(sock); -out: - return ret; -} -EXPORT_SYMBOL_GPL(svc_alien_sock); - /** * svc_addsock - add a listener socket to an RPC service * @serv: pointer to RPC service to which to add a new listener + * @net: caller's network namespace * @fd: file descriptor of the new listener * @name_return: pointer to buffer to fill in with name of listener * @len: size of the buffer @@ -1366,8 +1351,8 @@ * Name is terminated with '\n'. On error, returns a negative errno * value. 
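/*
 * The sunrpc hunks here fold the removed svc_alien_sock() helper into
 * svc_addsock(), which now receives the caller's network namespace and
 * rejects file descriptors whose socket lives in a different netns.  A
 * minimal standalone sketch of that ownership check (illustrative helper,
 * not the sunrpc API):
 */
#include <linux/errno.h>
#include <linux/net.h>
#include <net/sock.h>

static int fd_belongs_to_net(struct net *net, int fd)
{
	int err = 0;
	struct socket *sock = sockfd_lookup(fd, &err);

	if (!sock)
		return err;		/* sockfd_lookup() set a negative errno */

	if (sock_net(sock->sk) != net)
		err = -EINVAL;		/* fd refers into another namespace */

	sockfd_put(sock);
	return err;
}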
*/ -int svc_addsock(struct svc_serv *serv, const int fd, char *name_return, - const size_t len, const struct cred *cred) +int svc_addsock(struct svc_serv *serv, struct net *net, const int fd, + char *name_return, const size_t len, const struct cred *cred) { int err = 0; struct socket *so = sockfd_lookup(fd, &err); @@ -1378,6 +1363,9 @@ if (!so) return err; + err = -EINVAL; + if (sock_net(so->sk) != net) + goto out; err = -EAFNOSUPPORT; if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6)) goto out; diff -u linux-6.2.0/net/tls/tls_strp.c linux-6.2.0/net/tls/tls_strp.c --- linux-6.2.0/net/tls/tls_strp.c +++ linux-6.2.0/net/tls/tls_strp.c @@ -20,7 +20,9 @@ strp->stopped = 1; /* Report an error on the lower socket */ - strp->sk->sk_err = -err; + WRITE_ONCE(strp->sk->sk_err, -err); + /* Paired with smp_rmb() in tcp_poll() */ + smp_wmb(); sk_error_report(strp->sk); } diff -u linux-6.2.0/net/tls/tls_sw.c linux-6.2.0/net/tls/tls_sw.c --- linux-6.2.0/net/tls/tls_sw.c +++ linux-6.2.0/net/tls/tls_sw.c @@ -67,7 +67,9 @@ { WARN_ON_ONCE(err >= 0); /* sk->sk_err should contain a positive error code. */ - sk->sk_err = -err; + WRITE_ONCE(sk->sk_err, -err); + /* Paired with smp_rmb() in tcp_poll() */ + smp_wmb(); sk_error_report(sk); } @@ -2287,8 +2289,12 @@ struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); struct sk_psock *psock; + gfp_t alloc_save; + alloc_save = sk->sk_allocation; + sk->sk_allocation = GFP_ATOMIC; tls_strp_data_ready(&ctx->strp); + sk->sk_allocation = alloc_save; psock = sk_psock_get(sk); if (psock) { diff -u linux-6.2.0/net/wireless/nl80211.c linux-6.2.0/net/wireless/nl80211.c --- linux-6.2.0/net/wireless/nl80211.c +++ linux-6.2.0/net/wireless/nl80211.c @@ -10543,6 +10543,8 @@ if (!info->attrs[NL80211_ATTR_MLD_ADDR]) return -EINVAL; req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); + if (!is_valid_ether_addr(req.ap_mld_addr)) + return -EINVAL; } req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, diff -u linux-6.2.0/net/xfrm/xfrm_policy.c linux-6.2.0/net/xfrm/xfrm_policy.c --- linux-6.2.0/net/xfrm/xfrm_policy.c +++ linux-6.2.0/net/xfrm/xfrm_policy.c @@ -3312,7 +3312,7 @@ static inline int xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, - unsigned short family) + unsigned short family, u32 if_id) { if (xfrm_state_kern(x)) return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family); @@ -3323,7 +3323,8 @@ (tmpl->allalgs || (tmpl->aalgos & (1<props.aalgo)) || !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && !(x->props.mode != XFRM_MODE_TRANSPORT && - xfrm_state_addr_cmp(tmpl, x, family)); + xfrm_state_addr_cmp(tmpl, x, family)) && + (if_id == 0 || if_id == x->if_id); } /* @@ -3335,7 +3336,7 @@ */ static inline int xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, - unsigned short family) + unsigned short family, u32 if_id) { int idx = start; @@ -3345,7 +3346,7 @@ } else start = -1; for (; idx < sp->len; idx++) { - if (xfrm_state_ok(tmpl, sp->xvec[idx], family)) + if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id)) return ++idx; if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { if (start == -1) @@ -3724,7 +3725,7 @@ * are implied between each two transformations. 
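/*
 * The tls_strp.c and tls_sw.c hunks above switch sk->sk_err updates to
 * WRITE_ONCE() followed by smp_wmb(), pairing with the smp_rmb() that
 * tcp_poll() performs before reading the error.  A generic sketch of that
 * store/load pairing (struct and field names are illustrative):
 */
#include <asm/barrier.h>
#include <linux/compiler.h>

struct chan {
	int ready;			/* "an event is pending" flag */
	int err;
};

static void chan_report_error(struct chan *c, int err)
{
	WRITE_ONCE(c->err, err);
	smp_wmb();			/* order err before the flag below     */
	WRITE_ONCE(c->ready, 1);	/* wake-up / poll notification point   */
}

static int chan_check(struct chan *c)
{
	if (!READ_ONCE(c->ready))
		return 0;
	smp_rmb();			/* pairs with smp_wmb() in the writer  */
	return READ_ONCE(c->err);	/* guaranteed to observe the new err   */
}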
*/ for (i = xfrm_nr-1, k = 0; i >= 0; i--) { - k = xfrm_policy_ok(tpp[i], sp, k, family); + k = xfrm_policy_ok(tpp[i], sp, k, family, if_id); if (k < 0) { if (k < -1) /* "-2 - errored_index" returned */ diff -u linux-6.2.0/security/selinux/Makefile linux-6.2.0/security/selinux/Makefile --- linux-6.2.0/security/selinux/Makefile +++ linux-6.2.0/security/selinux/Makefile @@ -28,3 +28,7 @@ targets += flask.h av_permissions.h -$(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE +# once make >= 4.3 is required, we can use grouped targets in the rule below, +# which basically involves adding both headers and a '&' before the colon, see +# the example below: +# $(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/... +$(obj)/flask.h: scripts/selinux/genheaders/genheaders FORCE $(call if_changed,flask) diff -u linux-6.2.0/sound/pci/hda/hda_codec.c linux-6.2.0/sound/pci/hda/hda_codec.c --- linux-6.2.0/sound/pci/hda/hda_codec.c +++ linux-6.2.0/sound/pci/hda/hda_codec.c @@ -2458,10 +2458,14 @@ type == HDA_PCM_TYPE_HDMI) { /* suppose a single SPDIF device */ for (dig_mix = dig_mixes; dig_mix->name; dig_mix++) { + struct snd_ctl_elem_id id; + kctl = find_mixer_ctl(codec, dig_mix->name, 0, 0); if (!kctl) break; - kctl->id.index = spdif_index; + id = kctl->id; + id.index = spdif_index; + snd_ctl_rename_id(codec->card, &kctl->id, &id); } bus->primary_dig_out_type = HDA_PCM_TYPE_HDMI; } diff -u linux-6.2.0/sound/pci/hda/hda_intel.c linux-6.2.0/sound/pci/hda/hda_intel.c --- linux-6.2.0/sound/pci/hda/hda_intel.c +++ linux-6.2.0/sound/pci/hda/hda_intel.c @@ -227,6 +227,7 @@ AZX_DRIVER_ATI, AZX_DRIVER_ATIHDMI, AZX_DRIVER_ATIHDMI_NS, + AZX_DRIVER_GFHDMI, AZX_DRIVER_VIA, AZX_DRIVER_SIS, AZX_DRIVER_ULI, @@ -349,6 +350,7 @@ [AZX_DRIVER_ATI] = "HDA ATI SB", [AZX_DRIVER_ATIHDMI] = "HDA ATI HDMI", [AZX_DRIVER_ATIHDMI_NS] = "HDA ATI HDMI", + [AZX_DRIVER_GFHDMI] = "HDA GF HDMI", [AZX_DRIVER_VIA] = "HDA VIA VT82xx", [AZX_DRIVER_SIS] = "HDA SIS966", [AZX_DRIVER_ULI] = "HDA ULI M5461", @@ -1743,6 +1745,12 @@ } switch (chip->driver_type) { + /* + * increase the bdl size for Glenfly Gpus for hardware + * limitation on hdac interrupt interval + */ + case AZX_DRIVER_GFHDMI: + return 128; case AZX_DRIVER_ICH: case AZX_DRIVER_PCH: return 1; @@ -1858,6 +1866,12 @@ pci_write_config_dword(pci, PCI_BASE_ADDRESS_1, 0); } #endif + /* + * Fix response write request not synced to memory when handle + * hdac interrupt on Glenfly Gpus + */ + if (chip->driver_type == AZX_DRIVER_GFHDMI) + bus->polling_mode = 1; err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio"); if (err < 0) @@ -1959,6 +1973,7 @@ chip->playback_streams = ATIHDMI_NUM_PLAYBACK; chip->capture_streams = ATIHDMI_NUM_CAPTURE; break; + case AZX_DRIVER_GFHDMI: case AZX_DRIVER_GENERIC: default: chip->playback_streams = ICH6_NUM_PLAYBACK; @@ -2727,6 +2742,12 @@ { PCI_DEVICE(0x1002, 0xab38), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS | AZX_DCAPS_PM_RUNTIME }, + /* GLENFLY */ + { PCI_DEVICE(0x6766, PCI_ANY_ID), + .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, + .class_mask = 0xffffff, + .driver_data = AZX_DRIVER_GFHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_NO_64BIT }, /* VIA VT8251/VT8237A */ { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA }, /* VIA GFX VT7122/VX900 */ diff -u linux-6.2.0/sound/pci/hda/patch_hdmi.c linux-6.2.0/sound/pci/hda/patch_hdmi.c --- linux-6.2.0/sound/pci/hda/patch_hdmi.c +++ linux-6.2.0/sound/pci/hda/patch_hdmi.c @@ -4489,6 +4489,22 @@ return patch_simple_hdmi(codec, 
VIAHDMI_CVT_NID, VIAHDMI_PIN_NID); } +static int patch_gf_hdmi(struct hda_codec *codec) +{ + int err; + + err = patch_generic_hdmi(codec); + if (err) + return err; + + /* + * Glenfly GPUs have two codecs, stream switches from one codec to + * another, need to do actual clean-ups in codec_cleanup_stream + */ + codec->no_sticky_stream = 1; + return 0; +} + /* * patch entries */ @@ -4584,6 +4600,12 @@ HDA_CODEC_ENTRY(0x10de00a7, "GPU a7 HDMI/DP", patch_nvhdmi), HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch), +HDA_CODEC_ENTRY(0x67663d82, "Arise 82 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x67663d83, "Arise 83 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x67663d84, "Arise 84 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x67663d85, "Arise 85 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x67663d86, "Arise 86 HDMI/DP", patch_gf_hdmi), +HDA_CODEC_ENTRY(0x67663d87, "Arise 87 HDMI/DP", patch_gf_hdmi), HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi), diff -u linux-6.2.0/sound/pci/hda/patch_realtek.c linux-6.2.0/sound/pci/hda/patch_realtek.c --- linux-6.2.0/sound/pci/hda/patch_realtek.c +++ linux-6.2.0/sound/pci/hda/patch_realtek.c @@ -7071,6 +7071,8 @@ ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, ALC295_FIXUP_DISABLE_DAC3, ALC285_FIXUP_SPEAKER2_TO_DAC1, + ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1, + ALC285_FIXUP_ASUS_HEADSET_MIC, ALC280_FIXUP_HP_HEADSET_MIC, ALC221_FIXUP_HP_FRONT_MIC, ALC292_FIXUP_TPT460, @@ -8041,6 +8043,22 @@ .chained = true, .chain_id = ALC269_FIXUP_THINKPAD_ACPI }, + [ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_speaker2_to_dac1, + .chained = true, + .chain_id = ALC245_FIXUP_CS35L41_SPI_2 + }, + [ALC285_FIXUP_ASUS_HEADSET_MIC] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x03a11050 }, + { 0x1b, 0x03a11c30 }, + { } + }, + .chained = true, + .chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1 + }, [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -9515,6 +9533,8 @@ SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), + SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2), @@ -9535,6 +9555,7 @@ SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B), SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), + SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), @@ -9553,6 +9574,11 @@ SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1f92, 
"ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), + SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), @@ -9624,6 +9650,7 @@ SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1558, 0x51b1, "Clevo NS50AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), @@ -11682,6 +11709,7 @@ SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB), SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), + SND_PCI_QUIRK(0x103c, 0x8768, "HP Slim Desktop S01", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2), SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE), diff -u linux-6.2.0/sound/pci/ice1712/aureon.c linux-6.2.0/sound/pci/ice1712/aureon.c --- linux-6.2.0/sound/pci/ice1712/aureon.c +++ linux-6.2.0/sound/pci/ice1712/aureon.c @@ -1903,11 +1903,12 @@ else { for (i = 0; i < ARRAY_SIZE(cs8415_controls); i++) { struct snd_kcontrol *kctl; - err = snd_ctl_add(ice->card, (kctl = snd_ctl_new1(&cs8415_controls[i], ice))); - if (err < 0) - return err; + kctl = snd_ctl_new1(&cs8415_controls[i], ice); if (i > 1) kctl->id.device = ice->pcm->device; + err = snd_ctl_add(ice->card, kctl); + if (err < 0) + return err; } } } diff -u linux-6.2.0/sound/pci/ymfpci/ymfpci_main.c linux-6.2.0/sound/pci/ymfpci/ymfpci_main.c --- linux-6.2.0/sound/pci/ymfpci/ymfpci_main.c +++ linux-6.2.0/sound/pci/ymfpci/ymfpci_main.c @@ -1827,20 +1827,20 @@ if (snd_BUG_ON(!chip->pcm_spdif)) return -ENXIO; kctl = snd_ctl_new1(&snd_ymfpci_spdif_default, chip); + kctl->id.device = chip->pcm_spdif->device; err = snd_ctl_add(chip->card, kctl); if (err < 0) return err; - kctl->id.device = chip->pcm_spdif->device; kctl = snd_ctl_new1(&snd_ymfpci_spdif_mask, chip); + kctl->id.device = chip->pcm_spdif->device; err = snd_ctl_add(chip->card, kctl); if (err < 0) return err; - kctl->id.device = chip->pcm_spdif->device; kctl = snd_ctl_new1(&snd_ymfpci_spdif_stream, chip); + kctl->id.device = chip->pcm_spdif->device; err = snd_ctl_add(chip->card, kctl); if (err < 0) return err; - kctl->id.device = chip->pcm_spdif->device; chip->spdif_pcm_ctl = kctl; /* direct recording source */ diff -u linux-6.2.0/sound/soc/amd/yc/acp6x-mach.c linux-6.2.0/sound/soc/amd/yc/acp6x-mach.c --- linux-6.2.0/sound/soc/amd/yc/acp6x-mach.c +++ 
linux-6.2.0/sound/soc/amd/yc/acp6x-mach.c @@ -297,6 +297,13 @@ DMI_MATCH(DMI_BOARD_NAME, "8A22"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "System76"), + DMI_MATCH(DMI_PRODUCT_VERSION, "pang12"), + } + }, {} }; diff -u linux-6.2.0/sound/soc/codecs/wsa881x.c linux-6.2.0/sound/soc/codecs/wsa881x.c --- linux-6.2.0/sound/soc/codecs/wsa881x.c +++ linux-6.2.0/sound/soc/codecs/wsa881x.c @@ -648,7 +648,6 @@ .readable_reg = wsa881x_readable_register, .reg_format_endian = REGMAP_ENDIAN_NATIVE, .val_format_endian = REGMAP_ENDIAN_NATIVE, - .can_multi_write = true, }; enum { diff -u linux-6.2.0/sound/soc/codecs/wsa883x.c linux-6.2.0/sound/soc/codecs/wsa883x.c --- linux-6.2.0/sound/soc/codecs/wsa883x.c +++ linux-6.2.0/sound/soc/codecs/wsa883x.c @@ -950,7 +950,6 @@ .writeable_reg = wsa883x_writeable_register, .reg_format_endian = REGMAP_ENDIAN_NATIVE, .val_format_endian = REGMAP_ENDIAN_NATIVE, - .can_multi_write = true, .use_single_read = true, }; diff -u linux-6.2.0/sound/soc/sof/pm.c linux-6.2.0/sound/soc/sof/pm.c --- linux-6.2.0/sound/soc/sof/pm.c +++ linux-6.2.0/sound/soc/sof/pm.c @@ -159,7 +159,7 @@ ret = tplg_ops->set_up_all_pipelines(sdev, false); if (ret < 0) { dev_err(sdev->dev, "Failed to restore pipeline after resume %d\n", ret); - return ret; + goto setup_fail; } } @@ -173,6 +173,18 @@ dev_err(sdev->dev, "ctx_restore IPC error during resume: %d\n", ret); } +setup_fail: +#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE) + if (ret < 0) { + /* + * Debugfs cannot be read in runtime suspend, so cache + * the contents upon failure. This allows to capture + * possible DSP coredump information. + */ + sof_cache_debugfs(sdev); + } +#endif + return ret; } diff -u linux-6.2.0/tools/objtool/check.c linux-6.2.0/tools/objtool/check.c --- linux-6.2.0/tools/objtool/check.c +++ linux-6.2.0/tools/objtool/check.c @@ -376,7 +376,7 @@ if (!strcmp(sec->name, ".noinstr.text") || !strcmp(sec->name, ".entry.text") || - !strncmp(sec->name, ".text.__x86.", 12)) + !strncmp(sec->name, ".text..__x86.", 13)) sec->noinstr = true; /* @@ -436,7 +436,7 @@ return -1; } - if (func->return_thunk || func->alias != func) + if (func->embedded_insn || func->alias != func) continue; if (!find_insn(file, sec, func->offset)) { @@ -1291,16 +1291,33 @@ return 0; } +/* + * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol + * will be added to the .retpoline_sites section. + */ __weak bool arch_is_retpoline(struct symbol *sym) { return false; } +/* + * Symbols that replace INSN_RETURN, every (tail) call to such a symbol + * will be added to the .return_sites section. + */ __weak bool arch_is_rethunk(struct symbol *sym) { return false; } +/* + * Symbols that are embedded inside other instructions, because sometimes crazy + * code exists. These are mostly ignored for validation purposes. + */ +__weak bool arch_is_embedded_insn(struct symbol *sym) +{ + return false; +} + #define NEGATIVE_RELOC ((void *)-1L) static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) @@ -1584,14 +1601,14 @@ struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); /* - * This is a special case for zen_untrain_ret(). + * This is a special case for retbleed_untrain_ret(). * It jumps to __x86_return_thunk(), but objtool * can't find the thunk's starting RET * instruction, because the RET is also in the * middle of another instruction. Objtool only * knows about the outer instruction. 
*/ - if (sym && sym->return_thunk) { + if (sym && sym->embedded_insn) { add_return_call(file, insn, false); continue; } @@ -2481,6 +2498,9 @@ if (arch_is_rethunk(func)) func->return_thunk = true; + if (arch_is_embedded_insn(func)) + func->embedded_insn = true; + if (arch_ftrace_match(func->name)) func->fentry = true; @@ -2606,12 +2626,17 @@ return 0; } -static bool is_fentry_call(struct instruction *insn) +static bool is_special_call(struct instruction *insn) { - if (insn->type == INSN_CALL && - insn->call_dest && - insn->call_dest->fentry) - return true; + if (insn->type == INSN_CALL) { + struct symbol *dest = insn->call_dest; + + if (!dest) + return false; + + if (dest->fentry || dest->embedded_insn) + return true; + } return false; } @@ -3605,7 +3630,7 @@ if (ret) return ret; - if (opts.stackval && func && !is_fentry_call(insn) && + if (opts.stackval && func && !is_special_call(insn) && !has_valid_stack_frame(&state)) { WARN_FUNC("call without frame pointer save/setup", sec, insn->offset); diff -u linux-6.2.0/tools/perf/builtin-ftrace.c linux-6.2.0/tools/perf/builtin-ftrace.c --- linux-6.2.0/tools/perf/builtin-ftrace.c +++ linux-6.2.0/tools/perf/builtin-ftrace.c @@ -1175,7 +1175,7 @@ OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf, "Use BPF to measure function latency"), #endif - OPT_BOOLEAN('n', "--use-nsec", &ftrace.use_nsec, + OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec, "Use nano-second histogram"), OPT_PARENT(common_options), }; diff -u linux-6.2.0/tools/testing/selftests/net/mptcp/userspace_pm.sh linux-6.2.0/tools/testing/selftests/net/mptcp/userspace_pm.sh --- linux-6.2.0/tools/testing/selftests/net/mptcp/userspace_pm.sh +++ linux-6.2.0/tools/testing/selftests/net/mptcp/userspace_pm.sh @@ -1,6 +1,10 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Cannot not run test without ip tool" only in patch2: unchanged: --- linux-6.2.0.orig/Documentation/admin-guide/hw-vuln/srso.rst +++ linux-6.2.0/Documentation/admin-guide/hw-vuln/srso.rst @@ -0,0 +1,150 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Speculative Return Stack Overflow (SRSO) +======================================== + +This is a mitigation for the speculative return stack overflow (SRSO) +vulnerability found on AMD processors. The mechanism is by now the well +known scenario of poisoning CPU functional units - the Branch Target +Buffer (BTB) and Return Address Predictor (RAP) in this case - and then +tricking the elevated privilege domain (the kernel) into leaking +sensitive data. + +AMD CPUs predict RET instructions using a Return Address Predictor (aka +Return Address Stack/Return Stack Buffer). In some cases, a non-architectural +CALL instruction (i.e., an instruction predicted to be a CALL but is +not actually a CALL) can create an entry in the RAP which may be used +to predict the target of a subsequent RET instruction. + +The specific circumstances that lead to this varies by microarchitecture +but the concern is that an attacker can mis-train the CPU BTB to predict +non-architectural CALL instructions in kernel space and use this to +control the speculative target of a subsequent kernel RET, potentially +leading to information disclosure via a speculative side-channel. + +The issue is tracked under CVE-2023-20569. + +Affected processors +------------------- + +AMD Zen, generations 1-4. That is, all families 0x17 and 0x19. Older +processors have not been investigated. 
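As a side note (not part of the patch itself): the paragraph above narrows the affected parts to AMD display families 0x17 and 0x19, and the section that follows names the sysfs file the kernel exposes. A minimal userspace sketch of both checks, assuming a Linux host and the <cpuid.h> helper shipped with GCC/clang::

  /*
   * Illustration only: report whether the local CPU is in the families the
   * text above calls out (Zen 1-4) and print the kernel's own verdict from
   * the sysfs file documented in the next section.
   */
  #include <cpuid.h>
  #include <stdio.h>
  #include <string.h>

  static unsigned int display_family(void)
  {
      unsigned int eax, ebx, ecx, edx, base, ext;

      if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
          return 0;
      base = (eax >> 8) & 0xf;
      ext = (eax >> 20) & 0xff;
      /* AMD convention: add the extended family when the base family is 0xf */
      return base == 0xf ? base + ext : base;
  }

  int main(void)
  {
      unsigned int eax, ebx, ecx, edx, fam = display_family();
      char vendor[13] = { 0 }, line[256] = { 0 };
      FILE *f;

      __get_cpuid(0, &eax, &ebx, &ecx, &edx);
      memcpy(vendor, &ebx, 4);     /* "Auth" */
      memcpy(vendor + 4, &edx, 4); /* "enti" */
      memcpy(vendor + 8, &ecx, 4); /* "cAMD" */

      printf("vendor %s, family %#x: %s\n", vendor, fam,
             (!strcmp(vendor, "AuthenticAMD") && (fam == 0x17 || fam == 0x19)) ?
             "in the affected range (Zen 1-4)" : "outside the affected range");

      f = fopen("/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow", "r");
      if (f && fgets(line, sizeof(line), f))
          printf("kernel reports: %s", line);
      if (f)
          fclose(f);
      return 0;
  }

The sysfs value remains the authoritative answer; the CPUID check above merely mirrors the family range quoted in the text.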
+ +System information and options +------------------------------ + +First of all, it is required that the latest microcode be loaded for +mitigations to be effective. + +The sysfs file showing SRSO mitigation status is: + + /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow + +The possible values in this file are: + + * 'Not affected': + + The processor is not vulnerable + + * 'Vulnerable: no microcode': + + The processor is vulnerable, no microcode extending IBPB + functionality to address the vulnerability has been applied. + + * 'Mitigation: microcode': + + Extended IBPB functionality microcode patch has been applied. It does + not address User->Kernel and Guest->Host transitions protection but it + does address User->User and VM->VM attack vectors. + + Note that User->User mitigation is controlled by how the IBPB aspect in + the Spectre v2 mitigation is selected: + + * conditional IBPB: + + where each process can select whether it needs an IBPB issued + around it (PR_SPEC_DISABLE/_ENABLE etc.), see :doc:`spectre` + + * strict: + + i.e., always on - by supplying spectre_v2_user=on on the kernel + command line + + (spec_rstack_overflow=microcode) + + * 'Mitigation: safe RET': + + Software-only mitigation. It complements the extended IBPB microcode + patch functionality by addressing User->Kernel and Guest->Host + transitions protection. + + Selected by default or by spec_rstack_overflow=safe-ret + + * 'Mitigation: IBPB': + + Similar protection as "safe RET" above but employs an IBPB barrier on + privilege domain crossings (User->Kernel, Guest->Host). + + (spec_rstack_overflow=ibpb) + + * 'Mitigation: IBPB on VMEXIT': + + Mitigation addressing the cloud provider scenario - the Guest->Host + transitions only. + + (spec_rstack_overflow=ibpb-vmexit) + + + +In order to exploit the vulnerability, an attacker needs to: + + - gain local access on the machine + + - break kASLR + + - find gadgets in the running kernel in order to use them in the exploit + + - potentially create and pin an additional workload on the sibling + thread, depending on the microarchitecture (not necessary on fam 0x19) + + - run the exploit + +Considering the performance implications of each mitigation type, the +default one is 'Mitigation: safe RET' which should take care of most +attack vectors, including the local User->Kernel one. + +As always, the user is advised to keep her/his system up-to-date by +applying software updates regularly. + +The default setting will be reevaluated when needed and especially when +new attack vectors appear. + +As one can surmise, 'Mitigation: safe RET' does come at the cost of some +performance depending on the workload. If one trusts her/his userspace +and does not want to suffer the performance impact, one can always +disable the mitigation with spec_rstack_overflow=off. + +Similarly, 'Mitigation: IBPB' is another full mitigation type employing +an indirect branch prediction barrier after having applied the required +microcode patch for one's system. This mitigation also comes at +a performance cost. + +Mitigation: safe RET +-------------------- + +The mitigation works by ensuring all RET instructions speculate to +a controlled location, similar to how speculation is controlled in the +retpoline sequence. To accomplish this, the __x86_return_thunk forces +the CPU to mispredict every function return using a 'safe return' +sequence. + +To ensure the safety of this mitigation, the kernel must ensure that the +safe return sequence is itself free from attacker interference.
In Zen3 +and Zen4, this is accomplished by creating a BTB alias between the +untraining function srso_alias_untrain_ret() and the safe return +function srso_alias_safe_ret() which results in evicting a potentially +poisoned BTB entry and using that safe one for all function returns. + +In older Zen1 and Zen2, this is accomplished using a reinterpretation +technique similar to Retbleed one: srso_untrain_ret() and +srso_safe_ret(). only in patch2: unchanged: --- linux-6.2.0.orig/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml +++ linux-6.2.0/Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml @@ -86,7 +86,7 @@ of the MAX chips to the GyroADC, while MISO line of each Maxim ADC connects to a shared input pin of the GyroADC. enum: - - adi,7476 + - adi,ad7476 - fujitsu,mb88101a - maxim,max1162 - maxim,max11100 only in patch2: unchanged: --- linux-6.2.0.orig/Documentation/devicetree/bindings/sound/tas2562.yaml +++ linux-6.2.0/Documentation/devicetree/bindings/sound/tas2562.yaml @@ -55,7 +55,9 @@ description: TDM TX current sense time slot. '#sound-dai-cells': - const: 1 + # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward + # compatibility but is deprecated. + enum: [0, 1] required: - compatible @@ -72,7 +74,7 @@ codec: codec@4c { compatible = "ti,tas2562"; reg = <0x4c>; - #sound-dai-cells = <1>; + #sound-dai-cells = <0>; interrupt-parent = <&gpio1>; interrupts = <14>; shutdown-gpios = <&gpio1 15 0>; only in patch2: unchanged: --- linux-6.2.0.orig/Documentation/devicetree/bindings/sound/tas2770.yaml +++ linux-6.2.0/Documentation/devicetree/bindings/sound/tas2770.yaml @@ -57,7 +57,9 @@ - 1 # Falling edge '#sound-dai-cells': - const: 1 + # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward + # compatibility but is deprecated. + enum: [0, 1] required: - compatible @@ -74,7 +76,7 @@ codec: codec@41 { compatible = "ti,tas2770"; reg = <0x41>; - #sound-dai-cells = <1>; + #sound-dai-cells = <0>; interrupt-parent = <&gpio1>; interrupts = <14>; reset-gpio = <&gpio1 15 0>; only in patch2: unchanged: --- linux-6.2.0.orig/Documentation/devicetree/bindings/sound/tas27xx.yaml +++ linux-6.2.0/Documentation/devicetree/bindings/sound/tas27xx.yaml @@ -50,7 +50,9 @@ description: TDM TX voltage sense time slot. '#sound-dai-cells': - const: 1 + # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward + # compatibility but is deprecated. + enum: [0, 1] required: - compatible @@ -67,7 +69,7 @@ codec: codec@38 { compatible = "ti,tas2764"; reg = <0x38>; - #sound-dai-cells = <1>; + #sound-dai-cells = <0>; interrupt-parent = <&gpio1>; interrupts = <14>; reset-gpios = <&gpio1 15 0>; only in patch2: unchanged: --- linux-6.2.0.orig/Documentation/devicetree/bindings/usb/snps,dwc3.yaml +++ linux-6.2.0/Documentation/devicetree/bindings/usb/snps,dwc3.yaml @@ -260,7 +260,7 @@ description: High-Speed PHY interface selection between UTMI+ and ULPI when the DWC_USB3_HSPHY_INTERFACE has value 3. - $ref: /schemas/types.yaml#/definitions/uint8 + $ref: /schemas/types.yaml#/definitions/string enum: [utmi, ulpi] snps,quirk-frame-length-adjustment: only in patch2: unchanged: --- linux-6.2.0.orig/Documentation/mm/page_table_check.rst +++ linux-6.2.0/Documentation/mm/page_table_check.rst @@ -54,3 +54,22 @@ Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page table support without extra kernel parameter. 
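Aside on the safe-RET aliasing described in the srso.rst text above: the arch/x86/kernel/vmlinux.lds.S hunk later in this patch asserts that srso_alias_untrain_ret() and srso_alias_safe_ret() differ only in address bits 2, 8, 14 and 20, and, because the XOR operator only exists in newer GNU ld (2.41) and lld (17), it computes the XOR as (A | B) - (A & B). A tiny standalone check of that identity, using invented addresses purely for illustration::

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
      /* The alias mask the linker-script assertion checks against. */
      const uint64_t mask = (1u << 2) | (1u << 8) | (1u << 14) | (1u << 20);
      /* Hypothetical addresses, not the real symbol values. */
      uint64_t untrain = 0xffffffff81a01040ULL;
      uint64_t safe = untrain ^ mask;

      /*
       * OR collects every bit set in either operand, AND keeps the bits set
       * in both, so their difference is exactly the differing bits:
       * (A | B) - (A & B) == A ^ B.
       */
      assert(((untrain | safe) - (untrain & safe)) == (untrain ^ safe));
      printf("xor of the pair: %#llx (expected %#llx)\n",
             (unsigned long long)(untrain ^ safe),
             (unsigned long long)mask);
      return 0;
  }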
+ +Implementation notes +==================== + +We specifically decided not to use VMA information in order to avoid relying on +MM states (except for limited "struct page" info). The page table check is a +separate from Linux-MM state machine that verifies that the user accessible +pages are not falsely shared. + +PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without +EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory +regions into the userspace via /dev/mem. At the same time, pages may change +their properties (e.g., from anonymous pages to named pages) while they are +still being mapped in the userspace, leading to "corruption" detected by the +page table check. + +Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via +/dev/mem. However, these pages are always considered as named pages, so they +won't break the logic used in the page table check. only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm/boot/dts/at91-sama7g5ek.dts +++ linux-6.2.0/arch/arm/boot/dts/at91-sama7g5ek.dts @@ -792,7 +792,7 @@ }; &shdwc { - atmel,shdwc-debouncer = <976>; + debounce-delay-us = <976>; status = "okay"; input@0 { only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm/boot/dts/stm32f7-pinctrl.dtsi +++ linux-6.2.0/arch/arm/boot/dts/stm32f7-pinctrl.dtsi @@ -284,6 +284,88 @@ slew-rate = <2>; }; }; + + can1_pins_a: can1-0 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + }; + }; + + can1_pins_b: can1-1 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + }; + }; + + can1_pins_c: can1-2 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + + }; + }; + + can1_pins_d: can1-3 { + pins1 { + pinmux = ; /* CAN1_TX */ + }; + pins2 { + pinmux = ; /* CAN1_RX */ + bias-pull-up; + + }; + }; + + can2_pins_a: can2-0 { + pins1 { + pinmux = ; /* CAN2_TX */ + }; + pins2 { + pinmux = ; /* CAN2_RX */ + bias-pull-up; + }; + }; + + can2_pins_b: can2-1 { + pins1 { + pinmux = ; /* CAN2_TX */ + }; + pins2 { + pinmux = ; /* CAN2_RX */ + bias-pull-up; + }; + }; + + can3_pins_a: can3-0 { + pins1 { + pinmux = ; /* CAN3_TX */ + }; + pins2 { + pinmux = ; /* CAN3_RX */ + bias-pull-up; + }; + }; + + can3_pins_b: can3-1 { + pins1 { + pinmux = ; /* CAN3_TX */ + }; + pins2 { + pinmux = ; /* CAN3_RX */ + bias-pull-up; + }; + }; }; }; }; only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm/kernel/unwind.c +++ linux-6.2.0/arch/arm/kernel/unwind.c @@ -308,6 +308,29 @@ return URC_OK; } +static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl) +{ + unsigned long bytes = 0; + unsigned long insn; + unsigned long result = 0; + + /* + * unwind_get_byte() will advance `ctrl` one instruction at a time, so + * loop until we get an instruction byte where bit 7 is not set. + * + * Note: This decodes a maximum of 4 bytes to output 28 bits data where + * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence + * it is sufficient for unwinding the stack. + */ + do { + insn = unwind_get_byte(ctrl); + result |= (insn & 0x7f) << (bytes * 7); + bytes++; + } while (!!(insn & 0x80) && (bytes != sizeof(result))); + + return result; +} + /* * Execute the current unwind instruction. 
*/ @@ -361,7 +384,7 @@ if (ret) goto error; } else if (insn == 0xb2) { - unsigned long uleb128 = unwind_get_byte(ctrl); + unsigned long uleb128 = unwind_decode_uleb128(ctrl); ctrl->vrs[SP] += 0x204 + (uleb128 << 2); } else { only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm/mach-at91/pm.c +++ linux-6.2.0/arch/arm/mach-at91/pm.c @@ -334,16 +334,14 @@ pdev = of_find_device_by_node(eth->np); if (!pdev) return false; + /* put_device(eth->dev) is called at the end of suspend. */ eth->dev = &pdev->dev; } /* No quirks if device isn't a wakeup source. */ - if (!device_may_wakeup(eth->dev)) { - put_device(eth->dev); + if (!device_may_wakeup(eth->dev)) return false; - } - /* put_device(eth->dev) is called at the end of suspend. */ return true; } @@ -439,14 +437,14 @@ pr_err("AT91: PM: failed to enable %s clocks\n", j == AT91_PM_G_ETH ? "geth" : "eth"); } - } else { - /* - * Release the reference to eth->dev taken in - * at91_pm_eth_quirk_is_valid(). - */ - put_device(eth->dev); - eth->dev = NULL; } + + /* + * Release the reference to eth->dev taken in + * at91_pm_eth_quirk_is_valid(). + */ + put_device(eth->dev); + eth->dev = NULL; } return ret; only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi +++ linux-6.2.0/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi @@ -90,6 +90,8 @@ clocks = <&uart0_lpcg IMX_LPCG_CLK_4>, <&uart0_lpcg IMX_LPCG_CLK_0>; clock-names = "ipg", "baud"; + assigned-clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <80000000>; power-domains = <&pd IMX_SC_R_UART_0>; status = "disabled"; }; @@ -100,6 +102,8 @@ clocks = <&uart1_lpcg IMX_LPCG_CLK_4>, <&uart1_lpcg IMX_LPCG_CLK_0>; clock-names = "ipg", "baud"; + assigned-clocks = <&clk IMX_SC_R_UART_1 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <80000000>; power-domains = <&pd IMX_SC_R_UART_1>; status = "disabled"; }; @@ -110,6 +114,8 @@ clocks = <&uart2_lpcg IMX_LPCG_CLK_4>, <&uart2_lpcg IMX_LPCG_CLK_0>; clock-names = "ipg", "baud"; + assigned-clocks = <&clk IMX_SC_R_UART_2 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <80000000>; power-domains = <&pd IMX_SC_R_UART_2>; status = "disabled"; }; @@ -120,6 +126,8 @@ clocks = <&uart3_lpcg IMX_LPCG_CLK_4>, <&uart3_lpcg IMX_LPCG_CLK_0>; clock-names = "ipg", "baud"; + assigned-clocks = <&clk IMX_SC_R_UART_3 IMX_SC_PM_CLK_PER>; + assigned-clock-rates = <80000000>; power-domains = <&pd IMX_SC_R_UART_3>; status = "disabled"; }; only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi +++ linux-6.2.0/arch/arm64/boot/dts/freescale/imx8mn-beacon-baseboard.dtsi @@ -81,7 +81,7 @@ &ecspi2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_espi2>; - cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>; + cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>; status = "okay"; eeprom@0 { @@ -202,7 +202,7 @@ MX8MN_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x82 MX8MN_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x82 MX8MN_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x82 - MX8MN_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x41 + MX8MN_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x41 >; }; only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/boot/dts/freescale/imx8qm-mek.dts +++ linux-6.2.0/arch/arm64/boot/dts/freescale/imx8qm-mek.dts @@ -82,8 +82,8 @@ pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <4>; vmmc-supply = <®_usdhc2_vmmc>; - cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>; - wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>; + cd-gpios = <&lsio_gpio5 22 GPIO_ACTIVE_LOW>; + wp-gpios = <&lsio_gpio5 21 GPIO_ACTIVE_HIGH>; status = "okay"; }; only in patch2: unchanged: --- 
linux-6.2.0.orig/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi +++ linux-6.2.0/arch/arm64/boot/dts/qcom/sc7180-lite.dtsi @@ -16,3 +16,11 @@ &cpu6_opp12 { opp-peak-kBps = <8532000 23347200>; }; + +&cpu6_opp13 { + opp-peak-kBps = <8532000 23347200>; +}; + +&cpu6_opp14 { + opp-peak-kBps = <8532000 23347200>; +}; only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts +++ linux-6.2.0/arch/arm64/boot/dts/qcom/sm6375-sony-xperia-murray-pdx225.dts @@ -151,12 +151,12 @@ }; &remoteproc_adsp { - firmware-name = "qcom/Sony/murray/adsp.mbn"; + firmware-name = "qcom/sm6375/Sony/murray/adsp.mbn"; status = "okay"; }; &remoteproc_cdsp { - firmware-name = "qcom/Sony/murray/cdsp.mbn"; + firmware-name = "qcom/sm6375/Sony/murray/cdsp.mbn"; status = "okay"; }; only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/kernel/vdso.c +++ linux-6.2.0/arch/arm64/kernel/vdso.c @@ -290,7 +290,7 @@ memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start, kuser_sz); - aarch32_vectors_page = virt_to_page(vdso_page); + aarch32_vectors_page = virt_to_page((void *)vdso_page); return 0; } only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/kvm/hyp/include/hyp/switch.h +++ linux-6.2.0/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -351,17 +351,21 @@ return false; } -static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) +static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code) { if (!__populate_fault_info(vcpu)) return true; return false; } +static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) + __alias(kvm_hyp_handle_memory_fault); +static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code) + __alias(kvm_hyp_handle_memory_fault); static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) { - if (!__populate_fault_info(vcpu)) + if (kvm_hyp_handle_memory_fault(vcpu, exit_code)) return true; if (static_branch_unlikely(&vgic_v2_cpuif_trap)) { only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ linux-6.2.0/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -568,7 +568,7 @@ struct check_walk_data { enum pkvm_page_state desired; - enum pkvm_page_state (*get_page_state)(kvm_pte_t pte); + enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr); }; static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx, @@ -576,10 +576,7 @@ { struct check_walk_data *d = ctx->arg; - if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old))) - return -EINVAL; - - return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM; + return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 
0 : -EPERM; } static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size, @@ -594,8 +591,11 @@ return kvm_pgtable_walk(pgt, addr, size, &walker); } -static enum pkvm_page_state host_get_page_state(kvm_pte_t pte) +static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr) { + if (!addr_is_allowed_memory(addr)) + return PKVM_NOPAGE; + if (!kvm_pte_valid(pte) && pte) return PKVM_NOPAGE; @@ -702,7 +702,7 @@ return host_stage2_set_owner_locked(addr, size, host_id); } -static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte) +static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr) { if (!kvm_pte_valid(pte)) return PKVM_NOPAGE; only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/kvm/hyp/nvhe/switch.c +++ linux-6.2.0/arch/arm64/kvm/hyp/nvhe/switch.c @@ -186,6 +186,7 @@ [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; @@ -196,6 +197,7 @@ [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/kvm/hyp/vhe/switch.c +++ linux-6.2.0/arch/arm64/kvm/hyp/vhe/switch.c @@ -110,6 +110,7 @@ [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd, [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low, [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, + [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/kvm/vgic/vgic-v2.c +++ linux-6.2.0/arch/arm64/kvm/vgic/vgic-v2.c @@ -312,12 +312,6 @@ return ret; } - ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2); - if (ret) { - kvm_err("Unable to register VGIC MMIO regions\n"); - return ret; - } - if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) { ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base, kvm_vgic_global_state.vcpu_base, only in patch2: unchanged: --- linux-6.2.0.orig/arch/arm64/kvm/vgic/vgic-v3.c +++ linux-6.2.0/arch/arm64/kvm/vgic/vgic-v3.c @@ -538,7 +538,6 @@ { struct vgic_dist *dist = &kvm->arch.vgic; struct kvm_vcpu *vcpu; - int ret = 0; unsigned long c; kvm_for_each_vcpu(c, vcpu, kvm) { @@ -568,12 +567,6 @@ return -EBUSY; } - ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3); - if (ret) { - kvm_err("Unable to register VGICv3 dist MMIO regions\n"); - return ret; - } - if (kvm_vgic_global_state.has_gicv4_1) vgic_v4_configure_vsgis(kvm); only in patch2: unchanged: --- linux-6.2.0.orig/arch/riscv/include/asm/perf_event.h +++ linux-6.2.0/arch/riscv/include/asm/perf_event.h @@ -10,4 +10,11 @@ #include #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs + +#define perf_arch_fetch_caller_regs(regs, __ip) { \ + (regs)->epc = (__ip); \ + (regs)->s0 = (unsigned long) __builtin_frame_address(0); \ + (regs)->sp = current_stack_pointer; \ + (regs)->status = SR_PP; \ +} #endif /* _ASM_RISCV_PERF_EVENT_H */ only in patch2: unchanged: --- linux-6.2.0.orig/arch/s390/kernel/topology.c +++ linux-6.2.0/arch/s390/kernel/topology.c @@ -95,7 +95,7 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu) { static cpumask_t mask; - int i; + unsigned int max_cpu; cpumask_clear(&mask); if (!cpumask_test_cpu(cpu, 
&cpu_setup_mask)) @@ -104,9 +104,10 @@ if (topology_mode != TOPOLOGY_MODE_HW) goto out; cpu -= cpu % (smp_cpu_mtid + 1); - for (i = 0; i <= smp_cpu_mtid; i++) { - if (cpumask_test_cpu(cpu + i, &cpu_setup_mask)) - cpumask_set_cpu(cpu + i, &mask); + max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1); + for (; cpu <= max_cpu; cpu++) { + if (cpumask_test_cpu(cpu, &cpu_setup_mask)) + cpumask_set_cpu(cpu, &mask); } out: cpumask_copy(dst, &mask); @@ -123,25 +124,26 @@ unsigned int core; for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) { - unsigned int rcore; - int lcpu, i; + unsigned int max_cpu, rcore; + int cpu; rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin; - lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); - if (lcpu < 0) + cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift); + if (cpu < 0) continue; - for (i = 0; i <= smp_cpu_mtid; i++) { - topo = &cpu_topology[lcpu + i]; + max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1); + for (; cpu <= max_cpu; cpu++) { + topo = &cpu_topology[cpu]; topo->drawer_id = drawer->id; topo->book_id = book->id; topo->socket_id = socket->id; topo->core_id = rcore; - topo->thread_id = lcpu + i; + topo->thread_id = cpu; topo->dedicated = tl_core->d; - cpumask_set_cpu(lcpu + i, &drawer->mask); - cpumask_set_cpu(lcpu + i, &book->mask); - cpumask_set_cpu(lcpu + i, &socket->mask); - smp_cpu_set_polarization(lcpu + i, tl_core->pp); + cpumask_set_cpu(cpu, &drawer->mask); + cpumask_set_cpu(cpu, &book->mask); + cpumask_set_cpu(cpu, &socket->mask); + smp_cpu_set_polarization(cpu, tl_core->pp); } } } only in patch2: unchanged: --- linux-6.2.0.orig/arch/um/drivers/Makefile +++ linux-6.2.0/arch/um/drivers/Makefile @@ -16,7 +16,8 @@ hostaudio-objs := hostaudio_kern.o ubd-objs := ubd_kern.o ubd_user.o port-objs := port_kern.o port_user.o -harddog-objs := harddog_kern.o harddog_user.o +harddog-objs := harddog_kern.o +harddog-builtin-$(CONFIG_UML_WATCHDOG) := harddog_user.o harddog_user_exp.o rtc-objs := rtc_kern.o rtc_user.o LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a) @@ -60,6 +61,7 @@ obj-$(CONFIG_TTY_CHAN) += tty.o obj-$(CONFIG_XTERM_CHAN) += xterm.o xterm_kern.o obj-$(CONFIG_UML_WATCHDOG) += harddog.o +obj-y += $(harddog-builtin-y) $(harddog-builtin-m) obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o obj-$(CONFIG_UML_RANDOM) += random.o obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o only in patch2: unchanged: --- linux-6.2.0.orig/arch/um/drivers/harddog.h +++ linux-6.2.0/arch/um/drivers/harddog.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef UM_WATCHDOG_H +#define UM_WATCHDOG_H + +int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock); +void stop_watchdog(int in_fd, int out_fd); +int ping_watchdog(int fd); + +#endif /* UM_WATCHDOG_H */ only in patch2: unchanged: --- linux-6.2.0.orig/arch/um/drivers/harddog_kern.c +++ linux-6.2.0/arch/um/drivers/harddog_kern.c @@ -47,6 +47,7 @@ #include #include #include "mconsole.h" +#include "harddog.h" MODULE_LICENSE("GPL"); @@ -60,8 +61,6 @@ * Allow only one person to hold it open */ -extern int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock); - static int harddog_open(struct inode *inode, struct file *file) { int err = -EBUSY; @@ -92,8 +91,6 @@ return err; } -extern void stop_watchdog(int in_fd, int out_fd); - static int harddog_release(struct inode *inode, struct file *file) { /* @@ -112,8 +109,6 @@ return 0; } -extern int ping_watchdog(int fd); - static ssize_t harddog_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { 
only in patch2: unchanged: --- linux-6.2.0.orig/arch/um/drivers/harddog_user.c +++ linux-6.2.0/arch/um/drivers/harddog_user.c @@ -7,6 +7,7 @@ #include #include #include +#include "harddog.h" struct dog_data { int stdin_fd; only in patch2: unchanged: --- linux-6.2.0.orig/arch/um/drivers/harddog_user_exp.c +++ linux-6.2.0/arch/um/drivers/harddog_user_exp.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "harddog.h" + +#if IS_MODULE(CONFIG_UML_WATCHDOG) +EXPORT_SYMBOL(start_watchdog); +EXPORT_SYMBOL(stop_watchdog); +EXPORT_SYMBOL(ping_watchdog); +#endif only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/include/asm/cpufeature.h +++ linux-6.2.0/arch/x86/include/asm/cpufeature.h @@ -32,6 +32,7 @@ CPUID_8000_0007_EBX, CPUID_7_EDX, CPUID_8000_001F_EAX, + CPUID_8000_0021_EAX, }; #define X86_CAP_FMT_NUM "%d:%d" @@ -94,8 +95,9 @@ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 20)) + BUILD_BUG_ON_ZERO(NCAPINTS != 21)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -118,8 +120,9 @@ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 20)) + BUILD_BUG_ON_ZERO(NCAPINTS != 21)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \ only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/include/asm/disabled-features.h +++ linux-6.2.0/arch/x86/include/asm/disabled-features.h @@ -124,6 +124,7 @@ #define DISABLED_MASK17 0 #define DISABLED_MASK18 0 #define DISABLED_MASK19 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20) +#define DISABLED_MASK20 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) #endif /* _ASM_X86_DISABLED_FEATURES_H */ only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/include/asm/entry-common.h +++ linux-6.2.0/arch/x86/include/asm/entry-common.h @@ -92,6 +92,7 @@ static __always_inline void arch_exit_to_user_mode(void) { mds_user_clear_cpu_buffers(); + amd_clear_divider(); } #define arch_exit_to_user_mode arch_exit_to_user_mode only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/include/asm/nospec-branch.h +++ linux-6.2.0/arch/x86/include/asm/nospec-branch.h @@ -211,7 +211,7 @@ * eventually turn into it's own annotation. */ .macro ANNOTATE_UNRET_END -#ifdef CONFIG_DEBUG_ENTRY +#if (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)) ANNOTATE_RETPOLINE_SAFE nop #endif @@ -267,9 +267,9 @@ .endm #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" +#define CALL_UNTRAIN_RET "call entry_untrain_ret" #else -#define CALL_ZEN_UNTRAIN_RET "" +#define CALL_UNTRAIN_RET "" #endif /* @@ -277,7 +277,7 @@ * return thunk isn't mapped into the userspace tables (then again, AMD * typically has NO_MELTDOWN). * - * While zen_untrain_ret() doesn't clobber anything but requires stack, + * While retbleed_untrain_ret() doesn't clobber anything but requires stack, * entry_ibpb() will clobber AX, CX, DX. 
* * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point @@ -285,10 +285,10 @@ */ .macro UNTRAIN_RET #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ - defined(CONFIG_CALL_DEPTH_TRACKING) + defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO) ANNOTATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH #endif @@ -299,7 +299,7 @@ defined(CONFIG_CALL_DEPTH_TRACKING) ANNOTATE_UNRET_END ALTERNATIVE_3 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \ __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH #endif @@ -326,15 +326,24 @@ extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; +#ifdef CONFIG_RETHUNK extern void __x86_return_thunk(void); -extern void zen_untrain_ret(void); +#else +static inline void __x86_return_thunk(void) {} +#endif + +extern void retbleed_return_thunk(void); +extern void srso_return_thunk(void); +extern void srso_alias_return_thunk(void); + +extern void retbleed_untrain_ret(void); +extern void srso_untrain_ret(void); +extern void srso_alias_untrain_ret(void); + +extern void entry_untrain_ret(void); extern void entry_ibpb(void); -#ifdef CONFIG_CALL_THUNKS extern void (*x86_return_thunk)(void); -#else -#define x86_return_thunk (&__x86_return_thunk) -#endif #ifdef CONFIG_CALL_DEPTH_TRACKING extern void __x86_return_skl(void); @@ -461,9 +470,6 @@ SPEC_STORE_BYPASS_SECCOMP, }; -extern char __indirect_thunk_start[]; -extern char __indirect_thunk_end[]; - static __always_inline void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) { @@ -475,11 +481,11 @@ : "memory"); } +extern u64 x86_pred_cmd; + static inline void indirect_branch_prediction_barrier(void) { - u64 val = PRED_CMD_IBPB; - - alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); + alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB); } /* The Intel SPEC CTRL MSR base value cache */ only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/include/asm/required-features.h +++ linux-6.2.0/arch/x86/include/asm/required-features.h @@ -98,6 +98,7 @@ #define REQUIRED_MASK17 0 #define REQUIRED_MASK18 0 #define REQUIRED_MASK19 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20) +#define REQUIRED_MASK20 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/kernel/vmlinux.lds.S +++ linux-6.2.0/arch/x86/kernel/vmlinux.lds.S @@ -134,14 +134,26 @@ KPROBES_TEXT SOFTIRQENTRY_TEXT #ifdef CONFIG_RETPOLINE - __indirect_thunk_start = .; - *(.text.__x86.*) - __indirect_thunk_end = .; + *(.text..__x86.indirect_thunk) + *(.text..__x86.return_thunk) #endif STATIC_CALL_TEXT ALIGN_ENTRY_TEXT_BEGIN +#ifdef CONFIG_CPU_SRSO + *(.text..__x86.rethunk_untrain) +#endif + ENTRY_TEXT + +#ifdef CONFIG_CPU_SRSO + /* + * See the comment above srso_alias_untrain_ret()'s + * definition. + */ + . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + *(.text..__x86.rethunk_safe) +#endif ALIGN_ENTRY_TEXT_END *(.gnu.warning) @@ -509,4 +521,25 @@ "fixed_percpu_data is not at start of per-cpu area"); #endif +#ifdef CONFIG_RETHUNK +. 
= ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); +. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); +#endif + +#ifdef CONFIG_CPU_SRSO +/* + * GNU ld cannot do XOR until 2.41. + * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1 + * + * LLVM lld cannot do XOR until lld-17. + * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb + * + * Instead do: (A | B) - (A & B) in order to compute the XOR + * of the two function addresses: + */ +. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - + (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), + "SRSO function pair won't alias"); +#endif + #endif /* CONFIG_X86_64 */ only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/kvm/cpuid.c +++ linux-6.2.0/arch/x86/kvm/cpuid.c @@ -741,6 +741,9 @@ 0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) | F(SME_COHERENT)); + if (cpu_feature_enabled(X86_FEATURE_SRSO_NO)) + kvm_cpu_cap_set(X86_FEATURE_SRSO_NO); + kvm_cpu_cap_mask(CPUID_C000_0001_EDX, F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) | F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) | only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/kvm/reverse_cpuid.h +++ linux-6.2.0/arch/x86/kvm/reverse_cpuid.h @@ -68,6 +68,7 @@ [CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX}, [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX}, [CPUID_7_1_EDX] = { 7, 1, CPUID_EDX}, + [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, }; /* only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/kvm/svm/vmenter.S +++ linux-6.2.0/arch/x86/kvm/svm/vmenter.S @@ -224,6 +224,9 @@ */ UNTRAIN_RET + /* SRSO */ + ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT + /* * Clear all general purpose registers except RSP and RAX to prevent * speculative use of the guest's values, even those that are reloaded only in patch2: unchanged: --- linux-6.2.0.orig/arch/x86/xen/time.c +++ linux-6.2.0/arch/x86/xen/time.c @@ -474,15 +474,51 @@ xen_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK; } +/* + * Check if it is possible to safely use the tsc as a clocksource. This is + * only true if the hypervisor notifies the guest that its tsc is invariant, + * the tsc is stable, and the tsc instruction will never be emulated. + */ +static int __init xen_tsc_safe_clocksource(void) +{ + u32 eax, ebx, ecx, edx; + + if (!(boot_cpu_has(X86_FEATURE_CONSTANT_TSC))) + return 0; + + if (!(boot_cpu_has(X86_FEATURE_NONSTOP_TSC))) + return 0; + + if (check_tsc_unstable()) + return 0; + + /* Leaf 4, sub-leaf 0 (0x40000x03) */ + cpuid_count(xen_cpuid_base() + 3, 0, &eax, &ebx, &ecx, &edx); + + /* tsc_mode = no_emulate (2) */ + if (ebx != 2) + return 0; + + return 1; +} + static void __init xen_time_init(void) { struct pvclock_vcpu_time_info *pvti; int cpu = smp_processor_id(); struct timespec64 tp; - /* As Dom0 is never moved, no penalty on using TSC there */ + /* + * As Dom0 is never moved, no penalty on using TSC there. + * + * If it is possible for the guest to determine that the tsc is a safe + * clocksource, then set xen_clocksource rating below that of the tsc + * so that the system prefers tsc instead. 
+ */ if (xen_initial_domain()) xen_clocksource.rating = 275; + else if (xen_tsc_safe_clocksource()) + xen_clocksource.rating = 299; clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC); only in patch2: unchanged: --- linux-6.2.0.orig/block/blk-settings.c +++ linux-6.2.0/block/blk-settings.c @@ -909,6 +909,7 @@ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model) { struct request_queue *q = disk->queue; + unsigned int old_model = q->limits.zoned; switch (model) { case BLK_ZONED_HM: @@ -946,7 +947,7 @@ */ blk_queue_zone_write_granularity(q, queue_logical_block_size(q)); - } else { + } else if (old_model != BLK_ZONED_NONE) { disk_clear_zone_settings(disk); } } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/acpi/acpi_cmos_rtc.c +++ linux-6.2.0/drivers/acpi/acpi_cmos_rtc.c @@ -51,12 +51,11 @@ return AE_OK; } -static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev, - const struct acpi_device_id *id) +int acpi_install_cmos_rtc_space_handler(acpi_handle handle) { acpi_status status; - status = acpi_install_address_space_handler(adev->handle, + status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler, NULL, NULL); @@ -67,18 +66,30 @@ return 1; } +EXPORT_SYMBOL_GPL(acpi_install_cmos_rtc_space_handler); -static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev) +void acpi_remove_cmos_rtc_space_handler(acpi_handle handle) { - if (ACPI_FAILURE(acpi_remove_address_space_handler(adev->handle, + if (ACPI_FAILURE(acpi_remove_address_space_handler(handle, ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler))) pr_err("Error removing CMOS-RTC region handler\n"); } +EXPORT_SYMBOL_GPL(acpi_remove_cmos_rtc_space_handler); + +static int acpi_cmos_rtc_attach_handler(struct acpi_device *adev, const struct acpi_device_id *id) +{ + return acpi_install_cmos_rtc_space_handler(adev->handle); +} + +static void acpi_cmos_rtc_detach_handler(struct acpi_device *adev) +{ + acpi_remove_cmos_rtc_space_handler(adev->handle); +} static struct acpi_scan_handler cmos_rtc_handler = { .ids = acpi_cmos_rtc_ids, - .attach = acpi_install_cmos_rtc_space_handler, - .detach = acpi_remove_cmos_rtc_space_handler, + .attach = acpi_cmos_rtc_attach_handler, + .detach = acpi_cmos_rtc_detach_handler, }; void __init acpi_cmos_rtc_init(void) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/acpi/acpi_tad.c +++ linux-6.2.0/drivers/acpi/acpi_tad.c @@ -557,6 +557,7 @@ static int acpi_tad_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + acpi_handle handle = ACPI_HANDLE(dev); struct acpi_tad_driver_data *dd = dev_get_drvdata(dev); device_init_wakeup(dev, false); @@ -577,6 +578,7 @@ pm_runtime_put_sync(dev); pm_runtime_disable(dev); + acpi_remove_cmos_rtc_space_handler(handle); return 0; } @@ -589,6 +591,11 @@ unsigned long long caps; int ret; + ret = acpi_install_cmos_rtc_space_handler(handle); + if (ret < 0) { + dev_info(dev, "Unable to install space handler\n"); + return -ENODEV; + } /* * Initialization failure messages are mostly about firmware issues, so * print them at the "info" level. 
@@ -596,22 +603,27 @@ status = acpi_evaluate_integer(handle, "_GCP", NULL, &caps); if (ACPI_FAILURE(status)) { dev_info(dev, "Unable to get capabilities\n"); - return -ENODEV; + ret = -ENODEV; + goto remove_handler; } if (!(caps & ACPI_TAD_AC_WAKE)) { dev_info(dev, "Unsupported capabilities\n"); - return -ENODEV; + ret = -ENODEV; + goto remove_handler; } if (!acpi_has_method(handle, "_PRW")) { dev_info(dev, "Missing _PRW\n"); - return -ENODEV; + ret = -ENODEV; + goto remove_handler; } dd = devm_kzalloc(dev, sizeof(*dd), GFP_KERNEL); - if (!dd) - return -ENOMEM; + if (!dd) { + ret = -ENOMEM; + goto remove_handler; + } dd->capabilities = caps; dev_set_drvdata(dev, dd); @@ -653,6 +665,11 @@ fail: acpi_tad_remove(pdev); + /* Don't fallthrough because cmos rtc space handler is removed in acpi_tad_remove() */ + return ret; + +remove_handler: + acpi_remove_cmos_rtc_space_handler(handle); return ret; } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/block/rnbd/rnbd-proto.h +++ linux-6.2.0/drivers/block/rnbd/rnbd-proto.h @@ -241,7 +241,7 @@ bio_opf = REQ_OP_WRITE; break; case RNBD_OP_FLUSH: - bio_opf = REQ_OP_FLUSH | REQ_PREFLUSH; + bio_opf = REQ_OP_WRITE | REQ_PREFLUSH; break; case RNBD_OP_DISCARD: bio_opf = REQ_OP_DISCARD; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/dma/at_hdmac.c +++ linux-6.2.0/drivers/dma/at_hdmac.c @@ -132,7 +132,7 @@ #define ATC_DST_PIP BIT(12) /* Destination Picture-in-Picture enabled */ #define ATC_SRC_DSCR_DIS BIT(16) /* Src Descriptor fetch disable */ #define ATC_DST_DSCR_DIS BIT(20) /* Dst Descriptor fetch disable */ -#define ATC_FC GENMASK(22, 21) /* Choose Flow Controller */ +#define ATC_FC GENMASK(23, 21) /* Choose Flow Controller */ #define ATC_FC_MEM2MEM 0x0 /* Mem-to-Mem (DMA) */ #define ATC_FC_MEM2PER 0x1 /* Mem-to-Periph (DMA) */ #define ATC_FC_PER2MEM 0x2 /* Periph-to-Mem (DMA) */ @@ -153,8 +153,6 @@ #define ATC_AUTO BIT(31) /* Auto multiple buffer tx enable */ /* Bitfields in CFG */ -#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */ - #define ATC_SRC_PER GENMASK(3, 0) /* Channel src rq associated with periph handshaking ifc h */ #define ATC_DST_PER GENMASK(7, 4) /* Channel dst rq associated with periph handshaking ifc h */ #define ATC_SRC_REP BIT(8) /* Source Replay Mod */ @@ -181,10 +179,15 @@ #define ATC_DPIP_HOLE GENMASK(15, 0) #define ATC_DPIP_BOUNDARY GENMASK(25, 16) -#define ATC_SRC_PER_ID(id) (FIELD_PREP(ATC_SRC_PER_MSB, (id)) | \ - FIELD_PREP(ATC_SRC_PER, (id))) -#define ATC_DST_PER_ID(id) (FIELD_PREP(ATC_DST_PER_MSB, (id)) | \ - FIELD_PREP(ATC_DST_PER, (id))) +#define ATC_PER_MSB GENMASK(5, 4) /* Extract MSBs of a handshaking identifier */ +#define ATC_SRC_PER_ID(id) \ + ({ typeof(id) _id = (id); \ + FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \ + FIELD_PREP(ATC_SRC_PER, _id); }) +#define ATC_DST_PER_ID(id) \ + ({ typeof(id) _id = (id); \ + FIELD_PREP(ATC_DST_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \ + FIELD_PREP(ATC_DST_PER, _id); }) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/dma/pl330.c +++ linux-6.2.0/drivers/dma/pl330.c @@ -1050,7 +1050,7 @@ return true; } -static bool _start(struct pl330_thread *thrd) +static bool pl330_start_thread(struct pl330_thread *thrd) { switch (_state(thrd)) { case PL330_STATE_FAULT_COMPLETING: @@ -1702,7 +1702,7 @@ thrd->req_running = -1; /* Get going again ASAP */ - _start(thrd); + pl330_start_thread(thrd); /* For now, just make a list of callbacks to be done */ list_add_tail(&descdone->rqd, &pl330->req_done); @@ 
-2089,7 +2089,7 @@ } else { /* Make sure the PL330 Channel thread is active */ spin_lock(&pch->thread->dmac->lock); - _start(pch->thread); + pl330_start_thread(pch->thread); spin_unlock(&pch->thread->dmac->lock); } @@ -2107,7 +2107,7 @@ if (power_down) { pch->active = true; spin_lock(&pch->thread->dmac->lock); - _start(pch->thread); + pl330_start_thread(pch->thread); spin_unlock(&pch->thread->dmac->lock); power_down = false; } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpio/gpio-sim.c +++ linux-6.2.0/drivers/gpio/gpio-sim.c @@ -721,8 +721,10 @@ if (!line_names) return ERR_PTR(-ENOMEM); - list_for_each_entry(line, &bank->line_list, siblings) - line_names[line->offset] = line->name; + list_for_each_entry(line, &bank->line_list, siblings) { + if (line->name && (line->offset <= max_offset)) + line_names[line->offset] = line->name; + } return line_names; } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -526,6 +526,8 @@ case IP_VERSION(9, 3, 0): /* GC 10.3.7 */ case IP_VERSION(10, 3, 7): + /* GC 11.0.1 */ + case IP_VERSION(11, 0, 1): if (amdgpu_tmz == 0) { adev->gmc.tmz_enabled = false; dev_info(adev->dev, @@ -548,7 +550,6 @@ case IP_VERSION(10, 3, 1): /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): - case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 4): /* Don't enable it by default yet. */ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -564,7 +564,6 @@ return r; } - (*vmbo)->shadow->parent = amdgpu_bo_ref(bo); amdgpu_bo_add_to_shadow_list(*vmbo); return 0; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ linux-6.2.0/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -799,7 +799,7 @@ { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct drm_buddy *mm = &mgr->mm; - struct drm_buddy_block *block; + struct amdgpu_vram_reservation *rsv; drm_printf(printer, " vis usage:%llu\n", amdgpu_vram_mgr_vis_usage(mgr)); @@ -811,8 +811,9 @@ drm_buddy_print(mm, printer); drm_printf(printer, "reserved:\n"); - list_for_each_entry(block, &mgr->reserved_pages, link) - drm_buddy_block_print(mm, block, printer); + list_for_each_entry(rsv, &mgr->reserved_pages, blocks) + drm_printf(printer, "%#018llx-%#018llx: %llu\n", + rsv->start, rsv->start + rsv->size, rsv->size); mutex_unlock(&mgr->lock); } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ linux-6.2.0/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -6925,23 +6925,6 @@ return 0; } -static int si_set_temperature_range(struct amdgpu_device *adev) -{ - int ret; - - ret = si_thermal_enable_alert(adev, false); - if (ret) - return ret; - ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = si_thermal_enable_alert(adev, true); - if (ret) - return ret; - - return ret; -} - static void si_dpm_disable(struct amdgpu_device *adev) { struct rv7xx_power_info *pi = rv770_get_pi(adev); @@ -7626,18 +7609,6 @@ static int si_dpm_late_init(void *handle) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - ret = si_set_temperature_range(adev); - if (ret) - return ret; -#if 0 //TODO ? 
- si_dpm_powergate_uvd(adev, true); -#endif return 0; } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -580,7 +580,7 @@ DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_legacy_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -654,7 +654,8 @@ case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) @@ -681,7 +682,7 @@ DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; uint32_t min, max; @@ -763,7 +764,8 @@ case SMU_MCLK: case SMU_FCLK: for (i = 0; i < count; i++) { - ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -494,7 +494,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); @@ -594,7 +594,8 @@ case SMU_VCLK: case SMU_DCLK: for (i = 0; i < count; i++) { - ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value); if (ret) return ret; if (!value) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -478,7 +478,7 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -512,7 +512,8 @@ break; for (i = 0; i < count; i++) { - ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? 
(count - i - 1) : i; + ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) break; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -866,7 +866,7 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min = 0, max = 0; @@ -898,7 +898,8 @@ goto print_clk_out; for (i = 0; i < count; i++) { - ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) goto print_clk_out; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ linux-6.2.0/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -1000,7 +1000,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { - int i, size = 0, ret = 0; + int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t min, max; @@ -1033,7 +1033,8 @@ goto print_clk_out; for (i = 0; i < count; i++) { - ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value); + idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i; + ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value); if (ret) goto print_clk_out; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/Makefile +++ linux-6.2.0/drivers/gpu/drm/i915/Makefile @@ -250,6 +250,7 @@ display/intel_hotplug.o \ display/intel_hti.o \ display/intel_lpe_audio.o \ + display/intel_modeset_lock.o \ display/intel_modeset_verify.o \ display/intel_modeset_setup.o \ display/intel_overlay.o \ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_ddi.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_ddi.c @@ -60,6 +60,7 @@ #include "intel_hti.h" #include "intel_lspcon.h" #include "intel_mg_phy_regs.h" +#include "intel_modeset_lock.h" #include "intel_pps.h" #include "intel_psr.h" #include "intel_quirks.h" @@ -2718,9 +2719,6 @@ const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - bool is_tc_port = intel_phy_is_tc(dev_priv, phy); struct intel_crtc *slave_crtc; if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) { @@ -2770,6 +2768,17 @@ else intel_ddi_post_disable_dp(state, encoder, old_crtc_state, old_conn_state); +} + +static void intel_ddi_post_pll_disable(struct intel_atomic_state *state, + struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + bool is_tc_port = intel_phy_is_tc(dev_priv, phy); main_link_aux_power_domain_put(dig_port, old_crtc_state); @@ -3008,6 +3017,8 @@ const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + intel_tc_port_link_cancel_reset_work(enc_to_dig_port(encoder)); + 
intel_hdcp_disable(to_intel_connector(old_conn_state->connector)); if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) @@ -3045,37 +3056,23 @@ intel_hdcp_update_pipe(state, encoder, crtc_state, conn_state); } -static void -intel_ddi_update_prepare(struct intel_atomic_state *state, - struct intel_encoder *encoder, - struct intel_crtc *crtc) +void intel_ddi_update_active_dpll(struct intel_atomic_state *state, + struct intel_encoder *encoder, + struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_crtc_state *crtc_state = - crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL; - int required_lanes = crtc_state ? crtc_state->lane_count : 1; - - drm_WARN_ON(state->base.dev, crtc && crtc->active); - - intel_tc_port_get_link(enc_to_dig_port(encoder), - required_lanes); - if (crtc_state && crtc_state->hw.active) { - struct intel_crtc *slave_crtc; - - intel_update_active_dpll(state, crtc, encoder); + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_crtc *slave_crtc; + enum phy phy = intel_port_to_phy(i915, encoder->port); - for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, - intel_crtc_bigjoiner_slave_pipes(crtc_state)) - intel_update_active_dpll(state, slave_crtc, encoder); - } -} + if (!intel_phy_is_tc(i915, phy)) + return; -static void -intel_ddi_update_complete(struct intel_atomic_state *state, - struct intel_encoder *encoder, - struct intel_crtc *crtc) -{ - intel_tc_port_put_link(enc_to_dig_port(encoder)); + intel_update_active_dpll(state, crtc, encoder); + for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc, + intel_crtc_bigjoiner_slave_pipes(crtc_state)) + intel_update_active_dpll(state, slave_crtc, encoder); } static void @@ -3089,8 +3086,13 @@ enum phy phy = intel_port_to_phy(dev_priv, encoder->port); bool is_tc_port = intel_phy_is_tc(dev_priv, phy); - if (is_tc_port) + if (is_tc_port) { + struct intel_crtc *master_crtc = + to_intel_crtc(crtc_state->uapi.crtc); + intel_tc_port_get_link(dig_port, crtc_state->lane_count); + intel_ddi_update_active_dpll(state, encoder, master_crtc); + } main_link_aux_power_domain_get(dig_port, crtc_state); @@ -3539,6 +3541,37 @@ intel_ddi_get_config(encoder, crtc_state); } +static bool icl_ddi_tc_pll_is_tbt(const struct intel_shared_dpll *pll) +{ + return pll->info->id == DPLL_ID_ICL_TBTPLL; +} + +static enum icl_port_dpll_id +icl_ddi_tc_port_pll_type(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + const struct intel_shared_dpll *pll = crtc_state->shared_dpll; + + if (drm_WARN_ON(&i915->drm, !pll)) + return ICL_PORT_DPLL_DEFAULT; + + if (icl_ddi_tc_pll_is_tbt(pll)) + return ICL_PORT_DPLL_DEFAULT; + else + return ICL_PORT_DPLL_MG_PHY; +} + +enum icl_port_dpll_id +intel_ddi_port_pll_type(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + if (!encoder->port_pll_type) + return ICL_PORT_DPLL_DEFAULT; + + return encoder->port_pll_type(encoder, crtc_state); +} + static void icl_ddi_tc_get_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct intel_shared_dpll *pll) @@ -3551,7 +3584,7 @@ if (drm_WARN_ON(&i915->drm, !pll)) return; - if (pll->info->id == DPLL_ID_ICL_TBTPLL) + if (icl_ddi_tc_pll_is_tbt(pll)) port_dpll_id = ICL_PORT_DPLL_DEFAULT; else port_dpll_id = ICL_PORT_DPLL_MG_PHY; @@ -3564,7 +3597,7 @@ icl_set_active_port_dpll(crtc_state, port_dpll_id); - if 
(crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL) + if (icl_ddi_tc_pll_is_tbt(crtc_state->shared_dpll)) crtc_state->port_clock = icl_calc_tbt_pll_link(i915, encoder->port); else crtc_state->port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll, @@ -3606,7 +3639,8 @@ enum phy phy = intel_port_to_phy(i915, encoder->port); if (intel_phy_is_tc(i915, phy)) - intel_tc_port_sanitize_mode(enc_to_dig_port(encoder)); + intel_tc_port_sanitize_mode(enc_to_dig_port(encoder), + crtc_state); if (crtc_state && intel_crtc_has_dp_encoder(crtc_state)) intel_dp_sync_state(encoder, crtc_state); @@ -3806,7 +3840,7 @@ intel_dp_encoder_flush_work(encoder); if (intel_phy_is_tc(i915, phy)) - intel_tc_port_flush_work(dig_port); + intel_tc_port_cleanup(dig_port); intel_display_power_flush_work(i915); drm_encoder_cleanup(encoder); @@ -3829,9 +3863,19 @@ intel_tc_port_init_mode(dig_port); } +static int intel_ddi_encoder_late_register(struct drm_encoder *_encoder) +{ + struct intel_encoder *encoder = to_intel_encoder(_encoder); + + intel_tc_port_link_reset(enc_to_dig_port(encoder)); + + return 0; +} + static const struct drm_encoder_funcs intel_ddi_funcs = { .reset = intel_ddi_encoder_reset, .destroy = intel_ddi_encoder_destroy, + .late_register = intel_ddi_encoder_late_register, }; static struct intel_connector * @@ -3996,27 +4040,17 @@ state = intel_encoder_hotplug(encoder, connector); - drm_modeset_acquire_init(&ctx, 0); - - for (;;) { - if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) - ret = intel_hdmi_reset_link(encoder, &ctx); - else - ret = intel_dp_retrain_link(encoder, &ctx); - - if (ret == -EDEADLK) { - drm_modeset_backoff(&ctx); - continue; + if (!intel_tc_port_link_reset(dig_port)) { + intel_modeset_lock_ctx_retry(&ctx, NULL, 0, ret) { + if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) + ret = intel_hdmi_reset_link(encoder, &ctx); + else + ret = intel_dp_retrain_link(encoder, &ctx); } - break; + drm_WARN_ON(encoder->base.dev, ret); } - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); - drm_WARN(encoder->base.dev, ret, - "Acquiring modeset locks failed with %i\n", ret); - /* * Unpowered type-c dongles can take some time to boot and be * responsible, so here giving some time to those dongles to power up @@ -4221,33 +4255,29 @@ static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - enum phy phy = intel_port_to_phy(i915, encoder->port); - intel_dp_encoder_suspend(encoder); - - if (!intel_phy_is_tc(i915, phy)) - return; - - intel_tc_port_flush_work(dig_port); } -static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) +static void intel_ddi_tc_encoder_suspend_complete(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - enum phy phy = intel_port_to_phy(i915, encoder->port); + intel_tc_port_suspend(dig_port); +} + +static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder) +{ intel_dp_encoder_shutdown(encoder); intel_hdmi_encoder_shutdown(encoder); +} - if (!intel_phy_is_tc(i915, phy)) - return; +static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_digital_port *dig_port = 
dp_to_dig_port(intel_dp); - intel_tc_port_flush_work(dig_port); + intel_tc_port_cleanup(dig_port); } #define port_tc_name(port) ((port) - PORT_TC1 + '1') @@ -4361,6 +4391,7 @@ encoder->pre_pll_enable = intel_ddi_pre_pll_enable; encoder->pre_enable = intel_ddi_pre_enable; encoder->disable = intel_disable_ddi; + encoder->post_pll_disable = intel_ddi_post_pll_disable; encoder->post_disable = intel_ddi_post_disable; encoder->update_pipe = intel_ddi_update_pipe; encoder->get_hw_state = intel_ddi_get_hw_state; @@ -4400,6 +4431,7 @@ encoder->enable_clock = jsl_ddi_tc_enable_clock; encoder->disable_clock = jsl_ddi_tc_disable_clock; encoder->is_clock_enabled = jsl_ddi_tc_is_clock_enabled; + encoder->port_pll_type = icl_ddi_tc_port_pll_type; encoder->get_config = icl_ddi_combo_get_config; } else { encoder->enable_clock = icl_ddi_combo_enable_clock; @@ -4412,6 +4444,7 @@ encoder->enable_clock = icl_ddi_tc_enable_clock; encoder->disable_clock = icl_ddi_tc_disable_clock; encoder->is_clock_enabled = icl_ddi_tc_is_clock_enabled; + encoder->port_pll_type = icl_ddi_tc_port_pll_type; encoder->get_config = icl_ddi_tc_get_config; } else { encoder->enable_clock = icl_ddi_combo_enable_clock; @@ -4492,10 +4525,21 @@ !intel_bios_encoder_supports_typec_usb(devdata) && !intel_bios_encoder_supports_tbt(devdata); - intel_tc_port_init(dig_port, is_legacy); + if (!is_legacy && init_hdmi) { + is_legacy = !init_dp; + + drm_dbg_kms(&dev_priv->drm, + "VBT says port %c is non-legacy TC and has HDMI (with DP: %s), assume it's %s\n", + port_name(port), + str_yes_no(init_dp), + is_legacy ? "legacy" : "non-legacy"); + } + + encoder->suspend_complete = intel_ddi_tc_encoder_suspend_complete; + encoder->shutdown_complete = intel_ddi_tc_encoder_shutdown_complete; - encoder->update_prepare = intel_ddi_update_prepare; - encoder->update_complete = intel_ddi_update_complete; + if (intel_tc_port_init(dig_port, is_legacy) < 0) + goto err; } drm_WARN_ON(&dev_priv->drm, port > PORT_I); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_ddi.h +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_ddi.h @@ -40,6 +40,9 @@ const struct intel_crtc_state *crtc_state); void hsw_ddi_disable_clock(struct intel_encoder *encoder); bool hsw_ddi_is_clock_enabled(struct intel_encoder *encoder); +enum icl_port_dpll_id +intel_ddi_port_pll_type(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); void hsw_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state); struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder); @@ -69,5 +72,8 @@ int intel_ddi_level(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, int lane); +void intel_ddi_update_active_dpll(struct intel_atomic_state *state, + struct intel_encoder *encoder, + struct intel_crtc *crtc); #endif /* __INTEL_DDI_H__ */ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_dmc.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dmc.c @@ -108,6 +108,8 @@ #define DMC_V3_MAX_MMIO_COUNT 20 #define DMC_V1_MMIO_START_RANGE 0x80000 +#define PIPE_TO_DMC_ID(pipe) (DMC_FW_PIPEA + ((pipe) - PIPE_A)) + struct intel_css_header { /* 0x09 for DMC */ u32 module_type; @@ -407,6 +409,28 @@ PIPEDMC_GATING_DIS, 0); } +void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) +{ + if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe))) + return; + + if (DISPLAY_VER(i915) >= 14) + intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); + else + 
intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); +} + +void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) +{ + if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe))) + return; + + if (DISPLAY_VER(i915) >= 14) + intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); + else + intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); +} + /** * intel_dmc_load_program() - write the firmware from memory to register. * @dev_priv: i915 drm device. only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_dmc.h +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dmc.h @@ -13,6 +13,8 @@ struct drm_i915_error_state_buf; struct drm_i915_private; +enum pipe; + enum { DMC_FW_MAIN = 0, DMC_FW_PIPEA, @@ -48,6 +50,8 @@ void intel_dmc_ucode_init(struct drm_i915_private *i915); void intel_dmc_load_program(struct drm_i915_private *i915); void intel_dmc_disable_program(struct drm_i915_private *i915); +void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe); +void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe); void intel_dmc_ucode_fini(struct drm_i915_private *i915); void intel_dmc_ucode_suspend(struct drm_i915_private *i915); void intel_dmc_ucode_resume(struct drm_i915_private *i915); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_dmc_regs.h +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dmc_regs.h @@ -11,6 +11,16 @@ #define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4) #define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0 +#define _PIPEDMC_CONTROL_A 0x45250 +#define _PIPEDMC_CONTROL_B 0x45254 +#define PIPEDMC_CONTROL(pipe) _MMIO_PIPE(pipe, \ + _PIPEDMC_CONTROL_A, \ + _PIPEDMC_CONTROL_B) +#define PIPEDMC_ENABLE REG_BIT(0) + +#define MTL_PIPEDMC_CONTROL _MMIO(0x45250) +#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4) + #define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000 #define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000 only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -1110,6 +1110,16 @@ struct intel_connector *intel_connector = intel_dp->attached_connector; struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) { + drm_dbg_kms(&dp_to_i915(intel_dp)->drm, + "[CONNECTOR:%d:%s][ENCODER:%d:%s]" + "Link Training failed on disconnected sink.\n", + intel_connector->base.base.id, intel_connector->base.name, + encoder->base.base.id, encoder->base.name); + + return; + } + if (intel_dp->hobl_active) { drm_dbg_kms(&dp_to_i915(intel_dp)->drm, "[ENCODER:%d:%s] Link Training failed with HOBL active, " only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -351,13 +351,35 @@ return NULL; } +/** + * intel_reference_shared_dpll_crtc - Get a DPLL reference for a CRTC + * @crtc: CRTC on which behalf the reference is taken + * @pll: DPLL for which the reference is taken + * @shared_dpll_state: the DPLL atomic state in which the reference is tracked + * + * Take a reference for @pll tracking the use of it by @crtc. 
+ */ +static void +intel_reference_shared_dpll_crtc(const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll, + struct intel_shared_dpll_state *shared_dpll_state) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) != 0); + + shared_dpll_state->pipe_mask |= BIT(crtc->pipe); + + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n", + crtc->base.base.id, crtc->base.name, pll->info->name); +} + static void intel_reference_shared_dpll(struct intel_atomic_state *state, const struct intel_crtc *crtc, const struct intel_shared_dpll *pll, const struct intel_dpll_hw_state *pll_state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_shared_dpll_state *shared_dpll; const enum intel_dpll_id id = pll->info->id; @@ -366,11 +388,29 @@ if (shared_dpll[id].pipe_mask == 0) shared_dpll[id].hw_state = *pll_state; - drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) != 0); + intel_reference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]); +} - shared_dpll[id].pipe_mask |= BIT(crtc->pipe); +/** + * intel_unreference_shared_dpll_crtc - Drop a DPLL reference for a CRTC + * @crtc: CRTC on which behalf the reference is dropped + * @pll: DPLL for which the reference is dropped + * @shared_dpll_state: the DPLL atomic state in which the reference is tracked + * + * Drop a reference for @pll tracking the end of use of it by @crtc. + */ +void +intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll, + struct intel_shared_dpll_state *shared_dpll_state) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); - drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] reserving %s\n", + drm_WARN_ON(&i915->drm, (shared_dpll_state->pipe_mask & BIT(crtc->pipe)) == 0); + + shared_dpll_state->pipe_mask &= ~BIT(crtc->pipe); + + drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); } @@ -378,18 +418,12 @@ const struct intel_crtc *crtc, const struct intel_shared_dpll *pll) { - struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_shared_dpll_state *shared_dpll; const enum intel_dpll_id id = pll->info->id; shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); - drm_WARN_ON(&i915->drm, (shared_dpll[id].pipe_mask & BIT(crtc->pipe)) == 0); - - shared_dpll[id].pipe_mask &= ~BIT(crtc->pipe); - - drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] releasing %s\n", - crtc->base.base.id, crtc->base.name, pll->info->name); + intel_unreference_shared_dpll_crtc(crtc, pll, &shared_dpll[id]); } static void intel_put_dpll(struct intel_atomic_state *state, @@ -4373,7 +4407,7 @@ to_intel_crtc_state(crtc->base.state); if (crtc_state->hw.active && crtc_state->shared_dpll == pll) - pll->state.pipe_mask |= BIT(crtc->pipe); + intel_reference_shared_dpll_crtc(crtc, pll, &pll->state); } pll->active_mask = pll->state.pipe_mask; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -341,6 +341,9 @@ struct intel_encoder *encoder); void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc); +void intel_unreference_shared_dpll_crtc(const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll, + struct intel_shared_dpll_state *shared_dpll_state); void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, enum icl_port_dpll_id port_dpll_id); void 
intel_update_active_dpll(struct intel_atomic_state *state, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_modeset_lock.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_modeset_lock.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include + +#include "intel_display_types.h" +#include "intel_modeset_lock.h" + +void _intel_modeset_lock_begin(struct drm_modeset_acquire_ctx *ctx, + struct intel_atomic_state *state, + unsigned int flags, int *ret) +{ + drm_modeset_acquire_init(ctx, flags); + + if (state) + state->base.acquire_ctx = ctx; + + *ret = -EDEADLK; +} + +bool _intel_modeset_lock_loop(int *ret) +{ + if (*ret == -EDEADLK) { + *ret = 0; + return true; + } + + return false; +} + +void _intel_modeset_lock_end(struct drm_modeset_acquire_ctx *ctx, + struct intel_atomic_state *state, + int *ret) +{ + if (*ret == -EDEADLK) { + if (state) + drm_atomic_state_clear(&state->base); + + *ret = drm_modeset_backoff(ctx); + if (*ret == 0) { + *ret = -EDEADLK; + return; + } + } + + drm_modeset_drop_locks(ctx); + drm_modeset_acquire_fini(ctx); +} only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_modeset_lock.h +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_modeset_lock.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_MODESET_LOCK_H__ +#define __INTEL_MODESET_LOCK_H__ + +#include + +struct drm_modeset_acquire_ctx; +struct intel_atomic_state; + +void _intel_modeset_lock_begin(struct drm_modeset_acquire_ctx *ctx, + struct intel_atomic_state *state, + unsigned int flags, + int *ret); +bool _intel_modeset_lock_loop(int *ret); +void _intel_modeset_lock_end(struct drm_modeset_acquire_ctx *ctx, + struct intel_atomic_state *state, + int *ret); + +/* + * Note that one must always use "continue" rather than + * "break" or "return" to handle errors within the + * intel_modeset_lock_ctx_retry() block. 
+ */ +#define intel_modeset_lock_ctx_retry(ctx, state, flags, ret) \ + for (_intel_modeset_lock_begin((ctx), (state), (flags), &(ret)); \ + _intel_modeset_lock_loop(&(ret)); \ + _intel_modeset_lock_end((ctx), (state), &(ret))) + +#endif /* __INTEL_MODESET_LOCK_H__ */ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -24,26 +24,19 @@ #include "intel_modeset_setup.h" #include "intel_pch_display.h" #include "intel_pm.h" +#include "intel_tc.h" #include "skl_watermark.h" -static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, - struct drm_modeset_acquire_ctx *ctx) +static void intel_crtc_disable_noatomic_begin(struct intel_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) { - struct intel_encoder *encoder; struct drm_i915_private *i915 = to_i915(crtc->base.dev); - struct intel_bw_state *bw_state = - to_intel_bw_state(i915->display.bw.obj.state); - struct intel_cdclk_state *cdclk_state = - to_intel_cdclk_state(i915->display.cdclk.obj.state); - struct intel_dbuf_state *dbuf_state = - to_intel_dbuf_state(i915->display.dbuf.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); struct intel_plane *plane; struct drm_atomic_state *state; - struct intel_crtc_state *temp_crtc_state; + struct intel_crtc *temp_crtc; enum pipe pipe = crtc->pipe; - int ret; if (!crtc_state->hw.active) return; @@ -67,10 +60,17 @@ state->acquire_ctx = ctx; /* Everything's already locked, -EDEADLK can't happen. */ - temp_crtc_state = intel_atomic_get_crtc_state(state, crtc); - ret = drm_atomic_add_affected_connectors(state, &crtc->base); + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, + BIT(pipe) | + intel_crtc_bigjoiner_slave_pipes(crtc_state)) { + struct intel_crtc_state *temp_crtc_state = + intel_atomic_get_crtc_state(state, temp_crtc); + int ret; - drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret); + ret = drm_atomic_add_affected_connectors(state, &temp_crtc->base); + + drm_WARN_ON(&i915->drm, IS_ERR(temp_crtc_state) || ret); + } i915->display.funcs.display->crtc_disable(to_intel_atomic_state(state), crtc); @@ -83,20 +83,81 @@ crtc->active = false; crtc->base.enabled = false; - drm_WARN_ON(&i915->drm, - drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0); - crtc_state->uapi.active = false; - crtc_state->uapi.connector_mask = 0; - crtc_state->uapi.encoder_mask = 0; - intel_crtc_free_hw_state(crtc_state); - memset(&crtc_state->hw, 0, sizeof(crtc_state->hw)); + if (crtc_state->shared_dpll) + intel_unreference_shared_dpll_crtc(crtc, + crtc_state->shared_dpll, + &crtc_state->shared_dpll->state); +} - for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder) +static void set_encoder_for_connector(struct intel_connector *connector, + struct intel_encoder *encoder) +{ + struct drm_connector_state *conn_state = connector->base.state; + + if (conn_state->crtc) + drm_connector_put(&connector->base); + + if (encoder) { + conn_state->best_encoder = &encoder->base; + conn_state->crtc = encoder->base.crtc; + drm_connector_get(&connector->base); + } else { + conn_state->best_encoder = NULL; + conn_state->crtc = NULL; + } +} + +static void reset_encoder_connector_state(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_connector *connector; + struct drm_connector_list_iter conn_iter; + + drm_connector_list_iter_begin(&i915->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) 
{ + if (connector->base.encoder != &encoder->base) + continue; + + set_encoder_for_connector(connector, NULL); + + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; + } + drm_connector_list_iter_end(&conn_iter); +} + +static void reset_crtc_encoder_state(struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_encoder *encoder; + + for_each_encoder_on_crtc(&i915->drm, &crtc->base, encoder) { + reset_encoder_connector_state(encoder); encoder->base.crtc = NULL; + } +} + +static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_bw_state *bw_state = + to_intel_bw_state(i915->display.bw.obj.state); + struct intel_cdclk_state *cdclk_state = + to_intel_cdclk_state(i915->display.cdclk.obj.state); + struct intel_dbuf_state *dbuf_state = + to_intel_dbuf_state(i915->display.dbuf.obj.state); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + enum pipe pipe = crtc->pipe; + + __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi); + intel_crtc_free_hw_state(crtc_state); + intel_crtc_state_reset(crtc_state, crtc); + + reset_crtc_encoder_state(crtc); intel_fbc_disable(crtc); intel_update_watermarks(i915); - intel_disable_shared_dpll(crtc_state); intel_display_power_put_all_in_set(i915, &crtc->enabled_power_domains); @@ -110,6 +171,118 @@ bw_state->num_active_planes[pipe] = 0; } +/* + * Return all the pipes using a transcoder in @transcoder_mask. + * For bigjoiner configs return only the bigjoiner master. + */ +static u8 get_transcoder_pipes(struct drm_i915_private *i915, + u8 transcoder_mask) +{ + struct intel_crtc *temp_crtc; + u8 pipes = 0; + + for_each_intel_crtc(&i915->drm, temp_crtc) { + struct intel_crtc_state *temp_crtc_state = + to_intel_crtc_state(temp_crtc->base.state); + + if (temp_crtc_state->cpu_transcoder == INVALID_TRANSCODER) + continue; + + if (intel_crtc_is_bigjoiner_slave(temp_crtc_state)) + continue; + + if (transcoder_mask & BIT(temp_crtc_state->cpu_transcoder)) + pipes |= BIT(temp_crtc->pipe); + } + + return pipes; +} + +/* + * Return the port sync master and slave pipes linked to @crtc. + * For bigjoiner configs return only the bigjoiner master pipes. 
+ */ +static void get_portsync_pipes(struct intel_crtc *crtc, + u8 *master_pipe_mask, u8 *slave_pipes_mask) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_crtc *master_crtc; + struct intel_crtc_state *master_crtc_state; + enum transcoder master_transcoder; + + if (!is_trans_port_sync_mode(crtc_state)) { + *master_pipe_mask = BIT(crtc->pipe); + *slave_pipes_mask = 0; + + return; + } + + if (is_trans_port_sync_master(crtc_state)) + master_transcoder = crtc_state->cpu_transcoder; + else + master_transcoder = crtc_state->master_transcoder; + + *master_pipe_mask = get_transcoder_pipes(i915, BIT(master_transcoder)); + drm_WARN_ON(&i915->drm, !is_power_of_2(*master_pipe_mask)); + + master_crtc = intel_crtc_for_pipe(i915, ffs(*master_pipe_mask) - 1); + master_crtc_state = to_intel_crtc_state(master_crtc->base.state); + *slave_pipes_mask = get_transcoder_pipes(i915, master_crtc_state->sync_mode_slaves_mask); +} + +static u8 get_bigjoiner_slave_pipes(struct drm_i915_private *i915, u8 master_pipes_mask) +{ + struct intel_crtc *master_crtc; + u8 pipes = 0; + + for_each_intel_crtc_in_pipe_mask(&i915->drm, master_crtc, master_pipes_mask) { + struct intel_crtc_state *master_crtc_state = + to_intel_crtc_state(master_crtc->base.state); + + pipes |= intel_crtc_bigjoiner_slave_pipes(master_crtc_state); + } + + return pipes; +} + +static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + u8 portsync_master_mask; + u8 portsync_slaves_mask; + u8 bigjoiner_slaves_mask; + struct intel_crtc *temp_crtc; + + /* TODO: Add support for MST */ + get_portsync_pipes(crtc, &portsync_master_mask, &portsync_slaves_mask); + bigjoiner_slaves_mask = get_bigjoiner_slave_pipes(i915, + portsync_master_mask | + portsync_slaves_mask); + + drm_WARN_ON(&i915->drm, + portsync_master_mask & portsync_slaves_mask || + portsync_master_mask & bigjoiner_slaves_mask || + portsync_slaves_mask & bigjoiner_slaves_mask); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, bigjoiner_slaves_mask) + intel_crtc_disable_noatomic_begin(temp_crtc, ctx); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_slaves_mask) + intel_crtc_disable_noatomic_begin(temp_crtc, ctx); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, portsync_master_mask) + intel_crtc_disable_noatomic_begin(temp_crtc, ctx); + + for_each_intel_crtc_in_pipe_mask(&i915->drm, temp_crtc, + bigjoiner_slaves_mask | + portsync_slaves_mask | + portsync_master_mask) + intel_crtc_disable_noatomic_complete(temp_crtc); +} + static void intel_modeset_update_connector_atomic_state(struct drm_i915_private *i915) { struct intel_connector *connector; @@ -121,8 +294,7 @@ struct intel_encoder *encoder = to_intel_encoder(connector->base.encoder); - if (conn_state->crtc) - drm_connector_put(&connector->base); + set_encoder_for_connector(connector, encoder); if (encoder) { struct intel_crtc *crtc = @@ -130,14 +302,7 @@ const struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - conn_state->best_encoder = &encoder->base; - conn_state->crtc = &crtc->base; conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3; - - drm_connector_get(&connector->base); - } else { - conn_state->best_encoder = NULL; - conn_state->crtc = NULL; } } drm_connector_list_iter_end(&conn_iter); @@ -210,6 +375,21 @@ return false; } +static bool 
intel_crtc_needs_link_reset(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct intel_encoder *encoder; + + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + if (dig_port && intel_tc_port_link_needs_reset(dig_port)) + return true; + } + + return false; +} + static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); @@ -265,11 +445,12 @@ crtc->pch_fifo_underrun_disabled = true; } -static void intel_sanitize_crtc(struct intel_crtc *crtc, +static bool intel_sanitize_crtc(struct intel_crtc *crtc, struct drm_modeset_acquire_ctx *ctx) { struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + bool needs_link_reset; if (crtc_state->hw.active) { struct intel_plane *plane; @@ -289,13 +470,67 @@ intel_color_commit_arm(crtc_state); } + if (!crtc_state->hw.active || + intel_crtc_is_bigjoiner_slave(crtc_state)) + return false; + + needs_link_reset = intel_crtc_needs_link_reset(crtc); + /* * Adjust the state of the output pipe according to whether we have * active connectors/encoders. */ - if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) && - !intel_crtc_is_bigjoiner_slave(crtc_state)) - intel_crtc_disable_noatomic(crtc, ctx); + if (!needs_link_reset && intel_crtc_has_encoders(crtc)) + return false; + + intel_crtc_disable_noatomic(crtc, ctx); + + /* + * The HPD state on other active/disconnected TC ports may be stuck in + * the connected state until this port is disabled and a ~10ms delay has + * passed, wait here for that so that sanitizing other CRTCs will see the + * up-to-date HPD state. + */ + if (needs_link_reset) + msleep(20); + + return true; +} + +static void intel_sanitize_all_crtcs(struct drm_i915_private *i915, + struct drm_modeset_acquire_ctx *ctx) +{ + struct intel_crtc *crtc; + u32 crtcs_forced_off = 0; + + /* + * An active and disconnected TypeC port prevents the HPD live state + * to get updated on other active/disconnected TypeC ports, so after + * a port gets disabled the CRTCs using other TypeC ports must be + * rechecked wrt. their link status. + */ + for (;;) { + u32 old_mask = crtcs_forced_off; + + for_each_intel_crtc(&i915->drm, crtc) { + u32 crtc_mask = drm_crtc_mask(&crtc->base); + + if (crtcs_forced_off & crtc_mask) + continue; + + if (intel_sanitize_crtc(crtc, ctx)) + crtcs_forced_off |= crtc_mask; + } + if (crtcs_forced_off == old_mask) + break; + } + + for_each_intel_crtc(&i915->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + + intel_crtc_state_dump(crtc_state, NULL, "setup_hw_state"); + } } static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) @@ -698,8 +933,10 @@ drm_crtc_vblank_reset(&crtc->base); - if (crtc_state->hw.active) + if (crtc_state->hw.active) { + intel_dmc_enable_pipe(i915, crtc->pipe); intel_crtc_vblank_on(crtc_state); + } } intel_fbc_sanitize(i915); @@ -709,16 +946,14 @@ for_each_intel_encoder(&i915->drm, encoder) intel_sanitize_encoder(encoder); - for_each_intel_crtc(&i915->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - - intel_sanitize_crtc(crtc, ctx); - intel_crtc_state_dump(crtc_state, NULL, "setup_hw_state"); - } - + /* + * Sanitizing CRTCs needs their connector atomic state to be + * up-to-date, so ensure that already here. 
+ */ intel_modeset_update_connector_atomic_state(i915); + intel_sanitize_all_crtcs(i915, ctx); + intel_dpll_sanitize_state(i915); if (IS_G4X(i915)) { only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/display/intel_tc.h +++ linux-6.2.0/drivers/gpu/drm/i915/display/intel_tc.h @@ -6,9 +6,9 @@ #ifndef __INTEL_TC_H__ #define __INTEL_TC_H__ -#include #include +struct intel_crtc_state; struct intel_digital_port; struct intel_encoder; @@ -17,6 +17,7 @@ bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port); bool intel_tc_port_connected(struct intel_encoder *encoder); +bool intel_tc_port_connected_locked(struct intel_encoder *encoder); u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port); @@ -25,16 +26,21 @@ int required_lanes); void intel_tc_port_init_mode(struct intel_digital_port *dig_port); -void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port); +void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, + const struct intel_crtc_state *crtc_state); void intel_tc_port_lock(struct intel_digital_port *dig_port); void intel_tc_port_unlock(struct intel_digital_port *dig_port); -void intel_tc_port_flush_work(struct intel_digital_port *dig_port); +void intel_tc_port_suspend(struct intel_digital_port *dig_port); void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes); void intel_tc_port_put_link(struct intel_digital_port *dig_port); bool intel_tc_port_ref_held(struct intel_digital_port *dig_port); +bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port); +bool intel_tc_port_link_reset(struct intel_digital_port *dig_port); +void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port); -void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); +int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); +void intel_tc_port_cleanup(struct intel_digital_port *dig_port); bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ linux-6.2.0/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -346,8 +346,10 @@ continue; ce = intel_context_create(data[m].ce[0]->engine); - if (IS_ERR(ce)) + if (IS_ERR(ce)) { + err = PTR_ERR(ce); goto out; + } err = intel_context_pin(ce); if (err) { @@ -367,8 +369,10 @@ worker = kthread_create_worker(0, "igt/parallel:%s", data[n].ce[0]->engine->name); - if (IS_ERR(worker)) + if (IS_ERR(worker)) { + err = PTR_ERR(worker); goto out; + } data[n].worker = worker; } @@ -397,8 +401,10 @@ } } - if (igt_live_test_end(&t)) - err = -EIO; + if (igt_live_test_end(&t)) { + err = err ?: -EIO; + break; + } } out: only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/gt/selftest_execlists.c +++ linux-6.2.0/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -1530,8 +1530,8 @@ struct drm_i915_gem_object *obj; struct i915_vma *vma; enum intel_engine_id id; - int err = -ENOMEM; u32 *map; + int err; /* * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can @@ -1539,13 +1539,17 @@ */ ctx_hi = kernel_context(gt->i915, NULL); - if (!ctx_hi) - return -ENOMEM; + if (IS_ERR(ctx_hi)) + return PTR_ERR(ctx_hi); + ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; ctx_lo = kernel_context(gt->i915, NULL); - if (!ctx_lo) + if (IS_ERR(ctx_lo)) { + err = PTR_ERR(ctx_lo); goto err_ctx_hi; + } + 
ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/i915_driver.c +++ linux-6.2.0/drivers/gpu/drm/i915/i915_driver.c @@ -574,7 +574,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - struct pci_dev *root_pdev; int ret; if (i915_inject_probe_failure(dev_priv)) @@ -686,15 +685,6 @@ intel_bw_init_hw(dev_priv); - /* - * FIXME: Temporary hammer to avoid freezing the machine on our DGFX - * This should be totally removed when we handle the pci states properly - * on runtime PM and on s2idle cases. - */ - root_pdev = pcie_find_root_port(pdev); - if (root_pdev) - pci_d3cold_disable(root_pdev); - return 0; err_msi: @@ -718,16 +708,11 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - struct pci_dev *root_pdev; i915_perf_fini(dev_priv); if (pdev->msi_enabled) pci_disable_msi(pdev); - - root_pdev = pcie_find_root_port(pdev); - if (root_pdev) - pci_d3cold_enable(root_pdev); } /** @@ -1094,11 +1079,19 @@ if (!HAS_DISPLAY(dev_priv)) return; + /* + * TODO: check and remove holding the modeset locks if none of + * the encoders depends on this. + */ drm_modeset_lock_all(&dev_priv->drm); for_each_intel_encoder(&dev_priv->drm, encoder) if (encoder->suspend) encoder->suspend(encoder); drm_modeset_unlock_all(&dev_priv->drm); + + for_each_intel_encoder(&dev_priv->drm, encoder) + if (encoder->suspend_complete) + encoder->suspend_complete(encoder); } static void intel_shutdown_encoders(struct drm_i915_private *dev_priv) @@ -1108,11 +1101,19 @@ if (!HAS_DISPLAY(dev_priv)) return; + /* + * TODO: check and remove holding the modeset locks if none of + * the encoders depends on this. + */ drm_modeset_lock_all(&dev_priv->drm); for_each_intel_encoder(&dev_priv->drm, encoder) if (encoder->shutdown) encoder->shutdown(encoder); drm_modeset_unlock_all(&dev_priv->drm); + + for_each_intel_encoder(&dev_priv->drm, encoder) + if (encoder->shutdown_complete) + encoder->shutdown_complete(encoder); } void i915_driver_shutdown(struct drm_i915_private *i915) @@ -1622,6 +1623,8 @@ { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *root_pdev; struct intel_gt *gt; int ret, i; @@ -1671,6 +1674,15 @@ drm_err(&dev_priv->drm, "Unclaimed access detected prior to suspending\n"); + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM. 
+ */ + root_pdev = pcie_find_root_port(pdev); + if (root_pdev) + pci_d3cold_disable(root_pdev); + rpm->suspended = true; /* @@ -1709,6 +1721,8 @@ { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *root_pdev; struct intel_gt *gt; int ret, i; @@ -1722,6 +1736,11 @@ intel_opregion_notify_adapter(dev_priv, PCI_D0); rpm->suspended = false; + + root_pdev = pcie_find_root_port(pdev); + if (root_pdev) + pci_d3cold_enable(root_pdev); + if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) drm_dbg(&dev_priv->drm, "Unclaimed access during suspend, bios?\n"); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/i915/intel_device_info.h +++ linux-6.2.0/drivers/gpu/drm/i915/intel_device_info.h @@ -191,7 +191,6 @@ func(has_hotplug); \ func(has_hti); \ func(has_ipc); \ - func(has_modular_fia); \ func(has_overlay); \ func(has_psr); \ func(has_psr_hw_tracking); \ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/lima/lima_sched.c +++ linux-6.2.0/drivers/gpu/drm/lima/lima_sched.c @@ -165,7 +165,7 @@ void lima_sched_context_fini(struct lima_sched_pipe *pipe, struct lima_sched_context *context) { - drm_sched_entity_fini(&context->base); + drm_sched_entity_destroy(&context->base); } struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/gpu/drm/msm/msm_iommu.c +++ linux-6.2.0/drivers/gpu/drm/msm/msm_iommu.c @@ -234,7 +234,12 @@ /* Get the pagetable configuration from the domain */ if (adreno_smmu->cookie) ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie); - if (!ttbr1_cfg) + + /* + * If you hit this WARN_ONCE() you are probably missing an entry in + * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c + */ + if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables")) return ERR_PTR(-ENODEV); /* only in patch2: unchanged: --- linux-6.2.0.orig/drivers/hid/hid-google-hammer.c +++ linux-6.2.0/drivers/hid/hid-google-hammer.c @@ -587,6 +587,8 @@ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_JEWEL) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) }, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/i2c/busses/i2c-mv64xxx.c +++ linux-6.2.0/drivers/i2c/busses/i2c-mv64xxx.c @@ -520,6 +520,17 @@ while (readl(drv_data->reg_base + drv_data->reg_offsets.control) & MV64XXX_I2C_REG_CONTROL_IFLG) { + /* + * It seems that sometime the controller updates the status + * register only after it asserts IFLG in control register. + * This may result in weird bugs when in atomic mode. A delay + * of 100 ns before reading the status register solves this + * issue. This bug does not seem to appear when using + * interrupts. 
+ */ + if (drv_data->atomic) + ndelay(100); + status = readl(drv_data->reg_base + drv_data->reg_offsets.status); mv64xxx_i2c_fsm(drv_data, status); mv64xxx_i2c_do_action(drv_data); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/i2c/busses/i2c-sprd.c +++ linux-6.2.0/drivers/i2c/busses/i2c-sprd.c @@ -576,12 +576,14 @@ struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev); int ret; - ret = pm_runtime_resume_and_get(i2c_dev->dev); + ret = pm_runtime_get_sync(i2c_dev->dev); if (ret < 0) - return ret; + dev_err(&pdev->dev, "Failed to resume device (%pe)\n", ERR_PTR(ret)); i2c_del_adapter(&i2c_dev->adap); - clk_disable_unprepare(i2c_dev->clk); + + if (ret >= 0) + clk_disable_unprepare(i2c_dev->clk); pm_runtime_put_noidle(i2c_dev->dev); pm_runtime_disable(i2c_dev->dev); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/accel/st_accel_core.c +++ linux-6.2.0/drivers/iio/accel/st_accel_core.c @@ -1289,12 +1289,12 @@ adev = ACPI_COMPANION(indio_dev->dev.parent); if (!adev) - return 0; + return -ENXIO; /* Read _ONT data, which should be a package of 6 integers. */ status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer); if (status == AE_NOT_FOUND) { - return 0; + return -ENXIO; } else if (ACPI_FAILURE(status)) { dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n", status); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/adc/ad4130.c +++ linux-6.2.0/drivers/iio/adc/ad4130.c @@ -1817,6 +1817,11 @@ .unprepare = ad4130_int_clk_unprepare, }; +static void ad4130_clk_del_provider(void *of_node) +{ + of_clk_del_provider(of_node); +} + static int ad4130_setup_int_clk(struct ad4130_state *st) { struct device *dev = &st->spi->dev; @@ -1824,6 +1829,7 @@ struct clk_init_data init; const char *clk_name; struct clk *clk; + int ret; if (st->int_pin_sel == AD4130_INT_PIN_CLK || st->mclk_sel != AD4130_MCLK_76_8KHZ) @@ -1843,7 +1849,11 @@ if (IS_ERR(clk)) return PTR_ERR(clk); - return of_clk_add_provider(of_node, of_clk_src_simple_get, clk); + ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk); + if (ret) + return ret; + + return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node); } static int ad4130_setup(struct iio_dev *indio_dev) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/adc/ad7192.c +++ linux-6.2.0/drivers/iio/adc/ad7192.c @@ -897,10 +897,6 @@ __AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \ BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) -#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \ - __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info) - #define AD719x_TEMP_CHANNEL(_si, _address) \ __AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL) @@ -908,7 +904,7 @@ AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M), AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M), AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP), - AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M), + AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M), AD719x_CHANNEL(4, 1, AD7192_CH_AIN1), AD719x_CHANNEL(5, 2, AD7192_CH_AIN2), AD719x_CHANNEL(6, 3, AD7192_CH_AIN3), @@ -922,7 +918,7 @@ AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M), AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M), AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP), - AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M), + AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M), AD719x_CHANNEL(6, 1, AD7193_CH_AIN1), AD719x_CHANNEL(7, 2, AD7193_CH_AIN2), AD719x_CHANNEL(8, 3, AD7193_CH_AIN3), only in 
patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/adc/ad_sigma_delta.c +++ linux-6.2.0/drivers/iio/adc/ad_sigma_delta.c @@ -584,6 +584,10 @@ init_completion(&sigma_delta->completion); sigma_delta->irq_dis = true; + + /* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */ + irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, sigma_delta->spi->irq, ad_sd_data_rdy_trig_poll, sigma_delta->info->irq_flags | IRQF_NO_AUTOEN, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/adc/mt6370-adc.c +++ linux-6.2.0/drivers/iio/adc/mt6370-adc.c @@ -19,6 +19,7 @@ #include +#define MT6370_REG_DEV_INFO 0x100 #define MT6370_REG_CHG_CTRL3 0x113 #define MT6370_REG_CHG_CTRL7 0x117 #define MT6370_REG_CHG_ADC 0x121 @@ -27,6 +28,7 @@ #define MT6370_ADC_START_MASK BIT(0) #define MT6370_ADC_IN_SEL_MASK GENMASK(7, 4) #define MT6370_AICR_ICHG_MASK GENMASK(7, 2) +#define MT6370_VENID_MASK GENMASK(7, 4) #define MT6370_AICR_100_mA 0x0 #define MT6370_AICR_150_mA 0x1 @@ -47,6 +49,10 @@ #define ADC_CONV_TIME_MS 35 #define ADC_CONV_POLLING_TIME_US 1000 +#define MT6370_VID_RT5081 0x8 +#define MT6370_VID_RT5081A 0xA +#define MT6370_VID_MT6370 0xE + struct mt6370_adc_data { struct device *dev; struct regmap *regmap; @@ -55,6 +61,7 @@ * from being read at the same time. */ struct mutex adc_lock; + unsigned int vid; }; static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan, @@ -98,6 +105,30 @@ return ret; } +static int mt6370_adc_get_ibus_scale(struct mt6370_adc_data *priv) +{ + switch (priv->vid) { + case MT6370_VID_RT5081: + case MT6370_VID_RT5081A: + case MT6370_VID_MT6370: + return 3350; + default: + return 3875; + } +} + +static int mt6370_adc_get_ibat_scale(struct mt6370_adc_data *priv) +{ + switch (priv->vid) { + case MT6370_VID_RT5081: + case MT6370_VID_RT5081A: + case MT6370_VID_MT6370: + return 2680; + default: + return 3870; + } +} + static int mt6370_adc_read_scale(struct mt6370_adc_data *priv, int chan, int *val1, int *val2) { @@ -123,7 +154,7 @@ case MT6370_AICR_250_mA: case MT6370_AICR_300_mA: case MT6370_AICR_350_mA: - *val1 = 3350; + *val1 = mt6370_adc_get_ibus_scale(priv); break; default: *val1 = 5000; @@ -150,7 +181,7 @@ case MT6370_ICHG_600_mA: case MT6370_ICHG_700_mA: case MT6370_ICHG_800_mA: - *val1 = 2680; + *val1 = mt6370_adc_get_ibat_scale(priv); break; default: *val1 = 5000; @@ -251,6 +282,20 @@ MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)), }; +static int mt6370_get_vendor_info(struct mt6370_adc_data *priv) +{ + unsigned int dev_info; + int ret; + + ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info); + if (ret) + return ret; + + priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info); + + return 0; +} + static int mt6370_adc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -272,6 +317,10 @@ priv->regmap = regmap; mutex_init(&priv->adc_lock); + ret = mt6370_get_vendor_info(priv); + if (ret) + return dev_err_probe(dev, ret, "Failed to get vid\n"); + ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0); if (ret) return dev_err_probe(dev, ret, "Failed to reset ADC\n"); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/adc/mxs-lradc-adc.c +++ linux-6.2.0/drivers/iio/adc/mxs-lradc-adc.c @@ -757,13 +757,13 @@ ret = mxs_lradc_adc_trigger_init(iio); if (ret) - goto err_trig; + return ret; ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time, &mxs_lradc_adc_trigger_handler, &mxs_lradc_adc_buffer_ops); if (ret) - return ret; + goto err_trig; adc->vref_mv 
= mxs_lradc_adc_vref_mv[lradc->soc]; @@ -801,9 +801,9 @@ err_dev: mxs_lradc_adc_hw_stop(adc); - mxs_lradc_adc_trigger_remove(iio); -err_trig: iio_triggered_buffer_cleanup(iio); +err_trig: + mxs_lradc_adc_trigger_remove(iio); return ret; } @@ -814,8 +814,8 @@ iio_device_unregister(iio); mxs_lradc_adc_hw_stop(adc); - mxs_lradc_adc_trigger_remove(iio); iio_triggered_buffer_cleanup(iio); + mxs_lradc_adc_trigger_remove(iio); return 0; } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/adc/stm32-adc.c +++ linux-6.2.0/drivers/iio/adc/stm32-adc.c @@ -2006,16 +2006,15 @@ * to get the *real* number of channels. */ ret = device_property_count_u32(dev, "st,adc-diff-channels"); - if (ret < 0) - return ret; - - ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32)); - if (ret > adc_info->max_channels) { - dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n"); - return -EINVAL; - } else if (ret > 0) { - adc->num_diff = ret; - num_channels += ret; + if (ret > 0) { + ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32)); + if (ret > adc_info->max_channels) { + dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n"); + return -EINVAL; + } else if (ret > 0) { + adc->num_diff = ret; + num_channels += ret; + } } /* Optional sample time is provided either for each, or all channels */ @@ -2037,6 +2036,7 @@ struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX]; struct device *dev = &indio_dev->dev; u32 num_diff = adc->num_diff; + int num_se = nchans - num_diff; int size = num_diff * sizeof(*diff) / sizeof(u32); int scan_index = 0, ret, i, c; u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX]; @@ -2063,29 +2063,32 @@ scan_index++; } } - - ret = device_property_read_u32_array(dev, "st,adc-channels", chans, - nchans); - if (ret) - return ret; - - for (c = 0; c < nchans; c++) { - if (chans[c] >= adc_info->max_channels) { - dev_err(&indio_dev->dev, "Invalid channel %d\n", - chans[c]); - return -EINVAL; + if (num_se > 0) { + ret = device_property_read_u32_array(dev, "st,adc-channels", chans, num_se); + if (ret) { + dev_err(&indio_dev->dev, "Failed to get st,adc-channels %d\n", ret); + return ret; } - /* Channel can't be configured both as single-ended & diff */ - for (i = 0; i < num_diff; i++) { - if (chans[c] == diff[i].vinp) { - dev_err(&indio_dev->dev, "channel %d misconfigured\n", chans[c]); + for (c = 0; c < num_se; c++) { + if (chans[c] >= adc_info->max_channels) { + dev_err(&indio_dev->dev, "Invalid channel %d\n", + chans[c]); return -EINVAL; } + + /* Channel can't be configured both as single-ended & diff */ + for (i = 0; i < num_diff; i++) { + if (chans[c] == diff[i].vinp) { + dev_err(&indio_dev->dev, "channel %d misconfigured\n", + chans[c]); + return -EINVAL; + } + } + stm32_adc_chan_init_one(indio_dev, &channels[scan_index], + chans[c], 0, scan_index, false); + scan_index++; } - stm32_adc_chan_init_one(indio_dev, &channels[scan_index], - chans[c], 0, scan_index, false); - scan_index++; } if (adc->nsmps > 0) { @@ -2306,7 +2309,7 @@ if (legacy) ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels, - num_channels); + timestamping ? 
num_channels - 1 : num_channels); else ret = stm32_adc_generic_chan_init(indio_dev, adc, channels); if (ret < 0) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/addac/ad74413r.c +++ linux-6.2.0/drivers/iio/addac/ad74413r.c @@ -981,7 +981,7 @@ ret = ad74413r_get_single_adc_result(indio_dev, chan->channel, val); - if (ret) + if (ret < 0) return ret; ad74413r_adc_to_resistance_result(*val, val); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/dac/Makefile +++ linux-6.2.0/drivers/iio/dac/Makefile @@ -17,7 +17,7 @@ obj-$(CONFIG_AD5592R) += ad5592r.o obj-$(CONFIG_AD5593R) += ad5593r.o obj-$(CONFIG_AD5755) += ad5755.o -obj-$(CONFIG_AD5755) += ad5758.o +obj-$(CONFIG_AD5758) += ad5758.o obj-$(CONFIG_AD5761) += ad5761.o obj-$(CONFIG_AD5764) += ad5764.o obj-$(CONFIG_AD5766) += ad5766.o only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/dac/mcp4725.c +++ linux-6.2.0/drivers/iio/dac/mcp4725.c @@ -47,12 +47,18 @@ struct mcp4725_data *data = iio_priv(i2c_get_clientdata( to_i2c_client(dev))); u8 outbuf[2]; + int ret; outbuf[0] = (data->powerdown_mode + 1) << 4; outbuf[1] = 0; data->powerdown = true; - return i2c_master_send(data->client, outbuf, 2); + ret = i2c_master_send(data->client, outbuf, 2); + if (ret < 0) + return ret; + else if (ret != 2) + return -EIO; + return 0; } static int mcp4725_resume(struct device *dev) @@ -60,13 +66,19 @@ struct mcp4725_data *data = iio_priv(i2c_get_clientdata( to_i2c_client(dev))); u8 outbuf[2]; + int ret; /* restore previous DAC value */ outbuf[0] = (data->dac_value >> 8) & 0xf; outbuf[1] = data->dac_value & 0xff; data->powerdown = false; - return i2c_master_send(data->client, outbuf, 2); + ret = i2c_master_send(data->client, outbuf, 2); + if (ret < 0) + return ret; + else if (ret != 2) + return -EIO; + return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend, mcp4725_resume); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +++ linux-6.2.0/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c @@ -275,9 +275,14 @@ { struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev); struct device *dev = regmap_get_device(st->map); + struct inv_icm42600_timestamp *ts = iio_priv(indio_dev); pm_runtime_get_sync(dev); + mutex_lock(&st->lock); + inv_icm42600_timestamp_reset(ts); + mutex_unlock(&st->lock); + return 0; } @@ -375,7 +380,6 @@ struct device *dev = regmap_get_device(st->map); unsigned int sensor; unsigned int *watermark; - struct inv_icm42600_timestamp *ts; struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT; unsigned int sleep_temp = 0; unsigned int sleep_sensor = 0; @@ -385,11 +389,9 @@ if (indio_dev == st->indio_gyro) { sensor = INV_ICM42600_SENSOR_GYRO; watermark = &st->fifo.watermark.gyro; - ts = iio_priv(st->indio_gyro); } else if (indio_dev == st->indio_accel) { sensor = INV_ICM42600_SENSOR_ACCEL; watermark = &st->fifo.watermark.accel; - ts = iio_priv(st->indio_accel); } else { return -EINVAL; } @@ -417,8 +419,6 @@ if (!st->fifo.on) ret = inv_icm42600_set_temp_conf(st, false, &sleep_temp); - inv_icm42600_timestamp_reset(ts); - out_unlock: mutex_unlock(&st->lock); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iio/light/vcnl4035.c +++ linux-6.2.0/drivers/iio/light/vcnl4035.c @@ -8,6 +8,7 @@ * TODO: Proximity */ #include +#include #include #include #include @@ -42,6 +43,7 @@ #define VCNL4035_ALS_PERS_MASK GENMASK(3, 2) #define VCNL4035_INT_ALS_IF_H_MASK BIT(12) #define VCNL4035_INT_ALS_IF_L_MASK BIT(13) +#define VCNL4035_DEV_ID_MASK 
GENMASK(7, 0) /* Default values */ #define VCNL4035_MODE_ALS_ENABLE BIT(0) @@ -413,6 +415,7 @@ return ret; } + id = FIELD_GET(VCNL4035_DEV_ID_MASK, id); if (id != VCNL4035_DEV_ID_VAL) { dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n", id, VCNL4035_DEV_ID_VAL); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ linux-6.2.0/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -3241,9 +3241,7 @@ udwr.remote_qkey = gsi_sqp->qplib_qp.qkey; /* post data received in the send queue */ - rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); - - return 0; + return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr); } static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ linux-6.2.0/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -2043,6 +2043,12 @@ u32 pg_sz_lvl; int rc; + if (!cq->dpi) { + dev_err(&rcfw->pdev->dev, + "FP: CREATE_CQ failed due to NULL DPI\n"); + return -EINVAL; + } + hwq_attr.res = res; hwq_attr.depth = cq->max_wqe; hwq_attr.stride = sizeof(struct cq_base); @@ -2054,11 +2060,6 @@ RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags); - if (!cq->dpi) { - dev_err(&rcfw->pdev->dev, - "FP: CREATE_CQ failed due to NULL DPI\n"); - return -EINVAL; - } req.dpi = cpu_to_le32(cq->dpi->dpi); req.cq_handle = cpu_to_le64(cq->cq_handle); req.cq_size = cpu_to_le32(cq->hwq.max_elements); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ linux-6.2.0/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -215,17 +215,9 @@ return -EINVAL; hwq_attr->sginfo->npages = npages; } else { - unsigned long sginfo_num_pages = ib_umem_num_dma_blocks( - hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize); - + npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem, + hwq_attr->sginfo->pgsize); hwq->is_user = true; - npages = sginfo_num_pages; - npages = (npages * PAGE_SIZE) / - BIT_ULL(hwq_attr->sginfo->pgshft); - if ((sginfo_num_pages * PAGE_SIZE) % - BIT_ULL(hwq_attr->sginfo->pgshft)) - if (!npages) - npages++; } if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) { only in patch2: unchanged: --- linux-6.2.0.orig/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ linux-6.2.0/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -584,16 +584,15 @@ /* Free the hwq if it already exist, must be a rereg */ if (mr->hwq.max_elements) bnxt_qplib_free_hwq(res, &mr->hwq); - /* Use system PAGE_SIZE */ hwq_attr.res = res; hwq_attr.depth = pages; - hwq_attr.stride = buf_pg_size; + hwq_attr.stride = sizeof(dma_addr_t); hwq_attr.type = HWQ_TYPE_MR; hwq_attr.sginfo = &sginfo; hwq_attr.sginfo->umem = umem; hwq_attr.sginfo->npages = pages; - hwq_attr.sginfo->pgsize = PAGE_SIZE; - hwq_attr.sginfo->pgshft = PAGE_SHIFT; + hwq_attr.sginfo->pgsize = buf_pg_size; + hwq_attr.sginfo->pgshft = ilog2(buf_pg_size); rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr); if (rc) { dev_err(&res->pdev->dev, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/infiniband/hw/efa/efa_verbs.c +++ linux-6.2.0/drivers/infiniband/hw/efa/efa_verbs.c @@ -1397,7 +1397,7 @@ */ static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl) { - u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE); + u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE); struct scatterlist *sgl; int sg_dma_cnt, err; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ 
linux-6.2.0/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -4728,11 +4728,9 @@ mtu = ib_mtu_enum_to_int(ib_mtu); if (WARN_ON(mtu <= 0)) return -EINVAL; -#define MAX_LP_MSG_LEN 16384 - /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */ - lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu); - if (WARN_ON(lp_pktn_ini >= 0xF)) - return -EINVAL; +#define MIN_LP_MSG_LEN 1024 + /* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */ + lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu); if (attr_mask & IB_QP_PATH_MTU) { hr_reg_write(context, QPC_MTU, ib_mtu); @@ -5136,7 +5134,6 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) { #define QP_ACK_TIMEOUT_MAX_HIP08 20 -#define QP_ACK_TIMEOUT_OFFSET 10 #define QP_ACK_TIMEOUT_MAX 31 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { @@ -5145,7 +5142,7 @@ "local ACK timeout shall be 0 to 20.\n"); return false; } - *timeout += QP_ACK_TIMEOUT_OFFSET; + *timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08; } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) { if (*timeout > QP_ACK_TIMEOUT_MAX) { ibdev_warn(&hr_dev->ib_dev, @@ -5431,6 +5428,18 @@ return ret; } +static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev, + struct hns_roce_v2_qp_context *context) +{ + u8 timeout; + + timeout = (u8)hr_reg_read(context, QPC_AT); + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) + timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08; + + return timeout; +} + static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) @@ -5508,7 +5517,7 @@ qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX); qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME); - qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT); + qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context); qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT); qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ linux-6.2.0/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -72,6 +72,8 @@ #define HNS_ROCE_V2_IDX_ENTRY_SZ 4 #define HNS_ROCE_V2_SCCC_SZ 32 +#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08 10 + #define HNS_ROCE_V3_SCCC_SZ 64 #define HNS_ROCE_V3_GMV_ENTRY_SZ 32 only in patch2: unchanged: --- linux-6.2.0.orig/drivers/infiniband/hw/hns/hns_roce_mr.c +++ linux-6.2.0/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -33,6 +33,7 @@ #include #include +#include #include "hns_roce_device.h" #include "hns_roce_cmd.h" #include "hns_roce_hem.h" @@ -909,6 +910,44 @@ return page_cnt; } +static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum) +{ + return int_pow(ba_per_bt, hopnum - 1); +} + +static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr, + unsigned int pg_shift) +{ + unsigned long cap = hr_dev->caps.page_size_cap; + struct hns_roce_buf_region *re; + unsigned int pgs_per_l1ba; + unsigned int ba_per_bt; + unsigned int ba_num; + int i; + + for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) { + if (!(BIT(pg_shift) & cap)) + continue; + + ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN; + ba_num = 0; + for (i = 0; i < mtr->hem_cfg.region_count; i++) { + re = &mtr->hem_cfg.region[i]; + if (re->hopnum == 0) + continue; + + pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum); + ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba); + } + + if (ba_num <= ba_per_bt) + return pg_shift; + } + 
+ return 0; +} + static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, unsigned int ba_page_shift) { @@ -917,6 +956,10 @@ hns_roce_hem_list_init(&mtr->hem_list); if (!cfg->is_direct) { + ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift); + if (!ba_page_shift) + return -ERANGE; + ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, cfg->region, cfg->region_count, ba_page_shift); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/infiniband/hw/irdma/verbs.c +++ linux-6.2.0/drivers/infiniband/hw/irdma/verbs.c @@ -522,11 +522,6 @@ if (!iwqp->user_mode) cancel_delayed_work_sync(&iwqp->dwork_flush); - irdma_qp_rem_ref(&iwqp->ibqp); - wait_for_completion(&iwqp->free_qp); - irdma_free_lsmm_rsrc(iwqp); - irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); - if (!iwqp->user_mode) { if (iwqp->iwscq) { irdma_clean_cqes(iwqp, iwqp->iwscq); @@ -534,6 +529,12 @@ irdma_clean_cqes(iwqp, iwqp->iwrcq); } } + + irdma_qp_rem_ref(&iwqp->ibqp); + wait_for_completion(&iwqp->free_qp); + irdma_free_lsmm_rsrc(iwqp); + irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp); + irdma_remove_push_mmap_entries(iwqp); irdma_free_qp_rsrc(iwqp); @@ -3190,6 +3191,7 @@ break; case IB_WR_LOCAL_INV: info.op_type = IRDMA_OP_TYPE_INV_STAG; + info.local_fence = info.read_fence; info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; err = irdma_uk_stag_local_invalidate(ukqp, &info, true); break; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/input/input.c +++ linux-6.2.0/drivers/input/input.c @@ -702,7 +702,7 @@ __input_release_device(handle); - if (!dev->inhibited && !--dev->users) { + if (!--dev->users && !dev->inhibited) { if (dev->poller) input_dev_poller_stop(dev->poller); if (dev->close) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/input/mouse/elantech.c +++ linux-6.2.0/drivers/input/mouse/elantech.c @@ -674,10 +674,11 @@ struct input_dev *dev = psmouse->dev; struct elantech_data *etd = psmouse->private; unsigned char *packet = psmouse->packet; - int id = ((packet[3] & 0xe0) >> 5) - 1; + int id; int pres, traces; - if (id < 0) + id = ((packet[3] & 0xe0) >> 5) - 1; + if (id < 0 || id >= ETP_MAX_FINGERS) return; etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2]; @@ -707,7 +708,7 @@ int id, sid; id = ((packet[0] & 0xe0) >> 5) - 1; - if (id < 0) + if (id < 0 || id >= ETP_MAX_FINGERS) return; sid = ((packet[3] & 0xe0) >> 5) - 1; @@ -728,7 +729,7 @@ input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x); input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y); - if (sid >= 0) { + if (sid >= 0 && sid < ETP_MAX_FINGERS) { etd->mt[sid].x += delta_x2 * weight; etd->mt[sid].y -= delta_y2 * weight; input_mt_slot(dev, sid); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iommu/Kconfig +++ linux-6.2.0/drivers/iommu/Kconfig @@ -285,6 +285,7 @@ config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64) + depends on ARM || ARM64 || COMPILE_TEST select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iommu/amd/amd_iommu.h +++ linux-6.2.0/drivers/iommu/amd/amd_iommu.h @@ -15,9 +15,7 @@ extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid); extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu); -extern int amd_iommu_init_devices(void); -extern void amd_iommu_uninit_devices(void); -extern void amd_iommu_init_notifier(void); 
+extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu); extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid); #ifdef CONFIG_AMD_IOMMU_DEBUGFS only in patch2: unchanged: --- linux-6.2.0.orig/drivers/iommu/rockchip-iommu.c +++ linux-6.2.0/drivers/iommu/rockchip-iommu.c @@ -1303,20 +1303,22 @@ for (i = 0; i < iommu->num_irq; i++) { int irq = platform_get_irq(pdev, i); - if (irq < 0) - return irq; + if (irq < 0) { + err = irq; + goto err_pm_disable; + } err = devm_request_irq(iommu->dev, irq, rk_iommu_irq, IRQF_SHARED, dev_name(dev), iommu); - if (err) { - pm_runtime_disable(dev); - goto err_remove_sysfs; - } + if (err) + goto err_pm_disable; } dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask); return 0; +err_pm_disable: + pm_runtime_disable(dev); err_remove_sysfs: iommu_device_sysfs_remove(&iommu->iommu); err_put_group: only in patch2: unchanged: --- linux-6.2.0.orig/drivers/mailbox/mailbox-test.c +++ linux-6.2.0/drivers/mailbox/mailbox-test.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,7 @@ char *signal; char *message; spinlock_t lock; + struct mutex mutex; wait_queue_head_t waitq; struct fasync_struct *async_queue; struct dentry *root_debugfs_dir; @@ -95,6 +97,7 @@ size_t count, loff_t *ppos) { struct mbox_test_device *tdev = filp->private_data; + char *message; void *data; int ret; @@ -110,10 +113,13 @@ return -EINVAL; } - tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL); - if (!tdev->message) + message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL); + if (!message) return -ENOMEM; + mutex_lock(&tdev->mutex); + + tdev->message = message; ret = copy_from_user(tdev->message, userbuf, count); if (ret) { ret = -EFAULT; @@ -144,6 +150,8 @@ kfree(tdev->message); tdev->signal = NULL; + mutex_unlock(&tdev->mutex); + return ret < 0 ? ret : count; } @@ -392,6 +400,7 @@ platform_set_drvdata(pdev, tdev); spin_lock_init(&tdev->lock); + mutex_init(&tdev->mutex); if (tdev->rx_channel) { tdev->rx_buffer = devm_kzalloc(&pdev->dev, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/dvb-core/dvb_ca_en50221.c +++ linux-6.2.0/drivers/media/dvb-core/dvb_ca_en50221.c @@ -151,6 +151,12 @@ /* mutex serializing ioctls */ struct mutex ioctl_mutex; + + /* A mutex used when a device is disconnected */ + struct mutex remove_mutex; + + /* Whether the device is disconnected */ + int exit; }; static void dvb_ca_private_free(struct dvb_ca_private *ca) @@ -187,7 +193,7 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 *ebuf, int ecount); static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, - u8 *ebuf, int ecount); + u8 *ebuf, int ecount, int size_write_flag); /** * findstr - Safely find needle in haystack. @@ -370,7 +376,7 @@ ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10); if (ret) return ret; - ret = dvb_ca_en50221_write_data(ca, slot, buf, 2); + ret = dvb_ca_en50221_write_data(ca, slot, buf, 2, CMDREG_SW); if (ret != 2) return -EIO; ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN); @@ -778,11 +784,13 @@ * @buf: The data in this buffer is treated as a complete link-level packet to * be written. * @bytes_write: Size of ebuf. + * @size_write_flag: A flag on Command Register which says whether the link size + * information will be written or not. * * return: Number of bytes written, or < 0 on error.
*/ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, - u8 *buf, int bytes_write) + u8 *buf, int bytes_write, int size_write_flag) { struct dvb_ca_slot *sl = &ca->slot_info[slot]; int status; @@ -817,7 +825,7 @@ /* OK, set HC bit */ status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, - IRQEN | CMDREG_HC); + IRQEN | CMDREG_HC | size_write_flag); if (status) goto exit; @@ -1508,7 +1516,7 @@ mutex_lock(&sl->slot_lock); status = dvb_ca_en50221_write_data(ca, slot, fragbuf, - fraglen + 2); + fraglen + 2, 0); mutex_unlock(&sl->slot_lock); if (status == (fraglen + 2)) { written = 1; @@ -1709,12 +1717,22 @@ dprintk("%s\n", __func__); - if (!try_module_get(ca->pub->owner)) + mutex_lock(&ca->remove_mutex); + + if (ca->exit) { + mutex_unlock(&ca->remove_mutex); + return -ENODEV; + } + + if (!try_module_get(ca->pub->owner)) { + mutex_unlock(&ca->remove_mutex); return -EIO; + } err = dvb_generic_open(inode, file); if (err < 0) { module_put(ca->pub->owner); + mutex_unlock(&ca->remove_mutex); return err; } @@ -1739,6 +1757,7 @@ dvb_ca_private_get(ca); + mutex_unlock(&ca->remove_mutex); return 0; } @@ -1758,6 +1777,8 @@ dprintk("%s\n", __func__); + mutex_lock(&ca->remove_mutex); + /* mark the CA device as closed */ ca->open = 0; dvb_ca_en50221_thread_update_delay(ca); @@ -1768,6 +1789,13 @@ dvb_ca_private_put(ca); + if (dvbdev->users == 1 && ca->exit == 1) { + mutex_unlock(&ca->remove_mutex); + wake_up(&dvbdev->wait_queue); + } else { + mutex_unlock(&ca->remove_mutex); + } + return err; } @@ -1891,6 +1919,7 @@ } mutex_init(&ca->ioctl_mutex); + mutex_init(&ca->remove_mutex); if (signal_pending(current)) { ret = -EINTR; @@ -1933,6 +1962,14 @@ dprintk("%s\n", __func__); + mutex_lock(&ca->remove_mutex); + ca->exit = 1; + mutex_unlock(&ca->remove_mutex); + + if (ca->dvbdev->users < 1) + wait_event(ca->dvbdev->wait_queue, + ca->dvbdev->users == 1); + /* shutdown the thread if there was one */ kthread_stop(ca->thread); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/dvb-core/dvb_demux.c +++ linux-6.2.0/drivers/media/dvb-core/dvb_demux.c @@ -115,12 +115,12 @@ cc = buf[3] & 0x0f; ccok = ((feed->cc + 1) & 0x0f) == cc; - feed->cc = cc; if (!ccok) { set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED); dprintk_sect_loss("missed packet: %d instead of %d!\n", cc, (feed->cc + 1) & 0x0f); } + feed->cc = cc; if (buf[1] & 0x40) // PUSI ? 
feed->peslen = 0xfffa; @@ -300,7 +300,6 @@ cc = buf[3] & 0x0f; ccok = ((feed->cc + 1) & 0x0f) == cc; - feed->cc = cc; if (buf[3] & 0x20) { /* adaption field present, check for discontinuity_indicator */ @@ -336,6 +335,7 @@ feed->pusi_seen = false; dvb_dmx_swfilter_section_new(feed); } + feed->cc = cc; if (buf[1] & 0x40) { /* PUSI=1 (is set), section boundary is here */ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/dvb-core/dvb_net.c +++ linux-6.2.0/drivers/media/dvb-core/dvb_net.c @@ -1564,15 +1564,43 @@ return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl); } +static int locked_dvb_net_open(struct inode *inode, struct file *file) +{ + struct dvb_device *dvbdev = file->private_data; + struct dvb_net *dvbnet = dvbdev->priv; + int ret; + + if (mutex_lock_interruptible(&dvbnet->remove_mutex)) + return -ERESTARTSYS; + + if (dvbnet->exit) { + mutex_unlock(&dvbnet->remove_mutex); + return -ENODEV; + } + + ret = dvb_generic_open(inode, file); + + mutex_unlock(&dvbnet->remove_mutex); + + return ret; +} + static int dvb_net_close(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dvb_net *dvbnet = dvbdev->priv; + mutex_lock(&dvbnet->remove_mutex); + dvb_generic_release(inode, file); - if(dvbdev->users == 1 && dvbnet->exit == 1) + if (dvbdev->users == 1 && dvbnet->exit == 1) { + mutex_unlock(&dvbnet->remove_mutex); wake_up(&dvbdev->wait_queue); + } else { + mutex_unlock(&dvbnet->remove_mutex); + } + return 0; } @@ -1580,7 +1608,7 @@ static const struct file_operations dvb_net_fops = { .owner = THIS_MODULE, .unlocked_ioctl = dvb_net_ioctl, - .open = dvb_generic_open, + .open = locked_dvb_net_open, .release = dvb_net_close, .llseek = noop_llseek, }; @@ -1599,10 +1627,13 @@ { int i; + mutex_lock(&dvbnet->remove_mutex); dvbnet->exit = 1; + mutex_unlock(&dvbnet->remove_mutex); + if (dvbnet->dvbdev->users < 1) wait_event(dvbnet->dvbdev->wait_queue, - dvbnet->dvbdev->users==1); + dvbnet->dvbdev->users == 1); dvb_unregister_device(dvbnet->dvbdev); @@ -1621,6 +1652,7 @@ int i; mutex_init(&dvbnet->ioctl_mutex); + mutex_init(&dvbnet->remove_mutex); dvbnet->demux = dmx; for (i=0; i static DEFINE_MUTEX(dvbdev_mutex); +static LIST_HEAD(dvbdevfops_list); static int dvbdev_debug; module_param(dvbdev_debug, int, 0644); @@ -453,14 +454,15 @@ enum dvb_device_type type, int demux_sink_pads) { struct dvb_device *dvbdev; - struct file_operations *dvbdevfops; + struct file_operations *dvbdevfops = NULL; + struct dvbdevfops_node *node = NULL, *new_node = NULL; struct device *clsdev; int minor; int id, ret; mutex_lock(&dvbdev_register_lock); - if ((id = dvbdev_get_free_id (adap, type)) < 0){ + if ((id = dvbdev_get_free_id (adap, type)) < 0) { mutex_unlock(&dvbdev_register_lock); *pdvbdev = NULL; pr_err("%s: couldn't find free device id\n", __func__); @@ -468,18 +470,45 @@ } *pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL); - if (!dvbdev){ mutex_unlock(&dvbdev_register_lock); return -ENOMEM; } - dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL); + /* + * When a device of the same type is probe()d more than once, + * the first allocated fops are used. This prevents memory leaks + * that can occur when the same device is probe()d repeatedly. 
+ */ + list_for_each_entry(node, &dvbdevfops_list, list_head) { + if (node->fops->owner == adap->module && + node->type == type && + node->template == template) { + dvbdevfops = node->fops; + break; + } + } - if (!dvbdevfops){ - kfree (dvbdev); - mutex_unlock(&dvbdev_register_lock); - return -ENOMEM; + if (dvbdevfops == NULL) { + dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL); + if (!dvbdevfops) { + kfree(dvbdev); + mutex_unlock(&dvbdev_register_lock); + return -ENOMEM; + } + + new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL); + if (!new_node) { + kfree(dvbdevfops); + kfree(dvbdev); + mutex_unlock(&dvbdev_register_lock); + return -ENOMEM; + } + + new_node->fops = dvbdevfops; + new_node->type = type; + new_node->template = template; + list_add_tail (&new_node->list_head, &dvbdevfops_list); } memcpy(dvbdev, template, sizeof(struct dvb_device)); @@ -490,20 +519,20 @@ dvbdev->priv = priv; dvbdev->fops = dvbdevfops; init_waitqueue_head (&dvbdev->wait_queue); - dvbdevfops->owner = adap->module; - list_add_tail (&dvbdev->list_head, &adap->device_list); - down_write(&minor_rwsem); #ifdef CONFIG_DVB_DYNAMIC_MINORS for (minor = 0; minor < MAX_DVB_MINORS; minor++) if (dvb_minors[minor] == NULL) break; - if (minor == MAX_DVB_MINORS) { + if (new_node) { + list_del (&new_node->list_head); + kfree(dvbdevfops); + kfree(new_node); + } list_del (&dvbdev->list_head); - kfree(dvbdevfops); kfree(dvbdev); up_write(&minor_rwsem); mutex_unlock(&dvbdev_register_lock); @@ -512,41 +541,47 @@ #else minor = nums2minor(adap->num, type, id); #endif - dvbdev->minor = minor; dvb_minors[minor] = dvb_device_get(dvbdev); up_write(&minor_rwsem); - ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads); if (ret) { pr_err("%s: dvb_register_media_device failed to create the mediagraph\n", __func__); - + if (new_node) { + list_del (&new_node->list_head); + kfree(dvbdevfops); + kfree(new_node); + } dvb_media_device_free(dvbdev); list_del (&dvbdev->list_head); - kfree(dvbdevfops); kfree(dvbdev); mutex_unlock(&dvbdev_register_lock); return ret; } - mutex_unlock(&dvbdev_register_lock); - clsdev = device_create(dvb_class, adap->device, MKDEV(DVB_MAJOR, minor), dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id); if (IS_ERR(clsdev)) { pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n", __func__, adap->num, dnames[type], id, PTR_ERR(clsdev)); + if (new_node) { + list_del (&new_node->list_head); + kfree(dvbdevfops); + kfree(new_node); + } dvb_media_device_free(dvbdev); list_del (&dvbdev->list_head); - kfree(dvbdevfops); kfree(dvbdev); + mutex_unlock(&dvbdev_register_lock); return PTR_ERR(clsdev); } + dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n", adap->num, dnames[type], id, minor, minor); + mutex_unlock(&dvbdev_register_lock); return 0; } EXPORT_SYMBOL(dvb_register_device); @@ -575,7 +610,6 @@ { struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref); - kfree (dvbdev->fops); kfree (dvbdev); } @@ -1081,9 +1115,17 @@ static void __exit exit_dvbdev(void) { + struct dvbdevfops_node *node, *next; + class_destroy(dvb_class); cdev_del(&dvb_device_cdev); unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS); + + list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) { + list_del (&node->list_head); + kfree(node->fops); + kfree(node); + } } subsys_initcall(init_dvbdev); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/dvb-frontends/mn88443x.c +++ linux-6.2.0/drivers/media/dvb-frontends/mn88443x.c @@ -798,7 +798,7 @@ static struct 
i2c_driver mn88443x_driver = { .driver = { .name = "mn88443x", - .of_match_table = of_match_ptr(mn88443x_of_match), + .of_match_table = mn88443x_of_match, }, .probe = mn88443x_probe, .remove = mn88443x_remove, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/platform/renesas/rcar-vin/rcar-dma.c +++ linux-6.2.0/drivers/media/platform/renesas/rcar-vin/rcar-dma.c @@ -728,11 +728,9 @@ case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: case V4L2_FIELD_NONE: - vnmc = VNMC_IM_ODD_EVEN; - progressive = true; - break; case V4L2_FIELD_ALTERNATE: vnmc = VNMC_IM_ODD_EVEN; + progressive = true; break; default: vnmc = VNMC_IM_ODD; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/usb/dvb-usb-v2/ce6230.c +++ linux-6.2.0/drivers/media/usb/dvb-usb-v2/ce6230.c @@ -101,6 +101,10 @@ if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].addr == ce6230_zl10353_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = DEMOD_READ; req.value = msg[i].addr >> 1; req.index = msg[i].buf[0]; @@ -117,6 +121,10 @@ } else { if (msg[i].addr == ce6230_zl10353_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = DEMOD_WRITE; req.value = msg[i].addr >> 1; req.index = msg[i].buf[0]; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/usb/dvb-usb-v2/ec168.c +++ linux-6.2.0/drivers/media/usb/dvb-usb-v2/ec168.c @@ -115,6 +115,10 @@ while (i < num) { if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { if (msg[i].addr == ec168_ec100_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = READ_DEMOD; req.value = 0; req.index = 0xff00 + msg[i].buf[0]; /* reg */ @@ -131,6 +135,10 @@ } } else { if (msg[i].addr == ec168_ec100_config.demod_address) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = WRITE_DEMOD; req.value = msg[i].buf[1]; /* val */ req.index = 0xff00 + msg[i].buf[0]; /* reg */ @@ -139,6 +147,10 @@ ret = ec168_ctrl_msg(d, &req); i += 1; } else { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } req.cmd = WRITE_I2C; req.value = msg[i].buf[0]; /* val */ req.index = 0x0100 + msg[i].addr; /* I2C addr */ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ linux-6.2.0/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -176,6 +176,10 @@ ret = -EOPNOTSUPP; goto err_mutex_unlock; } else if (msg[0].addr == 0x10) { + if (msg[0].len < 1 || msg[1].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 1 - integrated demod */ if (msg[0].buf[0] == 0x00) { /* return demod page from driver cache */ @@ -189,6 +193,10 @@ ret = rtl28xxu_ctrl_msg(d, &req); } } else if (msg[0].len < 2) { + if (msg[0].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 2 - old I2C */ req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1); req.index = CMD_I2C_RD; @@ -217,8 +225,16 @@ ret = -EOPNOTSUPP; goto err_mutex_unlock; } else if (msg[0].addr == 0x10) { + if (msg[0].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 1 - integrated demod */ if (msg[0].buf[0] == 0x00) { + if (msg[0].len < 2) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* save demod page for later demod access */ dev->page = msg[0].buf[1]; ret = 0; @@ -231,6 +247,10 @@ ret = rtl28xxu_ctrl_msg(d, &req); } } else if ((msg[0].len < 23) && (!dev->new_i2c_write)) { + if (msg[0].len < 1) { + ret = -EOPNOTSUPP; + goto err_mutex_unlock; + } /* method 2 - old I2C */ req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1); req.index = CMD_I2C_WR; only 
in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/usb/dvb-usb/az6027.c +++ linux-6.2.0/drivers/media/usb/dvb-usb/az6027.c @@ -988,6 +988,10 @@ /* write/read request */ if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) { req = 0xB9; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (msg[i].len << 8); length = msg[i + 1].len + 6; @@ -1001,6 +1005,10 @@ /* demod 16bit addr */ req = 0xBD; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff)); value = msg[i].addr + (2 << 8); length = msg[i].len - 2; @@ -1026,6 +1034,10 @@ } else { req = 0xBD; + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } index = msg[i].buf[0] & 0x00FF; value = msg[i].addr + (1 << 8); length = msg[i].len - 1; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/usb/dvb-usb/digitv.c +++ linux-6.2.0/drivers/media/usb/dvb-usb/digitv.c @@ -63,6 +63,10 @@ warn("more than 2 i2c messages at a time is not handled yet. TODO."); for (i = 0; i < num; i++) { + if (msg[i].len < 1) { + i = -EOPNOTSUPP; + break; + } /* write/read request */ if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { if (digitv_ctrl_msg(d, USB_READ_COFDM, msg[i].buf[0], NULL, 0, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/usb/dvb-usb/dw2102.c +++ linux-6.2.0/drivers/media/usb/dvb-usb/dw2102.c @@ -946,7 +946,7 @@ for (i = 0; i < 6; i++) { obuf[1] = 0xf0 + i; if (i2c_transfer(&d->i2c_adap, msg, 2) != 2) - break; + return -1; else mac[i] = ibuf[0]; } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ linux-6.2.0/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -1544,8 +1544,7 @@ dvb_dmx_release(&dec->demux); if (dec->fe) { dvb_unregister_frontend(dec->fe); - if (dec->fe->ops.release) - dec->fe->ops.release(dec->fe); + dvb_frontend_detach(dec->fe); } dvb_unregister_adapter(&dec->adapter); } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/misc/eeprom/Kconfig +++ linux-6.2.0/drivers/misc/eeprom/Kconfig @@ -6,6 +6,7 @@ depends on I2C && SYSFS select NVMEM select NVMEM_SYSFS + select REGMAP select REGMAP_I2C help Enable this driver to get read/write support to most I2C EEPROMs only in patch2: unchanged: --- linux-6.2.0.orig/drivers/mmc/core/pwrseq_sd8787.c +++ linux-6.2.0/drivers/mmc/core/pwrseq_sd8787.c @@ -28,7 +28,6 @@ struct mmc_pwrseq pwrseq; struct gpio_desc *reset_gpio; struct gpio_desc *pwrdn_gpio; - u32 reset_pwrdwn_delay_ms; }; #define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq) @@ -39,7 +38,7 @@ gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); - msleep(pwrseq->reset_pwrdwn_delay_ms); + msleep(300); gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1); } @@ -51,17 +50,37 @@ gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); } +static void mmc_pwrseq_wilc1000_pre_power_on(struct mmc_host *host) +{ + struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq); + + /* The pwrdn_gpio is really CHIP_EN, reset_gpio is RESETN */ + gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1); + msleep(5); + gpiod_set_value_cansleep(pwrseq->reset_gpio, 1); +} + +static void mmc_pwrseq_wilc1000_power_off(struct mmc_host *host) +{ + struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq); + + gpiod_set_value_cansleep(pwrseq->reset_gpio, 0); + gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0); +} + static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = { .pre_power_on = mmc_pwrseq_sd8787_pre_power_on, 
.power_off = mmc_pwrseq_sd8787_power_off, }; -static const u32 sd8787_delay_ms = 300; -static const u32 wilc1000_delay_ms = 5; +static const struct mmc_pwrseq_ops mmc_pwrseq_wilc1000_ops = { + .pre_power_on = mmc_pwrseq_wilc1000_pre_power_on, + .power_off = mmc_pwrseq_wilc1000_power_off, +}; static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = { - { .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms }, - { .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms }, + { .compatible = "mmc-pwrseq-sd8787", .data = &mmc_pwrseq_sd8787_ops }, + { .compatible = "mmc-pwrseq-wilc1000", .data = &mmc_pwrseq_wilc1000_ops }, {/* sentinel */}, }; MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match); @@ -77,7 +96,6 @@ return -ENOMEM; match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node); - pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data; pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW); if (IS_ERR(pwrseq->pwrdn_gpio)) @@ -88,7 +106,7 @@ return PTR_ERR(pwrseq->reset_gpio); pwrseq->pwrseq.dev = dev; - pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops; + pwrseq->pwrseq.ops = match->data; pwrseq->pwrseq.owner = THIS_MODULE; platform_set_drvdata(pdev, pwrseq); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/mmc/host/vub300.c +++ linux-6.2.0/drivers/mmc/host/vub300.c @@ -1715,6 +1715,9 @@ int bytes = 3 & less_cmd; int words = less_cmd >> 2; u8 *r = vub300->resp.response.command_response; + + if (!resp_len) + return; if (bytes == 3) { cmd->resp[words] = (r[1 + (words << 2)] << 24) | (r[2 + (words << 2)] << 16) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/mtd/mtdchar.c +++ linux-6.2.0/drivers/mtd/mtdchar.c @@ -590,8 +590,8 @@ (end_page - start_page + 1) * oob_per_page); } -static int mtdchar_write_ioctl(struct mtd_info *mtd, - struct mtd_write_req __user *argp) +static noinline_for_stack int +mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp) { struct mtd_info *master = mtd_get_master(mtd); struct mtd_write_req req; @@ -688,8 +688,8 @@ return ret; } -static int mtdchar_read_ioctl(struct mtd_info *mtd, - struct mtd_read_req __user *argp) +static noinline_for_stack int +mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp) { struct mtd_info *master = mtd_get_master(mtd); struct mtd_read_req req; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h +++ linux-6.2.0/drivers/mtd/nand/raw/ingenic/ingenic_ecc.h @@ -36,25 +36,25 @@ void ingenic_ecc_release(struct ingenic_ecc *ecc); struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np); #else /* CONFIG_MTD_NAND_INGENIC_ECC */ -int ingenic_ecc_calculate(struct ingenic_ecc *ecc, +static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc, struct ingenic_ecc_params *params, const u8 *buf, u8 *ecc_code) { return -ENODEV; } -int ingenic_ecc_correct(struct ingenic_ecc *ecc, +static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc, struct ingenic_ecc_params *params, u8 *buf, u8 *ecc_code) { return -ENODEV; } -void ingenic_ecc_release(struct ingenic_ecc *ecc) +static inline void ingenic_ecc_release(struct ingenic_ecc *ecc) { } -struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np) +static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np) { return ERR_PTR(-ENODEV); } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/mtd/nand/raw/marvell_nand.c +++ linux-6.2.0/drivers/mtd/nand/raw/marvell_nand.c @@ -2450,6 +2450,12 @@ NDTR1_WAIT_MODE; } + /* + * Reset 
nfc->selected_chip so the next command will cause the timing + * registers to be updated in marvell_nfc_select_target(). + */ + nfc->selected_chip = NULL; + return 0; } @@ -2887,10 +2893,6 @@ regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL, GENCONF_CLK_GATING_CTRL_ND_GATE, GENCONF_CLK_GATING_CTRL_ND_GATE); - - regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL, - GENCONF_ND_CLK_CTRL_EN, - GENCONF_ND_CLK_CTRL_EN); } /* Configure the DMA if appropriate */ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/dsa/lan9303-core.c +++ linux-6.2.0/drivers/net/dsa/lan9303-core.c @@ -1199,8 +1199,6 @@ struct lan9303 *chip = ds->priv; dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid); - if (vid) - return -EOPNOTSUPP; return lan9303_alr_add_port(chip, addr, port, false); } @@ -1212,8 +1210,6 @@ struct lan9303 *chip = ds->priv; dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid); - if (vid) - return -EOPNOTSUPP; lan9303_alr_del_port(chip, addr, port); return 0; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ linux-6.2.0/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1312,7 +1312,7 @@ return pdata->phy_if.phy_impl.an_outcome(pdata); } -static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) +static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata) { struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; @@ -1347,8 +1347,13 @@ pdata->phy.duplex = DUPLEX_FULL; - if (xgbe_set_mode(pdata, mode) && pdata->an_again) + if (!xgbe_set_mode(pdata, mode)) + return false; + + if (pdata->an_again) xgbe_phy_reconfig_aneg(pdata); + + return true; } static void xgbe_phy_status(struct xgbe_prv_data *pdata) @@ -1378,7 +1383,8 @@ return; } - xgbe_phy_status_result(pdata); + if (xgbe_phy_status_result(pdata)) + return; if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) clear_bit(XGBE_LINK_INIT, &pdata->dev_state); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ linux-6.2.0/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -703,4 +703,7 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, enum bcmgenet_power_mode mode); +void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, + bool tx_lpi_enabled); + #endif /* __BCMGENET_H__ */ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/ethernet/freescale/enetc/enetc.c +++ linux-6.2.0/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1209,7 +1209,13 @@ if (!skb) break; - rx_byte_cnt += skb->len; + /* When set, the outer VLAN header is extracted and reported + * in the receive buffer descriptor. So rx_byte_cnt should + * add the length of the extracted VLAN header. + */ + if (bd_status & ENETC_RXBD_FLAG_VLAN) + rx_byte_cnt += VLAN_HLEN; + rx_byte_cnt += skb->len + ETH_HLEN; rx_frm_cnt++; napi_gro_receive(napi, skb); @@ -1532,6 +1538,14 @@ enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i, &cleaned_cnt, &xdp_buff); + /* When set, the outer VLAN header is extracted and reported + * in the receive buffer descriptor. So rx_byte_cnt should + * add the length of the extracted VLAN header. 
+ */ + if (bd_status & ENETC_RXBD_FLAG_VLAN) + rx_byte_cnt += VLAN_HLEN; + rx_byte_cnt += xdp_get_buff_len(&xdp_buff); + xdp_act = bpf_prog_run_xdp(prog, &xdp_buff); switch (xdp_act) { only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ linux-6.2.0/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -139,10 +139,8 @@ inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = kvzalloc(inlen, GFP_KERNEL); - if (!in) { - err = -ENOMEM; - goto out; - } + if (!in) + return -ENOMEM; if (enable_uc_lb) lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST; @@ -160,14 +158,13 @@ tirn = tir->tirn; err = mlx5_core_modify_tir(mdev, tirn, in); if (err) - goto out; + break; } + mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); -out: kvfree(in); if (err) netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); - mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock); return err; } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +++ linux-6.2.0/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c @@ -245,12 +245,6 @@ skb = priv->rx_skb[rx_pi_rem]; - skb_put(skb, datalen); - - skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */ - - skb->protocol = eth_type_trans(skb, netdev); - /* Alloc another RX SKB for this same index */ rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ, &rx_buf_dma, DMA_FROM_DEVICE); @@ -259,6 +253,13 @@ priv->rx_skb[rx_pi_rem] = rx_skb; dma_unmap_single(priv->dev, *rx_wqe_addr, MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE); + + skb_put(skb, datalen); + + skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */ + + skb->protocol = eth_type_trans(skb, netdev); + *rx_wqe_addr = rx_buf_dma; } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) { priv->stats.rx_mac_errors++; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ linux-6.2.0/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1903,7 +1903,7 @@ { u32 i; - if (!cdev) { + if (!cdev || cdev->recov_in_prog) { memset(stats, 0, sizeof(*stats)); return; } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/ethernet/qlogic/qede/qede.h +++ linux-6.2.0/drivers/net/ethernet/qlogic/qede/qede.h @@ -271,6 +271,10 @@ #define QEDE_ERR_WARN 3 struct qede_dump_info dump_info; + struct delayed_work periodic_task; + unsigned long stats_coal_ticks; + u32 stats_coal_usecs; + spinlock_t stats_lock; /* lock for vport stats access */ }; enum QEDE_STATE { only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ linux-6.2.0/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -430,6 +430,8 @@ } } + spin_lock(&edev->stats_lock); + for (i = 0; i < QEDE_NUM_STATS; i++) { if (qede_is_irrelevant_stat(edev, i)) continue; @@ -439,6 +441,8 @@ buf++; } + spin_unlock(&edev->stats_lock); + __qede_unlock(edev); } @@ -830,6 +834,7 @@ coal->rx_coalesce_usecs = rx_coal; coal->tx_coalesce_usecs = tx_coal; + coal->stats_block_coalesce_usecs = edev->stats_coal_usecs; return rc; } @@ -843,6 +848,19 @@ int i, rc = 0; u16 rxc, txc; + if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) { + edev->stats_coal_usecs = coal->stats_block_coalesce_usecs; + if (edev->stats_coal_usecs) { + edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs); + schedule_delayed_work(&edev->periodic_task, 0); + + DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n", + edev->stats_coal_ticks); + } else { + 
cancel_delayed_work_sync(&edev->periodic_task); + } + } + if (!netif_running(dev)) { DP_INFO(edev, "Interface is down\n"); return -EINVAL; @@ -2253,7 +2271,8 @@ } static const struct ethtool_ops qede_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_STATS_BLOCK_USECS, .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, .get_drvinfo = qede_get_drvinfo, @@ -2304,7 +2323,8 @@ }; static const struct ethtool_ops qede_vf_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_STATS_BLOCK_USECS, .get_link_ksettings = qede_get_link_ksettings, .get_drvinfo = qede_get_drvinfo, .get_msglevel = qede_get_msglevel, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/wireless/broadcom/b43/b43.h +++ linux-6.2.0/drivers/net/wireless/broadcom/b43/b43.h @@ -651,7 +651,7 @@ union { __be16 d16; __be32 d32; - } data __packed; + } __packed data; } __packed; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/wireless/broadcom/b43legacy/b43legacy.h +++ linux-6.2.0/drivers/net/wireless/broadcom/b43legacy/b43legacy.h @@ -379,7 +379,7 @@ union { __be16 d16; __be32 d32; - } data __packed; + } __packed data; } __packed; #define B43legacy_PHYMODE(phytype) (1 << (phytype)) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ linux-6.2.0/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2692,6 +2692,8 @@ return; lq_sta = mvm_sta; + + spin_lock(&lq_sta->pers.lock); iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags, info->band, &info->control.rates[0]); info->control.rates[0].count = 1; @@ -2706,6 +2708,7 @@ iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band, &txrc->reported_rate); } + spin_unlock(&lq_sta->pers.lock); } static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ linux-6.2.0/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1409,6 +1409,7 @@ u32 rege9c; u32 regeb4; u32 regebc; + u32 regrcr; int next_mbox; int nr_out_eps; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/wireless/realtek/rtw88/ps.h +++ linux-6.2.0/drivers/net/wireless/realtek/rtw88/ps.h @@ -23,4 +23,6 @@ void rtw_leave_lps(struct rtw_dev *rtwdev); void rtw_leave_lps_deep(struct rtw_dev *rtwdev); enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev); +void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif); + #endif only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/wireless/realtek/rtw89/mac80211.c +++ linux-6.2.0/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -79,15 +79,6 @@ !(hw->conf.flags & IEEE80211_CONF_IDLE)) rtw89_leave_ips(rtwdev); - if (changed & IEEE80211_CONF_CHANGE_PS) { - if (hw->conf.flags & IEEE80211_CONF_PS) { - rtwdev->lps_enabled = true; - } else { - rtw89_leave_lps(rtwdev); - rtwdev->lps_enabled = false; - } - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0, &hw->conf.chandef); @@ -146,6 +137,8 @@ rtw89_core_txq_init(rtwdev, vif->txq); rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START); + + rtw89_recalc_lps(rtwdev); out: mutex_unlock(&rtwdev->mutex); @@ -169,6 +162,8 @@ rtw89_mac_remove_vif(rtwdev, rtwvif); rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port); list_del_init(&rtwvif->list); 
+ rtw89_recalc_lps(rtwdev); + mutex_unlock(&rtwdev->mutex); } @@ -424,6 +419,9 @@ if (changed & BSS_CHANGED_P2P_PS) rtw89_process_p2p_ps(rtwdev, vif); + if (changed & BSS_CHANGED_PS) + rtw89_recalc_lps(rtwdev); + mutex_unlock(&rtwdev->mutex); } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/wireless/realtek/rtw89/ps.c +++ linux-6.2.0/drivers/net/wireless/realtek/rtw89/ps.c @@ -244,3 +244,29 @@ rtw89_p2p_disable_all_noa(rtwdev, vif); rtw89_p2p_update_noa(rtwdev, vif); } + +void rtw89_recalc_lps(struct rtw89_dev *rtwdev) +{ + struct ieee80211_vif *vif, *found_vif = NULL; + struct rtw89_vif *rtwvif; + int count = 0; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + vif = rtwvif_to_vif(rtwvif); + + if (vif->type != NL80211_IFTYPE_STATION) { + count = 0; + break; + } + + count++; + found_vif = vif; + } + + if (count == 1 && found_vif->cfg.ps) { + rtwdev->lps_enabled = true; + } else { + rtw89_leave_lps(rtwdev); + rtwdev->lps_enabled = false; + } +} only in patch2: unchanged: --- linux-6.2.0.orig/drivers/net/wireless/realtek/rtw89/ps.h +++ linux-6.2.0/drivers/net/wireless/realtek/rtw89/ps.h @@ -14,5 +14,6 @@ void rtw89_leave_ips(struct rtw89_dev *rtwdev); void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl); void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); +void rtw89_recalc_lps(struct rtw89_dev *rtwdev); #endif only in patch2: unchanged: --- linux-6.2.0.orig/drivers/nvme/host/constants.c +++ linux-6.2.0/drivers/nvme/host/constants.c @@ -21,7 +21,7 @@ [nvme_cmd_resv_release] = "Reservation Release", [nvme_cmd_zone_mgmt_send] = "Zone Management Send", [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive", - [nvme_cmd_zone_append] = "Zone Management Append", + [nvme_cmd_zone_append] = "Zone Append", }; static const char * const nvme_admin_ops[] = { only in patch2: unchanged: --- linux-6.2.0.orig/drivers/nvme/host/hwmon.c +++ linux-6.2.0/drivers/nvme/host/hwmon.c @@ -163,7 +163,9 @@ case hwmon_temp_max: case hwmon_temp_min: if ((!channel && data->ctrl->wctemp) || - (channel && data->log->temp_sensor[channel - 1])) { + (channel && data->log->temp_sensor[channel - 1] && + !(data->ctrl->quirks & + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) { if (data->ctrl->quirks & NVME_QUIRK_NO_TEMP_THRESH_CHANGE) return 0444; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/nvme/host/nvme.h +++ linux-6.2.0/drivers/nvme/host/nvme.h @@ -149,6 +149,11 @@ * Reports garbage in the namespace identifiers (eui64, nguid, uuid). */ NVME_QUIRK_BOGUS_NID = (1 << 18), + + /* + * No temperature thresholds for channels other than 0 (Composite). 
+ */ + NVME_QUIRK_NO_SECONDARY_TEMP_THRESH = (1 << 19), }; /* only in patch2: unchanged: --- linux-6.2.0.orig/drivers/pci/pcie/aer.c +++ linux-6.2.0/drivers/pci/pcie/aer.c @@ -1385,6 +1385,22 @@ return 0; } +static int aer_suspend(struct pcie_device *dev) +{ + struct aer_rpc *rpc = get_service_data(dev); + + aer_disable_rootport(rpc); + return 0; +} + +static int aer_resume(struct pcie_device *dev) +{ + struct aer_rpc *rpc = get_service_data(dev); + + aer_enable_rootport(rpc); + return 0; +} + /** * aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP * @dev: pointer to Root Port, RCEC, or RCiEP @@ -1456,6 +1472,8 @@ .service = PCIE_PORT_SERVICE_AER, .probe = aer_probe, + .suspend = aer_suspend, + .resume = aer_resume, .remove = aer_remove, }; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c +++ linux-6.2.0/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c @@ -70,7 +70,7 @@ HHI_MIPI_CNTL1_BANDGAP); regmap_write(priv->regmap, HHI_MIPI_CNTL2, - FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x459) | + FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x45a) | FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL1, 0x2680)); reg = DSI_LANE_CLK; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ linux-6.2.0/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -1810,7 +1810,7 @@ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); - goto err_unlock; + goto err_decrement_count; } ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets); @@ -1860,7 +1860,8 @@ reset_control_bulk_assert(cfg->num_resets, qmp->resets); err_disable_regulators: regulator_bulk_disable(cfg->num_vregs, qmp->vregs); -err_unlock: +err_decrement_count: + qmp->init_count--; mutex_unlock(&qmp->phy_mutex); return ret; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c +++ linux-6.2.0/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c @@ -379,7 +379,7 @@ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs); if (ret) { dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret); - goto err_unlock; + goto err_decrement_count; } ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets); @@ -409,7 +409,8 @@ reset_control_bulk_assert(cfg->num_resets, qmp->resets); err_disable_regulators: regulator_bulk_disable(cfg->num_vregs, qmp->vregs); -err_unlock: +err_decrement_count: + qmp->init_count--; mutex_unlock(&qmp->phy_mutex); return ret; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/pinctrl/meson/pinctrl-meson-axg.c +++ linux-6.2.0/drivers/pinctrl/meson/pinctrl-meson-axg.c @@ -400,6 +400,7 @@ GPIO_GROUP(GPIOA_15), GPIO_GROUP(GPIOA_16), GPIO_GROUP(GPIOA_17), + GPIO_GROUP(GPIOA_18), GPIO_GROUP(GPIOA_19), GPIO_GROUP(GPIOA_20), only in patch2: unchanged: --- linux-6.2.0.orig/drivers/platform/mellanox/mlxbf-tmfifo.c +++ linux-6.2.0/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -784,7 +784,7 @@ fifo = vring->fifo; /* Return if vdev is not ready. */ - if (!fifo->vdev[devid]) + if (!fifo || !fifo->vdev[devid]) return; /* Return if another vring is running. */ @@ -980,9 +980,13 @@ vq->num_max = vring->num; + vq->priv = vring; + + /* Make vq update visible before using it. */ + virtio_mb(false); + vqs[i] = vq; vring->vq = vq; - vq->priv = vring; } return 0; @@ -1302,6 +1306,9 @@ mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL); + /* Make all updates visible before setting the 'is_ready' flag. 
*/ + virtio_mb(false); + fifo->is_ready = true; return 0; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/platform/surface/aggregator/controller.c +++ linux-6.2.0/drivers/platform/surface/aggregator/controller.c @@ -825,7 +825,7 @@ cplt->dev = dev; - cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME); + cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0); if (!cplt->wq) return -ENOMEM; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/platform/surface/surface_aggregator_tabletsw.c +++ linux-6.2.0/drivers/platform/surface/surface_aggregator_tabletsw.c @@ -201,6 +201,7 @@ SSAM_KIP_COVER_STATE_LAPTOP = 0x03, SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04, SSAM_KIP_COVER_STATE_FOLDED_BACK = 0x05, + SSAM_KIP_COVER_STATE_BOOK = 0x06, }; static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw, u32 state) @@ -221,6 +222,9 @@ case SSAM_KIP_COVER_STATE_FOLDED_BACK: return "folded-back"; + case SSAM_KIP_COVER_STATE_BOOK: + return "book"; + default: dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state); return ""; @@ -233,6 +237,7 @@ case SSAM_KIP_COVER_STATE_DISCONNECTED: case SSAM_KIP_COVER_STATE_FOLDED_CANVAS: case SSAM_KIP_COVER_STATE_FOLDED_BACK: + case SSAM_KIP_COVER_STATE_BOOK: return true; case SSAM_KIP_COVER_STATE_CLOSED: only in patch2: unchanged: --- linux-6.2.0.orig/drivers/platform/x86/intel_scu_pcidrv.c +++ linux-6.2.0/drivers/platform/x86/intel_scu_pcidrv.c @@ -34,6 +34,7 @@ static const struct pci_device_id pci_ids[] = { { PCI_VDEVICE(INTEL, 0x080e) }, + { PCI_VDEVICE(INTEL, 0x082a) }, { PCI_VDEVICE(INTEL, 0x08ea) }, { PCI_VDEVICE(INTEL, 0x0a94) }, { PCI_VDEVICE(INTEL, 0x11a0) }, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/s390/block/dasd_ioctl.c +++ linux-6.2.0/drivers/s390/block/dasd_ioctl.c @@ -552,10 +552,10 @@ memcpy(dasd_info->type, base->discipline->name, 4); - spin_lock_irqsave(&block->queue_lock, flags); + spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); list_for_each(l, &base->ccw_queue) dasd_info->chanq_len++; - spin_unlock_irqrestore(&block->queue_lock, flags); + spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); return 0; } only in patch2: unchanged: --- linux-6.2.0.orig/drivers/s390/crypto/pkey_api.c +++ linux-6.2.0/drivers/s390/crypto/pkey_api.c @@ -1293,6 +1293,7 @@ return PTR_ERR(kkey); rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey); DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc); + memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) break; @@ -1426,6 +1427,7 @@ kkey, ktp.keylen, &ktp.protkey); DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc); kfree(apqns); + memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) break; @@ -1552,6 +1554,7 @@ protkey, &protkeylen); DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc); kfree(apqns); + memzero_explicit(kkey, ktp.keylen); kfree(kkey); if (rc) { kfree(protkey); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ linux-6.2.0/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -526,7 +526,10 @@ __le32 Control; /* 0x3C */ union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */ union RAID_CONTEXT_UNION RaidContext; /* 0x60 */ - union MPI2_SGE_IO_UNION SGL; /* 0x80 */ + union { + union MPI2_SGE_IO_UNION SGL; /* 0x80 */ + DECLARE_FLEX_ARRAY(union MPI2_SGE_IO_UNION, SGLs); + }; }; /* only in patch2: unchanged: --- linux-6.2.0.orig/drivers/scsi/scsi_lib.c +++ linux-6.2.0/drivers/scsi/scsi_lib.c @@ -1484,6 +1484,7 @@ */ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, "queuecommand : device 
blocked\n")); + atomic_dec(&cmd->device->iorequest_cnt); return SCSI_MLQUEUE_DEVICE_BUSY; } @@ -1516,6 +1517,7 @@ trace_scsi_dispatch_cmd_start(cmd); rtn = host->hostt->queuecommand(host, cmd); if (rtn) { + atomic_dec(&cmd->device->iorequest_cnt); trace_scsi_dispatch_cmd_error(cmd, rtn); if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && rtn != SCSI_MLQUEUE_TARGET_BUSY) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/scsi/stex.c +++ linux-6.2.0/drivers/scsi/stex.c @@ -109,7 +109,9 @@ TASK_ATTRIBUTE_HEADOFQUEUE = 0x1, TASK_ATTRIBUTE_ORDERED = 0x2, TASK_ATTRIBUTE_ACA = 0x4, +}; +enum { SS_STS_NORMAL = 0x80000000, SS_STS_DONE = 0x40000000, SS_STS_HANDSHAKE = 0x20000000, @@ -121,7 +123,9 @@ SS_I2H_REQUEST_RESET = 0x2000, SS_MU_OPERATIONAL = 0x80000000, +}; +enum { STEX_CDB_LENGTH = 16, STATUS_VAR_LEN = 128, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/soc/qcom/icc-bwmon.c +++ linux-6.2.0/drivers/soc/qcom/icc-bwmon.c @@ -603,12 +603,12 @@ bwmon->max_bw_kbps = UINT_MAX; opp = dev_pm_opp_find_bw_floor(dev, &bwmon->max_bw_kbps, 0); if (IS_ERR(opp)) - return dev_err_probe(dev, ret, "failed to find max peak bandwidth\n"); + return dev_err_probe(dev, PTR_ERR(opp), "failed to find max peak bandwidth\n"); bwmon->min_bw_kbps = 0; opp = dev_pm_opp_find_bw_ceil(dev, &bwmon->min_bw_kbps, 0); if (IS_ERR(opp)) - return dev_err_probe(dev, ret, "failed to find min peak bandwidth\n"); + return dev_err_probe(dev, PTR_ERR(opp), "failed to find min peak bandwidth\n"); bwmon->dev = dev; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/soundwire/stream.c +++ linux-6.2.0/drivers/soundwire/stream.c @@ -2019,8 +2019,10 @@ skip_alloc_master_rt: s_rt = sdw_slave_rt_find(slave, stream); - if (s_rt) + if (s_rt) { + alloc_slave_rt = false; goto skip_alloc_slave_rt; + } s_rt = sdw_slave_rt_alloc(slave, m_rt); if (!s_rt) { only in patch2: unchanged: --- linux-6.2.0.orig/drivers/spi/spi-mt65xx.c +++ linux-6.2.0/drivers/spi/spi-mt65xx.c @@ -1275,6 +1275,9 @@ struct mtk_spi *mdata = spi_master_get_devdata(master); int ret; + if (mdata->use_spimem && !completion_done(&mdata->spimem_done)) + complete(&mdata->spimem_done); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) return ret; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/tee/amdtee/amdtee_if.h +++ linux-6.2.0/drivers/tee/amdtee/amdtee_if.h @@ -118,16 +118,18 @@ /** * struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE - * @low_addr: [in] bits [31:0] of the physical address of the TA binary - * @hi_addr: [in] bits [63:32] of the physical address of the TA binary - * @size: [in] size of TA binary in bytes - * @ta_handle: [out] return handle of the loaded TA + * @low_addr: [in] bits [31:0] of the physical address of the TA binary + * @hi_addr: [in] bits [63:32] of the physical address of the TA binary + * @size: [in] size of TA binary in bytes + * @ta_handle: [out] return handle of the loaded TA + * @return_origin: [out] origin of return code after TEE processing */ struct tee_cmd_load_ta { u32 low_addr; u32 hi_addr; u32 size; u32 ta_handle; + u32 return_origin; }; /** only in patch2: unchanged: --- linux-6.2.0.orig/drivers/tee/amdtee/call.c +++ linux-6.2.0/drivers/tee/amdtee/call.c @@ -423,19 +423,23 @@ if (ret) { arg->ret_origin = TEEC_ORIGIN_COMMS; arg->ret = TEEC_ERROR_COMMUNICATION; - } else if (arg->ret == TEEC_SUCCESS) { - ret = get_ta_refcount(load_cmd.ta_handle); - if (!ret) { - arg->ret_origin = TEEC_ORIGIN_COMMS; - arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + } else { + arg->ret_origin = load_cmd.return_origin; - /* 
Unload the TA on error */ - unload_cmd.ta_handle = load_cmd.ta_handle; - psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, - (void *)&unload_cmd, - sizeof(unload_cmd), &ret); - } else { - set_session_id(load_cmd.ta_handle, 0, &arg->session); + if (arg->ret == TEEC_SUCCESS) { + ret = get_ta_refcount(load_cmd.ta_handle); + if (!ret) { + arg->ret_origin = TEEC_ORIGIN_COMMS; + arg->ret = TEEC_ERROR_OUT_OF_MEMORY; + + /* Unload the TA on error */ + unload_cmd.ta_handle = load_cmd.ta_handle; + psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, + (void *)&unload_cmd, + sizeof(unload_cmd), &ret); + } else { + set_session_id(load_cmd.ta_handle, 0, &arg->session); + } } } mutex_unlock(&ta_refcount_mutex); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/tty/serial/8250/8250_tegra.c +++ linux-6.2.0/drivers/tty/serial/8250/8250_tegra.c @@ -112,13 +112,15 @@ ret = serial8250_register_8250_port(&port8250); if (ret < 0) - goto err_clkdisable; + goto err_ctrl_assert; platform_set_drvdata(pdev, uart); uart->line = ret; return 0; +err_ctrl_assert: + reset_control_assert(uart->rst); err_clkdisable: clk_disable_unprepare(uart->clk); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/tty/serial/cpm_uart/cpm_uart.h +++ linux-6.2.0/drivers/tty/serial/cpm_uart/cpm_uart.h @@ -19,8 +19,6 @@ #include "cpm_uart_cpm2.h" #elif defined(CONFIG_CPM1) #include "cpm_uart_cpm1.h" -#elif defined(CONFIG_COMPILE_TEST) -#include "cpm_uart_cpm2.h" #endif #define SERIAL_CPM_MAJOR 204 only in patch2: unchanged: --- linux-6.2.0.orig/drivers/usb/cdns3/cdns3-gadget.c +++ linux-6.2.0/drivers/usb/cdns3/cdns3-gadget.c @@ -2097,6 +2097,19 @@ else priv_ep->trb_burst_size = 16; + /* + * In versions preceding DEV_VER_V2, for example, iMX8QM, there exit the bugs + * in the DMA. These bugs occur when the trb_burst_size exceeds 16 and the + * address is not aligned to 128 Bytes (which is a product of the 64-bit AXI + * and AXI maximum burst length of 16 or 0xF+1, dma_axi_ctrl0[3:0]). This + * results in data corruption when it crosses the 4K border. The corruption + * specifically occurs from the position (4K - (address & 0x7F)) to 4K. + * + * So force trb_burst_size to 16 at such platform. 
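The erratum note above pins down the affected byte range exactly; as a small illustration (plain C, not driver code, the helper name is invented), the number of bytes at risk inside the 4 KiB page is simply the low seven bits of the buffer address:

#include <stdint.h>
#include <stddef.h>

/* Per the comment above, corruption can hit the range
 * [4096 - (addr & 0x7F), 4096) of the page, i.e. (addr & 0x7F) bytes.
 * A 128-byte-aligned address (result 0) is not affected, which is why
 * capping the burst size at 16 only matters for unaligned buffers. */
static size_t risk_window_bytes(uintptr_t addr)
{
        return addr & 0x7F;
}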
+ */ + if (priv_dev->dev_ver < DEV_VER_V2) + priv_ep->trb_burst_size = 16; + mult = min_t(u8, mult, EP_CFG_MULT_MAX); buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX); maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/usb/core/buffer.c +++ linux-6.2.0/drivers/usb/core/buffer.c @@ -172,3 +172,44 @@ } dma_free_coherent(hcd->self.sysdev, size, addr, dma); } + +void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, + size_t size, gfp_t mem_flags, dma_addr_t *dma) +{ + if (size == 0) + return NULL; + + if (hcd->localmem_pool) + return gen_pool_dma_alloc_align(hcd->localmem_pool, + size, dma, PAGE_SIZE); + + /* some USB hosts just use PIO */ + if (!hcd_uses_dma(hcd)) { + *dma = DMA_MAPPING_ERROR; + return (void *)__get_free_pages(mem_flags, + get_order(size)); + } + + return dma_alloc_coherent(hcd->self.sysdev, + size, dma, mem_flags); +} + +void hcd_buffer_free_pages(struct usb_hcd *hcd, + size_t size, void *addr, dma_addr_t dma) +{ + if (!addr) + return; + + if (hcd->localmem_pool) { + gen_pool_free(hcd->localmem_pool, + (unsigned long)addr, size); + return; + } + + if (!hcd_uses_dma(hcd)) { + free_pages((unsigned long)addr, get_order(size)); + return; + } + + dma_free_coherent(hcd->self.sysdev, size, addr, dma); +} only in patch2: unchanged: --- linux-6.2.0.orig/drivers/usb/core/devio.c +++ linux-6.2.0/drivers/usb/core/devio.c @@ -186,6 +186,7 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count) { struct usb_dev_state *ps = usbm->ps; + struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus); unsigned long flags; spin_lock_irqsave(&ps->lock, flags); @@ -194,8 +195,8 @@ list_del(&usbm->memlist); spin_unlock_irqrestore(&ps->lock, flags); - usb_free_coherent(ps->dev, usbm->size, usbm->mem, - usbm->dma_handle); + hcd_buffer_free_pages(hcd, usbm->size, + usbm->mem, usbm->dma_handle); usbfs_decrease_memory_usage( usbm->size + sizeof(struct usb_memory)); kfree(usbm); @@ -234,7 +235,7 @@ size_t size = vma->vm_end - vma->vm_start; void *mem; unsigned long flags; - dma_addr_t dma_handle; + dma_addr_t dma_handle = DMA_MAPPING_ERROR; int ret; ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory)); @@ -247,8 +248,8 @@ goto error_decrease_mem; } - mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, - &dma_handle); + mem = hcd_buffer_alloc_pages(hcd, + size, GFP_USER | __GFP_NOWARN, &dma_handle); if (!mem) { ret = -ENOMEM; goto error_free_usbm; @@ -264,7 +265,14 @@ usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); - if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { + /* + * In DMA-unavailable cases, hcd_buffer_alloc_pages allocates + * normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check + * whether we are in such cases, and then use remap_pfn_range (or + * dma_mmap_coherent) to map normal (or DMA) pages into the user + * space, respectively. 
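The comment above describes a two-way dispatch in the usbfs mmap path; the sketch below condenses that shape. The helper is illustrative rather than the devio.c code, but remap_pfn_range(), dma_mmap_coherent(), virt_to_phys() and DMA_MAPPING_ERROR are the real kernel interfaces it leans on.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/io.h>

/* 'dma' is DMA_MAPPING_ERROR when the buffer came from plain pages
 * (PIO-only host controller); otherwise it is a handle returned by
 * dma_alloc_coherent() and must be mapped coherently. */
static int sketch_mmap_usb_mem(struct device *dev, struct vm_area_struct *vma,
                               void *mem, dma_addr_t dma, size_t size)
{
        if (dma == DMA_MAPPING_ERROR)
                return remap_pfn_range(vma, vma->vm_start,
                                       virt_to_phys(mem) >> PAGE_SHIFT,
                                       size, vma->vm_page_prot);

        return dma_mmap_coherent(dev, vma, mem, dma, size);
}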
+ */ + if (dma_handle == DMA_MAPPING_ERROR) { if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(usbm->mem) >> PAGE_SHIFT, size, vma->vm_page_prot) < 0) { only in patch2: unchanged: --- linux-6.2.0.orig/drivers/usb/gadget/function/f_fs.c +++ linux-6.2.0/drivers/usb/gadget/function/f_fs.c @@ -3620,6 +3620,7 @@ /* Drain any pending AIO completions */ drain_workqueue(ffs->io_completion_wq); + ffs_event_add(ffs, FUNCTIONFS_UNBIND); if (!--opts->refcnt) functionfs_unbind(ffs); @@ -3644,7 +3645,6 @@ func->function.ssp_descriptors = NULL; func->interfaces_nums = NULL; - ffs_event_add(ffs, FUNCTIONFS_UNBIND); } static struct usb_function *ffs_alloc(struct usb_function_instance *fi) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/vdpa/vdpa_user/vduse_dev.c +++ linux-6.2.0/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1443,6 +1443,9 @@ if (config->vq_num > 0xffff) return false; + if (!config->name[0]) + return false; + if (!device_is_allowed(config->device_id)) return false; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/vhost/vhost.c +++ linux-6.2.0/drivers/vhost/vhost.c @@ -1633,17 +1633,25 @@ r = -EFAULT; break; } - if (s.num > 0xffff) { - r = -EINVAL; - break; + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { + vq->last_avail_idx = s.num & 0xffff; + vq->last_used_idx = (s.num >> 16) & 0xffff; + } else { + if (s.num > 0xffff) { + r = -EINVAL; + break; + } + vq->last_avail_idx = s.num; } - vq->last_avail_idx = s.num; /* Forget the cached index value. */ vq->avail_idx = vq->last_avail_idx; break; case VHOST_GET_VRING_BASE: s.index = idx; - s.num = vq->last_avail_idx; + if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) + s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16); + else + s.num = vq->last_avail_idx; if (copy_to_user(argp, &s, sizeof s)) r = -EFAULT; break; only in patch2: unchanged: --- linux-6.2.0.orig/drivers/vhost/vhost.h +++ linux-6.2.0/drivers/vhost/vhost.h @@ -85,13 +85,17 @@ /* The routine to call when the Guest pings us, or timeout. */ vhost_work_fn_t handle_kick; - /* Last available index we saw. */ + /* Last available index we saw. + * Values are limited to 0x7fff, and the high bit is used as + * a wrap counter when using VIRTIO_F_RING_PACKED. */ u16 last_avail_idx; /* Caches available index value from user. */ u16 avail_idx; - /* Last index we used. */ + /* Last index we used. + * Values are limited to 0x7fff, and the high bit is used as + * a wrap counter when using VIRTIO_F_RING_PACKED. 
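The VHOST_{GET,SET}_VRING_BASE encoding above reduces to plain integer arithmetic; a self-contained userspace-style sketch (function names invented for illustration, the bit layout is taken from the hunks):

#include <stdint.h>

/* For VIRTIO_F_RING_PACKED, s.num carries both indices:
 *   bits  0..15  last_avail_idx (bit 15 = avail wrap counter)
 *   bits 16..31  last_used_idx  (bit 31 = used wrap counter)   */
static uint32_t pack_vring_base(uint16_t last_avail_idx, uint16_t last_used_idx)
{
        return (uint32_t)last_avail_idx | ((uint32_t)last_used_idx << 16);
}

static void unpack_vring_base(uint32_t num, uint16_t *last_avail_idx,
                              uint16_t *last_used_idx)
{
        *last_avail_idx = num & 0xffff;
        *last_used_idx  = (num >> 16) & 0xffff;
}

/* The 15-bit index and its wrap counter split the same way. */
static uint16_t ring_index(uint16_t idx)   { return idx & 0x7fff; }
static uint16_t wrap_counter(uint16_t idx) { return idx >> 15;    }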
*/ u16 last_used_idx; /* Used flags */ only in patch2: unchanged: --- linux-6.2.0.orig/drivers/video/fbdev/core/bitblit.c +++ linux-6.2.0/drivers/video/fbdev/core/bitblit.c @@ -247,6 +247,9 @@ cursor.set = 0; + if (!vc->vc_font.data) + return; + c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height)); only in patch2: unchanged: --- linux-6.2.0.orig/drivers/video/fbdev/core/modedb.c +++ linux-6.2.0/drivers/video/fbdev/core/modedb.c @@ -257,6 +257,11 @@ { NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, 0, FB_VMODE_DOUBLE }, + /* 1920x1080 @ 60 Hz, 67.3 kHz hsync */ + { NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5, 0, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, + FB_VMODE_NONINTERLACED }, + /* 1920x1200 @ 60 Hz, 74.5 Khz hsync */ { NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3, FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, only in patch2: unchanged: --- linux-6.2.0.orig/drivers/video/fbdev/imsttfb.c +++ linux-6.2.0/drivers/video/fbdev/imsttfb.c @@ -1347,7 +1347,7 @@ .fb_ioctl = imsttfb_ioctl, }; -static void init_imstt(struct fb_info *info) +static int init_imstt(struct fb_info *info) { struct imstt_par *par = info->par; __u32 i, tmp, *ip, *end; @@ -1420,7 +1420,7 @@ || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) { printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); framebuffer_release(info); - return; + return -ENODEV; } sprintf(info->fix.id, "IMS TT (%s)", par->ramdac == IBM ? "IBM" : "TVP"); @@ -1456,12 +1456,13 @@ if (register_framebuffer(info) < 0) { framebuffer_release(info); - return; + return -ENODEV; } tmp = (read_reg_le32(par->dc_regs, SSTATUS) & 0x0f00) >> 8; fb_info(info, "%s frame buffer; %uMB vram; chip version %u\n", info->fix.id, info->fix.smem_len >> 20, tmp); + return 0; } static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1529,10 +1530,10 @@ if (!par->cmap_regs) goto error; info->pseudo_palette = par->palette; - init_imstt(info); - - pci_set_drvdata(pdev, info); - return 0; + ret = init_imstt(info); + if (!ret) + pci_set_drvdata(pdev, info); + return ret; error: if (par->dc_regs) only in patch2: unchanged: --- linux-6.2.0.orig/drivers/watchdog/menz69_wdt.c +++ linux-6.2.0/drivers/watchdog/menz69_wdt.c @@ -98,14 +98,6 @@ .set_timeout = men_z069_wdt_set_timeout, }; -static struct watchdog_device men_z069_wdt = { - .info = &men_z069_info, - .ops = &men_z069_ops, - .timeout = MEN_Z069_DEFAULT_TIMEOUT, - .min_timeout = 1, - .max_timeout = MEN_Z069_WDT_COUNTER_MAX / MEN_Z069_TIMER_FREQ, -}; - static int men_z069_probe(struct mcb_device *dev, const struct mcb_device_id *id) { @@ -125,15 +117,19 @@ goto release_mem; drv->mem = mem; + drv->wdt.info = &men_z069_info; + drv->wdt.ops = &men_z069_ops; + drv->wdt.timeout = MEN_Z069_DEFAULT_TIMEOUT; + drv->wdt.min_timeout = 1; + drv->wdt.max_timeout = MEN_Z069_WDT_COUNTER_MAX / MEN_Z069_TIMER_FREQ; - drv->wdt = men_z069_wdt; watchdog_init_timeout(&drv->wdt, 0, &dev->dev); watchdog_set_nowayout(&drv->wdt, nowayout); watchdog_set_drvdata(&drv->wdt, drv); drv->wdt.parent = &dev->dev; mcb_set_drvdata(dev, drv); - return watchdog_register_device(&men_z069_wdt); + return watchdog_register_device(&drv->wdt); release_mem: mcb_release_mem(mem); only in patch2: unchanged: --- linux-6.2.0.orig/fs/ksmbd/smbacl.c +++ linux-6.2.0/fs/ksmbd/smbacl.c @@ -1290,7 +1290,7 @@ if (IS_ENABLED(CONFIG_FS_POSIX_ACL)) { posix_acls = 
get_inode_acl(d_inode(path->dentry), ACL_TYPE_ACCESS); - if (posix_acls && !found) { + if (!IS_ERR_OR_NULL(posix_acls) && !found) { unsigned int id = -1; pa_entry = posix_acls->a_entries; @@ -1314,7 +1314,7 @@ } } } - if (posix_acls) + if (!IS_ERR_OR_NULL(posix_acls)) posix_acl_release(posix_acls); } only in patch2: unchanged: --- linux-6.2.0.orig/fs/ksmbd/vfs.c +++ linux-6.2.0/fs/ksmbd/vfs.c @@ -1376,7 +1376,7 @@ return NULL; posix_acls = get_inode_acl(inode, acl_type); - if (!posix_acls) + if (IS_ERR_OR_NULL(posix_acls)) return NULL; smb_acl = kzalloc(sizeof(struct xattr_smb_acl) + @@ -1885,7 +1885,7 @@ return -EOPNOTSUPP; acls = get_inode_acl(parent_inode, ACL_TYPE_DEFAULT); - if (!acls) + if (IS_ERR_OR_NULL(acls)) return -ENOENT; pace = acls->a_entries; only in patch2: unchanged: --- linux-6.2.0.orig/fs/nfsd/nfsctl.c +++ linux-6.2.0/fs/nfsd/nfsctl.c @@ -714,16 +714,11 @@ if (err != 0 || fd < 0) return -EINVAL; - if (svc_alien_sock(net, fd)) { - printk(KERN_ERR "%s: socket net is different to NFSd's one\n", __func__); - return -EINVAL; - } - err = nfsd_create_serv(net); if (err != 0) return err; - err = svc_addsock(nn->nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred); + err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred); if (err >= 0 && !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1)) only in patch2: unchanged: --- linux-6.2.0.orig/include/linux/page-flags.h +++ linux-6.2.0/include/linux/page-flags.h @@ -629,6 +629,12 @@ * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped. + * + * For slab pages, since slab reuses the bits in struct page to store its + * internal states, the page->mapping does not exist as such, nor do these + * flags below. So in order to avoid testing non-existent bits, please + * make sure that PageSlab(page) actually evaluates to false before calling + * the following functions (e.g., PageAnon). See mm/slab.h. */ #define PAGE_MAPPING_ANON 0x1 #define PAGE_MAPPING_MOVABLE 0x2 only in patch2: unchanged: --- linux-6.2.0.orig/include/linux/pe.h +++ linux-6.2.0/include/linux/pe.h @@ -11,25 +11,26 @@ #include /* - * Linux EFI stub v1.0 adds the following functionality: - * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path, - * - Loading/starting the kernel from firmware that targets a different - * machine type, via the entrypoint exposed in the .compat PE/COFF section. + * Starting from version v3.0, the major version field should be interpreted as + * a bit mask of features supported by the kernel's EFI stub: + * - 0x1: initrd loading from the LINUX_EFI_INITRD_MEDIA_GUID device path, + * - 0x2: initrd loading using the initrd= command line option, where the file + * may be specified using device path notation, and is not required to + * reside on the same volume as the loaded kernel image. * * The recommended way of loading and starting v1.0 or later kernels is to use * the LoadImage() and StartImage() EFI boot services, and expose the initrd * via the LINUX_EFI_INITRD_MEDIA_GUID device path. * - * Versions older than v1.0 support initrd loading via the image load options - * (using initrd=, limited to the volume from which the kernel itself was - * loaded), or via arch specific means (bootparams, DT, etc). 
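The feature-bit reading of the EFI-stub major version described above lends itself to a one-line check; a minimal, self-contained sketch (the macro names are invented for illustration, only the bit values 0x1 and 0x2 come from the comment):

#include <stdbool.h>
#include <stdint.h>

#define EFISTUB_FEAT_INITRD_MEDIA_GUID  0x1  /* initrd via LINUX_EFI_INITRD_MEDIA_GUID */
#define EFISTUB_FEAT_INITRD_CMDLINE     0x2  /* initrd= with device-path notation */

static bool efistub_has_feature(uint16_t major_version, uint16_t feature)
{
        return (major_version & feature) != 0;
}

/* e.g. efistub_has_feature(0x3, EFISTUB_FEAT_INITRD_CMDLINE) -> true */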
+ * Versions older than v1.0 may support initrd loading via the image load + * options (using initrd=, limited to the volume from which the kernel itself + * was loaded), or only via arch specific means (bootparams, DT, etc). * - * On x86, LoadImage() and StartImage() can be omitted if the EFI handover - * protocol is implemented, which can be inferred from the version, - * handover_offset and xloadflags fields in the bootparams structure. + * The minor version field must remain 0x0. + * (https://lore.kernel.org/all/efd6f2d4-547c-1378-1faa-53c044dbd297@gmail.com/) */ -#define LINUX_EFISTUB_MAJOR_VERSION 0x1 -#define LINUX_EFISTUB_MINOR_VERSION 0x1 +#define LINUX_EFISTUB_MAJOR_VERSION 0x3 +#define LINUX_EFISTUB_MINOR_VERSION 0x0 /* * LINUX_PE_MAGIC appears at offset 0x38 into the MS-DOS header of EFI bootable only in patch2: unchanged: --- linux-6.2.0.orig/include/linux/sunrpc/svcsock.h +++ linux-6.2.0/include/linux/sunrpc/svcsock.h @@ -59,10 +59,9 @@ int svc_send(struct svc_rqst *); void svc_drop(struct svc_rqst *); void svc_sock_update_bufs(struct svc_serv *serv); -bool svc_alien_sock(struct net *net, int fd); -int svc_addsock(struct svc_serv *serv, const int fd, - char *name_return, const size_t len, - const struct cred *cred); +int svc_addsock(struct svc_serv *serv, struct net *net, + const int fd, char *name_return, const size_t len, + const struct cred *cred); void svc_init_xprt_sock(void); void svc_cleanup_xprt_sock(void); struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot); only in patch2: unchanged: --- linux-6.2.0.orig/include/linux/usb/hcd.h +++ linux-6.2.0/include/linux/usb/hcd.h @@ -503,6 +503,11 @@ void hcd_buffer_free(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma); +void *hcd_buffer_alloc_pages(struct usb_hcd *hcd, + size_t size, gfp_t mem_flags, dma_addr_t *dma); +void hcd_buffer_free_pages(struct usb_hcd *hcd, + size_t size, void *addr, dma_addr_t dma); + /* generic bus glue, needed for host controllers that don't use PCI */ extern irqreturn_t usb_hcd_irq(int irq, void *__hcd); only in patch2: unchanged: --- linux-6.2.0.orig/include/media/dvb_net.h +++ linux-6.2.0/include/media/dvb_net.h @@ -41,6 +41,9 @@ * @exit: flag to indicate when the device is being removed. * @demux: pointer to &struct dmx_demux. * @ioctl_mutex: protect access to this struct. + * @remove_mutex: mutex that avoids a race condition between a callback + * called when the hardware is disconnected and the + * file_operations of dvb_net. * * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network * devices. 
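The @remove_mutex kernel-doc above describes a common pattern: the disconnect path and the char-device file_operations serialize on one mutex so an open or ioctl cannot race with teardown. A schematic sketch of that pattern (structure and function names invented, not the dvb_net implementation):

#include <linux/mutex.h>
#include <linux/errno.h>

struct sketch_dev {
        struct mutex remove_mutex;
        unsigned int exit:1;      /* set once the hardware is gone */
};

static int sketch_open(struct sketch_dev *d)
{
        int ret = 0;

        mutex_lock(&d->remove_mutex);
        if (d->exit)
                ret = -ENODEV;    /* device is already being removed */
        /* ... normal open work under the same lock ... */
        mutex_unlock(&d->remove_mutex);
        return ret;
}

static void sketch_disconnect(struct sketch_dev *d)
{
        mutex_lock(&d->remove_mutex);
        d->exit = 1;              /* new opens will now fail */
        /* ... release hardware resources ... */
        mutex_unlock(&d->remove_mutex);
}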
@@ -53,6 +56,7 @@ unsigned int exit:1; struct dmx_demux *demux; struct mutex ioctl_mutex; + struct mutex remove_mutex; }; /** only in patch2: unchanged: --- linux-6.2.0.orig/include/media/dvbdev.h +++ linux-6.2.0/include/media/dvbdev.h @@ -194,6 +194,21 @@ }; /** + * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list + * + * @fops: Dynamically allocated fops for ->owner registration + * @type: type of dvb_device + * @template: dvb_device used for registration + * @list_head: list_head for dvbdevfops_list + */ +struct dvbdevfops_node { + struct file_operations *fops; + enum dvb_device_type type; + const struct dvb_device *template; + struct list_head list_head; +}; + +/** * dvb_device_get - Increase dvb_device reference * * @dvbdev: pointer to struct dvb_device only in patch2: unchanged: --- linux-6.2.0.orig/include/net/bluetooth/bluetooth.h +++ linux-6.2.0/include/net/bluetooth/bluetooth.h @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky @@ -171,23 +172,39 @@ __u8 rtn; }; -struct bt_iso_qos { - union { - __u8 cig; - __u8 big; - }; - union { - __u8 cis; - __u8 bis; - }; - union { - __u8 sca; - __u8 sync_interval; - }; +struct bt_iso_ucast_qos { + __u8 cig; + __u8 cis; + __u8 sca; + __u8 packing; + __u8 framing; + struct bt_iso_io_qos in; + struct bt_iso_io_qos out; +}; + +struct bt_iso_bcast_qos { + __u8 big; + __u8 bis; + __u8 sync_interval; __u8 packing; __u8 framing; struct bt_iso_io_qos in; struct bt_iso_io_qos out; + __u8 encryption; + __u8 bcode[16]; + __u8 options; + __u16 skip; + __u16 sync_timeout; + __u8 sync_cte_type; + __u8 mse; + __u16 timeout; +}; + +struct bt_iso_qos { + union { + struct bt_iso_ucast_qos ucast; + struct bt_iso_bcast_qos bcast; + }; }; #define BT_ISO_PHY_1M 0x01 only in patch2: unchanged: --- linux-6.2.0.orig/include/net/ipv6.h +++ linux-6.2.0/include/net/ipv6.h @@ -751,12 +751,8 @@ /* more secured version of ipv6_addr_hash() */ static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval) { - u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1]; - - return jhash_3words(v, - (__force u32)a->s6_addr32[2], - (__force u32)a->s6_addr32[3], - initval); + return jhash2((__force const u32 *)a->s6_addr32, + ARRAY_SIZE(a->s6_addr32), initval); } static inline bool ipv6_addr_loopback(const struct in6_addr *a) only in patch2: unchanged: --- linux-6.2.0.orig/include/net/neighbour.h +++ linux-6.2.0/include/net/neighbour.h @@ -180,7 +180,7 @@ netdevice_tracker dev_tracker; u32 flags; u8 protocol; - u8 key[]; + u32 key[]; }; /* only in patch2: unchanged: --- linux-6.2.0.orig/include/net/netns/ipv6.h +++ linux-6.2.0/include/net/netns/ipv6.h @@ -53,7 +53,7 @@ int seg6_flowlabel; u32 ioam6_id; u64 ioam6_id_wide; - bool skip_notify_on_dev_down; + int skip_notify_on_dev_down; u8 fib_notify_on_flag_change; }; only in patch2: unchanged: --- linux-6.2.0.orig/include/net/ping.h +++ linux-6.2.0/include/net/ping.h @@ -16,11 +16,7 @@ #define PING_HTABLE_SIZE 64 #define PING_HTABLE_MASK (PING_HTABLE_SIZE-1) -/* - * gid_t is either uint or ushort. 
We want to pass it to - * proc_dointvec_minmax(), so it must not be larger than MAX_INT - */ -#define GID_T_MAX (((gid_t)~0U) >> 1) +#define GID_T_MAX (((gid_t)~0U) - 1) /* Compatibility glue so we can support IPv6 when it's compiled as a module */ struct pingv6_ops { only in patch2: unchanged: --- linux-6.2.0.orig/include/net/pkt_sched.h +++ linux-6.2.0/include/net/pkt_sched.h @@ -128,6 +128,8 @@ } } +extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; + /* Calculate maximal size of packet seen by hard_start_xmit routine of this device. */ only in patch2: unchanged: --- linux-6.2.0.orig/include/net/rpl.h +++ linux-6.2.0/include/net/rpl.h @@ -23,9 +23,6 @@ static inline void rpl_exit(void) {} #endif -/* Worst decompression memory usage ipv6 address (16) + pad 7 */ -#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7) - size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, unsigned char cmpre); only in patch2: unchanged: --- linux-6.2.0.orig/include/net/sch_generic.h +++ linux-6.2.0/include/net/sch_generic.h @@ -543,7 +543,7 @@ static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) { - return qdisc->dev_queue->qdisc_sleeping; + return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping); } static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) @@ -752,7 +752,9 @@ for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping) + + if (rcu_access_pointer(txq->qdisc) != + rcu_access_pointer(txq->qdisc_sleeping)) return true; } return false; only in patch2: unchanged: --- linux-6.2.0.orig/io_uring/epoll.c +++ linux-6.2.0/io_uring/epoll.c @@ -25,10 +25,6 @@ { struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll); - pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will " - "be removed in a future Linux kernel version.\n", - current->comm); - if (sqe->buf_index || sqe->splice_fd_in) return -EINVAL; only in patch2: unchanged: --- linux-6.2.0.orig/kernel/bpf/map_in_map.c +++ linux-6.2.0/kernel/bpf/map_in_map.c @@ -81,9 +81,13 @@ /* Misc members not needed in bpf_map_meta_equal() check. */ inner_map_meta->ops = inner_map->ops; if (inner_map->ops == &array_map_ops) { + struct bpf_array *inner_array_meta = + container_of(inner_map_meta, struct bpf_array, map); + struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map); + + inner_array_meta->index_mask = inner_array->index_mask; + inner_array_meta->elem_size = inner_array->elem_size; inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1; - container_of(inner_map_meta, struct bpf_array, map)->index_mask = - container_of(inner_map, struct bpf_array, map)->index_mask; } fdput(f); only in patch2: unchanged: --- linux-6.2.0.orig/kernel/trace/bpf_trace.c +++ linux-6.2.0/kernel/trace/bpf_trace.c @@ -904,13 +904,23 @@ BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz) { + struct path copy; long len; char *p; if (!sz) return 0; - p = d_path(path, buf, sz); + /* + * The path pointer is verified as trusted and safe to use, + * but let's double check it's valid anyway to workaround + * potentially broken verifier. 
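The defensive copy added below relies on copy_from_kernel_nofault(), which returns a negative errno if the source address faults instead of oopsing. A stripped-down sketch of the same idea (the helper name is invented; the kernel API and its signature are real):

#include <linux/uaccess.h>
#include <linux/path.h>

/* Take a private, fault-checked snapshot of *path before using it. */
static long snapshot_path(const struct path *path, struct path *out)
{
        long err = copy_from_kernel_nofault(out, path, sizeof(*out));

        return err < 0 ? err : 0;
}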
+ */ + len = copy_from_kernel_nofault(©, path, sizeof(*path)); + if (len < 0) + return len; + + p = d_path(©, buf, sz); if (IS_ERR(p)) { len = PTR_ERR(p); } else { only in patch2: unchanged: --- linux-6.2.0.orig/kernel/trace/trace_probe.h +++ linux-6.2.0/kernel/trace/trace_probe.h @@ -307,7 +307,7 @@ { struct trace_probe_event *tpe = trace_probe_event_from_call(call); - return list_first_entry(&tpe->probes, struct trace_probe, list); + return list_first_entry_or_null(&tpe->probes, struct trace_probe, list); } static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp) only in patch2: unchanged: --- linux-6.2.0.orig/lib/test_firmware.c +++ linux-6.2.0/lib/test_firmware.c @@ -44,6 +44,7 @@ bool sent; const struct firmware *fw; const char *name; + const char *fw_buf; struct completion completion; struct task_struct *task; struct device *dev; @@ -174,8 +175,14 @@ for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; - if (req->fw) + if (req->fw) { + if (req->fw_buf) { + kfree_const(req->fw_buf); + req->fw_buf = NULL; + } release_firmware(req->fw); + req->fw = NULL; + } } vfree(test_fw_config->reqs); @@ -352,16 +359,26 @@ return len; } -static int test_dev_config_update_bool(const char *buf, size_t size, +static inline int __test_dev_config_update_bool(const char *buf, size_t size, bool *cfg) { int ret; - mutex_lock(&test_fw_mutex); if (strtobool(buf, cfg) < 0) ret = -EINVAL; else ret = size; + + return ret; +} + +static int test_dev_config_update_bool(const char *buf, size_t size, + bool *cfg) +{ + int ret; + + mutex_lock(&test_fw_mutex); + ret = __test_dev_config_update_bool(buf, size, cfg); mutex_unlock(&test_fw_mutex); return ret; @@ -372,7 +389,8 @@ return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static int test_dev_config_update_size_t(const char *buf, +static int __test_dev_config_update_size_t( + const char *buf, size_t size, size_t *cfg) { @@ -383,9 +401,7 @@ if (ret) return ret; - mutex_lock(&test_fw_mutex); *(size_t *)cfg = new; - mutex_unlock(&test_fw_mutex); /* Always return full write size even if we didn't consume all */ return size; @@ -401,7 +417,7 @@ return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) +static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) { u8 val; int ret; @@ -410,14 +426,23 @@ if (ret) return ret; - mutex_lock(&test_fw_mutex); *(u8 *)cfg = val; - mutex_unlock(&test_fw_mutex); /* Always return full write size even if we didn't consume all */ return size; } +static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) +{ + int ret; + + mutex_lock(&test_fw_mutex); + ret = __test_dev_config_update_u8(buf, size, cfg); + mutex_unlock(&test_fw_mutex); + + return ret; +} + static ssize_t test_dev_config_show_u8(char *buf, u8 val) { return snprintf(buf, PAGE_SIZE, "%u\n", val); @@ -470,10 +495,10 @@ mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_u8(buf, count, - &test_fw_config->num_requests); + rc = __test_dev_config_update_u8(buf, count, + &test_fw_config->num_requests); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -517,10 +542,10 @@ mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_size_t(buf, count, - &test_fw_config->buf_size); + rc = __test_dev_config_update_size_t(buf, count, + &test_fw_config->buf_size); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -547,10 +572,10 @@ 
mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_size_t(buf, count, - &test_fw_config->file_offset); + rc = __test_dev_config_update_size_t(buf, count, + &test_fw_config->file_offset); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -651,6 +676,8 @@ mutex_lock(&test_fw_mutex); release_firmware(test_firmware); + if (test_fw_config->reqs) + __test_release_all_firmware(); test_firmware = NULL; rc = request_firmware(&test_firmware, name, dev); if (rc) { @@ -751,6 +778,8 @@ mutex_lock(&test_fw_mutex); release_firmware(test_firmware); test_firmware = NULL; + if (test_fw_config->reqs) + __test_release_all_firmware(); rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL, NULL, trigger_async_request_cb); if (rc) { @@ -793,6 +822,8 @@ mutex_lock(&test_fw_mutex); release_firmware(test_firmware); + if (test_fw_config->reqs) + __test_release_all_firmware(); test_firmware = NULL; rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name, dev, GFP_KERNEL, NULL, @@ -855,6 +886,8 @@ test_fw_config->buf_size); if (!req->fw) kfree(test_buf); + else + req->fw_buf = test_buf; } else { req->rc = test_fw_config->req_firmware(&req->fw, req->name, @@ -894,6 +927,11 @@ mutex_lock(&test_fw_mutex); + if (test_fw_config->reqs) { + rc = -EBUSY; + goto out_bail; + } + test_fw_config->reqs = vzalloc(array3_size(sizeof(struct test_batched_req), test_fw_config->num_requests, 2)); @@ -910,6 +948,7 @@ req->fw = NULL; req->idx = i; req->name = test_fw_config->name; + req->fw_buf = NULL; req->dev = dev; init_completion(&req->completion); req->task = kthread_run(test_fw_run_batch_request, req, @@ -992,6 +1031,11 @@ mutex_lock(&test_fw_mutex); + if (test_fw_config->reqs) { + rc = -EBUSY; + goto out_bail; + } + test_fw_config->reqs = vzalloc(array3_size(sizeof(struct test_batched_req), test_fw_config->num_requests, 2)); @@ -1009,6 +1053,7 @@ for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; req->name = test_fw_config->name; + req->fw_buf = NULL; req->fw = NULL; req->idx = i; init_completion(&req->completion); only in patch2: unchanged: --- linux-6.2.0.orig/mm/Kconfig.debug +++ linux-6.2.0/mm/Kconfig.debug @@ -98,6 +98,7 @@ config PAGE_TABLE_CHECK bool "Check for invalid mappings in user page tables" depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK + depends on EXCLUSIVE_SYSTEM_RAM select PAGE_EXTENSION help Check that anonymous page is not being mapped twice with read write only in patch2: unchanged: --- linux-6.2.0.orig/mm/page_table_check.c +++ linux-6.2.0/mm/page_table_check.c @@ -70,6 +70,8 @@ page = pfn_to_page(pfn); page_ext = page_ext_get(page); + + BUG_ON(PageSlab(page)); anon = PageAnon(page); for (i = 0; i < pgcnt; i++) { @@ -106,6 +108,8 @@ page = pfn_to_page(pfn); page_ext = page_ext_get(page); + + BUG_ON(PageSlab(page)); anon = PageAnon(page); for (i = 0; i < pgcnt; i++) { @@ -132,6 +136,8 @@ struct page_ext *page_ext; unsigned long i; + BUG_ON(PageSlab(page)); + page_ext = page_ext_get(page); BUG_ON(!page_ext); for (i = 0; i < (1ul << order); i++) { only in patch2: unchanged: --- linux-6.2.0.orig/net/atm/resources.c +++ linux-6.2.0/net/atm/resources.c @@ -400,6 +400,7 @@ return error; } +#ifdef CONFIG_PROC_FS void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos) { mutex_lock(&atm_dev_mutex); @@ -415,3 +416,4 @@ { return seq_list_next(v, &atm_devs, pos); } +#endif only in patch2: unchanged: --- linux-6.2.0.orig/net/batman-adv/distributed-arp-table.c +++ linux-6.2.0/net/batman-adv/distributed-arp-table.c 
@@ -101,7 +101,6 @@ */ static void batadv_dat_start_timer(struct batadv_priv *bat_priv) { - INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge); queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work, msecs_to_jiffies(10000)); } @@ -819,6 +818,7 @@ if (!bat_priv->dat.hash) return -ENOMEM; + INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge); batadv_dat_start_timer(bat_priv); batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1, only in patch2: unchanged: --- linux-6.2.0.orig/net/can/j1939/main.c +++ linux-6.2.0/net/can/j1939/main.c @@ -126,7 +126,7 @@ #define J1939_CAN_ID CAN_EFF_FLAG #define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG) -static DEFINE_SPINLOCK(j1939_netdev_lock); +static DEFINE_MUTEX(j1939_netdev_lock); static struct j1939_priv *j1939_priv_create(struct net_device *ndev) { @@ -220,7 +220,7 @@ j1939_can_rx_unregister(priv); j1939_ecu_unmap_all(priv); j1939_priv_set(priv->ndev, NULL); - spin_unlock(&j1939_netdev_lock); + mutex_unlock(&j1939_netdev_lock); } /* get pointer to priv without increasing ref counter */ @@ -248,9 +248,9 @@ { struct j1939_priv *priv; - spin_lock(&j1939_netdev_lock); + mutex_lock(&j1939_netdev_lock); priv = j1939_priv_get_by_ndev_locked(ndev); - spin_unlock(&j1939_netdev_lock); + mutex_unlock(&j1939_netdev_lock); return priv; } @@ -260,14 +260,14 @@ struct j1939_priv *priv, *priv_new; int ret; - spin_lock(&j1939_netdev_lock); + mutex_lock(&j1939_netdev_lock); priv = j1939_priv_get_by_ndev_locked(ndev); if (priv) { kref_get(&priv->rx_kref); - spin_unlock(&j1939_netdev_lock); + mutex_unlock(&j1939_netdev_lock); return priv; } - spin_unlock(&j1939_netdev_lock); + mutex_unlock(&j1939_netdev_lock); priv = j1939_priv_create(ndev); if (!priv) @@ -277,29 +277,31 @@ spin_lock_init(&priv->j1939_socks_lock); INIT_LIST_HEAD(&priv->j1939_socks); - spin_lock(&j1939_netdev_lock); + mutex_lock(&j1939_netdev_lock); priv_new = j1939_priv_get_by_ndev_locked(ndev); if (priv_new) { /* Someone was faster than us, use their priv and roll * back our's. 
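The j1939 changes above keep the usual "optimistic create, re-check under the lock" shape while switching the lock to a mutex. In outline (types and helpers invented for illustration; not the CAN code):

#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct sketch_priv {
        struct kref ref;
        /* ... per-device state ... */
};

static DEFINE_MUTEX(sketch_lock);
static struct sketch_priv *sketch_existing;   /* stand-in for the per-ndev lookup */

static struct sketch_priv *sketch_get_or_create(void)
{
        struct sketch_priv *priv, *priv_new;

        mutex_lock(&sketch_lock);
        priv = sketch_existing;
        if (priv) {
                kref_get(&priv->ref);
                mutex_unlock(&sketch_lock);
                return priv;
        }
        mutex_unlock(&sketch_lock);

        /* Allocate outside the lock (may sleep), then re-check. */
        priv_new = kzalloc(sizeof(*priv_new), GFP_KERNEL);
        if (!priv_new)
                return NULL;
        kref_init(&priv_new->ref);

        mutex_lock(&sketch_lock);
        priv = sketch_existing;
        if (priv) {
                /* Someone was faster than us: use theirs, drop ours. */
                kref_get(&priv->ref);
                mutex_unlock(&sketch_lock);
                kfree(priv_new);
                return priv;
        }
        sketch_existing = priv_new;
        mutex_unlock(&sketch_lock);
        return priv_new;
}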
*/ kref_get(&priv_new->rx_kref); - spin_unlock(&j1939_netdev_lock); + mutex_unlock(&j1939_netdev_lock); dev_put(ndev); kfree(priv); return priv_new; } j1939_priv_set(ndev, priv); - spin_unlock(&j1939_netdev_lock); ret = j1939_can_rx_register(priv); if (ret < 0) goto out_priv_put; + mutex_unlock(&j1939_netdev_lock); return priv; out_priv_put: j1939_priv_set(ndev, NULL); + mutex_unlock(&j1939_netdev_lock); + dev_put(ndev); kfree(priv); @@ -308,7 +310,7 @@ void j1939_netdev_stop(struct j1939_priv *priv) { - kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock); + kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock); j1939_priv_put(priv); } only in patch2: unchanged: --- linux-6.2.0.orig/net/ipv4/tcp_offload.c +++ linux-6.2.0/net/ipv4/tcp_offload.c @@ -60,12 +60,12 @@ struct tcphdr *th; unsigned int thlen; unsigned int seq; - __be32 delta; unsigned int oldlen; unsigned int mss; struct sk_buff *gso_skb = skb; __sum16 newcheck; bool ooo_okay, copy_destructor; + __wsum delta; th = tcp_hdr(skb); thlen = th->doff * 4; @@ -75,7 +75,7 @@ if (!pskb_may_pull(skb, thlen)) goto out; - oldlen = (u16)~skb->len; + oldlen = ~skb->len; __skb_pull(skb, thlen); mss = skb_shinfo(skb)->gso_size; @@ -110,7 +110,7 @@ if (skb_is_gso(segs)) mss *= skb_shinfo(segs)->gso_segs; - delta = htonl(oldlen + (thlen + mss)); + delta = (__force __wsum)htonl(oldlen + thlen + mss); skb = segs; th = tcp_hdr(skb); @@ -119,8 +119,7 @@ if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP)) tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); - newcheck = ~csum_fold((__force __wsum)((__force u32)th->check + - (__force u32)delta)); + newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta)); while (skb->next) { th->fin = th->psh = 0; @@ -165,11 +164,11 @@ WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc)); } - delta = htonl(oldlen + (skb_tail_pointer(skb) - - skb_transport_header(skb)) + - skb->data_len); - th->check = ~csum_fold((__force __wsum)((__force u32)th->check + - (__force u32)delta)); + delta = (__force __wsum)htonl(oldlen + + (skb_tail_pointer(skb) - + skb_transport_header(skb)) + + skb->data_len); + th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta)); if (skb->ip_summed == CHECKSUM_PARTIAL) gso_reset_checksum(skb, ~th->check); else only in patch2: unchanged: --- linux-6.2.0.orig/net/ipv4/tcp_timer.c +++ linux-6.2.0/net/ipv4/tcp_timer.c @@ -290,9 +290,19 @@ void tcp_delack_timer_handler(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); + struct tcp_sock *tp = tcp_sk(sk); - if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || - !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) + if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) + return; + + /* Handling the sack compression case */ + if (tp->compressed_ack) { + tcp_mstamp_refresh(tp); + tcp_sack_compress_send_ack(sk); + return; + } + + if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) return; if (time_after(icsk->icsk_ack.timeout, jiffies)) { @@ -312,7 +322,7 @@ inet_csk_exit_pingpong_mode(sk); icsk->icsk_ack.ato = TCP_ATO_MIN; } - tcp_mstamp_refresh(tcp_sk(sk)); + tcp_mstamp_refresh(tp); tcp_send_ack(sk); __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); } only in patch2: unchanged: --- linux-6.2.0.orig/net/ipv6/exthdrs.c +++ linux-6.2.0/net/ipv6/exthdrs.c @@ -569,24 +569,6 @@ return -1; } - if (skb_cloned(skb)) { - if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0, - GFP_ATOMIC)) { - __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), - IPSTATS_MIB_OUTDISCARDS); 
- kfree_skb(skb); - return -1; - } - } else { - err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE); - if (unlikely(err)) { - kfree_skb(skb); - return -1; - } - } - - hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb); - if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri, hdr->cmpre))) { kfree_skb(skb); @@ -630,6 +612,17 @@ skb_pull(skb, ((hdr->hdrlen + 1) << 3)); skb_postpull_rcsum(skb, oldhdr, sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3)); + if (unlikely(!hdr->segments_left)) { + if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0, + GFP_ATOMIC)) { + __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); + kfree_skb(skb); + kfree(buf); + return -1; + } + + oldhdr = ipv6_hdr(skb); + } skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr)); skb_reset_network_header(skb); skb_mac_header_rebuild(skb); only in patch2: unchanged: --- linux-6.2.0.orig/net/mac80211/chan.c +++ linux-6.2.0/net/mac80211/chan.c @@ -258,7 +258,8 @@ static enum nl80211_chan_width ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata, - struct ieee80211_chanctx_conf *conf) + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) { enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT; struct ieee80211_vif *vif = &sdata->vif; @@ -267,13 +268,14 @@ rcu_read_lock(); for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT; - struct ieee80211_bss_conf *link_conf = - rcu_dereference(sdata->vif.link_conf[link_id]); + struct ieee80211_link_data *link = + rcu_dereference(sdata->link[link_id]); - if (!link_conf) + if (!link) continue; - if (rcu_access_pointer(link_conf->chanctx_conf) != conf) + if (link != rsvd_for && + rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf) continue; switch (vif->type) { @@ -287,7 +289,7 @@ * point, so take the width from the chandef, but * account also for TDLS peers */ - width = max(link_conf->chandef.width, + width = max(link->conf->chandef.width, ieee80211_get_max_required_bw(sdata, link_id)); break; case NL80211_IFTYPE_P2P_DEVICE: @@ -296,7 +298,7 @@ case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_OCB: - width = link_conf->chandef.width; + width = link->conf->chandef.width; break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_UNSPECIFIED: @@ -316,7 +318,8 @@ static enum nl80211_chan_width ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, - struct ieee80211_chanctx_conf *conf) + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) { struct ieee80211_sub_if_data *sdata; enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT; @@ -328,7 +331,8 @@ if (!ieee80211_sdata_running(sdata)) continue; - width = ieee80211_get_chanctx_vif_max_required_bw(sdata, conf); + width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx, + rsvd_for); max_bw = max(max_bw, width); } @@ -336,8 +340,8 @@ /* use the configured bandwidth in case of monitor interface */ sdata = rcu_dereference(local->monitor_sdata); if (sdata && - rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == conf) - max_bw = max(max_bw, conf->def.width); + rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf) + max_bw = max(max_bw, ctx->conf.def.width); rcu_read_unlock(); @@ -349,8 +353,10 @@ * the max of min required widths of all the interfaces bound to this * channel context. 
*/ -static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx) +static u32 +_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) { enum nl80211_chan_width max_bw; struct cfg80211_chan_def min_def; @@ -370,7 +376,7 @@ return 0; } - max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf); + max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for); /* downgrade chandef up to max_bw */ min_def = ctx->conf.def; @@ -448,9 +454,10 @@ * channel context. */ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx) + struct ieee80211_chanctx *ctx, + struct ieee80211_link_data *rsvd_for) { - u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx); + u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); if (!changed) return; @@ -464,10 +471,11 @@ ieee80211_chan_bw_change(local, ctx, false); } -static void ieee80211_change_chanctx(struct ieee80211_local *local, - struct ieee80211_chanctx *ctx, - struct ieee80211_chanctx *old_ctx, - const struct cfg80211_chan_def *chandef) +static void _ieee80211_change_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_chanctx *old_ctx, + const struct cfg80211_chan_def *chandef, + struct ieee80211_link_data *rsvd_for) { u32 changed; @@ -492,7 +500,7 @@ ieee80211_chan_bw_change(local, old_ctx, true); if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) { - ieee80211_recalc_chanctx_min_def(local, ctx); + ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); return; } @@ -502,7 +510,7 @@ /* check if min chanctx also changed */ changed = IEEE80211_CHANCTX_CHANGE_WIDTH | - _ieee80211_recalc_chanctx_min_def(local, ctx); + _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); drv_change_chanctx(local, ctx, changed); if (!local->use_chanctx) { @@ -514,6 +522,14 @@ ieee80211_chan_bw_change(local, old_ctx, false); } +static void ieee80211_change_chanctx(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx, + struct ieee80211_chanctx *old_ctx, + const struct cfg80211_chan_def *chandef) +{ + _ieee80211_change_chanctx(local, ctx, old_ctx, chandef, NULL); +} + static struct ieee80211_chanctx * ieee80211_find_chanctx(struct ieee80211_local *local, const struct cfg80211_chan_def *chandef, @@ -638,7 +654,7 @@ ctx->conf.rx_chains_dynamic = 1; ctx->mode = mode; ctx->conf.radar_enabled = false; - ieee80211_recalc_chanctx_min_def(local, ctx); + _ieee80211_recalc_chanctx_min_def(local, ctx, NULL); return ctx; } @@ -855,6 +871,9 @@ } if (new_ctx) { + /* recalc considering the link we'll use it for now */ + ieee80211_recalc_chanctx_min_def(local, new_ctx, link); + ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx); if (ret) goto out; @@ -873,12 +892,12 @@ ieee80211_recalc_chanctx_chantype(local, curr_ctx); ieee80211_recalc_smps_chanctx(local, curr_ctx); ieee80211_recalc_radar_chanctx(local, curr_ctx); - ieee80211_recalc_chanctx_min_def(local, curr_ctx); + ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL); } if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) { ieee80211_recalc_txpower(sdata, false); - ieee80211_recalc_chanctx_min_def(local, new_ctx); + ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL); } if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && @@ -1270,7 +1289,7 @@ ieee80211_link_update_chandef(link, &link->reserved_chandef); - ieee80211_change_chanctx(local, 
new_ctx, old_ctx, chandef); + _ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef, link); vif_chsw[0].vif = &sdata->vif; vif_chsw[0].old_ctx = &old_ctx->conf; @@ -1300,7 +1319,7 @@ if (ieee80211_chanctx_refcount(local, old_ctx) == 0) ieee80211_free_chanctx(local, old_ctx); - ieee80211_recalc_chanctx_min_def(local, new_ctx); + ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL); ieee80211_recalc_smps_chanctx(local, new_ctx); ieee80211_recalc_radar_chanctx(local, new_ctx); @@ -1665,7 +1684,7 @@ ieee80211_recalc_chanctx_chantype(local, ctx); ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_recalc_radar_chanctx(local, ctx); - ieee80211_recalc_chanctx_min_def(local, ctx); + ieee80211_recalc_chanctx_min_def(local, ctx, NULL); list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, reserved_chanctx_list) { only in patch2: unchanged: --- linux-6.2.0.orig/net/mac80211/he.c +++ linux-6.2.0/net/mac80211/he.c @@ -3,7 +3,7 @@ * HE handling * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2019 - 2022 Intel Corporation + * Copyright(c) 2019 - 2023 Intel Corporation */ #include "ieee80211_i.h" @@ -114,6 +114,7 @@ struct link_sta_info *link_sta) { struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap; + const struct ieee80211_sta_he_cap *own_he_cap_ptr; struct ieee80211_sta_he_cap own_he_cap; struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie; u8 he_ppe_size; @@ -123,12 +124,16 @@ memset(he_cap, 0, sizeof(*he_cap)); - if (!he_cap_ie || - !ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif))) + if (!he_cap_ie) return; - own_he_cap = sband->iftype_data->he_cap; + own_he_cap_ptr = + ieee80211_get_he_iftype_cap(sband, + ieee80211_vif_type_p2p(&sdata->vif)); + if (!own_he_cap_ptr) + return; + + own_he_cap = *own_he_cap_ptr; /* Make sure size is OK */ mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem); only in patch2: unchanged: --- linux-6.2.0.orig/net/mac80211/mlme.c +++ linux-6.2.0/net/mac80211/mlme.c @@ -1143,6 +1143,7 @@ const u16 *inner) { unsigned int skb_len = skb->len; + bool at_extension = false; bool added = false; int i, j; u8 *len, *list_len = NULL; @@ -1154,7 +1155,6 @@ for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) { u16 elem = outer[i]; bool have_inner = false; - bool at_extension = false; /* should at least be sorted in the sense of normal -> ext */ WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS); @@ -1183,8 +1183,14 @@ } *list_len += 1; skb_put_u8(skb, (u8)elem); + added = true; } + /* if we added a list but no extension list, make a zero-len one */ + if (added && (!at_extension || !list_len)) + skb_put_u8(skb, 0); + + /* if nothing added remove extension element completely */ if (!added) skb_trim(skb, skb_len); else only in patch2: unchanged: --- linux-6.2.0.orig/net/mptcp/pm.c +++ linux-6.2.0/net/mptcp/pm.c @@ -87,8 +87,15 @@ unsigned int subflows_max; int ret = 0; - if (mptcp_pm_is_userspace(msk)) - return mptcp_userspace_pm_active(msk); + if (mptcp_pm_is_userspace(msk)) { + if (mptcp_userspace_pm_active(msk)) { + spin_lock_bh(&pm->lock); + pm->subflows++; + spin_unlock_bh(&pm->lock); + return true; + } + return false; + } subflows_max = mptcp_pm_get_subflows_max(msk); @@ -181,8 +188,16 @@ struct mptcp_pm_data *pm = &msk->pm; bool update_subflows; - update_subflows = (subflow->request_join || subflow->mp_join) && - mptcp_pm_is_kernel(msk); + update_subflows = subflow->request_join || subflow->mp_join; + if (mptcp_pm_is_userspace(msk)) { + if (update_subflows) { + spin_lock_bh(&pm->lock); + 
pm->subflows--; + spin_unlock_bh(&pm->lock); + } + return; + } + if (!READ_ONCE(pm->work_pending) && !update_subflows) return; only in patch2: unchanged: --- linux-6.2.0.orig/net/mptcp/pm_userspace.c +++ linux-6.2.0/net/mptcp/pm_userspace.c @@ -69,6 +69,7 @@ MPTCP_PM_MAX_ADDR_ID + 1, 1); list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list); + msk->pm.local_addr_used++; ret = e->addr.id; } else if (match) { ret = entry->addr.id; @@ -78,6 +79,31 @@ return ret; } +/* If the subflow is closed from the other peer (not via a + * subflow destroy command then), we want to keep the entry + * not to assign the same ID to another address and to be + * able to send RM_ADDR after the removal of the subflow. + */ +static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk, + struct mptcp_pm_addr_entry *addr) +{ + struct mptcp_pm_addr_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &msk->pm.userspace_pm_local_addr_list, list) { + if (mptcp_addresses_equal(&entry->addr, &addr->addr, false)) { + /* TODO: a refcount is needed because the entry can + * be used multiple times (e.g. fullmesh mode). + */ + list_del_rcu(&entry->list); + kfree(entry); + msk->pm.local_addr_used--; + return 0; + } + } + + return -EINVAL; +} + int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, u8 *flags, int *ifindex) @@ -170,6 +196,7 @@ spin_lock_bh(&msk->pm.lock); if (mptcp_pm_alloc_anno_list(msk, &addr_val)) { + msk->pm.add_addr_signaled++; mptcp_pm_announce_addr(msk, &addr_val.addr, false); mptcp_pm_nl_addr_send_ack(msk); } @@ -231,7 +258,7 @@ list_move(&match->list, &free_list); - mptcp_pm_remove_addrs_and_subflows(msk, &free_list); + mptcp_pm_remove_addrs(msk, &free_list); release_sock((struct sock *)msk); @@ -250,6 +277,7 @@ struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR]; + struct mptcp_pm_addr_entry local = { 0 }; struct mptcp_addr_info addr_r; struct mptcp_addr_info addr_l; struct mptcp_sock *msk; @@ -301,12 +329,26 @@ goto create_err; } + local.addr = addr_l; + err = mptcp_userspace_pm_append_new_local_addr(msk, &local); + if (err < 0) { + GENL_SET_ERR_MSG(info, "did not match address and id"); + goto create_err; + } + lock_sock(sk); err = __mptcp_subflow_connect(sk, &addr_l, &addr_r); release_sock(sk); + spin_lock_bh(&msk->pm.lock); + if (err) + mptcp_userspace_pm_delete_local_addr(msk, &local); + else + msk->pm.subflows++; + spin_unlock_bh(&msk->pm.lock); + create_err: sock_put((struct sock *)msk); return err; @@ -419,7 +461,11 @@ ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r); if (ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); + struct mptcp_pm_addr_entry entry = { .addr = addr_l }; + spin_lock_bh(&msk->pm.lock); + mptcp_userspace_pm_delete_local_addr(msk, &entry); + spin_unlock_bh(&msk->pm.lock); mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN); mptcp_close_ssk(sk, ssk, subflow); MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW); only in patch2: unchanged: --- linux-6.2.0.orig/net/netfilter/ipset/ip_set_core.c +++ linux-6.2.0/net/netfilter/ipset/ip_set_core.c @@ -1694,6 +1694,14 @@ bool eexist = flags & IPSET_FLAG_EXIST, retried = false; do { + if (retried) { + __ip_set_get(set); + nfnl_unlock(NFNL_SUBSYS_IPSET); + cond_resched(); + nfnl_lock(NFNL_SUBSYS_IPSET); + __ip_set_put(set); + } + ip_set_lock(set); ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried); 
ip_set_unlock(set); only in patch2: unchanged: --- linux-6.2.0.orig/net/netfilter/nft_bitwise.c +++ linux-6.2.0/net/netfilter/nft_bitwise.c @@ -323,7 +323,7 @@ dreg = priv->dreg; regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE); for (i = 0; i < regcount; i++, dreg++) - track->regs[priv->dreg].bitwise = expr; + track->regs[dreg].bitwise = expr; return false; } only in patch2: unchanged: --- linux-6.2.0.orig/net/netrom/nr_subr.c +++ linux-6.2.0/net/netrom/nr_subr.c @@ -123,7 +123,7 @@ unsigned char *dptr; int len, timeout; - len = NR_NETWORK_LEN + NR_TRANSPORT_LEN; + len = NR_TRANSPORT_LEN; switch (frametype & 0x0F) { case NR_CONNREQ: @@ -141,7 +141,8 @@ return; } - if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) + skb = alloc_skb(NR_NETWORK_LEN + len, GFP_ATOMIC); + if (!skb) return; /* @@ -149,7 +150,7 @@ */ skb_reserve(skb, NR_NETWORK_LEN); - dptr = skb_put(skb, skb_tailroom(skb)); + dptr = skb_put(skb, len); switch (frametype & 0x0F) { case NR_CONNREQ: only in patch2: unchanged: --- linux-6.2.0.orig/net/openvswitch/datapath.c +++ linux-6.2.0/net/openvswitch/datapath.c @@ -236,9 +236,6 @@ /* First drop references to device. */ hlist_del_rcu(&p->dp_hash_node); - /* Free percpu memory */ - free_percpu(p->upcall_stats); - /* Then destroy it. */ ovs_vport_del(p); } @@ -1858,12 +1855,6 @@ goto err_destroy_portids; } - vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu); - if (!vport->upcall_stats) { - err = -ENOMEM; - goto err_destroy_vport; - } - err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, info->snd_seq, 0, OVS_DP_CMD_NEW); BUG_ON(err < 0); @@ -1876,8 +1867,6 @@ ovs_notify(&dp_datapath_genl_family, reply, info); return 0; -err_destroy_vport: - ovs_dp_detach_port(vport); err_destroy_portids: kfree(rcu_dereference_raw(dp->upcall_portids)); err_unlock_and_destroy_meters: @@ -2322,12 +2311,6 @@ goto exit_unlock_free; } - vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu); - if (!vport->upcall_stats) { - err = -ENOMEM; - goto exit_unlock_free_vport; - } - err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info), info->snd_portid, info->snd_seq, 0, OVS_VPORT_CMD_NEW, GFP_KERNEL); @@ -2345,8 +2328,6 @@ ovs_notify(&dp_vport_genl_family, reply, info); return 0; -exit_unlock_free_vport: - ovs_dp_detach_port(vport); exit_unlock_free: ovs_unlock(); kfree_skb(reply); only in patch2: unchanged: --- linux-6.2.0.orig/net/openvswitch/vport.c +++ linux-6.2.0/net/openvswitch/vport.c @@ -124,6 +124,7 @@ { struct vport *vport; size_t alloc_size; + int err; alloc_size = sizeof(struct vport); if (priv_size) { @@ -135,17 +136,29 @@ if (!vport) return ERR_PTR(-ENOMEM); + vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu); + if (!vport->upcall_stats) { + err = -ENOMEM; + goto err_kfree_vport; + } + vport->dp = parms->dp; vport->port_no = parms->port_no; vport->ops = ops; INIT_HLIST_NODE(&vport->dp_hash_node); if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) { - kfree(vport); - return ERR_PTR(-EINVAL); + err = -EINVAL; + goto err_free_percpu; } return vport; + +err_free_percpu: + free_percpu(vport->upcall_stats); +err_kfree_vport: + kfree(vport); + return ERR_PTR(err); } EXPORT_SYMBOL_GPL(ovs_vport_alloc); @@ -165,6 +178,7 @@ * it is safe to use raw dereference. 
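The ovs_vport_alloc() rework above is the canonical goto-unwind shape: each successfully acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A self-contained illustration in plain userspace C (names invented):

#include <stdlib.h>

struct widget { char *name; int *stats; };

static struct widget *widget_alloc(void)
{
        struct widget *w = malloc(sizeof(*w));
        if (!w)
                return NULL;

        w->stats = calloc(16, sizeof(*w->stats));
        if (!w->stats)
                goto err_free_widget;

        w->name = malloc(32);
        if (!w->name)
                goto err_free_stats;

        return w;

err_free_stats:
        free(w->stats);
err_free_widget:
        free(w);
        return NULL;
}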
*/ kfree(rcu_dereference_raw(vport->upcall_portids)); + free_percpu(vport->upcall_stats); kfree(vport); } EXPORT_SYMBOL_GPL(ovs_vport_free); only in patch2: unchanged: --- linux-6.2.0.orig/net/rxrpc/local_event.c +++ linux-6.2.0/net/rxrpc/local_event.c @@ -16,7 +16,16 @@ #include #include "ar-internal.h" -static const char rxrpc_version_string[65] = "linux-" UTS_RELEASE " AF_RXRPC"; +static char rxrpc_version_string[65]; // "linux-" UTS_RELEASE " AF_RXRPC"; + +/* + * Generate the VERSION packet string. + */ +void rxrpc_gen_version_string(void) +{ + snprintf(rxrpc_version_string, sizeof(rxrpc_version_string), + "linux-%.49s AF_RXRPC", UTS_RELEASE); +} /* * Reply to a version request only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/act_police.c +++ linux-6.2.0/net/sched/act_police.c @@ -357,23 +357,23 @@ opt.burst = PSCHED_NS2TICKS(p->tcfp_burst); if (p->rate_present) { psched_ratecfg_getrate(&opt.rate, &p->rate); - if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) && + if ((p->rate.rate_bytes_ps >= (1ULL << 32)) && nla_put_u64_64bit(skb, TCA_POLICE_RATE64, - police->params->rate.rate_bytes_ps, + p->rate.rate_bytes_ps, TCA_POLICE_PAD)) goto nla_put_failure; } if (p->peak_present) { psched_ratecfg_getrate(&opt.peakrate, &p->peak); - if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) && + if ((p->peak.rate_bytes_ps >= (1ULL << 32)) && nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64, - police->params->peak.rate_bytes_ps, + p->peak.rate_bytes_ps, TCA_POLICE_PAD)) goto nla_put_failure; } if (p->pps_present) { if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64, - police->params->ppsrate.rate_pkts_ps, + p->ppsrate.rate_pkts_ps, TCA_POLICE_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64, only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/cls_route.c +++ linux-6.2.0/net/sched/cls_route.c @@ -513,7 +513,6 @@ if (fold) { f->id = fold->id; f->iif = fold->iif; - f->res = fold->res; f->handle = fold->handle; f->tp = fold->tp; only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_api.c +++ linux-6.2.0/net/sched/sch_api.c @@ -309,7 +309,7 @@ if (dev_ingress_queue(dev)) q = qdisc_match_from_root( - dev_ingress_queue(dev)->qdisc_sleeping, + rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping), handle); out: return q; @@ -328,7 +328,8 @@ nq = dev_ingress_queue_rcu(dev); if (nq) - q = qdisc_match_from_root(nq->qdisc_sleeping, handle); + q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping), + handle); out: return q; } @@ -634,8 +635,13 @@ void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires, u64 delta_ns) { - if (test_bit(__QDISC_STATE_DEACTIVATED, - &qdisc_root_sleeping(wd->qdisc)->state)) + bool deactivated; + + rcu_read_lock(); + deactivated = test_bit(__QDISC_STATE_DEACTIVATED, + &qdisc_root_sleeping(wd->qdisc)->state); + rcu_read_unlock(); + if (deactivated) return; if (hrtimer_is_queued(&wd->timer)) { @@ -1242,7 +1248,12 @@ sch->parent = parent; if (handle == TC_H_INGRESS) { - sch->flags |= TCQ_F_INGRESS; + if (!(sch->flags & TCQ_F_INGRESS)) { + NL_SET_ERR_MSG(extack, + "Specified parent ID is reserved for ingress and clsact Qdiscs"); + err = -EINVAL; + goto err_out3; + } handle = TC_H_MAKE(TC_H_INGRESS, 0); } else { if (handle == 0) { @@ -1469,7 +1480,7 @@ } q = qdisc_leaf(p, clid); } else if (dev_ingress_queue(dev)) { - q = dev_ingress_queue(dev)->qdisc_sleeping; + q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); } } else { q = rtnl_dereference(dev->qdisc); @@ -1555,7 +1566,7 @@ } q = 
qdisc_leaf(p, clid); } else if (dev_ingress_queue_create(dev)) { - q = dev_ingress_queue(dev)->qdisc_sleeping; + q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping); } } else { q = rtnl_dereference(dev->qdisc); @@ -1587,11 +1598,20 @@ NL_SET_ERR_MSG(extack, "Invalid qdisc name"); return -EINVAL; } + if (q->flags & TCQ_F_INGRESS) { + NL_SET_ERR_MSG(extack, + "Cannot regraft ingress or clsact Qdiscs"); + return -EINVAL; + } if (q == p || (p && check_loop(q, p, 0))) { NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected"); return -ELOOP; } + if (clid == TC_H_INGRESS) { + NL_SET_ERR_MSG(extack, "Ingress cannot graft directly"); + return -EINVAL; + } qdisc_refcount_inc(q); goto graft; } else { @@ -1787,8 +1807,8 @@ dev_queue = dev_ingress_queue(dev); if (dev_queue && - tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, - &q_idx, s_q_idx, false, + tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping), + skb, cb, &q_idx, s_q_idx, false, tca[TCA_DUMP_INVISIBLE]) < 0) goto done; @@ -2226,8 +2246,8 @@ dev_queue = dev_ingress_queue(dev); if (dev_queue && - tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, - &t, s_t, false) < 0) + tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping), + skb, tcm, cb, &t, s_t, false) < 0) goto done; done: only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_fq_pie.c +++ linux-6.2.0/net/sched/sch_fq_pie.c @@ -201,6 +201,11 @@ return NET_XMIT_CN; } +static struct netlink_range_validation fq_pie_q_range = { + .min = 1, + .max = 1 << 20, +}; + static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = { [TCA_FQ_PIE_LIMIT] = {.type = NLA_U32}, [TCA_FQ_PIE_FLOWS] = {.type = NLA_U32}, @@ -208,7 +213,8 @@ [TCA_FQ_PIE_TUPDATE] = {.type = NLA_U32}, [TCA_FQ_PIE_ALPHA] = {.type = NLA_U32}, [TCA_FQ_PIE_BETA] = {.type = NLA_U32}, - [TCA_FQ_PIE_QUANTUM] = {.type = NLA_U32}, + [TCA_FQ_PIE_QUANTUM] = + NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range), [TCA_FQ_PIE_MEMORY_LIMIT] = {.type = NLA_U32}, [TCA_FQ_PIE_ECN_PROB] = {.type = NLA_U32}, [TCA_FQ_PIE_ECN] = {.type = NLA_U32}, @@ -373,6 +379,7 @@ spinlock_t *root_lock; /* to lock qdisc for probability calculations */ u32 idx; + rcu_read_lock(); root_lock = qdisc_lock(qdisc_root_sleeping(sch)); spin_lock(root_lock); @@ -385,6 +392,7 @@ mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate); spin_unlock(root_lock); + rcu_read_unlock(); } static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt, only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_generic.c +++ linux-6.2.0/net/sched/sch_generic.c @@ -648,7 +648,7 @@ static struct netdev_queue noop_netdev_queue = { RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc), - .qdisc_sleeping = &noop_qdisc, + RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc), }; struct Qdisc noop_qdisc = { @@ -1103,7 +1103,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, struct Qdisc *qdisc) { - struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; + struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping); spinlock_t *root_lock; root_lock = qdisc_lock(oqdisc); @@ -1112,7 +1112,7 @@ /* ... 
and graft new one */ if (qdisc == NULL) qdisc = &noop_qdisc; - dev_queue->qdisc_sleeping = qdisc; + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); spin_unlock_bh(root_lock); @@ -1125,12 +1125,12 @@ struct netdev_queue *dev_queue, void *_qdisc_default) { - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); struct Qdisc *qdisc_default = _qdisc_default; if (qdisc) { rcu_assign_pointer(dev_queue->qdisc, qdisc_default); - dev_queue->qdisc_sleeping = qdisc_default; + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default); qdisc_put(qdisc); } @@ -1154,7 +1154,7 @@ if (!netif_is_multiqueue(dev)) qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; - dev_queue->qdisc_sleeping = qdisc; + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); } static void attach_default_qdiscs(struct net_device *dev) @@ -1167,7 +1167,7 @@ if (!netif_is_multiqueue(dev) || dev->priv_flags & IFF_NO_QUEUE) { netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); - qdisc = txq->qdisc_sleeping; + qdisc = rtnl_dereference(txq->qdisc_sleeping); rcu_assign_pointer(dev->qdisc, qdisc); qdisc_refcount_inc(qdisc); } else { @@ -1186,7 +1186,7 @@ netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); dev->priv_flags |= IFF_NO_QUEUE; netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); - qdisc = txq->qdisc_sleeping; + qdisc = rtnl_dereference(txq->qdisc_sleeping); rcu_assign_pointer(dev->qdisc, qdisc); qdisc_refcount_inc(qdisc); dev->priv_flags ^= IFF_NO_QUEUE; @@ -1202,7 +1202,7 @@ struct netdev_queue *dev_queue, void *_need_watchdog) { - struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping; + struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); int *need_watchdog_p = _need_watchdog; if (!(new_qdisc->flags & TCQ_F_BUILTIN)) @@ -1272,7 +1272,7 @@ struct Qdisc *qdisc; bool nolock; - qdisc = dev_queue->qdisc_sleeping; + qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); if (!qdisc) return; @@ -1303,7 +1303,7 @@ int val; dev_queue = netdev_get_tx_queue(dev, i); - q = dev_queue->qdisc_sleeping; + q = rtnl_dereference(dev_queue->qdisc_sleeping); root_lock = qdisc_lock(q); spin_lock_bh(root_lock); @@ -1379,7 +1379,7 @@ static int qdisc_change_tx_queue_len(struct net_device *dev, struct netdev_queue *dev_queue) { - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); const struct Qdisc_ops *ops = qdisc->ops; if (ops->change_tx_queue_len) @@ -1404,7 +1404,7 @@ unsigned int i; for (i = new_real_tx; i < dev->real_num_tx_queues; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); /* Only update the default qdiscs we created, * qdiscs with handles are always hashed. 
*/ @@ -1412,7 +1412,7 @@ qdisc_hash_del(qdisc); } for (i = dev->real_num_tx_queues; i < new_real_tx; i++) { - qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping; + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping); if (qdisc != &noop_qdisc && !qdisc->handle) qdisc_hash_add(qdisc, false); } @@ -1449,7 +1449,7 @@ struct Qdisc *qdisc = _qdisc; rcu_assign_pointer(dev_queue->qdisc, qdisc); - dev_queue->qdisc_sleeping = qdisc; + rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc); } void dev_init_scheduler(struct net_device *dev) only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_ingress.c +++ linux-6.2.0/net/sched/sch_ingress.c @@ -80,6 +80,9 @@ struct net_device *dev = qdisc_dev(sch); int err; + if (sch->parent != TC_H_INGRESS) + return -EOPNOTSUPP; + net_inc_ingress_queue(); mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress); @@ -101,6 +104,9 @@ { struct ingress_sched_data *q = qdisc_priv(sch); + if (sch->parent != TC_H_INGRESS) + return; + tcf_block_put_ext(q->block, sch, &q->block_info); net_dec_ingress_queue(); } @@ -134,7 +140,7 @@ .cl_ops = &ingress_class_ops, .id = "ingress", .priv_size = sizeof(struct ingress_sched_data), - .static_flags = TCQ_F_CPUSTATS, + .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = ingress_init, .destroy = ingress_destroy, .dump = ingress_dump, @@ -219,6 +225,9 @@ struct net_device *dev = qdisc_dev(sch); int err; + if (sch->parent != TC_H_CLSACT) + return -EOPNOTSUPP; + net_inc_ingress_queue(); net_inc_egress_queue(); @@ -248,6 +257,9 @@ { struct clsact_sched_data *q = qdisc_priv(sch); + if (sch->parent != TC_H_CLSACT) + return; + tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); @@ -269,7 +281,7 @@ .cl_ops = &clsact_class_ops, .id = "clsact", .priv_size = sizeof(struct clsact_sched_data), - .static_flags = TCQ_F_CPUSTATS, + .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = clsact_init, .destroy = clsact_destroy, .dump = ingress_dump, only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_mq.c +++ linux-6.2.0/net/sched/sch_mq.c @@ -141,7 +141,7 @@ * qdisc totals are added at end. */ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { - qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); spin_lock_bh(qdisc_lock(qdisc)); gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, @@ -202,7 +202,7 @@ { struct netdev_queue *dev_queue = mq_queue_get(sch, cl); - return dev_queue->qdisc_sleeping; + return rtnl_dereference(dev_queue->qdisc_sleeping); } static unsigned long mq_find(struct Qdisc *sch, u32 classid) @@ -221,7 +221,7 @@ tcm->tcm_parent = TC_H_ROOT; tcm->tcm_handle |= TC_H_MIN(cl); - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; + tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; return 0; } @@ -230,7 +230,7 @@ { struct netdev_queue *dev_queue = mq_queue_get(sch, cl); - sch = dev_queue->qdisc_sleeping; + sch = rtnl_dereference(dev_queue->qdisc_sleeping); if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 || qdisc_qstats_copy(d, sch) < 0) return -1; only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_mqprio.c +++ linux-6.2.0/net/sched/sch_mqprio.c @@ -399,7 +399,7 @@ * qdisc totals are added at end. 
*/ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { - qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; + qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); spin_lock_bh(qdisc_lock(qdisc)); gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats, @@ -449,7 +449,7 @@ if (!dev_queue) return NULL; - return dev_queue->qdisc_sleeping; + return rtnl_dereference(dev_queue->qdisc_sleeping); } static unsigned long mqprio_find(struct Qdisc *sch, u32 classid) @@ -482,7 +482,7 @@ tcm->tcm_parent = (tc < 0) ? 0 : TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(tc + TC_H_MIN_PRIORITY)); - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; + tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; } else { tcm->tcm_parent = TC_H_ROOT; tcm->tcm_info = 0; @@ -538,7 +538,7 @@ } else { struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl); - sch = dev_queue->qdisc_sleeping; + sch = rtnl_dereference(dev_queue->qdisc_sleeping); if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 || qdisc_qstats_copy(d, sch) < 0) only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_pie.c +++ linux-6.2.0/net/sched/sch_pie.c @@ -421,8 +421,10 @@ { struct pie_sched_data *q = from_timer(q, t, adapt_timer); struct Qdisc *sch = q->sch; - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); + spinlock_t *root_lock; + rcu_read_lock(); + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); spin_lock(root_lock); pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog); @@ -430,6 +432,7 @@ if (q->params.tupdate) mod_timer(&q->adapt_timer, jiffies + q->params.tupdate); spin_unlock(root_lock); + rcu_read_unlock(); } static int pie_init(struct Qdisc *sch, struct nlattr *opt, only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_red.c +++ linux-6.2.0/net/sched/sch_red.c @@ -321,12 +321,15 @@ { struct red_sched_data *q = from_timer(q, t, adapt_timer); struct Qdisc *sch = q->sch; - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); + spinlock_t *root_lock; + rcu_read_lock(); + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); spin_lock(root_lock); red_adaptative_algo(&q->parms, &q->vars); mod_timer(&q->adapt_timer, jiffies + HZ/2); spin_unlock(root_lock); + rcu_read_unlock(); } static int red_init(struct Qdisc *sch, struct nlattr *opt, only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_sfq.c +++ linux-6.2.0/net/sched/sch_sfq.c @@ -606,10 +606,12 @@ { struct sfq_sched_data *q = from_timer(q, t, perturb_timer); struct Qdisc *sch = q->sch; - spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); + spinlock_t *root_lock; siphash_key_t nkey; get_random_bytes(&nkey, sizeof(nkey)); + rcu_read_lock(); + root_lock = qdisc_lock(qdisc_root_sleeping(sch)); spin_lock(root_lock); q->perturbation = nkey; if (!q->filter_list && q->tail) @@ -618,6 +620,7 @@ if (q->perturb_period) mod_timer(&q->perturb_timer, jiffies + q->perturb_period); + rcu_read_unlock(); } static int sfq_change(struct Qdisc *sch, struct nlattr *opt) only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_taprio.c +++ linux-6.2.0/net/sched/sch_taprio.c @@ -2050,7 +2050,7 @@ if (!dev_queue) return NULL; - return dev_queue->qdisc_sleeping; + return rtnl_dereference(dev_queue->qdisc_sleeping); } static unsigned long taprio_find(struct Qdisc *sch, u32 classid) @@ -2069,7 +2069,7 @@ tcm->tcm_parent = TC_H_ROOT; tcm->tcm_handle |= TC_H_MIN(cl); - tcm->tcm_info = dev_queue->qdisc_sleeping->handle; + tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle; return 0; 
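The surrounding sch_api/sch_generic/sch_mq/sch_mqprio/taprio hunks are one mechanical conversion: qdisc_sleeping becomes an RCU-annotated pointer, writers publish it with rcu_assign_pointer(), and readers fetch it with rtnl_dereference() when they hold RTNL or rcu_dereference()/rcu_access_pointer() from read-side paths. The sketch below is only a loose userspace analogue of that reader/writer split, using C11 atomics plus a mutex standing in for RTNL; all demo_* names are hypothetical, and kernel RCU provides object-lifetime guarantees this sketch does not.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_qdisc { int handle; };

static pthread_mutex_t demo_rtnl = PTHREAD_MUTEX_INITIALIZER; /* stands in for RTNL */
static _Atomic(struct demo_qdisc *) demo_sleeping;            /* stands in for qdisc_sleeping */

/* writer: runs with the lock held; release store ~ rcu_assign_pointer() */
static void demo_graft(struct demo_qdisc *newq)
{
        pthread_mutex_lock(&demo_rtnl);
        atomic_store_explicit(&demo_sleeping, newq, memory_order_release);
        pthread_mutex_unlock(&demo_rtnl);
}

/* control-path reader: holds the lock, like rtnl_dereference() callers */
static struct demo_qdisc *demo_read_locked(void)
{
        struct demo_qdisc *q;

        pthread_mutex_lock(&demo_rtnl);
        q = atomic_load_explicit(&demo_sleeping, memory_order_relaxed);
        pthread_mutex_unlock(&demo_rtnl);
        return q;
}

/* lockless reader: acquire load ~ rcu_dereference() inside rcu_read_lock() */
static struct demo_qdisc *demo_read_lockless(void)
{
        return atomic_load_explicit(&demo_sleeping, memory_order_acquire);
}

int main(void)
{
        static struct demo_qdisc q = { .handle = 1 };

        demo_graft(&q);
        printf("locked handle=%d lockless handle=%d\n",
               demo_read_locked()->handle, demo_read_lockless()->handle);
        return 0;
}

The point of the annotation is the same in both worlds: once the pointer is marked, every reader has to state which protection it relies on, which is what allows the unprotected qdisc_sleeping accesses cleaned up in these hunks to be flagged by the kernel's static and runtime checkers.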
} @@ -2081,7 +2081,7 @@ { struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); - sch = dev_queue->qdisc_sleeping; + sch = rtnl_dereference(dev_queue->qdisc_sleeping); if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 || qdisc_qstats_copy(d, sch) < 0) return -1; only in patch2: unchanged: --- linux-6.2.0.orig/net/sched/sch_teql.c +++ linux-6.2.0/net/sched/sch_teql.c @@ -297,7 +297,7 @@ struct net_device *slave = qdisc_dev(q); struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0); - if (slave_txq->qdisc_sleeping != q) + if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q) continue; if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) || !netif_running(slave)) { only in patch2: unchanged: --- linux-6.2.0.orig/net/wireless/core.c +++ linux-6.2.0/net/wireless/core.c @@ -368,12 +368,12 @@ rdev = container_of(work, struct cfg80211_registered_device, sched_scan_stop_wk); - rtnl_lock(); + wiphy_lock(&rdev->wiphy); list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { if (req->nl_owner_dead) cfg80211_stop_sched_scan_req(rdev, req, false); } - rtnl_unlock(); + wiphy_unlock(&rdev->wiphy); } static void cfg80211_propagate_radar_detect_wk(struct work_struct *work) only in patch2: unchanged: --- linux-6.2.0.orig/sound/core/oss/pcm_plugin.h +++ linux-6.2.0/sound/core/oss/pcm_plugin.h @@ -141,6 +141,14 @@ void *snd_pcm_plug_buf_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t size); void snd_pcm_plug_buf_unlock(struct snd_pcm_substream *plug, void *ptr); +#else + +static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; } +static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; } +static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; } + +#endif + snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const char *ptr, snd_pcm_uframes_t size, int in_kernel); @@ -151,14 +159,6 @@ snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames); -#else - -static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; } -static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; } -static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; } - -#endif - #ifdef PLUGIN_DEBUG #define pdprintf(fmt, args...) 
printk(KERN_DEBUG "plugin: " fmt, ##args) #else only in patch2: unchanged: --- linux-6.2.0.orig/sound/isa/gus/gus_pcm.c +++ linux-6.2.0/sound/isa/gus/gus_pcm.c @@ -892,10 +892,10 @@ kctl = snd_ctl_new1(&snd_gf1_pcm_volume_control1, gus); else kctl = snd_ctl_new1(&snd_gf1_pcm_volume_control, gus); + kctl->id.index = control_index; err = snd_ctl_add(card, kctl); if (err < 0) return err; - kctl->id.index = control_index; return 0; } only in patch2: unchanged: --- linux-6.2.0.orig/sound/pci/cmipci.c +++ linux-6.2.0/sound/pci/cmipci.c @@ -2688,20 +2688,20 @@ } if (cm->can_ac3_hw) { kctl = snd_ctl_new1(&snd_cmipci_spdif_default, cm); + kctl->id.device = pcm_spdif_device; err = snd_ctl_add(card, kctl); if (err < 0) return err; - kctl->id.device = pcm_spdif_device; kctl = snd_ctl_new1(&snd_cmipci_spdif_mask, cm); + kctl->id.device = pcm_spdif_device; err = snd_ctl_add(card, kctl); if (err < 0) return err; - kctl->id.device = pcm_spdif_device; kctl = snd_ctl_new1(&snd_cmipci_spdif_stream, cm); + kctl->id.device = pcm_spdif_device; err = snd_ctl_add(card, kctl); if (err < 0) return err; - kctl->id.device = pcm_spdif_device; } if (cm->chip_version <= 37) { sw = snd_cmipci_old_mixer_switches; only in patch2: unchanged: --- linux-6.2.0.orig/sound/pci/ice1712/ice1712.c +++ linux-6.2.0/sound/pci/ice1712/ice1712.c @@ -2371,22 +2371,26 @@ if (snd_BUG_ON(!ice->pcm_pro)) return -EIO; - err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_default, ice)); + kctl = snd_ctl_new1(&snd_ice1712_spdif_default, ice); + kctl->id.device = ice->pcm_pro->device; + err = snd_ctl_add(ice->card, kctl); if (err < 0) return err; + kctl = snd_ctl_new1(&snd_ice1712_spdif_maskc, ice); kctl->id.device = ice->pcm_pro->device; - err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_maskc, ice)); + err = snd_ctl_add(ice->card, kctl); if (err < 0) return err; + kctl = snd_ctl_new1(&snd_ice1712_spdif_maskp, ice); kctl->id.device = ice->pcm_pro->device; - err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_maskp, ice)); + err = snd_ctl_add(ice->card, kctl); if (err < 0) return err; + kctl = snd_ctl_new1(&snd_ice1712_spdif_stream, ice); kctl->id.device = ice->pcm_pro->device; - err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_stream, ice)); + err = snd_ctl_add(ice->card, kctl); if (err < 0) return err; - kctl->id.device = ice->pcm_pro->device; ice->spdif.stream_ctl = kctl; return 0; } only in patch2: unchanged: --- linux-6.2.0.orig/sound/pci/ice1712/ice1724.c +++ linux-6.2.0/sound/pci/ice1712/ice1724.c @@ -2392,23 +2392,27 @@ if (err < 0) return err; - err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_default, ice)); + kctl = snd_ctl_new1(&snd_vt1724_spdif_default, ice); + kctl->id.device = ice->pcm->device; + err = snd_ctl_add(ice->card, kctl); if (err < 0) return err; + kctl = snd_ctl_new1(&snd_vt1724_spdif_maskc, ice); kctl->id.device = ice->pcm->device; - err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskc, ice)); + err = snd_ctl_add(ice->card, kctl); if (err < 0) return err; + kctl = snd_ctl_new1(&snd_vt1724_spdif_maskp, ice); kctl->id.device = ice->pcm->device; - err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskp, ice)); + err = snd_ctl_add(ice->card, kctl); if (err < 0) return err; - kctl->id.device = ice->pcm->device; #if 0 /* use default only */ - err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_stream, ice)); + kctl = snd_ctl_new1(&snd_vt1724_spdif_stream, ice); + kctl->id.device = 
ice->pcm->device; + err = snd_ctl_add(ice->card, kctl); if (err < 0) return err; - kctl->id.device = ice->pcm->device; ice->spdif.stream_ctl = kctl; #endif return 0; only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/codecs/ssm2602.c +++ linux-6.2.0/sound/soc/codecs/ssm2602.c @@ -53,6 +53,18 @@ { .reg = 0x09, .def = 0x0000 } }; +/* + * ssm2602 register patch + * Workaround for playback distortions after power up: activates digital + * core, and then powers on output, DAC, and whole chip at the same time + */ + +static const struct reg_sequence ssm2602_patch[] = { + { SSM2602_ACTIVE, 0x01 }, + { SSM2602_PWR, 0x07 }, + { SSM2602_RESET, 0x00 }, +}; + /*Appending several "None"s just for OSS mixer use*/ static const char *ssm2602_input_select[] = { @@ -589,6 +601,9 @@ return ret; } + regmap_register_patch(ssm2602->regmap, ssm2602_patch, + ARRAY_SIZE(ssm2602_patch)); + /* set the update bits */ regmap_update_bits(ssm2602->regmap, SSM2602_LINVOL, LINVOL_LRIN_BOTH, LINVOL_LRIN_BOTH); only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/dwc/dwc-i2s.c +++ linux-6.2.0/sound/soc/dwc/dwc-i2s.c @@ -132,13 +132,13 @@ /* Error Handling: TX */ if (isr[i] & ISR_TXFO) { - dev_err(dev->dev, "TX overrun (ch_id=%d)\n", i); + dev_err_ratelimited(dev->dev, "TX overrun (ch_id=%d)\n", i); irq_valid = true; } /* Error Handling: TX */ if (isr[i] & ISR_RXFO) { - dev_err(dev->dev, "RX overrun (ch_id=%d)\n", i); + dev_err_ratelimited(dev->dev, "RX overrun (ch_id=%d)\n", i); irq_valid = true; } } only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/generic/simple-card-utils.c +++ linux-6.2.0/sound/soc/generic/simple-card-utils.c @@ -314,7 +314,7 @@ } ret = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_RATE, fixed_rate, fixed_rate); - if (ret) + if (ret < 0) goto codec_err; } only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/intel/common/soc-acpi-intel-cht-match.c +++ linux-6.2.0/sound/soc/intel/common/soc-acpi-intel-cht-match.c @@ -50,6 +50,31 @@ return mach; } +/* + * Some tablets with Android factory OS have buggy DSDTs with an ESSX8316 device + * in the ACPI tables. While they are not using an ESS8316 codec. These DSDTs + * also have an ACPI device for the correct codec, ignore the ESSX8316. 
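The gus_pcm, cmipci, ice1712 and ice1724 hunks above all make the same ordering change: fill in kctl->id.device (or .index) before calling snd_ctl_add(), because the add call registers and announces the control with whatever ID it carries at that moment. The toy example below only illustrates that ordering rule; demo_ctl, demo_ctl_add() and the field names are hypothetical stand-ins, not the ALSA control API.

#include <stdio.h>

struct demo_ctl_id { int device; int index; };
struct demo_ctl { struct demo_ctl_id id; const char *name; };

/* stands in for snd_ctl_add(): the control becomes visible here */
static int demo_ctl_add(const struct demo_ctl *ctl)
{
        printf("registered \"%s\" (device=%d index=%d)\n",
               ctl->name, ctl->id.device, ctl->id.index);
        return 0;
}

int main(void)
{
        struct demo_ctl spdif = { .name = "IEC958 Playback Default" };

        /* set the ID fields first ... */
        spdif.id.device = 1;
        spdif.id.index = 0;
        /* ... then register, mirroring the reordered driver code above */
        return demo_ctl_add(&spdif);
}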
+ */ +static const struct dmi_system_id cht_ess8316_not_present_table[] = { + { + /* Nextbook Ares 8A */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"), + DMI_MATCH(DMI_BIOS_VERSION, "M882"), + }, + }, + { } +}; + +static struct snd_soc_acpi_mach *cht_ess8316_quirk(void *arg) +{ + if (dmi_check_system(cht_ess8316_not_present_table)) + return NULL; + + return arg; +} + static const struct snd_soc_acpi_codecs rt5640_comp_ids = { .num_codecs = 2, .codecs = { "10EC5640", "10EC3276" }, @@ -113,6 +138,7 @@ .drv_name = "bytcht_es8316", .fw_filename = "intel/fw_sst_22a8.bin", .board = "bytcht_es8316", + .machine_quirk = cht_ess8316_quirk, .sof_tplg_filename = "sof-cht-es8316.tplg", }, /* some CHT-T platforms rely on RT5640, use Baytrail machine driver */ only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/mediatek/mt8195/mt8195-afe-clk.c +++ linux-6.2.0/sound/soc/mediatek/mt8195/mt8195-afe-clk.c @@ -410,11 +410,6 @@ return 0; } -void mt8195_afe_deinit_clock(struct mtk_base_afe *afe) -{ - mt8195_audsys_clk_unregister(afe); -} - int mt8195_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk) { int ret; only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/mediatek/mt8195/mt8195-afe-clk.h +++ linux-6.2.0/sound/soc/mediatek/mt8195/mt8195-afe-clk.h @@ -101,7 +101,6 @@ int mt8195_afe_get_mclk_source_rate(struct mtk_base_afe *afe, int apll); int mt8195_afe_get_default_mclk_source_by_rate(int rate); int mt8195_afe_init_clock(struct mtk_base_afe *afe); -void mt8195_afe_deinit_clock(struct mtk_base_afe *afe); int mt8195_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk); void mt8195_afe_disable_clk(struct mtk_base_afe *afe, struct clk *clk); int mt8195_afe_prepare_clk(struct mtk_base_afe *afe, struct clk *clk); only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c +++ linux-6.2.0/sound/soc/mediatek/mt8195/mt8195-afe-pcm.c @@ -3253,18 +3253,13 @@ return ret; } -static int mt8195_afe_pcm_dev_remove(struct platform_device *pdev) +static void mt8195_afe_pcm_dev_remove(struct platform_device *pdev) { - struct mtk_base_afe *afe = platform_get_drvdata(pdev); - snd_soc_unregister_component(&pdev->dev); pm_runtime_disable(&pdev->dev); if (!pm_runtime_status_suspended(&pdev->dev)) mt8195_afe_runtime_suspend(&pdev->dev); - - mt8195_afe_deinit_clock(afe); - return 0; } static const struct of_device_id mt8195_afe_pcm_dt_match[] = { @@ -3285,7 +3280,7 @@ .pm = &mt8195_afe_pm_ops, }, .probe = mt8195_afe_pcm_dev_probe, - .remove = mt8195_afe_pcm_dev_remove, + .remove_new = mt8195_afe_pcm_dev_remove, }; module_platform_driver(mt8195_afe_pcm_driver); only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/mediatek/mt8195/mt8195-audsys-clk.c +++ linux-6.2.0/sound/soc/mediatek/mt8195/mt8195-audsys-clk.c @@ -148,6 +148,29 @@ GATE_AUD6(CLK_AUD_GASRC19, "aud_gasrc19", "top_asm_h", 19), }; +static void mt8195_audsys_clk_unregister(void *data) +{ + struct mtk_base_afe *afe = data; + struct mt8195_afe_private *afe_priv = afe->platform_priv; + struct clk *clk; + struct clk_lookup *cl; + int i; + + if (!afe_priv) + return; + + for (i = 0; i < CLK_AUD_NR_CLK; i++) { + cl = afe_priv->lookup[i]; + if (!cl) + continue; + + clk = cl->clk; + clk_unregister_gate(clk); + + clkdev_drop(cl); + } +} + int mt8195_audsys_clk_register(struct mtk_base_afe *afe) { struct mt8195_afe_private *afe_priv = afe->platform_priv; @@ -188,27 +211,5 @@ afe_priv->lookup[i] = cl; } - return 0; -} - -void mt8195_audsys_clk_unregister(struct mtk_base_afe 
*afe) -{ - struct mt8195_afe_private *afe_priv = afe->platform_priv; - struct clk *clk; - struct clk_lookup *cl; - int i; - - if (!afe_priv) - return; - - for (i = 0; i < CLK_AUD_NR_CLK; i++) { - cl = afe_priv->lookup[i]; - if (!cl) - continue; - - clk = cl->clk; - clk_unregister_gate(clk); - - clkdev_drop(cl); - } + return devm_add_action_or_reset(afe->dev, mt8195_audsys_clk_unregister, afe); } only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/mediatek/mt8195/mt8195-audsys-clk.h +++ linux-6.2.0/sound/soc/mediatek/mt8195/mt8195-audsys-clk.h @@ -10,6 +10,5 @@ #define _MT8195_AUDSYS_CLK_H_ int mt8195_audsys_clk_register(struct mtk_base_afe *afe); -void mt8195_audsys_clk_unregister(struct mtk_base_afe *afe); #endif only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/sof/debug.c +++ linux-6.2.0/sound/soc/sof/debug.c @@ -437,8 +437,8 @@ /* should we prevent DSP entering D3 ? */ if (!sdev->ipc_dump_printed) dev_info(sdev->dev, - "preventing DSP entering D3 state to preserve context\n"); - pm_runtime_get_noresume(sdev->dev); + "Attempting to prevent DSP from entering D3 state to preserve context\n"); + pm_runtime_get_if_in_use(sdev->dev); } /* dump vital information to the logs */ only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/sof/pcm.c +++ linux-6.2.0/sound/soc/sof/pcm.c @@ -619,16 +619,17 @@ "%s/%s", plat_data->tplg_filename_prefix, plat_data->tplg_filename); - if (!tplg_filename) - return -ENOMEM; + if (!tplg_filename) { + ret = -ENOMEM; + goto pm_error; + } ret = snd_sof_load_topology(component, tplg_filename); - if (ret < 0) { + if (ret < 0) dev_err(component->dev, "error: failed to load DSP topology %d\n", ret); - return ret; - } +pm_error: pm_runtime_mark_last_busy(component->dev); pm_runtime_put_autosuspend(component->dev); only in patch2: unchanged: --- linux-6.2.0.orig/sound/soc/sof/sof-client-probes.c +++ linux-6.2.0/sound/soc/sof/sof-client-probes.c @@ -218,12 +218,7 @@ ret = ipc->points_info(cdev, &desc, &num_desc); if (ret < 0) - goto exit; - - pm_runtime_mark_last_busy(dev); - err = pm_runtime_put_autosuspend(dev); - if (err < 0) - dev_err_ratelimited(dev, "debugfs read failed to idle %d\n", err); + goto pm_error; for (i = 0; i < num_desc; i++) { offset = strlen(buf); @@ -241,6 +236,13 @@ ret = simple_read_from_buffer(to, count, ppos, buf, strlen(buf)); kfree(desc); + +pm_error: + pm_runtime_mark_last_busy(dev); + err = pm_runtime_put_autosuspend(dev); + if (err < 0) + dev_err_ratelimited(dev, "debugfs read failed to idle %d\n", err); + exit: kfree(buf); return ret; only in patch2: unchanged: --- linux-6.2.0.orig/tools/arch/x86/include/asm/cpufeatures.h +++ linux-6.2.0/tools/arch/x86/include/asm/cpufeatures.h @@ -13,8 +13,8 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 20 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NCAPINTS 21 /* N 32-bit words worth of info */ +#define NBUGINTS 2 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used only in patch2: unchanged: --- linux-6.2.0.orig/tools/arch/x86/include/asm/disabled-features.h +++ linux-6.2.0/tools/arch/x86/include/asm/disabled-features.h @@ -124,6 +124,7 @@ #define DISABLED_MASK17 0 #define DISABLED_MASK18 0 #define DISABLED_MASK19 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20) +#define DISABLED_MASK20 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) #endif /* _ASM_X86_DISABLED_FEATURES_H */ only in patch2: unchanged: --- 
linux-6.2.0.orig/tools/arch/x86/include/asm/required-features.h +++ linux-6.2.0/tools/arch/x86/include/asm/required-features.h @@ -98,6 +98,7 @@ #define REQUIRED_MASK17 0 #define REQUIRED_MASK18 0 #define REQUIRED_MASK19 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20) +#define REQUIRED_MASK20 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ only in patch2: unchanged: --- linux-6.2.0.orig/tools/objtool/arch/x86/decode.c +++ linux-6.2.0/tools/objtool/arch/x86/decode.c @@ -827,3 +827,9 @@ { return !strcmp(sym->name, "__x86_return_thunk"); } + +bool arch_is_embedded_insn(struct symbol *sym) +{ + return !strcmp(sym->name, "retbleed_return_thunk") || + !strcmp(sym->name, "srso_safe_ret"); +} only in patch2: unchanged: --- linux-6.2.0.orig/tools/objtool/include/objtool/arch.h +++ linux-6.2.0/tools/objtool/include/objtool/arch.h @@ -92,6 +92,7 @@ bool arch_is_retpoline(struct symbol *sym); bool arch_is_rethunk(struct symbol *sym); +bool arch_is_embedded_insn(struct symbol *sym); int arch_rewrite_retpolines(struct objtool_file *file); only in patch2: unchanged: --- linux-6.2.0.orig/tools/objtool/include/objtool/elf.h +++ linux-6.2.0/tools/objtool/include/objtool/elf.h @@ -61,6 +61,7 @@ u8 return_thunk : 1; u8 fentry : 1; u8 profiling_func : 1; + u8 embedded_insn : 1; struct list_head pv_target; struct list_head reloc_list; }; only in patch2: unchanged: --- linux-6.2.0.orig/tools/perf/util/thread-stack.c +++ linux-6.2.0/tools/perf/util/thread-stack.c @@ -1037,9 +1037,7 @@ static bool is_x86_retpoline(const char *name) { - const char *p = strstr(name, "__x86_indirect_thunk_"); - - return p == name || !strcmp(name, "__indirect_thunk_start"); + return strstr(name, "__x86_indirect_thunk_") == name; } /* only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +++ linux-6.2.0/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -3,6 +3,7 @@ #include "cgroup_helpers.h" #include +#include #include "sockopt_sk.skel.h" #ifndef SOL_TCP @@ -183,6 +184,33 @@ goto err; } + /* optval=NULL case is handled correctly */ + + close(fd); + fd = socket(AF_NETLINK, SOCK_RAW, 0); + if (fd < 0) { + log_err("Failed to create AF_NETLINK socket"); + return -1; + } + + buf.u32 = 1; + optlen = sizeof(__u32); + err = setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &buf, optlen); + if (err) { + log_err("Unexpected getsockopt(NETLINK_ADD_MEMBERSHIP) err=%d errno=%d", + err, errno); + goto err; + } + + optlen = 0; + err = getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen); + if (err) { + log_err("Unexpected getsockopt(NETLINK_LIST_MEMBERSHIPS) err=%d errno=%d", + err, errno); + goto err; + } + ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value"); + free(big_buf); close(fd); return 0; only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/bpf/progs/sockopt_sk.c +++ linux-6.2.0/tools/testing/selftests/bpf/progs/sockopt_sk.c @@ -32,6 +32,12 @@ __u8 *optval_end = ctx->optval_end; __u8 *optval = ctx->optval; struct sockopt_sk *storage; + struct bpf_sock *sk; + + /* Bypass AF_NETLINK. */ + sk = ctx->sk; + if (sk && sk->family == AF_NETLINK) + return 1; /* Make sure bpf_get_netns_cookie is callable. */ @@ -131,6 +137,12 @@ __u8 *optval_end = ctx->optval_end; __u8 *optval = ctx->optval; struct sockopt_sk *storage; + struct bpf_sock *sk; + + /* Bypass AF_NETLINK. 
*/ + sk = ctx->sk; + if (sk && sk->family == AF_NETLINK) + return 1; /* Make sure bpf_get_netns_cookie is callable. */ only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/net/mptcp/Makefile +++ linux-6.2.0/tools/testing/selftests/net/mptcp/Makefile @@ -9,7 +9,7 @@ TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq -TEST_FILES := settings +TEST_FILES := mptcp_lib.sh settings EXTRA_CLEAN := *.pcap only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/net/mptcp/diag.sh +++ linux-6.2.0/tools/testing/selftests/net/mptcp/diag.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + sec=$(date +%s) rndh=$(printf %x $sec)-$(mktemp -u XXXXXX) ns="ns1-$rndh" @@ -26,6 +28,8 @@ ip netns del $ns } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Could not run test without ip tool" only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/net/mptcp/mptcp_connect.sh +++ linux-6.2.0/tools/testing/selftests/net/mptcp/mptcp_connect.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + time_start=$(date +%s) optstring="S:R:d:e:l:r:h4cm:f:tC" @@ -141,6 +143,8 @@ done } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Could not run test without ip tool" only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ linux-6.2.0/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -6,6 +6,8 @@ # address all other issues detected by shellcheck. #shellcheck disable=SC2086 +. "$(dirname "${0}")/mptcp_lib.sh" + ret=0 sin="" sinfail="" @@ -13,6 +15,7 @@ cin="" cinfail="" cinsent="" +tmpfile="" cout="" capout="" ns1="" @@ -132,6 +135,8 @@ check_tools() { + mptcp_lib_check_mptcp + if ! 
ip -Version &> /dev/null; then echo "SKIP: Could not run test without ip tool" exit $ksft_skip @@ -171,6 +176,7 @@ { rm -f "$cin" "$cout" "$sinfail" rm -f "$sin" "$sout" "$cinsent" "$cinfail" + rm -f "$tmpfile" rm -rf $evts_ns1 $evts_ns2 cleanup_partial } @@ -378,9 +384,16 @@ fail_test return 1 fi - bytes="--bytes=${bytes}" + + # note: BusyBox's "cmp" command doesn't support --bytes + tmpfile=$(mktemp) + head --bytes="$bytes" "$in" > "$tmpfile" + mv "$tmpfile" "$in" + head --bytes="$bytes" "$out" > "$tmpfile" + mv "$tmpfile" "$out" + tmpfile="" fi - cmp -l "$in" "$out" ${bytes} | while read -r i a b; do + cmp -l "$in" "$out" | while read -r i a b; do local sum=$((0${a} + 0${b})) if [ $check_invert -eq 0 ] || [ $sum -ne $((0xff)) ]; then echo "[ FAIL ] $what does not match (in, out):" @@ -850,7 +863,15 @@ sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q') ip netns exec ${listener_ns} ./pm_nl_ctl ann $addr token $tk id $id sleep 1 + sp=$(grep "type:10" "$evts_ns1" | + sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q') + da=$(grep "type:10" "$evts_ns1" | + sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q') + dp=$(grep "type:10" "$evts_ns1" | + sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q') ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id + ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "::ffff:$addr" \ + lport $sp rip $da rport $dp token $tk fi counter=$((counter + 1)) @@ -916,6 +937,7 @@ sleep 1 sp=$(grep "type:10" "$evts_ns2" | sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q') + ip netns exec ${connector_ns} ./pm_nl_ctl rem token $tk id $id ip netns exec ${connector_ns} ./pm_nl_ctl dsf lip $addr lport $sp \ rip $da rport $dp token $tk fi @@ -3049,7 +3071,7 @@ pm_nl_set_limits $ns1 0 1 run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow chk_join_nr 1 1 1 - chk_rm_nr 0 1 + chk_rm_nr 1 1 kill_events_pids fi } only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/net/mptcp/mptcp_lib.sh +++ linux-6.2.0/tools/testing/selftests/net/mptcp/mptcp_lib.sh @@ -0,0 +1,40 @@ +#! /bin/bash +# SPDX-License-Identifier: GPL-2.0 + +readonly KSFT_FAIL=1 +readonly KSFT_SKIP=4 + +# SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES env var can be set when validating all +# features using the last version of the kernel and the selftests to make sure +# a test is not being skipped by mistake. +mptcp_lib_expect_all_features() { + [ "${SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES:-}" = "1" ] +} + +# $1: msg +mptcp_lib_fail_if_expected_feature() { + if mptcp_lib_expect_all_features; then + echo "ERROR: missing feature: ${*}" + exit ${KSFT_FAIL} + fi + + return 1 +} + +# $1: file +mptcp_lib_has_file() { + local f="${1}" + + if [ -f "${f}" ]; then + return 0 + fi + + mptcp_lib_fail_if_expected_feature "${f} file not found" +} + +mptcp_lib_check_mptcp() { + if ! mptcp_lib_has_file "/proc/sys/net/mptcp/enabled"; then + echo "SKIP: MPTCP support is not available" + exit ${KSFT_SKIP} + fi +} only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh +++ linux-6.2.0/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + ret=0 sin="" sout="" @@ -84,6 +86,8 @@ rm -f "$sin" "$sout" } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? 
-ne 0 ];then echo "SKIP: Could not run test without ip tool" only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/net/mptcp/pm_netlink.sh +++ linux-6.2.0/tools/testing/selftests/net/mptcp/pm_netlink.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + ksft_skip=4 ret=0 @@ -34,6 +36,8 @@ ip netns del $ns1 } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Could not run test without ip tool" only in patch2: unchanged: --- linux-6.2.0.orig/tools/testing/selftests/net/mptcp/simult_flows.sh +++ linux-6.2.0/tools/testing/selftests/net/mptcp/simult_flows.sh @@ -1,6 +1,8 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/mptcp_lib.sh" + sec=$(date +%s) rndh=$(printf %x $sec)-$(mktemp -u XXXXXX) ns1="ns1-$rndh" @@ -34,6 +36,8 @@ done } +mptcp_lib_check_mptcp + ip -Version > /dev/null 2>&1 if [ $? -ne 0 ];then echo "SKIP: Could not run test without ip tool"