diff -u linux-aws-5.15-5.15.0/Documentation/admin-guide/kernel-parameters.txt linux-aws-5.15-5.15.0/Documentation/admin-guide/kernel-parameters.txt --- linux-aws-5.15-5.15.0/Documentation/admin-guide/kernel-parameters.txt +++ linux-aws-5.15-5.15.0/Documentation/admin-guide/kernel-parameters.txt @@ -1100,8 +1100,12 @@ nopku [X86] Disable Memory Protection Keys CPU feature found in some Intel CPUs. - <module>.async_probe [KNL] - Enable asynchronous probe on this module. + <module>.async_probe[=<bool>] [KNL] + If no <bool> value is specified or if the value + specified is not a valid <bool>, enable asynchronous + probe on this module. Otherwise, enable/disable + asynchronous probe on this module as indicated by the + <bool> value. See also: module.async_probe early_ioremap_debug [KNL] Enable debug messages in early_ioremap support. This @@ -3155,6 +3159,15 @@ For details see: Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst + module.async_probe=<bool> + [KNL] When set to true, modules will use async probing + by default. To enable/disable async probing for a + specific module, use the module specific control that + is documented under <module>.async_probe. When both + module.async_probe and <module>.async_probe are + specified, <module>.async_probe takes precedence for + the specific module. + module.sig_enforce [KNL] When CONFIG_MODULE_SIG is set, this means that modules without (valid) signatures will fail to load. diff -u linux-aws-5.15-5.15.0/Makefile linux-aws-5.15-5.15.0/Makefile --- linux-aws-5.15-5.15.0/Makefile +++ linux-aws-5.15-5.15.0/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 15 -SUBLEVEL = 149 +SUBLEVEL = 152 EXTRAVERSION = NAME = Trick or Treat diff -u linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts --- linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts +++ linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts @@ -26,7 +26,6 @@ wlan { label = "bcm53xx:blue:wlan"; gpios = <&chipcommon 10 GPIO_ACTIVE_LOW>; - linux,default-trigger = "default-off"; }; system { diff -u linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts --- linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts +++ linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts @@ -26,7 +26,6 @@ 5ghz { label = "bcm53xx:blue:5ghz"; gpios = <&chipcommon 11 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "default-off"; }; system { @@ -42,7 +41,6 @@ 2ghz { label = "bcm53xx:blue:2ghz"; gpios = <&pcie0_chipcommon 3 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "default-off"; }; }; diff -u linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm53573.dtsi linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm53573.dtsi --- linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm53573.dtsi +++ linux-aws-5.15-5.15.0/arch/arm/boot/dts/bcm53573.dtsi @@ -159,8 +159,6 @@ }; ohci: usb@d000 { - #usb-cells = <0>; - compatible = "generic-ohci"; reg = <0xd000 0x1000>; interrupt-parent = <&gic>; @@ -183,6 +181,24 @@ gmac0: ethernet@5000 { reg = <0x5000 0x1000>; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + switch: switch@1e { + compatible = "brcm,bcm53125"; + reg = <0x1e>; + + status = "disabled"; + + /* ports are defined in board DTS */ + ports { + #address-cells = <1>; + #size-cells = <0>; + }; + }; + }; }; gmac1: ethernet@b000 { diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
linux-aws-5.15-5.15.0/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi @@ -180,9 +180,6 @@ brcm,num-gphy = <5>; brcm,num-rgmii-ports = <2>; - #address-cells = <1>; - #size-cells = <0>; - ports: ports { #address-cells = <1>; #size-cells = <0>; diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts linux-aws-5.15-5.15.0/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-s.dts @@ -191,8 +191,10 @@ }; &usdhc2 { - pinctrl-names = "default"; + pinctrl-names = "default", "state_100mhz", "state_200mhz"; pinctrl-0 = <&pinctrl_usdhc2>; + pinctrl-1 = <&pinctrl_usdhc2_100mhz>; + pinctrl-2 = <&pinctrl_usdhc2_200mhz>; vmmc-supply = <®_vdd_3v3>; vqmmc-supply = <®_nvcc_sd>; cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; @@ -275,8 +277,8 @@ pinctrl_i2c4: i2c4grp { fsl,pins = < - MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3 - MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3 + MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000083 + MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x40000083 >; }; @@ -288,19 +290,19 @@ pinctrl_uart1: uart1grp { fsl,pins = < - MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x140 - MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x140 - MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x140 - MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x140 + MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x0 + MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x0 + MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x0 + MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x0 >; }; pinctrl_uart2: uart2grp { fsl,pins = < - MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x140 - MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x140 - MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x140 - MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x140 + MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x0 + MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x0 + MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x0 + MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x0 >; }; @@ -312,13 +314,40 @@ pinctrl_usdhc2: usdhc2grp { fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190 + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90 MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0 MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0 MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0 MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0 MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0 - MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019 + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0 + >; + }; + + pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { + fsl,pins = < + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94 + MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4 + MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4 + MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4 + MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4 + MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4 + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0 + >; + }; + + pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { + fsl,pins = < + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96 + MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6 + MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6 + MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6 + MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6 + MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6 + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0 >; }; }; diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi 
linux-aws-5.15-5.15.0/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi @@ -40,17 +40,6 @@ gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>; status = "okay"; }; - - reg_usb_otg1_vbus: regulator-usb-otg1 { - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_reg_usb1_en>; - compatible = "regulator-fixed"; - regulator-name = "usb_otg1_vbus"; - gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>; - enable-active-high; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - }; }; /* off-board header */ @@ -102,9 +91,10 @@ }; &usbotg1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbotg1>; dr_mode = "otg"; over-current-active-low; - vbus-supply = <®_usb_otg1_vbus>; status = "okay"; }; @@ -156,14 +146,6 @@ >; }; - pinctrl_reg_usb1_en: regusb1grp { - fsl,pins = < - MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x41 - MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141 - MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41 - >; - }; - pinctrl_spi2: spi2grp { fsl,pins = < MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 @@ -188,2 +170,9 @@ }; + + pinctrl_usbotg1: usbotg1grp { + fsl,pins = < + MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141 + MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41 + >; + }; }; diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/marvell/armada-37xx.dtsi linux-aws-5.15-5.15.0/arch/arm64/boot/dts/marvell/armada-37xx.dtsi --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -414,14 +414,14 @@ crypto: crypto@90000 { compatible = "inside-secure,safexcel-eip97ies"; reg = <0x90000 0x20000>; - interrupts = , - , + interrupts = , , , , - ; - interrupt-names = "mem", "ring0", "ring1", - "ring2", "ring3", "eip"; + , + ; + interrupt-names = "ring0", "ring1", "ring2", + "ring3", "eip", "mem"; clocks = <&nb_periph_clk 15>; }; diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts @@ -71,6 +71,7 @@ memory@40000000 { reg = <0 0x40000000 0 0x40000000>; + device_type = "memory"; }; reg_1p8v: regulator-1p8v { diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts @@ -57,6 +57,7 @@ memory@40000000 { reg = <0 0x40000000 0 0x20000000>; + device_type = "memory"; }; reg_1p8v: regulator-1p8v { diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi @@ -4,6 +4,8 @@ */ #include "mt8183-kukui.dtsi" +/* Must come after mt8183-kukui.dtsi to modify cros_ec */ +#include / { panel: panel { diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi +++ 
linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi @@ -828,8 +828,18 @@ google,usb-port-id = <0>; }; - cbas { - compatible = "google,cros-cbas"; + typec { + compatible = "google,cros-ec-typec"; + #address-cells = <1>; + #size-cells = <0>; + + usb_c0: connector@0 { + compatible = "usb-c-connector"; + reg = <0>; + power-role = "dual"; + data-role = "host"; + try-power-role = "sink"; + }; }; }; }; @@ -919,3 +929,2 @@ -#include #include diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/renesas/r8a779a0.dtsi linux-aws-5.15-5.15.0/arch/arm64/boot/dts/renesas/r8a779a0.dtsi --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/renesas/r8a779a0.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/renesas/r8a779a0.dtsi @@ -583,8 +583,8 @@ avb0: ethernet@e6800000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; - reg = <0 0xe6800000 0 0x800>; + "renesas,etheravb-rcar-gen4"; + reg = <0 0xe6800000 0 0x1000>; interrupts = , , , @@ -631,8 +631,8 @@ avb1: ethernet@e6810000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; - reg = <0 0xe6810000 0 0x800>; + "renesas,etheravb-rcar-gen4"; + reg = <0 0xe6810000 0 0x1000>; interrupts = , , , @@ -679,7 +679,7 @@ avb2: ethernet@e6820000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; + "renesas,etheravb-rcar-gen4"; reg = <0 0xe6820000 0 0x1000>; interrupts = , , @@ -727,7 +727,7 @@ avb3: ethernet@e6830000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; + "renesas,etheravb-rcar-gen4"; reg = <0 0xe6830000 0 0x1000>; interrupts = , , @@ -775,7 +775,7 @@ avb4: ethernet@e6840000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; + "renesas,etheravb-rcar-gen4"; reg = <0 0xe6840000 0 0x1000>; interrupts = , , @@ -823,7 +823,7 @@ avb5: ethernet@e6850000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; + "renesas,etheravb-rcar-gen4"; reg = <0 0xe6850000 0 0x1000>; interrupts = , , @@ -935,7 +935,7 @@ msiof0: spi@e6e90000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6e90000 0 0x0064>; interrupts = ; clocks = <&cpg CPG_MOD 618>; @@ -950,7 +950,7 @@ msiof1: spi@e6ea0000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6ea0000 0 0x0064>; interrupts = ; clocks = <&cpg CPG_MOD 619>; @@ -965,7 +965,7 @@ msiof2: spi@e6c00000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6c00000 0 0x0064>; interrupts = ; clocks = <&cpg CPG_MOD 620>; @@ -980,7 +980,7 @@ msiof3: spi@e6c10000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6c10000 0 0x0064>; interrupts = ; clocks = <&cpg CPG_MOD 621>; @@ -995,7 +995,7 @@ msiof4: spi@e6c20000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6c20000 0 0x0064>; interrupts = ; clocks = <&cpg CPG_MOD 622>; @@ -1010,7 +1010,7 @@ msiof5: spi@e6c28000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6c28000 0 0x0064>; interrupts = ; clocks = <&cpg CPG_MOD 623>; diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/rockchip/px30.dtsi linux-aws-5.15-5.15.0/arch/arm64/boot/dts/rockchip/px30.dtsi --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/rockchip/px30.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/rockchip/px30.dtsi @@ 
-585,6 +585,7 @@ clock-names = "spiclk", "apb_pclk"; dmas = <&dmac 12>, <&dmac 13>; dma-names = "tx", "rx"; + num-cs = <2>; pinctrl-names = "default"; pinctrl-0 = <&spi0_clk &spi0_csn &spi0_miso &spi0_mosi>; #address-cells = <1>; @@ -600,6 +601,7 @@ clock-names = "spiclk", "apb_pclk"; dmas = <&dmac 14>, <&dmac 15>; dma-names = "tx", "rx"; + num-cs = <2>; pinctrl-names = "default"; pinctrl-0 = <&spi1_clk &spi1_csn0 &spi1_csn1 &spi1_miso &spi1_mosi>; #address-cells = <1>; diff -u linux-aws-5.15-5.15.0/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi linux-aws-5.15-5.15.0/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi --- linux-aws-5.15-5.15.0/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -36,6 +36,29 @@ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; }; + sound { + compatible = "audio-graph-card"; + label = "Analog"; + dais = <&i2s0_p0>; + }; + + sound-dit { + compatible = "audio-graph-card"; + label = "SPDIF"; + dais = <&spdif_p0>; + }; + + spdif-dit { + compatible = "linux,spdif-dit"; + #sound-dai-cells = <0>; + + port { + dit_p0_0: endpoint { + remote-endpoint = <&spdif_p0_0>; + }; + }; + }; + vcc12v_dcin: dc-12v { compatible = "regulator-fixed"; regulator-name = "vcc12v_dcin"; @@ -98,24 +121,25 @@ vin-supply = <&vcc5v0_sys>; }; - vcc5v0_typec: vcc5v0-typec-regulator { + vbus_typec: vbus-typec-regulator { compatible = "regulator-fixed"; enable-active-high; gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&vcc5v0_typec_en>; - regulator-name = "vcc5v0_typec"; + regulator-name = "vbus_typec"; regulator-always-on; vin-supply = <&vcc5v0_sys>; }; - vcc_lan: vcc3v3-phy-regulator { + vcc3v3_lan: vcc3v3-lan-regulator { compatible = "regulator-fixed"; - regulator-name = "vcc_lan"; + regulator-name = "vcc3v3_lan"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; + vin-supply = <&vcc3v3_sys>; }; vdd_log: vdd-log { @@ -162,7 +186,7 @@ assigned-clocks = <&cru SCLK_RMII_SRC>; assigned-clock-parents = <&clkin_gmac>; clock_in_out = "input"; - phy-supply = <&vcc_lan>; + phy-supply = <&vcc3v3_lan>; phy-mode = "rgmii"; pinctrl-names = "default"; pinctrl-0 = <&rgmii_pins>; @@ -267,8 +291,8 @@ }; }; - vcc1v8_codec: LDO_REG1 { - regulator-name = "vcc1v8_codec"; + vcca1v8_codec: LDO_REG1 { + regulator-name = "vcca1v8_codec"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <1800000>; @@ -278,8 +302,8 @@ }; }; - vcc1v8_hdmi: LDO_REG2 { - regulator-name = "vcc1v8_hdmi"; + vcca1v8_hdmi: LDO_REG2 { + regulator-name = "vcca1v8_hdmi"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <1800000>; @@ -336,8 +360,8 @@ }; }; - vcc0v9_hdmi: LDO_REG7 { - regulator-name = "vcc0v9_hdmi"; + vcca0v9_hdmi: LDO_REG7 { + regulator-name = "vcca0v9_hdmi"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <900000>; @@ -422,6 +446,20 @@ i2c-scl-rising-time-ns = <300>; i2c-scl-falling-time-ns = <15>; status = "okay"; + + es8316: codec@11 { + compatible = "everest,es8316"; + reg = <0x11>; + clocks = <&cru SCLK_I2S_8CH_OUT>; + clock-names = "mclk"; + #sound-dai-cells = <0>; + + port { + es8316_p0_0: endpoint { + remote-endpoint = <&i2s0_p0_0>; + }; + }; + }; }; &i2c3 { @@ -441,6 +479,14 @@ rockchip,capture-channels = <2>; rockchip,playback-channels = <2>; status = "okay"; + + i2s0_p0: port { + i2s0_p0_0: endpoint { + dai-format = "i2s"; + mclk-fs = <256>; + remote-endpoint = <&es8316_p0_0>; + }; + }; }; 
&i2s1 { @@ -456,7 +502,7 @@ status = "okay"; bt656-supply = <&vcc_3v0>; - audio-supply = <&vcc1v8_codec>; + audio-supply = <&vcca1v8_codec>; sdmmc-supply = <&vcc_sdio>; gpio1830-supply = <&vcc_3v0>; }; @@ -602,6 +648,15 @@ status = "okay"; }; +&spdif { + + spdif_p0: port { + spdif_p0_0: endpoint { + remote-endpoint = <&dit_p0_0>; + }; + }; +}; + &tcphy0 { status = "okay"; }; diff -u linux-aws-5.15-5.15.0/arch/arm64/include/asm/exception.h linux-aws-5.15-5.15.0/arch/arm64/include/asm/exception.h --- linux-aws-5.15-5.15.0/arch/arm64/include/asm/exception.h +++ linux-aws-5.15-5.15.0/arch/arm64/include/asm/exception.h @@ -8,16 +8,11 @@ #define __ASM_EXCEPTION_H #include -#include #include #include -#ifdef CONFIG_FUNCTION_GRAPH_TRACER #define __exception_irq_entry __irq_entry -#else -#define __exception_irq_entry __kprobes -#endif static inline unsigned long disr_to_esr(u64 disr) { diff -u linux-aws-5.15-5.15.0/arch/arm64/kvm/vgic/vgic-its.c linux-aws-5.15-5.15.0/arch/arm64/kvm/vgic/vgic-its.c --- linux-aws-5.15-5.15.0/arch/arm64/kvm/vgic/vgic-its.c +++ linux-aws-5.15-5.15.0/arch/arm64/kvm/vgic/vgic-its.c @@ -462,6 +462,9 @@ } irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); + if (!irq) + continue; + raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = pendmask & (1U << bit_nr); vgic_queue_irq_unlock(vcpu->kvm, irq, flags); @@ -1374,6 +1377,8 @@ for (i = 0; i < irq_count; i++) { irq = vgic_get_irq(kvm, NULL, intids[i]); + if (!irq) + continue; update_affinity(irq, vcpu2); diff -u linux-aws-5.15-5.15.0/arch/arm64/mm/mmu.c linux-aws-5.15-5.15.0/arch/arm64/mm/mmu.c --- linux-aws-5.15-5.15.0/arch/arm64/mm/mmu.c +++ linux-aws-5.15-5.15.0/arch/arm64/mm/mmu.c @@ -435,7 +435,7 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot) { - if ((virt >= PAGE_END) && (virt < VMALLOC_START)) { + if (virt < PAGE_OFFSET) { pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", &phys, virt); return; @@ -462,7 +462,7 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot) { - if ((virt >= PAGE_END) && (virt < VMALLOC_START)) { + if (virt < PAGE_OFFSET) { pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n", &phys, virt); return; diff -u linux-aws-5.15-5.15.0/arch/mips/kernel/traps.c linux-aws-5.15-5.15.0/arch/mips/kernel/traps.c --- linux-aws-5.15-5.15.0/arch/mips/kernel/traps.c +++ linux-aws-5.15-5.15.0/arch/mips/kernel/traps.c @@ -2001,7 +2001,13 @@ void reserve_exception_space(phys_addr_t addr, unsigned long size) { - memblock_reserve(addr, size); + /* + * reserve exception space on CPUs other than CPU0 + * is too late, since memblock is unavailable when APs + * up + */ + if (smp_processor_id() == 0) + memblock_reserve(addr, size); } void __init *set_except_vector(int n, void *addr) diff -u linux-aws-5.15-5.15.0/arch/mips/kernel/vpe-mt.c linux-aws-5.15-5.15.0/arch/mips/kernel/vpe-mt.c --- linux-aws-5.15-5.15.0/arch/mips/kernel/vpe-mt.c +++ linux-aws-5.15-5.15.0/arch/mips/kernel/vpe-mt.c @@ -92,12 +92,11 @@ write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); /* - * The sde-kit passes 'memsize' to __start in $a3, so set something - * here... Or set $a3 to zero and define DFLT_STACK_SIZE and - * DFLT_HEAP_SIZE when you compile your program + * We don't pass the memsize here, so VPE programs need to be + * compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined. 
*/ + mttgpr(7, 0); mttgpr(6, v->ntcs); - mttgpr(7, physical_memsize); /* set up VPE1 */ /* diff -u linux-aws-5.15-5.15.0/arch/powerpc/kernel/rtas.c linux-aws-5.15-5.15.0/arch/powerpc/kernel/rtas.c --- linux-aws-5.15-5.15.0/arch/powerpc/kernel/rtas.c +++ linux-aws-5.15-5.15.0/arch/powerpc/kernel/rtas.c @@ -68,10 +68,10 @@ EXPORT_SYMBOL(rtas); DEFINE_SPINLOCK(rtas_data_buf_lock); -EXPORT_SYMBOL(rtas_data_buf_lock); +EXPORT_SYMBOL_GPL(rtas_data_buf_lock); -char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; -EXPORT_SYMBOL(rtas_data_buf); +char rtas_data_buf[RTAS_DATA_BUF_SIZE] __aligned(SZ_4K); +EXPORT_SYMBOL_GPL(rtas_data_buf); unsigned long rtas_rmo_buf; @@ -80,7 +80,7 @@ * This is done like this so rtas_flash can be a module. */ void (*rtas_flash_term_hook)(int); -EXPORT_SYMBOL(rtas_flash_term_hook); +EXPORT_SYMBOL_GPL(rtas_flash_term_hook); /* RTAS use home made raw locking instead of spin_lock_irqsave * because those can be called from within really nasty contexts @@ -328,7 +328,7 @@ spin_unlock(&progress_lock); } -EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */ +EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */ int rtas_token(const char *service) { @@ -338,7 +338,7 @@ tokp = of_get_property(rtas.dev, service, NULL); return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE; } -EXPORT_SYMBOL(rtas_token); +EXPORT_SYMBOL_GPL(rtas_token); int rtas_service_present(const char *service) { @@ -498,7 +498,7 @@ } return ret; } -EXPORT_SYMBOL(rtas_call); +EXPORT_SYMBOL_GPL(rtas_call); /* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds. @@ -533,7 +533,7 @@ return ms; } -EXPORT_SYMBOL(rtas_busy_delay); +EXPORT_SYMBOL_GPL(rtas_busy_delay); static int rtas_error_rc(int rtas_rc) { @@ -579,7 +579,7 @@ return rtas_error_rc(rc); return rc; } -EXPORT_SYMBOL(rtas_get_power_level); +EXPORT_SYMBOL_GPL(rtas_get_power_level); int rtas_set_power_level(int powerdomain, int level, int *setlevel) { @@ -597,7 +597,7 @@ return rtas_error_rc(rc); return rc; } -EXPORT_SYMBOL(rtas_set_power_level); +EXPORT_SYMBOL_GPL(rtas_set_power_level); int rtas_get_sensor(int sensor, int index, int *state) { @@ -615,7 +615,7 @@ return rtas_error_rc(rc); return rc; } -EXPORT_SYMBOL(rtas_get_sensor); +EXPORT_SYMBOL_GPL(rtas_get_sensor); int rtas_get_sensor_fast(int sensor, int index, int *state) { @@ -676,7 +676,7 @@ return rtas_error_rc(rc); return rc; } -EXPORT_SYMBOL(rtas_set_indicator); +EXPORT_SYMBOL_GPL(rtas_set_indicator); /* * Ignoring RTAS extended delay diff -u linux-aws-5.15-5.15.0/arch/powerpc/perf/hv-24x7.c linux-aws-5.15-5.15.0/arch/powerpc/perf/hv-24x7.c --- linux-aws-5.15-5.15.0/arch/powerpc/perf/hv-24x7.c +++ linux-aws-5.15-5.15.0/arch/powerpc/perf/hv-24x7.c @@ -79,9 +79,8 @@ */ void read_24x7_sys_info(void) { - int call_status, len, ntypes; - - spin_lock(&rtas_data_buf_lock); + const s32 token = rtas_token("ibm,get-system-parameter"); + int call_status; /* * Making system parameter: chips and sockets and cores per chip @@ -91,32 +90,27 @@ phys_chipspersocket = 1; phys_coresperchip = 1; - call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - PROCESSOR_MODULE_INFO, - __pa(rtas_data_buf), - RTAS_DATA_BUF_SIZE); + do { + spin_lock(&rtas_data_buf_lock); + call_status = rtas_call(token, 3, 1, NULL, PROCESSOR_MODULE_INFO, + __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE); + if (call_status == 0) { + int ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]); + int len = 
be16_to_cpup((__be16 *)&rtas_data_buf[0]); + + if (len >= 8 && ntypes != 0) { + phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]); + phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]); + phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]); + } + } + spin_unlock(&rtas_data_buf_lock); + } while (rtas_busy_delay(call_status)); if (call_status != 0) { pr_err("Error calling get-system-parameter %d\n", call_status); - } else { - len = be16_to_cpup((__be16 *)&rtas_data_buf[0]); - if (len < 8) - goto out; - - ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]); - - if (!ntypes) - goto out; - - phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]); - phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]); - phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]); } - -out: - spin_unlock(&rtas_data_buf_lock); } /* Domains for which more than one result element are returned for each event. */ diff -u linux-aws-5.15-5.15.0/arch/powerpc/perf/hv-gpci.c linux-aws-5.15-5.15.0/arch/powerpc/perf/hv-gpci.c --- linux-aws-5.15-5.15.0/arch/powerpc/perf/hv-gpci.c +++ linux-aws-5.15-5.15.0/arch/powerpc/perf/hv-gpci.c @@ -164,6 +164,20 @@ ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); + + /* + * ret value as 'H_PARAMETER' with detail_rc as 'GEN_BUF_TOO_SMALL', + * specifies that the current buffer size cannot accommodate + * all the information and a partial buffer returned. + * Since in this function we are only accessing data for a given starting index, + * we don't need to accommodate whole data and can get required count by + * accessing first entry data. + * Hence hcall fails only incase the ret value is other than H_SUCCESS or + * H_PARAMETER with detail_rc value as GEN_BUF_TOO_SMALL(0x1B). + */ + if (ret == H_PARAMETER && be32_to_cpu(arg->params.detail_rc) == 0x1B) + ret = 0; + if (ret) { pr_devel("hcall failed: 0x%lx\n", ret); goto out; @@ -228,6 +242,7 @@ { u64 count; u8 length; + unsigned long ret; /* Not our event */ if (event->attr.type != event->pmu->type) @@ -258,13 +273,23 @@ } /* check if the request works... */ - if (single_gpci_request(event_get_request(event), + ret = single_gpci_request(event_get_request(event), event_get_starting_index(event), event_get_secondary_index(event), event_get_counter_info_version(event), event_get_offset(event), length, - &count)) { + &count); + + /* + * ret value as H_AUTHORITY implies that partition is not permitted to retrieve + * performance information, and required to set + * "Enable Performance Information Collection" option. 
+ */ + if (ret == H_AUTHORITY) + return -EPERM; + + if (ret) { pr_devel("gpci hcall failed\n"); return -EINVAL; } diff -u linux-aws-5.15-5.15.0/arch/powerpc/platforms/powernv/pci-ioda.c linux-aws-5.15-5.15.0/arch/powerpc/platforms/powernv/pci-ioda.c --- linux-aws-5.15-5.15.0/arch/powerpc/platforms/powernv/pci-ioda.c +++ linux-aws-5.15-5.15.0/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2334,7 +2334,8 @@ int index; int64_t rc; - if (!res || !res->flags || res->start > res->end) + if (!res || !res->flags || res->start > res->end || + res->flags & IORESOURCE_UNSET) return; if (res->flags & IORESOURCE_IO) { diff -u linux-aws-5.15-5.15.0/arch/powerpc/platforms/pseries/lpar.c linux-aws-5.15-5.15.0/arch/powerpc/platforms/pseries/lpar.c --- linux-aws-5.15-5.15.0/arch/powerpc/platforms/pseries/lpar.c +++ linux-aws-5.15-5.15.0/arch/powerpc/platforms/pseries/lpar.c @@ -1434,22 +1434,22 @@ void __init pseries_lpar_read_hblkrm_characteristics(void) { + const s32 token = rtas_token("ibm,get-system-parameter"); unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH]; int call_status, len, idx, bpsize; if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) return; - spin_lock(&rtas_data_buf_lock); - memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE); - call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - SPLPAR_TLB_BIC_TOKEN, - __pa(rtas_data_buf), - RTAS_DATA_BUF_SIZE); - memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH); - local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0'; - spin_unlock(&rtas_data_buf_lock); + do { + spin_lock(&rtas_data_buf_lock); + memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE); + call_status = rtas_call(token, 3, 1, NULL, SPLPAR_TLB_BIC_TOKEN, + __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE); + memcpy(local_buffer, rtas_data_buf, SPLPAR_TLB_BIC_MAXLENGTH); + local_buffer[SPLPAR_TLB_BIC_MAXLENGTH - 1] = '\0'; + spin_unlock(&rtas_data_buf_lock); + } while (rtas_busy_delay(call_status)); if (call_status != 0) { pr_warn("%s %s Error calling get-system-parameter (0x%x)\n", diff -u linux-aws-5.15-5.15.0/arch/powerpc/platforms/pseries/lparcfg.c linux-aws-5.15-5.15.0/arch/powerpc/platforms/pseries/lparcfg.c --- linux-aws-5.15-5.15.0/arch/powerpc/platforms/pseries/lparcfg.c +++ linux-aws-5.15-5.15.0/arch/powerpc/platforms/pseries/lparcfg.c @@ -322,6 +322,7 @@ */ static void parse_system_parameter_string(struct seq_file *m) { + const s32 token = rtas_token("ibm,get-system-parameter"); int call_status; unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL); @@ -331,16 +332,15 @@ return; } - spin_lock(&rtas_data_buf_lock); - memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH); - call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1, - NULL, - SPLPAR_CHARACTERISTICS_TOKEN, - __pa(rtas_data_buf), - RTAS_DATA_BUF_SIZE); - memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH); - local_buffer[SPLPAR_MAXLENGTH - 1] = '\0'; - spin_unlock(&rtas_data_buf_lock); + do { + spin_lock(&rtas_data_buf_lock); + memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH); + call_status = rtas_call(token, 3, 1, NULL, SPLPAR_CHARACTERISTICS_TOKEN, + __pa(rtas_data_buf), RTAS_DATA_BUF_SIZE); + memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH); + local_buffer[SPLPAR_MAXLENGTH - 1] = '\0'; + spin_unlock(&rtas_data_buf_lock); + } while (rtas_busy_delay(call_status)); if (call_status != 0) { printk(KERN_INFO diff -u linux-aws-5.15-5.15.0/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts linux-aws-5.15-5.15.0/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts --- 
linux-aws-5.15-5.15.0/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts +++ linux-aws-5.15-5.15.0/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts @@ -70,6 +70,7 @@ interrupt-parent = <&gpio>; interrupts = <1 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; + #interrupt-cells = <2>; regulators { vdd_bcore1: bcore1 { diff -u linux-aws-5.15-5.15.0/arch/riscv/include/asm/parse_asm.h linux-aws-5.15-5.15.0/arch/riscv/include/asm/parse_asm.h --- linux-aws-5.15-5.15.0/arch/riscv/include/asm/parse_asm.h +++ linux-aws-5.15-5.15.0/arch/riscv/include/asm/parse_asm.h @@ -128,7 +128,7 @@ #define FUNCT3_C_J 0xa000 #define FUNCT3_C_JAL 0x2000 #define FUNCT4_C_JR 0x8000 -#define FUNCT4_C_JALR 0xf000 +#define FUNCT4_C_JALR 0x9000 #define FUNCT12_SRET 0x10200000 diff -u linux-aws-5.15-5.15.0/arch/riscv/include/asm/pgtable.h linux-aws-5.15-5.15.0/arch/riscv/include/asm/pgtable.h --- linux-aws-5.15-5.15.0/arch/riscv/include/asm/pgtable.h +++ linux-aws-5.15-5.15.0/arch/riscv/include/asm/pgtable.h @@ -58,7 +58,7 @@ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled. */ -#define vmemmap ((struct page *)VMEMMAP_START) +#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT)) #define PCI_IO_SIZE SZ_16M #define PCI_IO_END VMEMMAP_START diff -u linux-aws-5.15-5.15.0/arch/s390/kernel/vdso32/Makefile linux-aws-5.15-5.15.0/arch/s390/kernel/vdso32/Makefile --- linux-aws-5.15-5.15.0/arch/s390/kernel/vdso32/Makefile +++ linux-aws-5.15-5.15.0/arch/s390/kernel/vdso32/Makefile @@ -22,7 +22,7 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \ +LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \ --hash-style=both --build-id=sha1 -melf_s390 -T $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) diff -u linux-aws-5.15-5.15.0/arch/s390/kernel/vdso64/Makefile linux-aws-5.15-5.15.0/arch/s390/kernel/vdso64/Makefile --- linux-aws-5.15-5.15.0/arch/s390/kernel/vdso64/Makefile +++ linux-aws-5.15-5.15.0/arch/s390/kernel/vdso64/Makefile @@ -26,7 +26,7 @@ KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \ +ldflags-y := -shared -soname=linux-vdso64.so.1 \ --hash-style=both --build-id=sha1 -T $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) diff -u linux-aws-5.15-5.15.0/arch/s390/pci/pci.c linux-aws-5.15-5.15.0/arch/s390/pci/pci.c --- linux-aws-5.15-5.15.0/arch/s390/pci/pci.c +++ linux-aws-5.15-5.15.0/arch/s390/pci/pci.c @@ -241,7 +241,7 @@ /* combine single writes by using store-block insn */ void __iowrite64_copy(void __iomem *to, const void *from, size_t count) { - zpci_memcpy_toio(to, from, count); + zpci_memcpy_toio(to, from, count * 8); } static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot) diff -u linux-aws-5.15-5.15.0/arch/x86/include/asm/nospec-branch.h linux-aws-5.15-5.15.0/arch/x86/include/asm/nospec-branch.h --- linux-aws-5.15-5.15.0/arch/x86/include/asm/nospec-branch.h +++ linux-aws-5.15-5.15.0/arch/x86/include/asm/nospec-branch.h @@ -220,6 +220,8 @@ extern void entry_untrain_ret(void); extern void entry_ibpb(void); +extern void (*x86_return_thunk)(void); + #ifdef CONFIG_X86_64 extern void clear_bhb_loop(void); #endif diff -u linux-aws-5.15-5.15.0/arch/x86/kernel/alternative.c 
linux-aws-5.15-5.15.0/arch/x86/kernel/alternative.c --- linux-aws-5.15-5.15.0/arch/x86/kernel/alternative.c +++ linux-aws-5.15-5.15.0/arch/x86/kernel/alternative.c @@ -521,6 +521,7 @@ } #ifdef CONFIG_RETHUNK + /* * Rewrite the compiler generated return thunk tail-calls. * @@ -536,14 +537,18 @@ { int i = 0; - if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) - return -1; - - bytes[i++] = RET_INSN_OPCODE; + if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { + if (x86_return_thunk == __x86_return_thunk) + return -1; + + i = JMP32_INSN_SIZE; + __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i); + } else { + bytes[i++] = RET_INSN_OPCODE; + } for (; i < insn->length;) bytes[i++] = INT3_INSN_OPCODE; - return i; } diff -u linux-aws-5.15-5.15.0/arch/x86/kernel/cpu/intel.c linux-aws-5.15-5.15.0/arch/x86/kernel/cpu/intel.c --- linux-aws-5.15-5.15.0/arch/x86/kernel/cpu/intel.c +++ linux-aws-5.15-5.15.0/arch/x86/kernel/cpu/intel.c @@ -181,6 +181,90 @@ return false; } +#define MSR_IA32_TME_ACTIVATE 0x982 + +/* Helpers to access TME_ACTIVATE MSR */ +#define TME_ACTIVATE_LOCKED(x) (x & 0x1) +#define TME_ACTIVATE_ENABLED(x) (x & 0x2) + +#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */ +#define TME_ACTIVATE_POLICY_AES_XTS_128 0 + +#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */ + +#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */ +#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1 + +/* Values for mktme_status (SW only construct) */ +#define MKTME_ENABLED 0 +#define MKTME_DISABLED 1 +#define MKTME_UNINITIALIZED 2 +static int mktme_status = MKTME_UNINITIALIZED; + +static void detect_tme_early(struct cpuinfo_x86 *c) +{ + u64 tme_activate, tme_policy, tme_crypto_algs; + int keyid_bits = 0, nr_keyids = 0; + static u64 tme_activate_cpu0 = 0; + + rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); + + if (mktme_status != MKTME_UNINITIALIZED) { + if (tme_activate != tme_activate_cpu0) { + /* Broken BIOS? */ + pr_err_once("x86/tme: configuration is inconsistent between CPUs\n"); + pr_err_once("x86/tme: MKTME is not usable\n"); + mktme_status = MKTME_DISABLED; + + /* Proceed. We may need to exclude bits from x86_phys_bits. */ + } + } else { + tme_activate_cpu0 = tme_activate; + } + + if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { + pr_info_once("x86/tme: not enabled by BIOS\n"); + mktme_status = MKTME_DISABLED; + return; + } + + if (mktme_status != MKTME_UNINITIALIZED) + goto detect_keyid_bits; + + pr_info("x86/tme: enabled by BIOS\n"); + + tme_policy = TME_ACTIVATE_POLICY(tme_activate); + if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128) + pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy); + + tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate); + if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) { + pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n", + tme_crypto_algs); + mktme_status = MKTME_DISABLED; + } +detect_keyid_bits: + keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate); + nr_keyids = (1UL << keyid_bits) - 1; + if (nr_keyids) { + pr_info_once("x86/mktme: enabled by BIOS\n"); + pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids); + } else { + pr_info_once("x86/mktme: disabled by BIOS\n"); + } + + if (mktme_status == MKTME_UNINITIALIZED) { + /* MKTME is usable */ + mktme_status = MKTME_ENABLED; + } + + /* + * KeyID bits effectively lower the number of physical address + * bits. Update cpuinfo_x86::x86_phys_bits accordingly. 
+ */ + c->x86_phys_bits -= keyid_bits; +} + static void early_init_intel(struct cpuinfo_x86 *c) { u64 misc_enable; @@ -332,6 +416,13 @@ */ if (detect_extended_topology_early(c) < 0) detect_ht_early(c); + + /* + * Adjust the number of physical bits early because it affects the + * valid bits of the MTRR mask registers. + */ + if (cpu_has(c, X86_FEATURE_TME)) + detect_tme_early(c); } static void bsp_init_intel(struct cpuinfo_x86 *c) @@ -492,90 +583,6 @@ #endif } -#define MSR_IA32_TME_ACTIVATE 0x982 - -/* Helpers to access TME_ACTIVATE MSR */ -#define TME_ACTIVATE_LOCKED(x) (x & 0x1) -#define TME_ACTIVATE_ENABLED(x) (x & 0x2) - -#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */ -#define TME_ACTIVATE_POLICY_AES_XTS_128 0 - -#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */ - -#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */ -#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1 - -/* Values for mktme_status (SW only construct) */ -#define MKTME_ENABLED 0 -#define MKTME_DISABLED 1 -#define MKTME_UNINITIALIZED 2 -static int mktme_status = MKTME_UNINITIALIZED; - -static void detect_tme(struct cpuinfo_x86 *c) -{ - u64 tme_activate, tme_policy, tme_crypto_algs; - int keyid_bits = 0, nr_keyids = 0; - static u64 tme_activate_cpu0 = 0; - - rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); - - if (mktme_status != MKTME_UNINITIALIZED) { - if (tme_activate != tme_activate_cpu0) { - /* Broken BIOS? */ - pr_err_once("x86/tme: configuration is inconsistent between CPUs\n"); - pr_err_once("x86/tme: MKTME is not usable\n"); - mktme_status = MKTME_DISABLED; - - /* Proceed. We may need to exclude bits from x86_phys_bits. */ - } - } else { - tme_activate_cpu0 = tme_activate; - } - - if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { - pr_info_once("x86/tme: not enabled by BIOS\n"); - mktme_status = MKTME_DISABLED; - return; - } - - if (mktme_status != MKTME_UNINITIALIZED) - goto detect_keyid_bits; - - pr_info("x86/tme: enabled by BIOS\n"); - - tme_policy = TME_ACTIVATE_POLICY(tme_activate); - if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128) - pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy); - - tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate); - if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) { - pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n", - tme_crypto_algs); - mktme_status = MKTME_DISABLED; - } -detect_keyid_bits: - keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate); - nr_keyids = (1UL << keyid_bits) - 1; - if (nr_keyids) { - pr_info_once("x86/mktme: enabled by BIOS\n"); - pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids); - } else { - pr_info_once("x86/mktme: disabled by BIOS\n"); - } - - if (mktme_status == MKTME_UNINITIALIZED) { - /* MKTME is usable */ - mktme_status = MKTME_ENABLED; - } - - /* - * KeyID bits effectively lower the number of physical address - * bits. Update cpuinfo_x86::x86_phys_bits accordingly. - */ - c->x86_phys_bits -= keyid_bits; -} - static void init_cpuid_fault(struct cpuinfo_x86 *c) { u64 msr; @@ -712,9 +719,6 @@ init_ia32_feat_ctl(c); - if (cpu_has(c, X86_FEATURE_TME)) - detect_tme(c); - init_intel_misc_features(c); split_lock_init(); diff -u linux-aws-5.15-5.15.0/arch/x86/kernel/fpu/signal.c linux-aws-5.15-5.15.0/arch/x86/kernel/fpu/signal.c --- linux-aws-5.15-5.15.0/arch/x86/kernel/fpu/signal.c +++ linux-aws-5.15-5.15.0/arch/x86/kernel/fpu/signal.c @@ -274,12 +274,13 @@ * Attempt to restore the FPU registers directly from user memory. 
* Pagefaults are handled and any errors returned are fatal. */ -static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, - bool fx_only, unsigned int size) +static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only) { struct fpu *fpu = ¤t->thread.fpu; int ret; + /* Restore enabled features only. */ + xrestore &= fpu->fpstate->user_xfeatures; retry: fpregs_lock(); /* Ensure that XFD is up to date */ @@ -309,7 +310,7 @@ if (ret != X86_TRAP_PF) return false; - if (!fault_in_readable(buf, size)) + if (!fault_in_readable(buf, fpu->fpstate->user_size)) goto retry; return false; } @@ -339,7 +340,6 @@ struct user_i387_ia32_struct env; bool success, fx_only = false; union fpregs_state *fpregs; - unsigned int state_size; u64 user_xfeatures = 0; if (use_xsave()) { @@ -349,17 +349,14 @@ return false; fx_only = !fx_sw_user.magic1; - state_size = fx_sw_user.xstate_size; user_xfeatures = fx_sw_user.xfeatures; } else { user_xfeatures = XFEATURE_MASK_FPSSE; - state_size = fpu->fpstate->user_size; } if (likely(!ia32_fxstate)) { /* Restore the FPU registers directly from user memory. */ - return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only, - state_size); + return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only); } /* diff -u linux-aws-5.15-5.15.0/arch/x86/kernel/ftrace.c linux-aws-5.15-5.15.0/arch/x86/kernel/ftrace.c --- linux-aws-5.15-5.15.0/arch/x86/kernel/ftrace.c +++ linux-aws-5.15-5.15.0/arch/x86/kernel/ftrace.c @@ -368,7 +368,7 @@ ip = trampoline + size; if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) - memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE); + __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE); else memcpy(ip, retq, sizeof(retq)); diff -u linux-aws-5.15-5.15.0/arch/x86/kernel/paravirt.c linux-aws-5.15-5.15.0/arch/x86/kernel/paravirt.c --- linux-aws-5.15-5.15.0/arch/x86/kernel/paravirt.c +++ linux-aws-5.15-5.15.0/arch/x86/kernel/paravirt.c @@ -58,29 +58,12 @@ BUG(); } -struct branch { - unsigned char opcode; - u32 delta; -} __attribute__((packed)); - static unsigned paravirt_patch_call(void *insn_buff, const void *target, unsigned long addr, unsigned len) { - const int call_len = 5; - struct branch *b = insn_buff; - unsigned long delta = (unsigned long)target - (addr+call_len); - - if (len < call_len) { - pr_warn("paravirt: Failed to patch indirect CALL at %ps\n", (void *)addr); - /* Kernel might not be viable if patching fails, bail out: */ - BUG_ON(1); - } - - b->opcode = 0xe8; /* call */ - b->delta = delta; - BUILD_BUG_ON(sizeof(*b) != call_len); - - return call_len; + __text_gen_insn(insn_buff, CALL_INSN_OPCODE, + (void *)addr, target, CALL_INSN_SIZE); + return CALL_INSN_SIZE; } #ifdef CONFIG_PARAVIRT_XXL diff -u linux-aws-5.15-5.15.0/arch/x86/kernel/static_call.c linux-aws-5.15-5.15.0/arch/x86/kernel/static_call.c --- linux-aws-5.15-5.15.0/arch/x86/kernel/static_call.c +++ linux-aws-5.15-5.15.0/arch/x86/kernel/static_call.c @@ -52,7 +52,7 @@ case RET: if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) - code = text_gen_insn(JMP32_INSN_OPCODE, insn, &__x86_return_thunk); + code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk); else code = &retinsn; break; diff -u linux-aws-5.15-5.15.0/arch/x86/mm/fault.c linux-aws-5.15-5.15.0/arch/x86/mm/fault.c --- linux-aws-5.15-5.15.0/arch/x86/mm/fault.c +++ linux-aws-5.15-5.15.0/arch/x86/mm/fault.c @@ -787,15 +787,6 @@ show_opcodes(regs, loglvl); } -/* - * The (legacy) vsyscall page is the long page in the kernel portion - * of 
the address space that has user-accessible permissions. - */ -static bool is_vsyscall_vaddr(unsigned long vaddr) -{ - return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR); -} - static void __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, unsigned long address, u32 pkey, int si_code) diff -u linux-aws-5.15-5.15.0/arch/x86/mm/maccess.c linux-aws-5.15-5.15.0/arch/x86/mm/maccess.c --- linux-aws-5.15-5.15.0/arch/x86/mm/maccess.c +++ linux-aws-5.15-5.15.0/arch/x86/mm/maccess.c @@ -3,6 +3,8 @@ #include #include +#include + #ifdef CONFIG_X86_64 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { @@ -16,6 +18,14 @@ return false; /* + * Reading from the vsyscall page may cause an unhandled fault in + * certain cases. Though it is at an address above TASK_SIZE_MAX, it is + * usually considered as a user space address. + */ + if (is_vsyscall_vaddr(vaddr)) + return false; + + /* * Allow everything during early boot before 'x86_virt_bits' * is initialized. Needed for instruction decoding in early * exception handlers. diff -u linux-aws-5.15-5.15.0/arch/x86/net/bpf_jit_comp.c linux-aws-5.15-5.15.0/arch/x86/net/bpf_jit_comp.c --- linux-aws-5.15-5.15.0/arch/x86/net/bpf_jit_comp.c +++ linux-aws-5.15-5.15.0/arch/x86/net/bpf_jit_comp.c @@ -411,7 +411,7 @@ u8 *prog = *pprog; if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { - emit_jump(&prog, &__x86_return_thunk, ip); + emit_jump(&prog, x86_return_thunk, ip); } else { EMIT1(0xC3); /* ret */ if (IS_ENABLED(CONFIG_SLS)) diff -u linux-aws-5.15-5.15.0/arch/x86/xen/smp.c linux-aws-5.15-5.15.0/arch/x86/xen/smp.c --- linux-aws-5.15-5.15.0/arch/x86/xen/smp.c +++ linux-aws-5.15-5.15.0/arch/x86/xen/smp.c @@ -65,6 +65,8 @@ char *resched_name, *callfunc_name, *debug_name; resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); + if (!resched_name) + goto fail_mem; per_cpu(xen_resched_irq, cpu).name = resched_name; rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, @@ -77,6 +79,8 @@ per_cpu(xen_resched_irq, cpu).irq = rc; callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); + if (!callfunc_name) + goto fail_mem; per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu, @@ -90,6 +94,9 @@ if (!xen_fifo_events) { debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); + if (!debug_name) + goto fail_mem; + per_cpu(xen_debug_irq, cpu).name = debug_name; rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, @@ -101,6 +108,9 @@ } callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); + if (!callfunc_name) + goto fail_mem; + per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, cpu, @@ -114,6 +124,8 @@ return 0; + fail_mem: + rc = -ENOMEM; fail: xen_smp_intr_free(cpu); return rc; diff -u linux-aws-5.15-5.15.0/block/sed-opal.c linux-aws-5.15-5.15.0/block/sed-opal.c --- linux-aws-5.15-5.15.0/block/sed-opal.c +++ linux-aws-5.15-5.15.0/block/sed-opal.c @@ -895,16 +895,20 @@ token_length = response_parse_medium(iter, pos); else if (pos[0] <= LONG_ATOM_BYTE) /* long atom */ token_length = response_parse_long(iter, pos); + else if (pos[0] == EMPTY_ATOM_BYTE) /* empty atom */ + token_length = 1; else /* TOKEN */ token_length = response_parse_token(iter, pos); if (token_length < 0) return token_length; + if (pos[0] != EMPTY_ATOM_BYTE) + num_entries++; + pos += token_length; total -= token_length; iter++; - num_entries++; } resp->num = num_entries; diff -u 
linux-aws-5.15-5.15.0/debian.aws-5.15/changelog linux-aws-5.15-5.15.0/debian.aws-5.15/changelog --- linux-aws-5.15-5.15.0/debian.aws-5.15/changelog +++ linux-aws-5.15-5.15.0/debian.aws-5.15/changelog @@ -1,3 +1,834 @@ +linux-aws-5.15 (5.15.0-1063.69~20.04.1) focal; urgency=medium + + * focal/linux-aws-5.15: 5.15.0-1063.69~20.04.1 -proposed tracker + (LP: #2063711) + + [ Ubuntu: 5.15.0-1063.69 ] + + * jammy/linux-aws: 5.15.0-1063.69 -proposed tracker (LP: #2063712) + * aws: Support hibernation on Graviton (LP: #2060992) + - SAUCE: PM: hibernate: Allow ACPI hardware signature to be honoured + - SAUCE: PM: hibernate: Honour ACPI hardware signature by default for virtual + guests + - SAUCE: ACPICA: Detect FACS even for hardware reduced platforms + - SAUCE: arm64: acpi: Honour firmware_signature field of FACS, if it exists + - SAUCE: firmware/psci: Add definitions for PSCI v1.3 specification (ALPHA) + - SAUCE: arm64: Use SYSTEM_OFF2 PSCI call to power off for hibernate + - [Config]: Enable hibernate on arm64 + - [Config]: Enable hibernate on arm64 + * jammy/linux: 5.15.0-111.121 -proposed tracker (LP: #2063763) + * RTL8852BE fw security fail then lost WIFI function during suspend/resume + cycle (LP: #2063096) + - wifi: rtw89: download firmware with five times retry + * Mount CIFS fails with Permission denied (LP: #2061986) + - cifs: fix ntlmssp auth when there is no key exchange + * USB stick can't be detected (LP: #2040948) + - usb: Disable USB3 LPM at shutdown + * Jammy update: v5.15.153 upstream stable release (LP: #2063290) + - io_uring/unix: drop usage of io_uring socket + - io_uring: drop any code related to SCM_RIGHTS + - selftests: tls: use exact comparison in recv_partial + - ASoC: rt5645: Make LattePanda board DMI match more precise + - x86/xen: Add some null pointer checking to smp.c + - MIPS: Clear Cause.BD in instruction_pointer_set + - HID: multitouch: Add required quirk for Synaptics 0xcddc device + - gen_compile_commands: fix invalid escape sequence warning + - RDMA/mlx5: Fix fortify source warning while accessing Eth segment + - RDMA/mlx5: Relax DEVX access upon modify commands + - riscv: dts: sifive: add missing #interrupt-cells to pmic + - x86/mm: Move is_vsyscall_vaddr() into asm/vsyscall.h + - x86/mm: Disallow vsyscall page read for copy_from_kernel_nofault() + - net/iucv: fix the allocation size of iucv_path_table array + - parisc/ftrace: add missing CONFIG_DYNAMIC_FTRACE check + - block: sed-opal: handle empty atoms when parsing response + - dm-verity, dm-crypt: align "struct bvec_iter" correctly + - scsi: mpt3sas: Prevent sending diag_reset when the controller is ready + - ALSA: hda/realtek - ALC285 reduce pop noise from Headphone port + - drm/amdgpu: Enable gpu reset for S3 abort cases on Raven series + - Bluetooth: rfcomm: Fix null-ptr-deref in rfcomm_check_security + - firewire: core: use long bus reset on gap count error + - ASoC: Intel: bytcr_rt5640: Add an extra entry for the Chuwi Vi8 tablet + - Input: gpio_keys_polled - suppress deferred probe error for gpio + - ASoC: wm8962: Enable oscillator if selecting WM8962_FLL_OSC + - ASoC: wm8962: Enable both SPKOUTR_ENA and SPKOUTL_ENA in mono mode + - ASoC: wm8962: Fix up incorrect error message in wm8962_set_fll + - do_sys_name_to_handle(): use kzalloc() to fix kernel-infoleak + - s390/dasd: put block allocation in separate function + - s390/dasd: add query PPRC function + - s390/dasd: add copy pair setup + - s390/dasd: add autoquiesce feature + - s390/dasd: Use dev_*() for device log messages + - s390/dasd: fix double 
module refcount decrement + - fs/select: rework stack allocation hack for clang + - md: Don't clear MD_CLOSING when the raid is about to stop + - lib/cmdline: Fix an invalid format specifier in an assertion msg + - time: test: Fix incorrect format specifier + - rtc: test: Fix invalid format specifier. + - aoe: fix the potential use-after-free problem in aoecmd_cfg_pkts + - timekeeping: Fix cross-timestamp interpolation on counter wrap + - timekeeping: Fix cross-timestamp interpolation corner case decision + - timekeeping: Fix cross-timestamp interpolation for non-x86 + - sched/fair: Take the scheduling domain into account in select_idle_core() + - wifi: ath10k: fix NULL pointer dereference in + ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev() + - wifi: b43: Stop/wake correct queue in DMA Tx path when QoS is disabled + - wifi: b43: Stop/wake correct queue in PIO Tx path when QoS is disabled + - wifi: b43: Stop correct queue in DMA worker when QoS is disabled + - wifi: b43: Disable QoS for bcm4331 + - wifi: wilc1000: fix declarations ordering + - wifi: wilc1000: fix RCU usage in connect path + - wifi: rtl8xxxu: add cancel_work_sync() for c2hcmd_work + - wifi: wilc1000: fix multi-vif management when deleting a vif + - wifi: mwifiex: debugfs: Drop unnecessary error check for + debugfs_create_dir() + - cpufreq: brcmstb-avs-cpufreq: add check for cpufreq_cpu_get's return value + - cpufreq: Explicitly include correct DT includes + - cpufreq: mediatek-hw: Wait for CPU supplies before probing + - sock_diag: annotate data-races around sock_diag_handlers[family] + - inet_diag: annotate data-races around inet_diag_table[] + - bpftool: Silence build warning about calloc() + - af_unix: Annotate data-race of gc_in_progress in wait_for_unix_gc(). + - cpufreq: mediatek-hw: Don't error out if supply is not found + - arm64: dts: imx8mm-kontron: Disable pullups for I2C signals on SL/BL i.MX8MM + - arm64: dts: imx8mm-kontron: Disable pullups for onboard UART signals on BL + board + - arm64: dts: imx8mm-kontron: Add support for ultra high speed modes on SD + card + - arm64: dts: imx8mm-kontron: Use the VSELECT signal to switch SD card IO + voltage + - arm64: dts: imx8mm-kontron: Disable pull resistors for SD card signals on BL + board + - wifi: ath9k: delay all of ath9k_wmi_event_tasklet() until init is complete + - wifi: iwlwifi: mvm: report beacon protection failures + - wifi: iwlwifi: dbg-tlv: ensure NUL termination + - wifi: iwlwifi: fix EWRD table validity check + - arm64: dts: imx8mm-venice-gw71xx: fix USB OTG VBUS + - pwm: atmel-hlcdc: Convert to platform remove callback returning void + - pwm: atmel-hlcdc: Use consistent variable naming + - pwm: atmel-hlcdc: Fix clock imbalance related to suspend support + - net: blackhole_dev: fix build warning for ethh set but not used + - wifi: libertas: fix some memleaks in lbs_allocate_cmd_buffer() + - pwm: sti: Implement .apply() callback + - pwm: sti: Fix capture for st,pwm-num-chan < st,capture-num-chan + - wifi: iwlwifi: mvm: don't set replay counters to 0xff + - s390/vdso: drop '-fPIC' from LDFLAGS + - ipv6: mcast: remove one synchronize_net() barrier in ipv6_mc_down() + - arm64: dts: mt8183: kukui: Add Type C node + - arm64: dts: mt8183: kukui: Split out keyboard node and describe detachables + - arm64: dts: mt8183: Move CrosEC base detection node to kukui-based DTs + - arm64: dts: mediatek: mt7622: add missing "device_type" to memory nodes + - bpf: Mark bpf_spin_{lock,unlock}() helpers with notrace correctly + - wireless: Remove redundant 'flush_workqueue()' calls 
+ - wifi: wilc1000: prevent use-after-free on vif when cleaning up all + interfaces + - ACPI: processor_idle: Fix memory leak in acpi_processor_power_exit() + - bus: tegra-aconnect: Update dependency to ARCH_TEGRA + - [Config]: update CONFIG_TEGRA_ACONNECT + - iommu/amd: Mark interrupt as managed + - wifi: brcmsmac: avoid function pointer casts + - net: ena: Remove ena_select_queue + - ARM: dts: arm: realview: Fix development chip ROM compatible value + - arm64: dts: renesas: r8a779a0: Update to R-Car Gen4 compatible values + - arm64: dts: renesas: r8a779a0: Correct avb[01] reg sizes + - ARM: dts: imx6dl-yapp4: Move phy reset into switch node + - ARM: dts: imx6dl-yapp4: Fix typo in the QCA switch register address + - ARM: dts: imx6dl-yapp4: Move the internal switch PHYs under the switch node + - arm64: dts: marvell: reorder crypto interrupts on Armada SoCs + - ACPI: resource: Add Infinity laptops to irq1_edge_low_force_override + - ACPI: resource: Do IRQ override on Lunnen Ground laptops + - ACPI: resource: Add MAIBENBEN X577 to irq1_edge_low_force_override + - ACPI: scan: Fix device check notification handling + - x86, relocs: Ignore relocations in .notes section + - SUNRPC: fix some memleaks in gssx_dec_option_array + - mmc: wmt-sdmmc: remove an incorrect release_mem_region() call in the .remove + function + - wifi: rtw88: 8821c: Fix false alarm count + - PCI: Make pci_dev_is_disconnected() helper public for other drivers + - iommu/vt-d: Don't issue ATS Invalidation request when device is disconnected + - igb: move PEROUT and EXTTS isr logic to separate functions + - igb: Fix missing time sync events + - Bluetooth: Remove superfluous call to hci_conn_check_pending() + - Bluetooth: hci_qca: Add support for QTI Bluetooth chip wcn6855 + - Bluetooth: hci_qca: don't use IS_ERR_OR_NULL() with gpiod_get_optional() + - Bluetooth: hci_core: Fix possible buffer overflow + - sr9800: Add check for usbnet_get_endpoints + - bpf: Fix DEVMAP_HASH overflow check on 32-bit arches + - bpf: Fix hashtab overflow check on 32-bit arches + - bpf: Fix stackmap overflow check on 32-bit arches + - ipv6: fib6_rules: flush route cache when rule is changed + - net: ip_tunnel: make sure to pull inner header in ip_tunnel_rcv() + - net: phy: fix phy_get_internal_delay accessing an empty array + - net: hns3: fix kernel crash when 1588 is received on HIP08 devices + - net: hns3: fix port duplex configure error in IMP reset + - net: phy: DP83822: enable rgmii mode if phy_interface_is_rgmii + - net: phy: dp83822: Fix RGMII TX delay configuration + - OPP: debugfs: Fix warning around icc_get_name() + - tcp: fix incorrect parameter validation in the do_tcp_getsockopt() function + - net: Change sock_getsockopt() to take the sk ptr instead of the sock ptr + - bpf: net: Change sk_getsockopt() to take the sockptr_t argument + - bpf: net: Change do_ip_getsockopt() to take the sockptr_t argument + - ipmr: fix incorrect parameter validation in the ip_mroute_getsockopt() + function + - l2tp: fix incorrect parameter validation in the pppol2tp_getsockopt() + function + - udp: fix incorrect parameter validation in the udp_lib_getsockopt() function + - net: kcm: fix incorrect parameter validation in the kcm_getsockopt) function + - net/x25: fix incorrect parameter validation in the x25_getsockopt() function + - nfp: flower: handle acti_netdevs allocation failure + - dm raid: fix false positive for requeue needed during reshape + - dm: call the resume method on internal suspend + - drm/tegra: dsi: Add missing check for 
of_find_device_by_node + - drm/tegra: dpaux: Populate AUX bus + - drm/tegra: dpaux: Fix PM disable depth imbalance in tegra_dpaux_probe + - drm/tegra: dsi: Make use of the helper function dev_err_probe() + - drm/tegra: dsi: Fix some error handling paths in tegra_dsi_probe() + - drm/tegra: dsi: Fix missing pm_runtime_disable() in the error handling path + of tegra_dsi_probe() + - drm/tegra: dc: rgb: Allow changing PLLD rate on Tegra30+ + - drm/tegra: rgb: Fix some error handling paths in tegra_dc_rgb_probe() + - drm/tegra: rgb: Fix missing clk_put() in the error handling paths of + tegra_dc_rgb_probe() + - drm/tegra: output: Fix missing i2c_put_adapter() in the error handling paths + of tegra_output_probe() + - drm/rockchip: inno_hdmi: Fix video timing + - drm: Don't treat 0 as -1 in drm_fixp2int_ceil + - drm/ttm: add ttm_resource_fini v2 + - drm/vmwgfx: fix a memleak in vmw_gmrid_man_get_node + - drm/rockchip: lvds: do not overwrite error code + - drm/rockchip: lvds: do not print scary message when probing defer + - drm/lima: fix a memleak in lima_heap_alloc + - dmaengine: tegra210-adma: Update dependency to ARCH_TEGRA + - [Config]: update CONFIG_TEGRA210_ADMA + - media: tc358743: register v4l2 async device only after successful setup + - PCI/DPC: Print all TLP Prefixes, not just the first + - perf record: Fix possible incorrect free in record__switch_output() + - HID: lenovo: Add middleclick_workaround sysfs knob for cptkbd + - drm/amd/display: Fix a potential buffer overflow in 'dp_dsc_clock_en_read()' + - drm/amd/display: Fix potential NULL pointer dereferences in + 'dcn10_set_output_transfer_func()' + - perf evsel: Fix duplicate initialization of data->id in + evsel__parse_sample() + - clk: meson: Add missing clocks to axg_clk_regmaps + - media: em28xx: annotate unchecked call to media_device_register() + - media: v4l2-tpg: fix some memleaks in tpg_alloc + - media: v4l2-mem2mem: fix a memleak in v4l2_m2m_register_entity + - media: edia: dvbdev: fix a use-after-free + - pinctrl: mediatek: Drop bogus slew rate register range for MT8192 + - clk: qcom: reset: Commonize the de/assert functions + - clk: qcom: reset: Ensure write completion on reset de/assertion + - quota: simplify drop_dquot_ref() + - quota: Fix potential NULL pointer dereference + - quota: Fix rcu annotations of inode dquot pointers + - PCI/P2PDMA: Fix a sleeping issue in a RCU read section + - PCI: switchtec: Fix an error handling path in switchtec_pci_probe() + - crypto: xilinx - call finalize with bh disabled + - perf thread_map: Free strlist on normal path in thread_map__new_by_tid_str() + - drm/radeon/ni: Fix wrong firmware size logging in ni_init_microcode() + - ALSA: seq: fix function cast warnings + - perf stat: Avoid metric-only segv + - ASoC: meson: Use dev_err_probe() helper + - ASoC: meson: aiu: fix function pointer type mismatch + - ASoC: meson: t9015: fix function pointer type mismatch + - powerpc: Force inlining of arch_vmap_p{u/m}d_supported() + - PCI: endpoint: Support NTB transfer between RC and EP + - [Config]: update CONFIG_PCI_EPF_VNTB + - NTB: EPF: fix possible memory leak in pci_vntb_probe() + - NTB: fix possible name leak in ntb_register_device() + - media: sun8i-di: Fix coefficient writes + - media: sun8i-di: Fix power on/off sequences + - media: sun8i-di: Fix chroma difference threshold + - media: imx: csc/scaler: fix v4l2_ctrl_handler memory leak + - media: go7007: add check of return value of go7007_read_addr() + - media: pvrusb2: remove redundant NULL check + - media: pvrusb2: fix 
pvr2_stream_callback casts + - clk: qcom: dispcc-sdm845: Adjust internal GDSC wait times + - drm/mediatek: dsi: Fix DSI RGB666 formats and definitions + - PCI: Mark 3ware-9650SE Root Port Extended Tags as broken + - clk: hisilicon: hi3519: Release the correct number of gates in + hi3519_clk_unregister() + - clk: hisilicon: hi3559a: Fix an erroneous devm_kfree() + - drm/tegra: put drm_gem_object ref on error in tegra_fb_create + - mfd: syscon: Call of_node_put() only when of_parse_phandle() takes a ref + - mfd: altera-sysmgr: Call of_node_put() only when of_parse_phandle() takes a + ref + - crypto: arm/sha - fix function cast warnings + - drm/tidss: Fix initial plane zpos values + - mtd: maps: physmap-core: fix flash size larger than 32-bit + - mtd: rawnand: lpc32xx_mlc: fix irq handler prototype + - ASoC: meson: axg-tdm-interface: fix mclk setup without mclk-fs + - ASoC: meson: axg-tdm-interface: add frame rate constraint + - HID: amd_sfh: Update HPD sensor structure elements + - drm/amdgpu: Fix missing break in ATOM_ARG_IMM Case of atom_get_src_int() + - media: pvrusb2: fix uaf in pvr2_context_set_notify + - media: dvb-frontends: avoid stack overflow warnings with clang + - media: go7007: fix a memleak in go7007_load_encoder + - media: ttpci: fix two memleaks in budget_av_attach + - media: mediatek: vcodec: avoid -Wcast-function-type-strict warning + - drm/mediatek: Fix a null pointer crash in mtk_drm_crtc_finish_page_flip + - powerpc/hv-gpci: Fix the H_GET_PERF_COUNTER_INFO hcall return value checks + - drm/msm/dpu: add division of drm_display_mode's hskew parameter + - module: Add support for default value for module async_probe + - modules: wait do_free_init correctly + - powerpc/embedded6xx: Fix no previous prototype for avr_uart_send() etc. + - leds: aw2013: Unlock mutex before destroying it + - leds: sgm3140: Add missing timer cleanup and flash gpio control + - backlight: lm3630a: Initialize backlight_properties on init + - backlight: lm3630a: Don't set bl->props.brightness in get_brightness + - backlight: da9052: Fully initialize backlight_properties during probe + - backlight: lm3639: Fully initialize backlight_properties during probe + - backlight: lp8788: Fully initialize backlight_properties during probe + - sparc32: Fix section mismatch in leon_pci_grpci + - clk: Fix clk_core_get NULL dereference + - clk: zynq: Prevent null pointer dereference caused by kmalloc failure + - ALSA: hda/realtek: fix ALC285 issues on HP Envy x360 laptops + - ALSA: usb-audio: Stop parsing channels bits when all channels are found. 
+ - RDMA/srpt: Do not register event handler until srpt device is fully setup + - f2fs: multidevice: support direct IO + - f2fs: invalidate META_MAPPING before IPU/DIO write + - f2fs: replace congestion_wait() calls with io_schedule_timeout() + - f2fs: fix to invalidate META_MAPPING before DIO write + - f2fs: invalidate meta pages only for post_read required inode + - f2fs: reduce stack memory cost by using bitfield in struct f2fs_io_info + - f2fs: compress: fix to cover normal cluster write with cp_rwsem + - f2fs: compress: fix to check unreleased compressed cluster + - scsi: csiostor: Avoid function pointer casts + - RDMA/device: Fix a race between mad_client and cm_client init + - RDMA/rtrs-clt: Check strnlen return len in sysfs mpath_policy_store() + - scsi: bfa: Fix function pointer type mismatch for hcb_qe->cbfn + - net: sunrpc: Fix an off by one in rpc_sockaddr2uaddr() + - NFSv4.2: fix nfs4_listxattr kernel BUG at mm/usercopy.c:102 + - NFSv4.2: fix listxattr maximum XDR buffer size + - watchdog: stm32_iwdg: initialize default timeout + - NFS: Fix an off by one in root_nfs_cat() + - f2fs: compress: fix reserve_cblocks counting error when out of space + - afs: Revert "afs: Hide silly-rename files from userspace" + - comedi: comedi_test: Prevent timers rescheduling during deletion + - remoteproc: stm32: use correct format strings on 64-bit + - remoteproc: stm32: Fix incorrect type in assignment for va + - remoteproc: stm32: Fix incorrect type assignment returned by + stm32_rproc_get_loaded_rsc_tablef + - tty: vt: fix 20 vs 0x20 typo in EScsiignore + - serial: max310x: fix syntax error in IRQ error message + - tty: serial: samsung: fix tx_empty() to return TIOCSER_TEMT + - arm64: dts: broadcom: bcmbca: bcm4908: drop invalid switch cells + - kconfig: fix infinite loop when expanding a macro at the end of file + - rtc: mt6397: select IRQ_DOMAIN instead of depending on it + - serial: 8250_exar: Don't remove GPIO device on suspend + - staging: greybus: fix get_channel_from_mode() failure path + - usb: gadget: net2272: Use irqflags in the call to net2272_probe_fin + - io_uring: don't save/restore iowait state + - nouveau: reset the bo resource bus info after an eviction + - octeontx2-af: Use matching wake_up API variant in CGX command interface + - s390/vtime: fix average steal time calculation + - soc: fsl: dpio: fix kcalloc() argument order + - hsr: Fix uninit-value access in hsr_get_node() + - net: mtk_eth_soc: move MAC_MCR setting to mac_finish() + - net: mediatek: mtk_eth_soc: clear MAC_MCR_FORCE_LINK only when MAC is up + - net: ethernet: mtk_eth_soc: fix PPE hanging issue + - packet: annotate data-races around ignore_outgoing + - net: veth: do not manipulate GRO when using XDP + - net: dsa: mt7530: prevent possible incorrect XTAL frequency selection + - vdpa/mlx5: Allow CVQ size changes + - wireguard: receive: annotate data-race around receiving_counter.counter + - rds: introduce acquire/release ordering in acquire/release_in_xmit() + - hsr: Handle failures in module init + - net: phy: fix phy_read_poll_timeout argument type in genphy_loopback + - net/bnx2x: Prevent access to a freed page in page_pool + - octeontx2-af: Use separate handlers for interrupts + - netfilter: nf_tables: do not compare internal table flags on updates + - rcu: add a helper to report consolidated flavor QS + - net: report RCU QS on threaded NAPI repolling + - bpf: report RCU QS in cpumap kthread + - net: dsa: mt7530: fix handling of LLDP frames + - net: dsa: mt7530: fix handling of 802.1X PAE frames + - net: 
dsa: mt7530: fix link-local frames that ingress vlan filtering ports + - net: dsa: mt7530: fix handling of all link-local frames + - spi: spi-mt65xx: Fix NULL pointer access in interrupt handler + - regmap: Add missing map->bus check + - remoteproc: stm32: fix incorrect optional pointers + * Jammy update: v5.15.152 upstream stable release (LP: #2063276) + - mmc: mmci: stm32: use a buffer for unaligned DMA requests + - mmc: mmci: stm32: fix DMA API overlapping mappings warning + - net: lan78xx: fix runtime PM count underflow on link stop + - ixgbe: {dis, en}able irqs in ixgbe_txrx_ring_{dis, en}able + - i40e: disable NAPI right after disabling irqs when handling xsk_pool + - tracing/net_sched: Fix tracepoints that save qdisc_dev() as a string + - geneve: make sure to pull inner header in geneve_rx() + - net: sparx5: Fix use after free inside sparx5_del_mact_entry + - net: ice: Fix potential NULL pointer dereference in ice_bridge_setlink() + - net/ipv6: avoid possible UAF in ip6_route_mpath_notify() + - cpumap: Zero-initialise xdp_rxq_info struct before running XDP program + - net/rds: fix WARNING in rds_conn_connect_if_down + - netfilter: nft_ct: fix l3num expectations with inet pseudo family + - netfilter: nf_conntrack_h323: Add protection for bmp length out of range + - erofs: apply proper VMA alignment for memory mapped files on THP + - netrom: Fix a data-race around sysctl_netrom_default_path_quality + - netrom: Fix a data-race around sysctl_netrom_obsolescence_count_initialiser + - netrom: Fix data-races around sysctl_netrom_network_ttl_initialiser + - netrom: Fix a data-race around sysctl_netrom_transport_timeout + - netrom: Fix a data-race around sysctl_netrom_transport_maximum_tries + - netrom: Fix a data-race around sysctl_netrom_transport_acknowledge_delay + - netrom: Fix a data-race around sysctl_netrom_transport_busy_delay + - netrom: Fix a data-race around sysctl_netrom_transport_requested_window_size + - netrom: Fix a data-race around sysctl_netrom_transport_no_activity_timeout + - netrom: Fix a data-race around sysctl_netrom_routing_control + - netrom: Fix a data-race around sysctl_netrom_link_fails_count + - netrom: Fix data-races around sysctl_net_busy_read + - ALSA: usb-audio: Refcount multiple accesses on the single clock + - ALSA: usb-audio: Clear fixed clock rate at closing EP + - ALSA: usb-audio: Split endpoint setups for hw_params and prepare (take#2) + - ALSA: usb-audio: Properly refcounting clock rate + - ALSA: usb-audio: Apply mutex around snd_usb_endpoint_set_params() + - ALSA: usb-audio: Correct the return code from snd_usb_endpoint_set_params() + - ALSA: usb-audio: Avoid superfluous endpoint setup + - ALSA: usb-audio: Add quirk for Tascam Model 12 + - ALSA: usb-audio: Add new quirk FIXED_RATE for JBL Quantum810 Wireless + - ALSA: usb-audio: Fix microphone sound on Nexigo webcam. + - ALSA: usb-audio: add quirk for RODE NT-USB+ + - drm/amd/display: Fix uninitialized variable usage in core_link_ 'read_dpcd() + & write_dpcd()' functions + - nfp: flower: add goto_chain_index for ct entry + - nfp: flower: add hardware offload check for post ct entry + - selftests/mm: switch to bash from sh + - selftests: mm: fix map_hugetlb failure on 64K page size systems + - xhci: process isoc TD properly when there was a transaction error mid TD. 
+ - xhci: handle isoc Babble and Buffer Overrun events properly + - serial: max310x: use regmap methods for SPI batch operations + - serial: max310x: use a separate regmap for each port + - serial: max310x: prevent infinite while() loop in port startup + - drm/amd/pm: do not expose the API used internally only in kv_dpm.c + - drm/amdgpu: Reset IH OVERFLOW_CLEAR bit + - selftests: mptcp: decrease BW in simult flows + - hv_netvsc: use netif_is_bond_master() instead of open code + - hv_netvsc: Register VF in netvsc_probe if NET_DEVICE_REGISTER missed + - drm/amd/display: Re-arrange FPU code structure for dcn2x + - drm/amd/display: move calcs folder into DML + - drm/amd/display: remove DML Makefile duplicate lines + - drm/amd/display: Increase frame-larger-than for all display_mode_vba files + - getrusage: add the "signal_struct *sig" local variable + - getrusage: move thread_group_cputime_adjusted() outside of + lock_task_sighand() + - getrusage: use __for_each_thread() + - getrusage: use sig->stats_lock rather than lock_task_sighand() + - proc: Use task_is_running() for wchan in /proc/$pid/stat + - fs/proc: do_task_stat: move thread_group_cputime_adjusted() outside of + lock_task_sighand() + - ALSA: usb-audio: Fix wrong kfree issue in snd_usb_endpoint_free_all + - ALSA: usb-audio: Always initialize fixed_rate in + snd_usb_find_implicit_fb_sync_format() + - ALSA: usb-audio: Add FIXED_RATE quirk for JBL Quantum610 Wireless + - ALSA: usb-audio: Sort quirk table entries + - regmap: allow to define reg_update_bits for no bus configuration + - regmap: Add bulk read/write callbacks into regmap_config + - serial: max310x: make accessing revision id interface-agnostic + - serial: max310x: fix IO data corruption in batched operations + - Linux 5.15.152 + * CVE-2024-26809 + - netfilter: nft_set_pipapo: release elements in clone only from destroy path + * CVE-2024-26792 + - btrfs: fix double free of anonymous device after snapshot creation failure + * CVE-2023-52530 + - wifi: mac80211: fix potential key use-after-free + * CVE-2023-52447 + - bpf: Defer the free of inner map when necessary + - rcu-tasks: Provide rcu_trace_implies_rcu_gp() + * Avoid creating non-working backlight sysfs knob from ASUS board + (LP: #2060422) + - platform/x86: asus-wmi: Consider device is absent when the read is ~0 + * [Ubuntu 22.04.4/linux-image-6.5.0-26-generic] Kernel output "UBSAN: array- + index-out-of-bounds in /build/linux-hwe-6.5-34pCLi/linux- + hwe-6.5-6.5.0/drivers/net/hyperv/netvsc.c:1445:41" multiple times, + especially during boot. 
(LP: #2058477) + - hv: hyperv.h: Replace one-element array with flexible-array member + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) + - netfilter: nf_tables: disallow timeout for anonymous sets + - mtd: spinand: gigadevice: Fix the get ecc status issue + - netlink: Fix kernel-infoleak-after-free in __skb_datagram_iter + - net: ip_tunnel: prevent perpetual headroom growth + - tun: Fix xdp_rxq_info's queue_index when detaching + - cpufreq: intel_pstate: fix pstate limits enforcement for adjust_perf call + back + - net: veth: clear GRO when clearing XDP even when down + - ipv6: fix potential "struct net" leak in inet6_rtm_getaddr() + - lan78xx: enable auto speed configuration for LAN7850 if no EEPROM is + detected + - net: enable memcg accounting for veth queues + - veth: try harder when allocating queue memory + - net: usb: dm9601: fix wrong return value in dm9601_mdio_read + - uapi: in6: replace temporary label with rfc9486 + - stmmac: Clear variable when destroying workqueue + - Bluetooth: Avoid potential use-after-free in hci_error_reset + - Bluetooth: hci_event: Fix wrongly recorded wakeup BD_ADDR + - netfilter: nf_tables: allow NFPROTO_INET in nft_(match/target)_validate() + - netfilter: nfnetlink_queue: silence bogus compiler warning + - netfilter: core: move ip_ct_attach indirection to struct nf_ct_hook + - netfilter: make function op structures const + - netfilter: let reset rules clean out conntrack entries + - netfilter: bridge: confirm multicast packets before passing them up the + stack + - rtnetlink: fix error logic of IFLA_BRIDGE_FLAGS writing back + - igb: extend PTP timestamp adjustments to i211 + - efi/capsule-loader: fix incorrect allocation size + - power: supply: bq27xxx-i2c: Do not free non existing IRQ + - ALSA: Drop leftover snd-rtctimer stuff from Makefile + - fbcon: always restore the old font data in fbcon_do_set_font() + - afs: Fix endless loop in directory parsing + - riscv: Sparse-Memory/vmemmap out-of-bounds fix + - ALSA: firewire-lib: fix to check cycle continuity + - gtp: fix use-after-free and null-ptr-deref in gtp_newlink() + - wifi: nl80211: reject iftype change with mesh ID change + - btrfs: dev-replace: properly validate device names + - dmaengine: fsl-qdma: fix SoC may hang on 16 byte unaligned read + - dmaengine: ptdma: use consistent DMA masks + - dmaengine: fsl-qdma: init irq after reg initialization + - mmc: core: Fix eMMC initialization with 1-bit bus connection + - mmc: sdhci-xenon: add timeout for PHY init complete + - mmc: sdhci-xenon: fix PHY init clock stability + - pmdomain: qcom: rpmhpd: Fix enabled_corner aggregation + - x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers + - mptcp: move __mptcp_error_report in protocol.c + - mptcp: process pending subflow error on close + - mptcp: rename timer related helper to less confusing names + - selftests: mptcp: add missing kconfig for NF Filter + - selftests: mptcp: add missing kconfig for NF Filter in v6 + - mptcp: clean up harmless false expressions + - mptcp: add needs_id for netlink appending addr + - mptcp: push at DSS boundaries + - mptcp: fix possible deadlock in subflow diag + - cachefiles: fix memory leak in cachefiles_add_cache() + - fs,hugetlb: fix NULL pointer dereference in hugetlbs_fill_super + - Revert "drm/bridge: lt8912b: Register and attach our DSI device at probe" + - af_unix: Drop oob_skb ref before purging queue in GC. 
+ - gpio: 74x164: Enable output pins after registers are reset + - gpiolib: Fix the error path order in gpiochip_add_data_with_key() + - gpio: fix resource unwinding order in error path + - Revert "interconnect: Fix locking for runpm vs reclaim" + - Revert "interconnect: Teach lockdep about icc_bw_lock order" + - bpf: Add BPF_FIB_LOOKUP_SKIP_NEIGH for bpf_fib_lookup + - bpf: Add table ID to bpf_fib_lookup BPF helper + - bpf: Derive source IP addr via bpf_*_fib_lookup() + - Linux 5.15.151 + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) // + CVE-2024-26782 + - mptcp: fix double-free on socket dismantle + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) // Fix + bluetooth connections with 3.0 device (LP: #2063067) + - Bluetooth: hci_event: Fix handling of HCI_EV_IO_CAPA_REQUEST + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) + - net/sched: Retire CBQ qdisc + - [Config] updateconfigs for NET_SCH_CBQ + - net/sched: Retire ATM qdisc + - [Config] updateconfigs for NET_SCH_ATM + - net/sched: Retire dsmark qdisc + - [Config] updateconfigs for NET_SCH_DSMARK + - smb: client: fix potential OOBs in smb2_parse_contexts() + - smb: client: fix parsing of SMB3.1.1 POSIX create context + - sched/rt: sysctl_sched_rr_timeslice show default timeslice after reset + - PCI: dwc: Fix a 64bit bug in dw_pcie_ep_raise_msix_irq() + - bpf: Merge printk and seq_printf VARARG max macros + - bpf: Add struct for bin_args arg in bpf_bprintf_prepare + - bpf: Do cleanup in bpf_bprintf_cleanup only when needed + - bpf: Remove trace_printk_lock + - userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb + - zonefs: Improve error handling + - x86/fpu: Stop relying on userspace for info to fault in xsave buffer + - sched/rt: Fix sysctl_sched_rr_timeslice intial value + - sched/rt: Disallow writing invalid values to sched_rt_period_us + - scsi: target: core: Add TMF to tmr_list handling + - dmaengine: shdma: increase size of 'dev_id' + - dmaengine: fsl-qdma: increase size of 'irq_name' + - wifi: cfg80211: fix missing interfaces when dumping + - wifi: mac80211: fix race condition on enabling fast-xmit + - fbdev: savage: Error out if pixclock equals zero + - fbdev: sis: Error out if pixclock equals zero + - spi: hisi-sfc-v3xx: Return IRQ_NONE if no interrupts were detected + - ahci: asm1166: correct count of reported ports + - ahci: add 43-bit DMA address quirk for ASMedia ASM1061 controllers + - MIPS: reserve exception vector space ONLY ONCE + - platform/x86: touchscreen_dmi: Add info for the TECLAST X16 Plus tablet + - ext4: avoid dividing by 0 in mb_update_avg_fragment_size() when block bitmap + corrupt + - ext4: avoid allocating blocks from corrupted group in + ext4_mb_try_best_found() + - ext4: avoid allocating blocks from corrupted group in ext4_mb_find_by_goal() + - dmaengine: ti: edma: Add some null pointer checks to the edma_probe + - regulator: pwm-regulator: Add validity checks in continuous .get_voltage + - nvmet-tcp: fix nvme tcp ida memory leak + - ALSA: usb-audio: Check presence of valid altsetting control + - ASoC: sunxi: sun4i-spdif: Add support for Allwinner H616 + - spi: sh-msiof: avoid integer overflow in constants + - Input: xpad - add Lenovo Legion Go controllers + - netfilter: conntrack: check SCTP_CID_SHUTDOWN_ACK for vtag setting in + sctp_new + - ALSA: usb-audio: Ignore clock selector errors for single connection + - nvme-fc: do not wait in vain when unloading module + - nvmet-fcloop: swap the list_add_tail arguments + - nvmet-fc: release 
reference on target port + - nvmet-fc: defer cleanup using RCU properly + - nvmet-fc: hold reference on hostport match + - nvmet-fc: abort command when there is no binding + - nvmet-fc: avoid deadlock on delete association path + - nvmet-fc: take ref count on tgtport before delete assoc + - ext4: correct the hole length returned by ext4_map_blocks() + - Input: i8042 - add Fujitsu Lifebook U728 to i8042 quirk table + - fs/ntfs3: Modified fix directory element type detection + - fs/ntfs3: Improve ntfs_dir_count + - fs/ntfs3: Correct hard links updating when dealing with DOS names + - fs/ntfs3: Print warning while fixing hard links count + - fs/ntfs3: Fix detected field-spanning write (size 8) of single field + "le->name" + - fs/ntfs3: Add NULL ptr dereference checking at the end of + attr_allocate_frame() + - fs/ntfs3: Disable ATTR_LIST_ENTRY size check + - fs/ntfs3: use non-movable memory for ntfs3 MFT buffer cache + - fs/ntfs3: Prevent generic message "attempt to access beyond end of device" + - fs/ntfs3: Correct function is_rst_area_valid + - fs/ntfs3: Update inode->i_size after success write into compressed file + - fs/ntfs3: Fix oob in ntfs_listxattr + - wifi: mac80211: adding missing drv_mgd_complete_tx() call + - efi: runtime: Fix potential overflow of soft-reserved region size + - efi: Don't add memblocks for soft-reserved memory + - hwmon: (coretemp) Enlarge per package core count limit + - scsi: lpfc: Use unsigned type for num_sge + - firewire: core: send bus reset promptly on gap count error + - drm/amdgpu: skip to program GFXDEC registers for suspend abort + - drm/amdgpu: reset gpu for s3 suspend abort case + - virtio-blk: Ensure no requests in virtqueues before deleting vqs. + - pmdomain: mediatek: fix race conditions with genpd + - ksmbd: free aux buffer if ksmbd_iov_pin_rsp_read fails + - pmdomain: renesas: r8a77980-sysc: CR7 must be always on + - erofs: fix lz4 inplace decompression + - IB/hfi1: Fix sdma.h tx->num_descs off-by-one error + - drm/ttm: Fix an invalid freeing on already freed page in error path + - dm-crypt: don't modify the data when using authenticated encryption + - platform/x86: intel-vbtn: Stop calling "VBDL" from notify_handler + - platform/x86: touchscreen_dmi: Allow partial (prefix) matches for ACPI names + - KVM: arm64: vgic-its: Test for valid IRQ in MOVALL handler + - KVM: arm64: vgic-its: Test for valid IRQ in its_sync_lpi_pending_table() + - gtp: fix use-after-free and null-ptr-deref in gtp_genl_dump_pdp() + - PCI/MSI: Prevent MSI hardware interrupt number truncation + - l2tp: pass correct message length to ip6_append_data + - ARM: ep93xx: Add terminator to gpiod_lookup_table + - Revert "x86/ftrace: Use alternative RET encoding" + - x86/text-patching: Make text_gen_insn() play nice with ANNOTATE_NOENDBR + - x86/ibt,paravirt: Use text_gen_insn() for paravirt_patch() + - x86/ftrace: Use alternative RET encoding + - x86/returnthunk: Allow different return thunks + - Revert "x86/alternative: Make custom return thunk unconditional" + - x86/alternative: Make custom return thunk unconditional + - serial: amba-pl011: Fix DMA transmission in RS485 mode + - usb: dwc3: gadget: Don't disconnect if not started + - usb: cdnsp: blocked some cdns3 specific code + - usb: cdnsp: fixed issue with incorrect detecting CDNSP family controllers + - usb: cdns3: fixed memory use after free at cdns3_gadget_ep_disable() + - usb: gadget: ncm: Avoid dropping datagrams of properly parsed NTBs + - usb: roles: fix NULL pointer issue when put module's reference + - usb: roles: don't 
get/set_role() when usb_role_switch is unregistered + - mptcp: fix lockless access in subflow ULP diag + - clk: imx: imx8mp: add shared clk gate for usb suspend clk + - clk: qcom: gcc-qcs404: disable gpll[04]_out_aux parents + - clk: qcom: gcc-qcs404: fix names of the DSI clocks used as parents + - mtd: rawnand: sunxi: Fix the size of the last OOB region + - RISC-V: fix funct4 definition for c.jalr in parse_asm.h + - Input: iqs269a - drop unused device node references + - Input: iqs269a - configure device with a single block write + - Input: iqs269a - increase interrupt handler return delay + - clk: renesas: cpg-mssr: Fix use after free if cpg_mssr_common_init() failed + - Input: ads7846 - don't report pressure for ads7845 + - clk: renesas: cpg-mssr: Remove superfluous check in resume code + - clk: imx: avoid memory leak + - Input: ads7846 - always set last command to PWRDOWN + - Input: ads7846 - don't check penirq immediately for 7845 + - powerpc/powernv/ioda: Skip unallocated resources when mapping to PE + - clk: qcom: gpucc-sc7180: fix clk_dis_wait being programmed for CX GDSC + - clk: qcom: gpucc-sdm845: fix clk_dis_wait being programmed for CX GDSC + - clk: Honor CLK_OPS_PARENT_ENABLE in clk_core_is_enabled() + - powerpc/pseries/lparcfg: add missing RTAS retry status handling + - powerpc/perf/hv-24x7: add missing RTAS retry status handling + - powerpc/pseries/lpar: add missing RTAS retry status handling + - MIPS: SMP-CPS: fix build error when HOTPLUG_CPU not set + - MIPS: vpe-mt: drop physical_memsize + - vdpa/mlx5: Don't clear mr struct on destroy MR + - ARM: dts: BCM53573: Drop nonexistent #usb-cells + - RDMA/siw: Balance the reference of cep->kref in the error path + - RDMA/siw: Correct wrong debug message + - clk: linux/clk-provider.h: fix kernel-doc warnings and typos + - platform/x86: asus-wmi: Document the dgpu_disable sysfs attribute + - acpi: property: Let args be NULL in __acpi_node_get_property_reference + - ARM: dts: BCM53573: Drop nonexistent "default-off" LED trigger + - tools headers UAPI: Sync linux/fscrypt.h with the kernel sources + - perf beauty: Update copy of linux/socket.h with the kernel sources + - tools/virtio: fix build + - drm/amdgpu: init iommu after amdkfd device init + - f2fs: don't set GC_FAILURE_PIN for background GC + - f2fs: write checkpoint during FG_GC + - drm/i915/dg1: Update DMC_DEBUG3 register + - kernel/sched: Remove dl_boosted flag comment + - cifs: remove useless parameter 'is_fsctl' from SMB2_ioctl() + - serial: 8250: Remove serial_rs485 sanitization from em485 + - clk: imx8mp: Add DISP2 pixel clock + - clk: imx8mp: add clkout1/2 support + - dt-bindings: clocks: imx8mp: Add ID for usb suspend clock + - net: ethernet: ti: add missing of_node_put before return + - powerpc/rtas: make all exports GPL + - powerpc/rtas: ensure 4KB alignment for rtas_data_buf + - powerpc/eeh: Small refactor of eeh_handle_normal_event() + - powerpc/eeh: Set channel state after notifying the drivers + - PM: core: Redefine pm_ptr() macro + - PM: core: Add new *_PM_OPS macros, deprecate old ones + - mmc: jz4740: Use the new PM macros + - mmc: mxc: Use the new PM macros + - PM: core: Remove static qualifier in DEFINE_SIMPLE_DEV_PM_OPS macro + - Input: iqs269a - switch to DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr() + - Input: iqs269a - do not poll during suspend or resume + - Input: iqs269a - do not poll during ATI + - net/sched: Refactor qdisc_graft() for ingress and clsact Qdiscs + - netfilter: nf_tables: add rescheduling points during loop detection walks + - 
debugobjects: Recheck debug_objects_enabled before reporting + - nbd: Add the maximum limit of allocated index in nbd_dev_add + - md: fix data corruption for raid456 when reshape restart while grow up + - md/raid10: prevent soft lockup while flush writes + - posix-timers: Ensure timer ID search-loop limit is valid + - btrfs: add xxhash to fast checksum implementations + - ACPI: button: Add lid disable DMI quirk for Nextbook Ares 8A + - ACPI: video: Add backlight=native DMI quirk for Apple iMac11,3 + - ACPI: video: Add backlight=native DMI quirk for Lenovo ThinkPad X131e (3371 + AMD version) + - arm64: set __exception_irq_entry with __irq_entry as a default + - arm64: mm: fix VA-range sanity check + - sched/fair: Don't balance task to its current running CPU + - wifi: ath11k: fix registration of 6Ghz-only phy without the full channel + range + - bpf: Address KCSAN report on bpf_lru_list + - devlink: report devlink_port_type_warn source device + - wifi: wext-core: Fix -Wstringop-overflow warning in + ioctl_standard_iw_point() + - wifi: iwlwifi: mvm: avoid baid size integer overflow + - exfat: support dynamic allocate bh for exfat_entry_set_cache + - arm64: dts: rockchip: fix regulator name on rk3399-rock-4 + - arm64: dts: rockchip: add ES8316 codec for ROCK Pi 4 + - arm64: dts: rockchip: add SPDIF node for ROCK Pi 4 + - ARM: dts: BCM53573: Describe on-SoC BCM53125 rev 4 switch + - ACPI: video: Add backlight=native DMI quirk for Apple iMac12,1 and iMac12,2 + - ACPI: resource: Skip IRQ override on Asus Vivobook S5602ZA + - ACPI: resource: Add Asus ExpertBook B2502 to Asus quirks + - ACPI: resource: Skip IRQ override on Asus Expertbook B2402CBA + - ACPI: resource: Skip IRQ override on ASUS ExpertBook B1502CBA + - xhci: cleanup xhci_hub_control port references + - xhci: move port specific items such as state completions to port structure + - xhci: rename resume_done to resume_timestamp + - xhci: clear usb2 resume related variables in one place. 
+ - xhci: decouple usb2 port resume and get_port_status request handling + - xhci: track port suspend state correctly in unsuccessful resume cases + - cifs: add a warning when the in-flight count goes negative + - IB/hfi1: Fix a memleak in init_credit_return + - RDMA/bnxt_re: Return error for SRQ resize + - RDMA/irdma: Fix KASAN issue with tasklet + - RDMA/irdma: Validate max_send_wr and max_recv_wr + - RDMA/irdma: Set the CQ read threshold for GEN 1 + - RDMA/irdma: Add AE for too many RNRS + - RDMA/srpt: Support specifying the srpt_service_guid parameter + - RDMA/qedr: Fix qedr_create_user_qp error flow + - arm64: dts: rockchip: set num-cs property for spi on px30 + - RDMA/srpt: fix function pointer cast warnings + - bpf, scripts: Correct GPL license name + - scsi: jazz_esp: Only build if SCSI core is builtin + - nouveau: fix function cast warnings + - net: stmmac: Fix incorrect dereference in interrupt handlers + - ipv4: properly combine dev_base_seq and ipv4.dev_addr_genid + - ipv6: properly combine dev_base_seq and ipv6.dev_addr_genid + - ata: libahci_platform: Convert to using devm bulk clocks API + - ata: libahci_platform: Introduce reset assertion/deassertion methods + - ata: ahci_ceva: fix error handling for Xilinx GT PHY support + - bpf: Fix racing between bpf_timer_cancel_and_free and bpf_timer_cancel + - drm/nouveau/instmem: fix uninitialized_var.cocci warning + - octeontx2-af: Consider the action set by PF + - s390: use the correct count for __iowrite64_copy() + - netfilter: nf_tables: set dormant flag on hook register failure + - netfilter: flowtable: simplify route logic + - netfilter: nft_flow_offload: reset dst in route object after setting up flow + - netfilter: nft_flow_offload: release dst in case direct xmit path is used + - drm/syncobj: call drm_syncobj_fence_add_wait when WAIT_AVAILABLE flag is set + - drm/amd/display: Fix memory leak in dm_sw_fini() + - i2c: imx: Add timer for handling the stop condition + - i2c: imx: when being a target, mark the last read as processed + - fs/aio: Restrict kiocb_set_cancel_fn() to I/O submitted via libaio + - netfilter: nf_tables: fix scheduling-while-atomic splat + - ext4: regenerate buddy after block freeing failed if under fc replay + - ext4: avoid bb_free and bb_fragments inconsistency in mb_free_blocks() + - netfilter: nf_tables: can't schedule in nft_chain_validate + - r8169: use new PM macros + - Linux 5.15.150 + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26733 + - packet: move from strlcpy with unused retval to strscpy + - net: dev: Convert sa_data to flexible array in struct sockaddr + - arp: Prevent overflow in arp_req_get(). 
+  * Jammy update: v5.15.150 upstream stable release (LP: #2060142) //
+    CVE-2024-26735
+    - ipv6: sr: fix possible use-after-free and null-ptr-deref
+  * Jammy update: v5.15.150 upstream stable release (LP: #2060142) //
+    CVE-2024-26736
+    - afs: Increase buffer size in afs_update_volume_status()
+  * Jammy update: v5.15.150 upstream stable release (LP: #2060142) //
+    CVE-2024-26748
+    - usb: cdns3: fix memory double free when handle zero packet
+  * CVE-2023-47233
+    - wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach
+  * CVE-2024-26584
+    - net: tls: handle backlogging of crypto requests
+  * CVE-2024-26585
+    - tls: fix race between tx work scheduling and socket close
+  * CVE-2024-26583
+    - tls: rx: jump to a more appropriate label
+    - tls: rx: drop pointless else after goto
+    - tls: stop recv() if initial process_rx_list gave us non-DATA
+    - tls: rx: don't store the record type in socket context
+    - tls: rx: don't store the decryption status in socket context
+    - tls: rx: don't issue wake ups when data is decrypted
+    - tls: rx: refactor decrypt_skb_update()
+    - tls: hw: rx: use return value of tls_device_decrypted() to carry status
+    - tls: rx: drop unnecessary arguments from tls_setup_from_iter()
+    - tls: rx: don't report text length from the bowels of decrypt
+    - tls: rx: wrap decryption arguments in a structure
+    - tls: rx: factor out writing ContentType to cmsg
+    - tls: rx: don't track the async count
+    - tls: rx: move counting TlsDecryptErrors for sync
+    - tls: rx: assume crypto always calls our callback
+    - tls: rx: use async as an in-out argument
+    - tls: decrement decrypt_pending if no async completion will be called
+    - net: tls: fix async vs NIC crypto offload
+    - Revert "tls: rx: move counting TlsDecryptErrors for sync"
+    - tls: rx: simplify async wait
+    - tls: rx: return the already-copied data on crypto error
+    - tls: rx: allow only one reader at a time
+    - tls: rx: release the sock lock on locking timeout
+    - tls: extract context alloc/initialization out of tls_set_sw_offload
+    - net: tls: factor out tls_*crypt_async_wait()
+    - tls: fix race between async notify and socket close
+  * CVE-2024-26622
+    - tomoyo: fix UAF write bug in tomoyo_write_control()
+
+ -- Philip Cox Fri, 10 May 2024 12:43:52 -0400
+
+linux-aws-5.15 (5.15.0-1062.68~20.04.1) focal; urgency=medium
+
+  * focal/linux-aws-5.15: 5.15.0-1062.68~20.04.1 -proposed tracker
+    (LP: #2063583)
+
+  [ Ubuntu: 5.15.0-1062.68 ]
+
+  * jammy/linux-aws: 5.15.0-1062.68 -proposed tracker (LP: #2063584)
+  * jammy/linux: 5.15.0-107.117 -proposed tracker (LP: #2063635)
+  * CVE-2023-52530
+    - wifi: mac80211: fix potential key use-after-free
+  * CVE-2024-26622
+    - tomoyo: fix UAF write bug in tomoyo_write_control()
+  * CVE-2023-47233
+    - wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach
+
+ -- Philip Cox Wed, 01 May 2024 10:57:03 -0400
+
 linux-aws-5.15 (5.15.0-1061.67~20.04.1) focal; urgency=medium

   * focal/linux-aws-5.15: 5.15.0-1061.67~20.04.1 -proposed tracker
diff -u linux-aws-5.15-5.15.0/debian.aws-5.15/reconstruct linux-aws-5.15-5.15.0/debian.aws-5.15/reconstruct
--- linux-aws-5.15-5.15.0/debian.aws-5.15/reconstruct
+++ linux-aws-5.15-5.15.0/debian.aws-5.15/reconstruct
@@ -88,6 +88,17 @@
 rm -f 'drivers/bus/mhi/core/pm.c'
 rm -f 'drivers/bus/mhi/pci_generic.c'
 rm -f 'drivers/counter/counter.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/Makefile'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/custom_float.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h'
 rm -f 'drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h'
 rm -f 'drivers/gpu/drm/msm/hdmi/hdmi_connector.c'
 rm -f 'drivers/gpu/drm/vmwgfx/vmwgfx_thp.c'
@@ -147,6 +158,9 @@
 rm -f 'net/sched/cls_rsvp.h'
 rm -f 'net/sched/cls_rsvp6.c'
 rm -f 'net/sched/cls_tcindex.c'
+rm -f 'net/sched/sch_atm.c'
+rm -f 'net/sched/sch_cbq.c'
+rm -f 'net/sched/sch_dsmark.c'
 rm -f 'net/xfrm/xfrm_interface.c'
 rm -f 'tools/build/feature/test-libpython-version.c'
 rm -f 'tools/perf/arch/arm64/util/machine.c'
diff -u linux-aws-5.15-5.15.0/debian.aws-5.15/tracking-bug linux-aws-5.15-5.15.0/debian.aws-5.15/tracking-bug
--- linux-aws-5.15-5.15.0/debian.aws-5.15/tracking-bug
+++ linux-aws-5.15-5.15.0/debian.aws-5.15/tracking-bug
@@ -1 +1 @@
-2061761 2024.04.01-3
+2063711 2024.04.29-1
diff -u linux-aws-5.15-5.15.0/debian.aws/changelog linux-aws-5.15-5.15.0/debian.aws/changelog
--- linux-aws-5.15-5.15.0/debian.aws/changelog
+++ linux-aws-5.15-5.15.0/debian.aws/changelog
@@ -1,3 +1,831 @@
+linux-aws (5.15.0-1063.69) jammy; urgency=medium
+
+  * jammy/linux-aws: 5.15.0-1063.69 -proposed tracker (LP: #2063712)
+
+  * aws: Support hibernation on Graviton (LP: #2060992)
+    - SAUCE: PM: hibernate: Allow ACPI hardware signature to be honoured
+    - SAUCE: PM: hibernate: Honour ACPI hardware signature by default for virtual
+      guests
+    - SAUCE: ACPICA: Detect FACS even for hardware reduced platforms
+    - SAUCE: arm64: acpi: Honour firmware_signature field of FACS, if it exists
+    - SAUCE: firmware/psci: Add definitions for PSCI v1.3 specification (ALPHA)
+    - SAUCE: arm64: Use SYSTEM_OFF2 PSCI call to power off for hibernate
+    - [Config]: Enable hibernate on arm64
+    - [Config]: Enable hibernate on arm64
+
+  [ Ubuntu: 5.15.0-111.121 ]
+
+  * jammy/linux: 5.15.0-111.121 -proposed tracker (LP: #2063763)
+  * RTL8852BE fw security fail then lost WIFI function during suspend/resume
+    cycle (LP: #2063096)
+    - wifi: rtw89: download firmware with five times retry
+  * Mount CIFS fails with Permission denied (LP: #2061986)
+    - cifs: fix ntlmssp auth when there is no key exchange
+  * USB stick can't be detected (LP: #2040948)
+    - usb: Disable USB3 LPM at shutdown
+  * Jammy update: v5.15.153 upstream stable release (LP: #2063290)
+    - io_uring/unix: drop usage of io_uring socket
+    - io_uring: drop any code related to SCM_RIGHTS
+    - selftests: tls: use exact comparison in recv_partial
+    - ASoC: rt5645: Make LattePanda board DMI match more precise
+    - x86/xen: Add some null pointer checking to smp.c
+    - MIPS: Clear Cause.BD in instruction_pointer_set
+    - HID: multitouch: Add required quirk for Synaptics 0xcddc device
+    - gen_compile_commands: fix invalid escape sequence warning
+    - RDMA/mlx5: Fix fortify source warning while accessing Eth segment
+    - RDMA/mlx5: Relax DEVX access upon modify commands
+    - riscv: dts: sifive: add missing #interrupt-cells to pmic
+    - x86/mm: Move is_vsyscall_vaddr() into asm/vsyscall.h
+    - x86/mm: Disallow vsyscall page read for copy_from_kernel_nofault()
+    - net/iucv: fix the allocation size of iucv_path_table array
+    - parisc/ftrace: add missing CONFIG_DYNAMIC_FTRACE
check + - block: sed-opal: handle empty atoms when parsing response + - dm-verity, dm-crypt: align "struct bvec_iter" correctly + - scsi: mpt3sas: Prevent sending diag_reset when the controller is ready + - ALSA: hda/realtek - ALC285 reduce pop noise from Headphone port + - drm/amdgpu: Enable gpu reset for S3 abort cases on Raven series + - Bluetooth: rfcomm: Fix null-ptr-deref in rfcomm_check_security + - firewire: core: use long bus reset on gap count error + - ASoC: Intel: bytcr_rt5640: Add an extra entry for the Chuwi Vi8 tablet + - Input: gpio_keys_polled - suppress deferred probe error for gpio + - ASoC: wm8962: Enable oscillator if selecting WM8962_FLL_OSC + - ASoC: wm8962: Enable both SPKOUTR_ENA and SPKOUTL_ENA in mono mode + - ASoC: wm8962: Fix up incorrect error message in wm8962_set_fll + - do_sys_name_to_handle(): use kzalloc() to fix kernel-infoleak + - s390/dasd: put block allocation in separate function + - s390/dasd: add query PPRC function + - s390/dasd: add copy pair setup + - s390/dasd: add autoquiesce feature + - s390/dasd: Use dev_*() for device log messages + - s390/dasd: fix double module refcount decrement + - fs/select: rework stack allocation hack for clang + - md: Don't clear MD_CLOSING when the raid is about to stop + - lib/cmdline: Fix an invalid format specifier in an assertion msg + - time: test: Fix incorrect format specifier + - rtc: test: Fix invalid format specifier. + - aoe: fix the potential use-after-free problem in aoecmd_cfg_pkts + - timekeeping: Fix cross-timestamp interpolation on counter wrap + - timekeeping: Fix cross-timestamp interpolation corner case decision + - timekeeping: Fix cross-timestamp interpolation for non-x86 + - sched/fair: Take the scheduling domain into account in select_idle_core() + - wifi: ath10k: fix NULL pointer dereference in + ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev() + - wifi: b43: Stop/wake correct queue in DMA Tx path when QoS is disabled + - wifi: b43: Stop/wake correct queue in PIO Tx path when QoS is disabled + - wifi: b43: Stop correct queue in DMA worker when QoS is disabled + - wifi: b43: Disable QoS for bcm4331 + - wifi: wilc1000: fix declarations ordering + - wifi: wilc1000: fix RCU usage in connect path + - wifi: rtl8xxxu: add cancel_work_sync() for c2hcmd_work + - wifi: wilc1000: fix multi-vif management when deleting a vif + - wifi: mwifiex: debugfs: Drop unnecessary error check for + debugfs_create_dir() + - cpufreq: brcmstb-avs-cpufreq: add check for cpufreq_cpu_get's return value + - cpufreq: Explicitly include correct DT includes + - cpufreq: mediatek-hw: Wait for CPU supplies before probing + - sock_diag: annotate data-races around sock_diag_handlers[family] + - inet_diag: annotate data-races around inet_diag_table[] + - bpftool: Silence build warning about calloc() + - af_unix: Annotate data-race of gc_in_progress in wait_for_unix_gc(). 
+ - cpufreq: mediatek-hw: Don't error out if supply is not found + - arm64: dts: imx8mm-kontron: Disable pullups for I2C signals on SL/BL i.MX8MM + - arm64: dts: imx8mm-kontron: Disable pullups for onboard UART signals on BL + board + - arm64: dts: imx8mm-kontron: Add support for ultra high speed modes on SD + card + - arm64: dts: imx8mm-kontron: Use the VSELECT signal to switch SD card IO + voltage + - arm64: dts: imx8mm-kontron: Disable pull resistors for SD card signals on BL + board + - wifi: ath9k: delay all of ath9k_wmi_event_tasklet() until init is complete + - wifi: iwlwifi: mvm: report beacon protection failures + - wifi: iwlwifi: dbg-tlv: ensure NUL termination + - wifi: iwlwifi: fix EWRD table validity check + - arm64: dts: imx8mm-venice-gw71xx: fix USB OTG VBUS + - pwm: atmel-hlcdc: Convert to platform remove callback returning void + - pwm: atmel-hlcdc: Use consistent variable naming + - pwm: atmel-hlcdc: Fix clock imbalance related to suspend support + - net: blackhole_dev: fix build warning for ethh set but not used + - wifi: libertas: fix some memleaks in lbs_allocate_cmd_buffer() + - pwm: sti: Implement .apply() callback + - pwm: sti: Fix capture for st,pwm-num-chan < st,capture-num-chan + - wifi: iwlwifi: mvm: don't set replay counters to 0xff + - s390/vdso: drop '-fPIC' from LDFLAGS + - ipv6: mcast: remove one synchronize_net() barrier in ipv6_mc_down() + - arm64: dts: mt8183: kukui: Add Type C node + - arm64: dts: mt8183: kukui: Split out keyboard node and describe detachables + - arm64: dts: mt8183: Move CrosEC base detection node to kukui-based DTs + - arm64: dts: mediatek: mt7622: add missing "device_type" to memory nodes + - bpf: Mark bpf_spin_{lock,unlock}() helpers with notrace correctly + - wireless: Remove redundant 'flush_workqueue()' calls + - wifi: wilc1000: prevent use-after-free on vif when cleaning up all + interfaces + - ACPI: processor_idle: Fix memory leak in acpi_processor_power_exit() + - bus: tegra-aconnect: Update dependency to ARCH_TEGRA + - [Config]: update CONFIG_TEGRA_ACONNECT + - iommu/amd: Mark interrupt as managed + - wifi: brcmsmac: avoid function pointer casts + - net: ena: Remove ena_select_queue + - ARM: dts: arm: realview: Fix development chip ROM compatible value + - arm64: dts: renesas: r8a779a0: Update to R-Car Gen4 compatible values + - arm64: dts: renesas: r8a779a0: Correct avb[01] reg sizes + - ARM: dts: imx6dl-yapp4: Move phy reset into switch node + - ARM: dts: imx6dl-yapp4: Fix typo in the QCA switch register address + - ARM: dts: imx6dl-yapp4: Move the internal switch PHYs under the switch node + - arm64: dts: marvell: reorder crypto interrupts on Armada SoCs + - ACPI: resource: Add Infinity laptops to irq1_edge_low_force_override + - ACPI: resource: Do IRQ override on Lunnen Ground laptops + - ACPI: resource: Add MAIBENBEN X577 to irq1_edge_low_force_override + - ACPI: scan: Fix device check notification handling + - x86, relocs: Ignore relocations in .notes section + - SUNRPC: fix some memleaks in gssx_dec_option_array + - mmc: wmt-sdmmc: remove an incorrect release_mem_region() call in the .remove + function + - wifi: rtw88: 8821c: Fix false alarm count + - PCI: Make pci_dev_is_disconnected() helper public for other drivers + - iommu/vt-d: Don't issue ATS Invalidation request when device is disconnected + - igb: move PEROUT and EXTTS isr logic to separate functions + - igb: Fix missing time sync events + - Bluetooth: Remove superfluous call to hci_conn_check_pending() + - Bluetooth: hci_qca: Add support for QTI Bluetooth 
chip wcn6855 + - Bluetooth: hci_qca: don't use IS_ERR_OR_NULL() with gpiod_get_optional() + - Bluetooth: hci_core: Fix possible buffer overflow + - sr9800: Add check for usbnet_get_endpoints + - bpf: Fix DEVMAP_HASH overflow check on 32-bit arches + - bpf: Fix hashtab overflow check on 32-bit arches + - bpf: Fix stackmap overflow check on 32-bit arches + - ipv6: fib6_rules: flush route cache when rule is changed + - net: ip_tunnel: make sure to pull inner header in ip_tunnel_rcv() + - net: phy: fix phy_get_internal_delay accessing an empty array + - net: hns3: fix kernel crash when 1588 is received on HIP08 devices + - net: hns3: fix port duplex configure error in IMP reset + - net: phy: DP83822: enable rgmii mode if phy_interface_is_rgmii + - net: phy: dp83822: Fix RGMII TX delay configuration + - OPP: debugfs: Fix warning around icc_get_name() + - tcp: fix incorrect parameter validation in the do_tcp_getsockopt() function + - net: Change sock_getsockopt() to take the sk ptr instead of the sock ptr + - bpf: net: Change sk_getsockopt() to take the sockptr_t argument + - bpf: net: Change do_ip_getsockopt() to take the sockptr_t argument + - ipmr: fix incorrect parameter validation in the ip_mroute_getsockopt() + function + - l2tp: fix incorrect parameter validation in the pppol2tp_getsockopt() + function + - udp: fix incorrect parameter validation in the udp_lib_getsockopt() function + - net: kcm: fix incorrect parameter validation in the kcm_getsockopt) function + - net/x25: fix incorrect parameter validation in the x25_getsockopt() function + - nfp: flower: handle acti_netdevs allocation failure + - dm raid: fix false positive for requeue needed during reshape + - dm: call the resume method on internal suspend + - drm/tegra: dsi: Add missing check for of_find_device_by_node + - drm/tegra: dpaux: Populate AUX bus + - drm/tegra: dpaux: Fix PM disable depth imbalance in tegra_dpaux_probe + - drm/tegra: dsi: Make use of the helper function dev_err_probe() + - drm/tegra: dsi: Fix some error handling paths in tegra_dsi_probe() + - drm/tegra: dsi: Fix missing pm_runtime_disable() in the error handling path + of tegra_dsi_probe() + - drm/tegra: dc: rgb: Allow changing PLLD rate on Tegra30+ + - drm/tegra: rgb: Fix some error handling paths in tegra_dc_rgb_probe() + - drm/tegra: rgb: Fix missing clk_put() in the error handling paths of + tegra_dc_rgb_probe() + - drm/tegra: output: Fix missing i2c_put_adapter() in the error handling paths + of tegra_output_probe() + - drm/rockchip: inno_hdmi: Fix video timing + - drm: Don't treat 0 as -1 in drm_fixp2int_ceil + - drm/ttm: add ttm_resource_fini v2 + - drm/vmwgfx: fix a memleak in vmw_gmrid_man_get_node + - drm/rockchip: lvds: do not overwrite error code + - drm/rockchip: lvds: do not print scary message when probing defer + - drm/lima: fix a memleak in lima_heap_alloc + - dmaengine: tegra210-adma: Update dependency to ARCH_TEGRA + - [Config]: update CONFIG_TEGRA210_ADMA + - media: tc358743: register v4l2 async device only after successful setup + - PCI/DPC: Print all TLP Prefixes, not just the first + - perf record: Fix possible incorrect free in record__switch_output() + - HID: lenovo: Add middleclick_workaround sysfs knob for cptkbd + - drm/amd/display: Fix a potential buffer overflow in 'dp_dsc_clock_en_read()' + - drm/amd/display: Fix potential NULL pointer dereferences in + 'dcn10_set_output_transfer_func()' + - perf evsel: Fix duplicate initialization of data->id in + evsel__parse_sample() + - clk: meson: Add missing clocks to axg_clk_regmaps + 
- media: em28xx: annotate unchecked call to media_device_register() + - media: v4l2-tpg: fix some memleaks in tpg_alloc + - media: v4l2-mem2mem: fix a memleak in v4l2_m2m_register_entity + - media: edia: dvbdev: fix a use-after-free + - pinctrl: mediatek: Drop bogus slew rate register range for MT8192 + - clk: qcom: reset: Commonize the de/assert functions + - clk: qcom: reset: Ensure write completion on reset de/assertion + - quota: simplify drop_dquot_ref() + - quota: Fix potential NULL pointer dereference + - quota: Fix rcu annotations of inode dquot pointers + - PCI/P2PDMA: Fix a sleeping issue in a RCU read section + - PCI: switchtec: Fix an error handling path in switchtec_pci_probe() + - crypto: xilinx - call finalize with bh disabled + - perf thread_map: Free strlist on normal path in thread_map__new_by_tid_str() + - drm/radeon/ni: Fix wrong firmware size logging in ni_init_microcode() + - ALSA: seq: fix function cast warnings + - perf stat: Avoid metric-only segv + - ASoC: meson: Use dev_err_probe() helper + - ASoC: meson: aiu: fix function pointer type mismatch + - ASoC: meson: t9015: fix function pointer type mismatch + - powerpc: Force inlining of arch_vmap_p{u/m}d_supported() + - PCI: endpoint: Support NTB transfer between RC and EP + - [Config]: update CONFIG_PCI_EPF_VNTB + - NTB: EPF: fix possible memory leak in pci_vntb_probe() + - NTB: fix possible name leak in ntb_register_device() + - media: sun8i-di: Fix coefficient writes + - media: sun8i-di: Fix power on/off sequences + - media: sun8i-di: Fix chroma difference threshold + - media: imx: csc/scaler: fix v4l2_ctrl_handler memory leak + - media: go7007: add check of return value of go7007_read_addr() + - media: pvrusb2: remove redundant NULL check + - media: pvrusb2: fix pvr2_stream_callback casts + - clk: qcom: dispcc-sdm845: Adjust internal GDSC wait times + - drm/mediatek: dsi: Fix DSI RGB666 formats and definitions + - PCI: Mark 3ware-9650SE Root Port Extended Tags as broken + - clk: hisilicon: hi3519: Release the correct number of gates in + hi3519_clk_unregister() + - clk: hisilicon: hi3559a: Fix an erroneous devm_kfree() + - drm/tegra: put drm_gem_object ref on error in tegra_fb_create + - mfd: syscon: Call of_node_put() only when of_parse_phandle() takes a ref + - mfd: altera-sysmgr: Call of_node_put() only when of_parse_phandle() takes a + ref + - crypto: arm/sha - fix function cast warnings + - drm/tidss: Fix initial plane zpos values + - mtd: maps: physmap-core: fix flash size larger than 32-bit + - mtd: rawnand: lpc32xx_mlc: fix irq handler prototype + - ASoC: meson: axg-tdm-interface: fix mclk setup without mclk-fs + - ASoC: meson: axg-tdm-interface: add frame rate constraint + - HID: amd_sfh: Update HPD sensor structure elements + - drm/amdgpu: Fix missing break in ATOM_ARG_IMM Case of atom_get_src_int() + - media: pvrusb2: fix uaf in pvr2_context_set_notify + - media: dvb-frontends: avoid stack overflow warnings with clang + - media: go7007: fix a memleak in go7007_load_encoder + - media: ttpci: fix two memleaks in budget_av_attach + - media: mediatek: vcodec: avoid -Wcast-function-type-strict warning + - drm/mediatek: Fix a null pointer crash in mtk_drm_crtc_finish_page_flip + - powerpc/hv-gpci: Fix the H_GET_PERF_COUNTER_INFO hcall return value checks + - drm/msm/dpu: add division of drm_display_mode's hskew parameter + - module: Add support for default value for module async_probe + - modules: wait do_free_init correctly + - powerpc/embedded6xx: Fix no previous prototype for avr_uart_send() etc. 
+ - leds: aw2013: Unlock mutex before destroying it + - leds: sgm3140: Add missing timer cleanup and flash gpio control + - backlight: lm3630a: Initialize backlight_properties on init + - backlight: lm3630a: Don't set bl->props.brightness in get_brightness + - backlight: da9052: Fully initialize backlight_properties during probe + - backlight: lm3639: Fully initialize backlight_properties during probe + - backlight: lp8788: Fully initialize backlight_properties during probe + - sparc32: Fix section mismatch in leon_pci_grpci + - clk: Fix clk_core_get NULL dereference + - clk: zynq: Prevent null pointer dereference caused by kmalloc failure + - ALSA: hda/realtek: fix ALC285 issues on HP Envy x360 laptops + - ALSA: usb-audio: Stop parsing channels bits when all channels are found. + - RDMA/srpt: Do not register event handler until srpt device is fully setup + - f2fs: multidevice: support direct IO + - f2fs: invalidate META_MAPPING before IPU/DIO write + - f2fs: replace congestion_wait() calls with io_schedule_timeout() + - f2fs: fix to invalidate META_MAPPING before DIO write + - f2fs: invalidate meta pages only for post_read required inode + - f2fs: reduce stack memory cost by using bitfield in struct f2fs_io_info + - f2fs: compress: fix to cover normal cluster write with cp_rwsem + - f2fs: compress: fix to check unreleased compressed cluster + - scsi: csiostor: Avoid function pointer casts + - RDMA/device: Fix a race between mad_client and cm_client init + - RDMA/rtrs-clt: Check strnlen return len in sysfs mpath_policy_store() + - scsi: bfa: Fix function pointer type mismatch for hcb_qe->cbfn + - net: sunrpc: Fix an off by one in rpc_sockaddr2uaddr() + - NFSv4.2: fix nfs4_listxattr kernel BUG at mm/usercopy.c:102 + - NFSv4.2: fix listxattr maximum XDR buffer size + - watchdog: stm32_iwdg: initialize default timeout + - NFS: Fix an off by one in root_nfs_cat() + - f2fs: compress: fix reserve_cblocks counting error when out of space + - afs: Revert "afs: Hide silly-rename files from userspace" + - comedi: comedi_test: Prevent timers rescheduling during deletion + - remoteproc: stm32: use correct format strings on 64-bit + - remoteproc: stm32: Fix incorrect type in assignment for va + - remoteproc: stm32: Fix incorrect type assignment returned by + stm32_rproc_get_loaded_rsc_tablef + - tty: vt: fix 20 vs 0x20 typo in EScsiignore + - serial: max310x: fix syntax error in IRQ error message + - tty: serial: samsung: fix tx_empty() to return TIOCSER_TEMT + - arm64: dts: broadcom: bcmbca: bcm4908: drop invalid switch cells + - kconfig: fix infinite loop when expanding a macro at the end of file + - rtc: mt6397: select IRQ_DOMAIN instead of depending on it + - serial: 8250_exar: Don't remove GPIO device on suspend + - staging: greybus: fix get_channel_from_mode() failure path + - usb: gadget: net2272: Use irqflags in the call to net2272_probe_fin + - io_uring: don't save/restore iowait state + - nouveau: reset the bo resource bus info after an eviction + - octeontx2-af: Use matching wake_up API variant in CGX command interface + - s390/vtime: fix average steal time calculation + - soc: fsl: dpio: fix kcalloc() argument order + - hsr: Fix uninit-value access in hsr_get_node() + - net: mtk_eth_soc: move MAC_MCR setting to mac_finish() + - net: mediatek: mtk_eth_soc: clear MAC_MCR_FORCE_LINK only when MAC is up + - net: ethernet: mtk_eth_soc: fix PPE hanging issue + - packet: annotate data-races around ignore_outgoing + - net: veth: do not manipulate GRO when using XDP + - net: dsa: mt7530: prevent 
possible incorrect XTAL frequency selection + - vdpa/mlx5: Allow CVQ size changes + - wireguard: receive: annotate data-race around receiving_counter.counter + - rds: introduce acquire/release ordering in acquire/release_in_xmit() + - hsr: Handle failures in module init + - net: phy: fix phy_read_poll_timeout argument type in genphy_loopback + - net/bnx2x: Prevent access to a freed page in page_pool + - octeontx2-af: Use separate handlers for interrupts + - netfilter: nf_tables: do not compare internal table flags on updates + - rcu: add a helper to report consolidated flavor QS + - net: report RCU QS on threaded NAPI repolling + - bpf: report RCU QS in cpumap kthread + - net: dsa: mt7530: fix handling of LLDP frames + - net: dsa: mt7530: fix handling of 802.1X PAE frames + - net: dsa: mt7530: fix link-local frames that ingress vlan filtering ports + - net: dsa: mt7530: fix handling of all link-local frames + - spi: spi-mt65xx: Fix NULL pointer access in interrupt handler + - regmap: Add missing map->bus check + - remoteproc: stm32: fix incorrect optional pointers + * Jammy update: v5.15.152 upstream stable release (LP: #2063276) + - mmc: mmci: stm32: use a buffer for unaligned DMA requests + - mmc: mmci: stm32: fix DMA API overlapping mappings warning + - net: lan78xx: fix runtime PM count underflow on link stop + - ixgbe: {dis, en}able irqs in ixgbe_txrx_ring_{dis, en}able + - i40e: disable NAPI right after disabling irqs when handling xsk_pool + - tracing/net_sched: Fix tracepoints that save qdisc_dev() as a string + - geneve: make sure to pull inner header in geneve_rx() + - net: sparx5: Fix use after free inside sparx5_del_mact_entry + - net: ice: Fix potential NULL pointer dereference in ice_bridge_setlink() + - net/ipv6: avoid possible UAF in ip6_route_mpath_notify() + - cpumap: Zero-initialise xdp_rxq_info struct before running XDP program + - net/rds: fix WARNING in rds_conn_connect_if_down + - netfilter: nft_ct: fix l3num expectations with inet pseudo family + - netfilter: nf_conntrack_h323: Add protection for bmp length out of range + - erofs: apply proper VMA alignment for memory mapped files on THP + - netrom: Fix a data-race around sysctl_netrom_default_path_quality + - netrom: Fix a data-race around sysctl_netrom_obsolescence_count_initialiser + - netrom: Fix data-races around sysctl_netrom_network_ttl_initialiser + - netrom: Fix a data-race around sysctl_netrom_transport_timeout + - netrom: Fix a data-race around sysctl_netrom_transport_maximum_tries + - netrom: Fix a data-race around sysctl_netrom_transport_acknowledge_delay + - netrom: Fix a data-race around sysctl_netrom_transport_busy_delay + - netrom: Fix a data-race around sysctl_netrom_transport_requested_window_size + - netrom: Fix a data-race around sysctl_netrom_transport_no_activity_timeout + - netrom: Fix a data-race around sysctl_netrom_routing_control + - netrom: Fix a data-race around sysctl_netrom_link_fails_count + - netrom: Fix data-races around sysctl_net_busy_read + - ALSA: usb-audio: Refcount multiple accesses on the single clock + - ALSA: usb-audio: Clear fixed clock rate at closing EP + - ALSA: usb-audio: Split endpoint setups for hw_params and prepare (take#2) + - ALSA: usb-audio: Properly refcounting clock rate + - ALSA: usb-audio: Apply mutex around snd_usb_endpoint_set_params() + - ALSA: usb-audio: Correct the return code from snd_usb_endpoint_set_params() + - ALSA: usb-audio: Avoid superfluous endpoint setup + - ALSA: usb-audio: Add quirk for Tascam Model 12 + - ALSA: usb-audio: Add new quirk 
FIXED_RATE for JBL Quantum810 Wireless + - ALSA: usb-audio: Fix microphone sound on Nexigo webcam. + - ALSA: usb-audio: add quirk for RODE NT-USB+ + - drm/amd/display: Fix uninitialized variable usage in core_link_ 'read_dpcd() + & write_dpcd()' functions + - nfp: flower: add goto_chain_index for ct entry + - nfp: flower: add hardware offload check for post ct entry + - selftests/mm: switch to bash from sh + - selftests: mm: fix map_hugetlb failure on 64K page size systems + - xhci: process isoc TD properly when there was a transaction error mid TD. + - xhci: handle isoc Babble and Buffer Overrun events properly + - serial: max310x: use regmap methods for SPI batch operations + - serial: max310x: use a separate regmap for each port + - serial: max310x: prevent infinite while() loop in port startup + - drm/amd/pm: do not expose the API used internally only in kv_dpm.c + - drm/amdgpu: Reset IH OVERFLOW_CLEAR bit + - selftests: mptcp: decrease BW in simult flows + - hv_netvsc: use netif_is_bond_master() instead of open code + - hv_netvsc: Register VF in netvsc_probe if NET_DEVICE_REGISTER missed + - drm/amd/display: Re-arrange FPU code structure for dcn2x + - drm/amd/display: move calcs folder into DML + - drm/amd/display: remove DML Makefile duplicate lines + - drm/amd/display: Increase frame-larger-than for all display_mode_vba files + - getrusage: add the "signal_struct *sig" local variable + - getrusage: move thread_group_cputime_adjusted() outside of + lock_task_sighand() + - getrusage: use __for_each_thread() + - getrusage: use sig->stats_lock rather than lock_task_sighand() + - proc: Use task_is_running() for wchan in /proc/$pid/stat + - fs/proc: do_task_stat: move thread_group_cputime_adjusted() outside of + lock_task_sighand() + - ALSA: usb-audio: Fix wrong kfree issue in snd_usb_endpoint_free_all + - ALSA: usb-audio: Always initialize fixed_rate in + snd_usb_find_implicit_fb_sync_format() + - ALSA: usb-audio: Add FIXED_RATE quirk for JBL Quantum610 Wireless + - ALSA: usb-audio: Sort quirk table entries + - regmap: allow to define reg_update_bits for no bus configuration + - regmap: Add bulk read/write callbacks into regmap_config + - serial: max310x: make accessing revision id interface-agnostic + - serial: max310x: fix IO data corruption in batched operations + - Linux 5.15.152 + * CVE-2024-26809 + - netfilter: nft_set_pipapo: release elements in clone only from destroy path + * CVE-2024-26792 + - btrfs: fix double free of anonymous device after snapshot creation failure + * CVE-2023-52530 + - wifi: mac80211: fix potential key use-after-free + * CVE-2023-52447 + - bpf: Defer the free of inner map when necessary + - rcu-tasks: Provide rcu_trace_implies_rcu_gp() + * Avoid creating non-working backlight sysfs knob from ASUS board + (LP: #2060422) + - platform/x86: asus-wmi: Consider device is absent when the read is ~0 + * [Ubuntu 22.04.4/linux-image-6.5.0-26-generic] Kernel output "UBSAN: array- + index-out-of-bounds in /build/linux-hwe-6.5-34pCLi/linux- + hwe-6.5-6.5.0/drivers/net/hyperv/netvsc.c:1445:41" multiple times, + especially during boot. 
(LP: #2058477) + - hv: hyperv.h: Replace one-element array with flexible-array member + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) + - netfilter: nf_tables: disallow timeout for anonymous sets + - mtd: spinand: gigadevice: Fix the get ecc status issue + - netlink: Fix kernel-infoleak-after-free in __skb_datagram_iter + - net: ip_tunnel: prevent perpetual headroom growth + - tun: Fix xdp_rxq_info's queue_index when detaching + - cpufreq: intel_pstate: fix pstate limits enforcement for adjust_perf call + back + - net: veth: clear GRO when clearing XDP even when down + - ipv6: fix potential "struct net" leak in inet6_rtm_getaddr() + - lan78xx: enable auto speed configuration for LAN7850 if no EEPROM is + detected + - net: enable memcg accounting for veth queues + - veth: try harder when allocating queue memory + - net: usb: dm9601: fix wrong return value in dm9601_mdio_read + - uapi: in6: replace temporary label with rfc9486 + - stmmac: Clear variable when destroying workqueue + - Bluetooth: Avoid potential use-after-free in hci_error_reset + - Bluetooth: hci_event: Fix wrongly recorded wakeup BD_ADDR + - netfilter: nf_tables: allow NFPROTO_INET in nft_(match/target)_validate() + - netfilter: nfnetlink_queue: silence bogus compiler warning + - netfilter: core: move ip_ct_attach indirection to struct nf_ct_hook + - netfilter: make function op structures const + - netfilter: let reset rules clean out conntrack entries + - netfilter: bridge: confirm multicast packets before passing them up the + stack + - rtnetlink: fix error logic of IFLA_BRIDGE_FLAGS writing back + - igb: extend PTP timestamp adjustments to i211 + - efi/capsule-loader: fix incorrect allocation size + - power: supply: bq27xxx-i2c: Do not free non existing IRQ + - ALSA: Drop leftover snd-rtctimer stuff from Makefile + - fbcon: always restore the old font data in fbcon_do_set_font() + - afs: Fix endless loop in directory parsing + - riscv: Sparse-Memory/vmemmap out-of-bounds fix + - ALSA: firewire-lib: fix to check cycle continuity + - gtp: fix use-after-free and null-ptr-deref in gtp_newlink() + - wifi: nl80211: reject iftype change with mesh ID change + - btrfs: dev-replace: properly validate device names + - dmaengine: fsl-qdma: fix SoC may hang on 16 byte unaligned read + - dmaengine: ptdma: use consistent DMA masks + - dmaengine: fsl-qdma: init irq after reg initialization + - mmc: core: Fix eMMC initialization with 1-bit bus connection + - mmc: sdhci-xenon: add timeout for PHY init complete + - mmc: sdhci-xenon: fix PHY init clock stability + - pmdomain: qcom: rpmhpd: Fix enabled_corner aggregation + - x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers + - mptcp: move __mptcp_error_report in protocol.c + - mptcp: process pending subflow error on close + - mptcp: rename timer related helper to less confusing names + - selftests: mptcp: add missing kconfig for NF Filter + - selftests: mptcp: add missing kconfig for NF Filter in v6 + - mptcp: clean up harmless false expressions + - mptcp: add needs_id for netlink appending addr + - mptcp: push at DSS boundaries + - mptcp: fix possible deadlock in subflow diag + - cachefiles: fix memory leak in cachefiles_add_cache() + - fs,hugetlb: fix NULL pointer dereference in hugetlbs_fill_super + - Revert "drm/bridge: lt8912b: Register and attach our DSI device at probe" + - af_unix: Drop oob_skb ref before purging queue in GC. 
+ - gpio: 74x164: Enable output pins after registers are reset + - gpiolib: Fix the error path order in gpiochip_add_data_with_key() + - gpio: fix resource unwinding order in error path + - Revert "interconnect: Fix locking for runpm vs reclaim" + - Revert "interconnect: Teach lockdep about icc_bw_lock order" + - bpf: Add BPF_FIB_LOOKUP_SKIP_NEIGH for bpf_fib_lookup + - bpf: Add table ID to bpf_fib_lookup BPF helper + - bpf: Derive source IP addr via bpf_*_fib_lookup() + - Linux 5.15.151 + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) // + CVE-2024-26782 + - mptcp: fix double-free on socket dismantle + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) // Fix + bluetooth connections with 3.0 device (LP: #2063067) + - Bluetooth: hci_event: Fix handling of HCI_EV_IO_CAPA_REQUEST + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) + - net/sched: Retire CBQ qdisc + - [Config] updateconfigs for NET_SCH_CBQ + - net/sched: Retire ATM qdisc + - [Config] updateconfigs for NET_SCH_ATM + - net/sched: Retire dsmark qdisc + - [Config] updateconfigs for NET_SCH_DSMARK + - smb: client: fix potential OOBs in smb2_parse_contexts() + - smb: client: fix parsing of SMB3.1.1 POSIX create context + - sched/rt: sysctl_sched_rr_timeslice show default timeslice after reset + - PCI: dwc: Fix a 64bit bug in dw_pcie_ep_raise_msix_irq() + - bpf: Merge printk and seq_printf VARARG max macros + - bpf: Add struct for bin_args arg in bpf_bprintf_prepare + - bpf: Do cleanup in bpf_bprintf_cleanup only when needed + - bpf: Remove trace_printk_lock + - userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb + - zonefs: Improve error handling + - x86/fpu: Stop relying on userspace for info to fault in xsave buffer + - sched/rt: Fix sysctl_sched_rr_timeslice intial value + - sched/rt: Disallow writing invalid values to sched_rt_period_us + - scsi: target: core: Add TMF to tmr_list handling + - dmaengine: shdma: increase size of 'dev_id' + - dmaengine: fsl-qdma: increase size of 'irq_name' + - wifi: cfg80211: fix missing interfaces when dumping + - wifi: mac80211: fix race condition on enabling fast-xmit + - fbdev: savage: Error out if pixclock equals zero + - fbdev: sis: Error out if pixclock equals zero + - spi: hisi-sfc-v3xx: Return IRQ_NONE if no interrupts were detected + - ahci: asm1166: correct count of reported ports + - ahci: add 43-bit DMA address quirk for ASMedia ASM1061 controllers + - MIPS: reserve exception vector space ONLY ONCE + - platform/x86: touchscreen_dmi: Add info for the TECLAST X16 Plus tablet + - ext4: avoid dividing by 0 in mb_update_avg_fragment_size() when block bitmap + corrupt + - ext4: avoid allocating blocks from corrupted group in + ext4_mb_try_best_found() + - ext4: avoid allocating blocks from corrupted group in ext4_mb_find_by_goal() + - dmaengine: ti: edma: Add some null pointer checks to the edma_probe + - regulator: pwm-regulator: Add validity checks in continuous .get_voltage + - nvmet-tcp: fix nvme tcp ida memory leak + - ALSA: usb-audio: Check presence of valid altsetting control + - ASoC: sunxi: sun4i-spdif: Add support for Allwinner H616 + - spi: sh-msiof: avoid integer overflow in constants + - Input: xpad - add Lenovo Legion Go controllers + - netfilter: conntrack: check SCTP_CID_SHUTDOWN_ACK for vtag setting in + sctp_new + - ALSA: usb-audio: Ignore clock selector errors for single connection + - nvme-fc: do not wait in vain when unloading module + - nvmet-fcloop: swap the list_add_tail arguments + - nvmet-fc: release 
reference on target port + - nvmet-fc: defer cleanup using RCU properly + - nvmet-fc: hold reference on hostport match + - nvmet-fc: abort command when there is no binding + - nvmet-fc: avoid deadlock on delete association path + - nvmet-fc: take ref count on tgtport before delete assoc + - ext4: correct the hole length returned by ext4_map_blocks() + - Input: i8042 - add Fujitsu Lifebook U728 to i8042 quirk table + - fs/ntfs3: Modified fix directory element type detection + - fs/ntfs3: Improve ntfs_dir_count + - fs/ntfs3: Correct hard links updating when dealing with DOS names + - fs/ntfs3: Print warning while fixing hard links count + - fs/ntfs3: Fix detected field-spanning write (size 8) of single field + "le->name" + - fs/ntfs3: Add NULL ptr dereference checking at the end of + attr_allocate_frame() + - fs/ntfs3: Disable ATTR_LIST_ENTRY size check + - fs/ntfs3: use non-movable memory for ntfs3 MFT buffer cache + - fs/ntfs3: Prevent generic message "attempt to access beyond end of device" + - fs/ntfs3: Correct function is_rst_area_valid + - fs/ntfs3: Update inode->i_size after success write into compressed file + - fs/ntfs3: Fix oob in ntfs_listxattr + - wifi: mac80211: adding missing drv_mgd_complete_tx() call + - efi: runtime: Fix potential overflow of soft-reserved region size + - efi: Don't add memblocks for soft-reserved memory + - hwmon: (coretemp) Enlarge per package core count limit + - scsi: lpfc: Use unsigned type for num_sge + - firewire: core: send bus reset promptly on gap count error + - drm/amdgpu: skip to program GFXDEC registers for suspend abort + - drm/amdgpu: reset gpu for s3 suspend abort case + - virtio-blk: Ensure no requests in virtqueues before deleting vqs. + - pmdomain: mediatek: fix race conditions with genpd + - ksmbd: free aux buffer if ksmbd_iov_pin_rsp_read fails + - pmdomain: renesas: r8a77980-sysc: CR7 must be always on + - erofs: fix lz4 inplace decompression + - IB/hfi1: Fix sdma.h tx->num_descs off-by-one error + - drm/ttm: Fix an invalid freeing on already freed page in error path + - dm-crypt: don't modify the data when using authenticated encryption + - platform/x86: intel-vbtn: Stop calling "VBDL" from notify_handler + - platform/x86: touchscreen_dmi: Allow partial (prefix) matches for ACPI names + - KVM: arm64: vgic-its: Test for valid IRQ in MOVALL handler + - KVM: arm64: vgic-its: Test for valid IRQ in its_sync_lpi_pending_table() + - gtp: fix use-after-free and null-ptr-deref in gtp_genl_dump_pdp() + - PCI/MSI: Prevent MSI hardware interrupt number truncation + - l2tp: pass correct message length to ip6_append_data + - ARM: ep93xx: Add terminator to gpiod_lookup_table + - Revert "x86/ftrace: Use alternative RET encoding" + - x86/text-patching: Make text_gen_insn() play nice with ANNOTATE_NOENDBR + - x86/ibt,paravirt: Use text_gen_insn() for paravirt_patch() + - x86/ftrace: Use alternative RET encoding + - x86/returnthunk: Allow different return thunks + - Revert "x86/alternative: Make custom return thunk unconditional" + - x86/alternative: Make custom return thunk unconditional + - serial: amba-pl011: Fix DMA transmission in RS485 mode + - usb: dwc3: gadget: Don't disconnect if not started + - usb: cdnsp: blocked some cdns3 specific code + - usb: cdnsp: fixed issue with incorrect detecting CDNSP family controllers + - usb: cdns3: fixed memory use after free at cdns3_gadget_ep_disable() + - usb: gadget: ncm: Avoid dropping datagrams of properly parsed NTBs + - usb: roles: fix NULL pointer issue when put module's reference + - usb: roles: don't 
get/set_role() when usb_role_switch is unregistered + - mptcp: fix lockless access in subflow ULP diag + - clk: imx: imx8mp: add shared clk gate for usb suspend clk + - clk: qcom: gcc-qcs404: disable gpll[04]_out_aux parents + - clk: qcom: gcc-qcs404: fix names of the DSI clocks used as parents + - mtd: rawnand: sunxi: Fix the size of the last OOB region + - RISC-V: fix funct4 definition for c.jalr in parse_asm.h + - Input: iqs269a - drop unused device node references + - Input: iqs269a - configure device with a single block write + - Input: iqs269a - increase interrupt handler return delay + - clk: renesas: cpg-mssr: Fix use after free if cpg_mssr_common_init() failed + - Input: ads7846 - don't report pressure for ads7845 + - clk: renesas: cpg-mssr: Remove superfluous check in resume code + - clk: imx: avoid memory leak + - Input: ads7846 - always set last command to PWRDOWN + - Input: ads7846 - don't check penirq immediately for 7845 + - powerpc/powernv/ioda: Skip unallocated resources when mapping to PE + - clk: qcom: gpucc-sc7180: fix clk_dis_wait being programmed for CX GDSC + - clk: qcom: gpucc-sdm845: fix clk_dis_wait being programmed for CX GDSC + - clk: Honor CLK_OPS_PARENT_ENABLE in clk_core_is_enabled() + - powerpc/pseries/lparcfg: add missing RTAS retry status handling + - powerpc/perf/hv-24x7: add missing RTAS retry status handling + - powerpc/pseries/lpar: add missing RTAS retry status handling + - MIPS: SMP-CPS: fix build error when HOTPLUG_CPU not set + - MIPS: vpe-mt: drop physical_memsize + - vdpa/mlx5: Don't clear mr struct on destroy MR + - ARM: dts: BCM53573: Drop nonexistent #usb-cells + - RDMA/siw: Balance the reference of cep->kref in the error path + - RDMA/siw: Correct wrong debug message + - clk: linux/clk-provider.h: fix kernel-doc warnings and typos + - platform/x86: asus-wmi: Document the dgpu_disable sysfs attribute + - acpi: property: Let args be NULL in __acpi_node_get_property_reference + - ARM: dts: BCM53573: Drop nonexistent "default-off" LED trigger + - tools headers UAPI: Sync linux/fscrypt.h with the kernel sources + - perf beauty: Update copy of linux/socket.h with the kernel sources + - tools/virtio: fix build + - drm/amdgpu: init iommu after amdkfd device init + - f2fs: don't set GC_FAILURE_PIN for background GC + - f2fs: write checkpoint during FG_GC + - drm/i915/dg1: Update DMC_DEBUG3 register + - kernel/sched: Remove dl_boosted flag comment + - cifs: remove useless parameter 'is_fsctl' from SMB2_ioctl() + - serial: 8250: Remove serial_rs485 sanitization from em485 + - clk: imx8mp: Add DISP2 pixel clock + - clk: imx8mp: add clkout1/2 support + - dt-bindings: clocks: imx8mp: Add ID for usb suspend clock + - net: ethernet: ti: add missing of_node_put before return + - powerpc/rtas: make all exports GPL + - powerpc/rtas: ensure 4KB alignment for rtas_data_buf + - powerpc/eeh: Small refactor of eeh_handle_normal_event() + - powerpc/eeh: Set channel state after notifying the drivers + - PM: core: Redefine pm_ptr() macro + - PM: core: Add new *_PM_OPS macros, deprecate old ones + - mmc: jz4740: Use the new PM macros + - mmc: mxc: Use the new PM macros + - PM: core: Remove static qualifier in DEFINE_SIMPLE_DEV_PM_OPS macro + - Input: iqs269a - switch to DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr() + - Input: iqs269a - do not poll during suspend or resume + - Input: iqs269a - do not poll during ATI + - net/sched: Refactor qdisc_graft() for ingress and clsact Qdiscs + - netfilter: nf_tables: add rescheduling points during loop detection walks + - 
debugobjects: Recheck debug_objects_enabled before reporting + - nbd: Add the maximum limit of allocated index in nbd_dev_add + - md: fix data corruption for raid456 when reshape restart while grow up + - md/raid10: prevent soft lockup while flush writes + - posix-timers: Ensure timer ID search-loop limit is valid + - btrfs: add xxhash to fast checksum implementations + - ACPI: button: Add lid disable DMI quirk for Nextbook Ares 8A + - ACPI: video: Add backlight=native DMI quirk for Apple iMac11,3 + - ACPI: video: Add backlight=native DMI quirk for Lenovo ThinkPad X131e (3371 + AMD version) + - arm64: set __exception_irq_entry with __irq_entry as a default + - arm64: mm: fix VA-range sanity check + - sched/fair: Don't balance task to its current running CPU + - wifi: ath11k: fix registration of 6Ghz-only phy without the full channel + range + - bpf: Address KCSAN report on bpf_lru_list + - devlink: report devlink_port_type_warn source device + - wifi: wext-core: Fix -Wstringop-overflow warning in + ioctl_standard_iw_point() + - wifi: iwlwifi: mvm: avoid baid size integer overflow + - exfat: support dynamic allocate bh for exfat_entry_set_cache + - arm64: dts: rockchip: fix regulator name on rk3399-rock-4 + - arm64: dts: rockchip: add ES8316 codec for ROCK Pi 4 + - arm64: dts: rockchip: add SPDIF node for ROCK Pi 4 + - ARM: dts: BCM53573: Describe on-SoC BCM53125 rev 4 switch + - ACPI: video: Add backlight=native DMI quirk for Apple iMac12,1 and iMac12,2 + - ACPI: resource: Skip IRQ override on Asus Vivobook S5602ZA + - ACPI: resource: Add Asus ExpertBook B2502 to Asus quirks + - ACPI: resource: Skip IRQ override on Asus Expertbook B2402CBA + - ACPI: resource: Skip IRQ override on ASUS ExpertBook B1502CBA + - xhci: cleanup xhci_hub_control port references + - xhci: move port specific items such as state completions to port structure + - xhci: rename resume_done to resume_timestamp + - xhci: clear usb2 resume related variables in one place. 
+ - xhci: decouple usb2 port resume and get_port_status request handling + - xhci: track port suspend state correctly in unsuccessful resume cases + - cifs: add a warning when the in-flight count goes negative + - IB/hfi1: Fix a memleak in init_credit_return + - RDMA/bnxt_re: Return error for SRQ resize + - RDMA/irdma: Fix KASAN issue with tasklet + - RDMA/irdma: Validate max_send_wr and max_recv_wr + - RDMA/irdma: Set the CQ read threshold for GEN 1 + - RDMA/irdma: Add AE for too many RNRS + - RDMA/srpt: Support specifying the srpt_service_guid parameter + - RDMA/qedr: Fix qedr_create_user_qp error flow + - arm64: dts: rockchip: set num-cs property for spi on px30 + - RDMA/srpt: fix function pointer cast warnings + - bpf, scripts: Correct GPL license name + - scsi: jazz_esp: Only build if SCSI core is builtin + - nouveau: fix function cast warnings + - net: stmmac: Fix incorrect dereference in interrupt handlers + - ipv4: properly combine dev_base_seq and ipv4.dev_addr_genid + - ipv6: properly combine dev_base_seq and ipv6.dev_addr_genid + - ata: libahci_platform: Convert to using devm bulk clocks API + - ata: libahci_platform: Introduce reset assertion/deassertion methods + - ata: ahci_ceva: fix error handling for Xilinx GT PHY support + - bpf: Fix racing between bpf_timer_cancel_and_free and bpf_timer_cancel + - drm/nouveau/instmem: fix uninitialized_var.cocci warning + - octeontx2-af: Consider the action set by PF + - s390: use the correct count for __iowrite64_copy() + - netfilter: nf_tables: set dormant flag on hook register failure + - netfilter: flowtable: simplify route logic + - netfilter: nft_flow_offload: reset dst in route object after setting up flow + - netfilter: nft_flow_offload: release dst in case direct xmit path is used + - drm/syncobj: call drm_syncobj_fence_add_wait when WAIT_AVAILABLE flag is set + - drm/amd/display: Fix memory leak in dm_sw_fini() + - i2c: imx: Add timer for handling the stop condition + - i2c: imx: when being a target, mark the last read as processed + - fs/aio: Restrict kiocb_set_cancel_fn() to I/O submitted via libaio + - netfilter: nf_tables: fix scheduling-while-atomic splat + - ext4: regenerate buddy after block freeing failed if under fc replay + - ext4: avoid bb_free and bb_fragments inconsistency in mb_free_blocks() + - netfilter: nf_tables: can't schedule in nft_chain_validate + - r8169: use new PM macros + - Linux 5.15.150 + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26733 + - packet: move from strlcpy with unused retval to strscpy + - net: dev: Convert sa_data to flexible array in struct sockaddr + - arp: Prevent overflow in arp_req_get(). 
+ * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26735 + - ipv6: sr: fix possible use-after-free and null-ptr-deref + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26736 + - afs: Increase buffer size in afs_update_volume_status() + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26748 + - usb: cdns3: fix memory double free when handle zero packet + * CVE-2023-47233 + - wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach + * CVE-2024-26584 + - net: tls: handle backlogging of crypto requests + * CVE-2024-26585 + - tls: fix race between tx work scheduling and socket close + * CVE-2024-26583 + - tls: rx: jump to a more appropriate label + - tls: rx: drop pointless else after goto + - tls: stop recv() if initial process_rx_list gave us non-DATA + - tls: rx: don't store the record type in socket context + - tls: rx: don't store the decryption status in socket context + - tls: rx: don't issue wake ups when data is decrypted + - tls: rx: refactor decrypt_skb_update() + - tls: hw: rx: use return value of tls_device_decrypted() to carry status + - tls: rx: drop unnecessary arguments from tls_setup_from_iter() + - tls: rx: don't report text length from the bowels of decrypt + - tls: rx: wrap decryption arguments in a structure + - tls: rx: factor out writing ContentType to cmsg + - tls: rx: don't track the async count + - tls: rx: move counting TlsDecryptErrors for sync + - tls: rx: assume crypto always calls our callback + - tls: rx: use async as an in-out argument + - tls: decrement decrypt_pending if no async completion will be called + - net: tls: fix async vs NIC crypto offload + - Revert "tls: rx: move counting TlsDecryptErrors for sync" + - tls: rx: simplify async wait + - tls: rx: return the already-copied data on crypto error + - tls: rx: allow only one reader at a time + - tls: rx: release the sock lock on locking timeout + - tls: extract context alloc/initialization out of tls_set_sw_offload + - net: tls: factor out tls_*crypt_async_wait() + - tls: fix race between async notify and socket close + * CVE-2024-26622 + - tomoyo: fix UAF write bug in tomoyo_write_control() + + -- Philip Cox Fri, 10 May 2024 10:40:43 -0400 + +linux-aws (5.15.0-1062.68) jammy; urgency=medium + + * jammy/linux-aws: 5.15.0-1062.68 -proposed tracker (LP: #2063584) + + [ Ubuntu: 5.15.0-107.117 ] + + * jammy/linux: 5.15.0-107.117 -proposed tracker (LP: #2063635) + * CVE-2023-52530 + - wifi: mac80211: fix potential key use-after-free + * CVE-2024-26622 + - tomoyo: fix UAF write bug in tomoyo_write_control() + * CVE-2023-47233 + - wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach + + -- Philip Cox Tue, 30 Apr 2024 14:50:12 -0400 + linux-aws (5.15.0-1061.67) jammy; urgency=medium * jammy/linux-aws: 5.15.0-1061.67 -proposed tracker (LP: #2061762) diff -u linux-aws-5.15-5.15.0/debian.aws/config/annotations linux-aws-5.15-5.15.0/debian.aws/config/annotations --- linux-aws-5.15-5.15.0/debian.aws/config/annotations +++ linux-aws-5.15-5.15.0/debian.aws/config/annotations @@ -8,6 +8,9 @@ CONFIG_ACCESSIBILITY policy<{'amd64': 'n', 'arm64': 'n'}> CONFIG_ACCESSIBILITY note<'LP: #1967702'> +CONFIG_ARCH_HIBERNATION_HEADER policy<{'amd64': 'y', 'arm64': 'y'}> +CONFIG_ARCH_HIBERNATION_HEADER note<'LP:lp2060992'> + CONFIG_BLK_DEV_NVME policy<{'amd64': 'y', 'arm64': 'y'}> CONFIG_BLK_DEV_NVME note<'LP:#1759893'> @@ -23,6 +26,15 @@ CONFIG_DRM_ETNAVIV policy<{'amd64': 'n', 'arm64': 'n'}> note<'LP: #1990167'> 
CONFIG_DRM_ETNAVIV_THERMAL policy<{'arm64': '-'}> note<'LP: #1990167'> +CONFIG_HIBERNATE_CALLBACKS policy<{'amd64': 'y', 'arm64': 'y'}> +CONFIG_HIBERNATE_CALLBACKS note<'LP:lp2060992'> + +CONFIG_HIBERNATION policy<{'amd64': 'y', 'arm64': 'y'}> +CONFIG_HIBERNATION note<'LP:lp2060992'> + +CONFIG_HIBERNATION_SNAPSHOT_DEV policy<{'amd64': 'y', 'arm64': 'y'}> +CONFIG_HIBERNATION_SNAPSHOT_DEV note<'LP:lp2060992'> + CONFIG_IOMMU_DEFAULT_DMA_LAZY policy<{'amd64': 'y', 'arm64': 'y'}> CONFIG_IOMMU_DEFAULT_DMA_LAZY note<'LP: #1806488'> @@ -32,6 +44,9 @@ CONFIG_PCI_MESON policy<{'amd64': 'n', 'arm64': 'n'}> CONFIG_PCI_MESON note<'LP: #2007745'> +CONFIG_PM_STD_PARTITION policy<{'amd64': '""', 'arm64': '""'}> +CONFIG_PM_STD_PARTITION note<'LP:lp2060992'> + CONFIG_SERIAL_MULTI_INSTANTIATE policy<{'amd64': 'n'}> CONFIG_SERIAL_MULTI_INSTANTIATE note<'LP:#1965496'> @@ -116,6 +131,9 @@ CONFIG_SPEAKUP policy<{'amd64': '-', 'arm64': '-'}> CONFIG_SPEAKUP note<'LP: #1967702'> +CONFIG_TRANS_TABLE policy<{'amd64': '-', 'arm64': 'y'}> +CONFIG_TRANS_TABLE note<'LP:lp2060992'> + # ---- Annotations without notes ---- diff -u linux-aws-5.15-5.15.0/debian.aws/reconstruct linux-aws-5.15-5.15.0/debian.aws/reconstruct --- linux-aws-5.15-5.15.0/debian.aws/reconstruct +++ linux-aws-5.15-5.15.0/debian.aws/reconstruct @@ -87,6 +87,17 @@ rm -f 'drivers/bus/mhi/core/pm.c' rm -f 'drivers/bus/mhi/pci_generic.c' rm -f 'drivers/counter/counter.c' +rm -f 'drivers/gpu/drm/amd/display/dc/calcs/Makefile' +rm -f 'drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c' +rm -f 'drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h' +rm -f 'drivers/gpu/drm/amd/display/dc/calcs/custom_float.c' +rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c' +rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c' +rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h' +rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c' +rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c' +rm -f 'drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c' +rm -f 'drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h' rm -f 'drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h' rm -f 'drivers/gpu/drm/msm/hdmi/hdmi_connector.c' rm -f 'drivers/gpu/drm/vmwgfx/vmwgfx_thp.c' @@ -146,6 +157,9 @@ rm -f 'net/sched/cls_rsvp.h' rm -f 'net/sched/cls_rsvp6.c' rm -f 'net/sched/cls_tcindex.c' +rm -f 'net/sched/sch_atm.c' +rm -f 'net/sched/sch_cbq.c' +rm -f 'net/sched/sch_dsmark.c' rm -f 'net/xfrm/xfrm_interface.c' rm -f 'tools/build/feature/test-libpython-version.c' rm -f 'tools/perf/arch/arm64/util/machine.c' diff -u linux-aws-5.15-5.15.0/debian.aws/tracking-bug linux-aws-5.15-5.15.0/debian.aws/tracking-bug --- linux-aws-5.15-5.15.0/debian.aws/tracking-bug +++ linux-aws-5.15-5.15.0/debian.aws/tracking-bug @@ -1 +1 @@ -2061762 2024.04.01-3 +2063712 2024.04.29-1 diff -u linux-aws-5.15-5.15.0/debian.master/changelog linux-aws-5.15-5.15.0/debian.master/changelog --- linux-aws-5.15-5.15.0/debian.master/changelog +++ linux-aws-5.15-5.15.0/debian.master/changelog @@ -1,3 +1,824 @@ +linux (5.15.0-111.121) jammy; urgency=medium + + * jammy/linux: 5.15.0-111.121 -proposed tracker (LP: #2063763) + + * RTL8852BE fw security fail then lost WIFI function during suspend/resume + cycle (LP: #2063096) + - wifi: rtw89: download firmware with five times retry + + * Mount CIFS fails with Permission denied (LP: #2061986) + - cifs: fix ntlmssp auth when there is no key exchange + + * USB stick can't be detected (LP: #2040948) + - usb: Disable USB3 LPM at shutdown + + * Jammy update: v5.15.153 upstream 
stable release (LP: #2063290) + - io_uring/unix: drop usage of io_uring socket + - io_uring: drop any code related to SCM_RIGHTS + - selftests: tls: use exact comparison in recv_partial + - ASoC: rt5645: Make LattePanda board DMI match more precise + - x86/xen: Add some null pointer checking to smp.c + - MIPS: Clear Cause.BD in instruction_pointer_set + - HID: multitouch: Add required quirk for Synaptics 0xcddc device + - gen_compile_commands: fix invalid escape sequence warning + - RDMA/mlx5: Fix fortify source warning while accessing Eth segment + - RDMA/mlx5: Relax DEVX access upon modify commands + - riscv: dts: sifive: add missing #interrupt-cells to pmic + - x86/mm: Move is_vsyscall_vaddr() into asm/vsyscall.h + - x86/mm: Disallow vsyscall page read for copy_from_kernel_nofault() + - net/iucv: fix the allocation size of iucv_path_table array + - parisc/ftrace: add missing CONFIG_DYNAMIC_FTRACE check + - block: sed-opal: handle empty atoms when parsing response + - dm-verity, dm-crypt: align "struct bvec_iter" correctly + - scsi: mpt3sas: Prevent sending diag_reset when the controller is ready + - ALSA: hda/realtek - ALC285 reduce pop noise from Headphone port + - drm/amdgpu: Enable gpu reset for S3 abort cases on Raven series + - Bluetooth: rfcomm: Fix null-ptr-deref in rfcomm_check_security + - firewire: core: use long bus reset on gap count error + - ASoC: Intel: bytcr_rt5640: Add an extra entry for the Chuwi Vi8 tablet + - Input: gpio_keys_polled - suppress deferred probe error for gpio + - ASoC: wm8962: Enable oscillator if selecting WM8962_FLL_OSC + - ASoC: wm8962: Enable both SPKOUTR_ENA and SPKOUTL_ENA in mono mode + - ASoC: wm8962: Fix up incorrect error message in wm8962_set_fll + - do_sys_name_to_handle(): use kzalloc() to fix kernel-infoleak + - s390/dasd: put block allocation in separate function + - s390/dasd: add query PPRC function + - s390/dasd: add copy pair setup + - s390/dasd: add autoquiesce feature + - s390/dasd: Use dev_*() for device log messages + - s390/dasd: fix double module refcount decrement + - fs/select: rework stack allocation hack for clang + - md: Don't clear MD_CLOSING when the raid is about to stop + - lib/cmdline: Fix an invalid format specifier in an assertion msg + - time: test: Fix incorrect format specifier + - rtc: test: Fix invalid format specifier. 
+ - aoe: fix the potential use-after-free problem in aoecmd_cfg_pkts + - timekeeping: Fix cross-timestamp interpolation on counter wrap + - timekeeping: Fix cross-timestamp interpolation corner case decision + - timekeeping: Fix cross-timestamp interpolation for non-x86 + - sched/fair: Take the scheduling domain into account in select_idle_core() + - wifi: ath10k: fix NULL pointer dereference in + ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev() + - wifi: b43: Stop/wake correct queue in DMA Tx path when QoS is disabled + - wifi: b43: Stop/wake correct queue in PIO Tx path when QoS is disabled + - wifi: b43: Stop correct queue in DMA worker when QoS is disabled + - wifi: b43: Disable QoS for bcm4331 + - wifi: wilc1000: fix declarations ordering + - wifi: wilc1000: fix RCU usage in connect path + - wifi: rtl8xxxu: add cancel_work_sync() for c2hcmd_work + - wifi: wilc1000: fix multi-vif management when deleting a vif + - wifi: mwifiex: debugfs: Drop unnecessary error check for + debugfs_create_dir() + - cpufreq: brcmstb-avs-cpufreq: add check for cpufreq_cpu_get's return value + - cpufreq: Explicitly include correct DT includes + - cpufreq: mediatek-hw: Wait for CPU supplies before probing + - sock_diag: annotate data-races around sock_diag_handlers[family] + - inet_diag: annotate data-races around inet_diag_table[] + - bpftool: Silence build warning about calloc() + - af_unix: Annotate data-race of gc_in_progress in wait_for_unix_gc(). + - cpufreq: mediatek-hw: Don't error out if supply is not found + - arm64: dts: imx8mm-kontron: Disable pullups for I2C signals on SL/BL i.MX8MM + - arm64: dts: imx8mm-kontron: Disable pullups for onboard UART signals on BL + board + - arm64: dts: imx8mm-kontron: Add support for ultra high speed modes on SD + card + - arm64: dts: imx8mm-kontron: Use the VSELECT signal to switch SD card IO + voltage + - arm64: dts: imx8mm-kontron: Disable pull resistors for SD card signals on BL + board + - wifi: ath9k: delay all of ath9k_wmi_event_tasklet() until init is complete + - wifi: iwlwifi: mvm: report beacon protection failures + - wifi: iwlwifi: dbg-tlv: ensure NUL termination + - wifi: iwlwifi: fix EWRD table validity check + - arm64: dts: imx8mm-venice-gw71xx: fix USB OTG VBUS + - pwm: atmel-hlcdc: Convert to platform remove callback returning void + - pwm: atmel-hlcdc: Use consistent variable naming + - pwm: atmel-hlcdc: Fix clock imbalance related to suspend support + - net: blackhole_dev: fix build warning for ethh set but not used + - wifi: libertas: fix some memleaks in lbs_allocate_cmd_buffer() + - pwm: sti: Implement .apply() callback + - pwm: sti: Fix capture for st,pwm-num-chan < st,capture-num-chan + - wifi: iwlwifi: mvm: don't set replay counters to 0xff + - s390/vdso: drop '-fPIC' from LDFLAGS + - ipv6: mcast: remove one synchronize_net() barrier in ipv6_mc_down() + - arm64: dts: mt8183: kukui: Add Type C node + - arm64: dts: mt8183: kukui: Split out keyboard node and describe detachables + - arm64: dts: mt8183: Move CrosEC base detection node to kukui-based DTs + - arm64: dts: mediatek: mt7622: add missing "device_type" to memory nodes + - bpf: Mark bpf_spin_{lock,unlock}() helpers with notrace correctly + - wireless: Remove redundant 'flush_workqueue()' calls + - wifi: wilc1000: prevent use-after-free on vif when cleaning up all + interfaces + - ACPI: processor_idle: Fix memory leak in acpi_processor_power_exit() + - bus: tegra-aconnect: Update dependency to ARCH_TEGRA + - [Config]: update CONFIG_TEGRA_ACONNECT + - iommu/amd: Mark interrupt as managed + - 
wifi: brcmsmac: avoid function pointer casts + - net: ena: Remove ena_select_queue + - ARM: dts: arm: realview: Fix development chip ROM compatible value + - arm64: dts: renesas: r8a779a0: Update to R-Car Gen4 compatible values + - arm64: dts: renesas: r8a779a0: Correct avb[01] reg sizes + - ARM: dts: imx6dl-yapp4: Move phy reset into switch node + - ARM: dts: imx6dl-yapp4: Fix typo in the QCA switch register address + - ARM: dts: imx6dl-yapp4: Move the internal switch PHYs under the switch node + - arm64: dts: marvell: reorder crypto interrupts on Armada SoCs + - ACPI: resource: Add Infinity laptops to irq1_edge_low_force_override + - ACPI: resource: Do IRQ override on Lunnen Ground laptops + - ACPI: resource: Add MAIBENBEN X577 to irq1_edge_low_force_override + - ACPI: scan: Fix device check notification handling + - x86, relocs: Ignore relocations in .notes section + - SUNRPC: fix some memleaks in gssx_dec_option_array + - mmc: wmt-sdmmc: remove an incorrect release_mem_region() call in the .remove + function + - wifi: rtw88: 8821c: Fix false alarm count + - PCI: Make pci_dev_is_disconnected() helper public for other drivers + - iommu/vt-d: Don't issue ATS Invalidation request when device is disconnected + - igb: move PEROUT and EXTTS isr logic to separate functions + - igb: Fix missing time sync events + - Bluetooth: Remove superfluous call to hci_conn_check_pending() + - Bluetooth: hci_qca: Add support for QTI Bluetooth chip wcn6855 + - Bluetooth: hci_qca: don't use IS_ERR_OR_NULL() with gpiod_get_optional() + - Bluetooth: hci_core: Fix possible buffer overflow + - sr9800: Add check for usbnet_get_endpoints + - bpf: Fix DEVMAP_HASH overflow check on 32-bit arches + - bpf: Fix hashtab overflow check on 32-bit arches + - bpf: Fix stackmap overflow check on 32-bit arches + - ipv6: fib6_rules: flush route cache when rule is changed + - net: ip_tunnel: make sure to pull inner header in ip_tunnel_rcv() + - net: phy: fix phy_get_internal_delay accessing an empty array + - net: hns3: fix kernel crash when 1588 is received on HIP08 devices + - net: hns3: fix port duplex configure error in IMP reset + - net: phy: DP83822: enable rgmii mode if phy_interface_is_rgmii + - net: phy: dp83822: Fix RGMII TX delay configuration + - OPP: debugfs: Fix warning around icc_get_name() + - tcp: fix incorrect parameter validation in the do_tcp_getsockopt() function + - net: Change sock_getsockopt() to take the sk ptr instead of the sock ptr + - bpf: net: Change sk_getsockopt() to take the sockptr_t argument + - bpf: net: Change do_ip_getsockopt() to take the sockptr_t argument + - ipmr: fix incorrect parameter validation in the ip_mroute_getsockopt() + function + - l2tp: fix incorrect parameter validation in the pppol2tp_getsockopt() + function + - udp: fix incorrect parameter validation in the udp_lib_getsockopt() function + - net: kcm: fix incorrect parameter validation in the kcm_getsockopt) function + - net/x25: fix incorrect parameter validation in the x25_getsockopt() function + - nfp: flower: handle acti_netdevs allocation failure + - dm raid: fix false positive for requeue needed during reshape + - dm: call the resume method on internal suspend + - drm/tegra: dsi: Add missing check for of_find_device_by_node + - drm/tegra: dpaux: Populate AUX bus + - drm/tegra: dpaux: Fix PM disable depth imbalance in tegra_dpaux_probe + - drm/tegra: dsi: Make use of the helper function dev_err_probe() + - drm/tegra: dsi: Fix some error handling paths in tegra_dsi_probe() + - drm/tegra: dsi: Fix missing 
pm_runtime_disable() in the error handling path + of tegra_dsi_probe() + - drm/tegra: dc: rgb: Allow changing PLLD rate on Tegra30+ + - drm/tegra: rgb: Fix some error handling paths in tegra_dc_rgb_probe() + - drm/tegra: rgb: Fix missing clk_put() in the error handling paths of + tegra_dc_rgb_probe() + - drm/tegra: output: Fix missing i2c_put_adapter() in the error handling paths + of tegra_output_probe() + - drm/rockchip: inno_hdmi: Fix video timing + - drm: Don't treat 0 as -1 in drm_fixp2int_ceil + - drm/ttm: add ttm_resource_fini v2 + - drm/vmwgfx: fix a memleak in vmw_gmrid_man_get_node + - drm/rockchip: lvds: do not overwrite error code + - drm/rockchip: lvds: do not print scary message when probing defer + - drm/lima: fix a memleak in lima_heap_alloc + - dmaengine: tegra210-adma: Update dependency to ARCH_TEGRA + - [Config]: update CONFIG_TEGRA210_ADMA + - media: tc358743: register v4l2 async device only after successful setup + - PCI/DPC: Print all TLP Prefixes, not just the first + - perf record: Fix possible incorrect free in record__switch_output() + - HID: lenovo: Add middleclick_workaround sysfs knob for cptkbd + - drm/amd/display: Fix a potential buffer overflow in 'dp_dsc_clock_en_read()' + - drm/amd/display: Fix potential NULL pointer dereferences in + 'dcn10_set_output_transfer_func()' + - perf evsel: Fix duplicate initialization of data->id in + evsel__parse_sample() + - clk: meson: Add missing clocks to axg_clk_regmaps + - media: em28xx: annotate unchecked call to media_device_register() + - media: v4l2-tpg: fix some memleaks in tpg_alloc + - media: v4l2-mem2mem: fix a memleak in v4l2_m2m_register_entity + - media: edia: dvbdev: fix a use-after-free + - pinctrl: mediatek: Drop bogus slew rate register range for MT8192 + - clk: qcom: reset: Commonize the de/assert functions + - clk: qcom: reset: Ensure write completion on reset de/assertion + - quota: simplify drop_dquot_ref() + - quota: Fix potential NULL pointer dereference + - quota: Fix rcu annotations of inode dquot pointers + - PCI/P2PDMA: Fix a sleeping issue in a RCU read section + - PCI: switchtec: Fix an error handling path in switchtec_pci_probe() + - crypto: xilinx - call finalize with bh disabled + - perf thread_map: Free strlist on normal path in thread_map__new_by_tid_str() + - drm/radeon/ni: Fix wrong firmware size logging in ni_init_microcode() + - ALSA: seq: fix function cast warnings + - perf stat: Avoid metric-only segv + - ASoC: meson: Use dev_err_probe() helper + - ASoC: meson: aiu: fix function pointer type mismatch + - ASoC: meson: t9015: fix function pointer type mismatch + - powerpc: Force inlining of arch_vmap_p{u/m}d_supported() + - PCI: endpoint: Support NTB transfer between RC and EP + - [Config]: update CONFIG_PCI_EPF_VNTB + - NTB: EPF: fix possible memory leak in pci_vntb_probe() + - NTB: fix possible name leak in ntb_register_device() + - media: sun8i-di: Fix coefficient writes + - media: sun8i-di: Fix power on/off sequences + - media: sun8i-di: Fix chroma difference threshold + - media: imx: csc/scaler: fix v4l2_ctrl_handler memory leak + - media: go7007: add check of return value of go7007_read_addr() + - media: pvrusb2: remove redundant NULL check + - media: pvrusb2: fix pvr2_stream_callback casts + - clk: qcom: dispcc-sdm845: Adjust internal GDSC wait times + - drm/mediatek: dsi: Fix DSI RGB666 formats and definitions + - PCI: Mark 3ware-9650SE Root Port Extended Tags as broken + - clk: hisilicon: hi3519: Release the correct number of gates in + hi3519_clk_unregister() + - clk: 
hisilicon: hi3559a: Fix an erroneous devm_kfree() + - drm/tegra: put drm_gem_object ref on error in tegra_fb_create + - mfd: syscon: Call of_node_put() only when of_parse_phandle() takes a ref + - mfd: altera-sysmgr: Call of_node_put() only when of_parse_phandle() takes a + ref + - crypto: arm/sha - fix function cast warnings + - drm/tidss: Fix initial plane zpos values + - mtd: maps: physmap-core: fix flash size larger than 32-bit + - mtd: rawnand: lpc32xx_mlc: fix irq handler prototype + - ASoC: meson: axg-tdm-interface: fix mclk setup without mclk-fs + - ASoC: meson: axg-tdm-interface: add frame rate constraint + - HID: amd_sfh: Update HPD sensor structure elements + - drm/amdgpu: Fix missing break in ATOM_ARG_IMM Case of atom_get_src_int() + - media: pvrusb2: fix uaf in pvr2_context_set_notify + - media: dvb-frontends: avoid stack overflow warnings with clang + - media: go7007: fix a memleak in go7007_load_encoder + - media: ttpci: fix two memleaks in budget_av_attach + - media: mediatek: vcodec: avoid -Wcast-function-type-strict warning + - drm/mediatek: Fix a null pointer crash in mtk_drm_crtc_finish_page_flip + - powerpc/hv-gpci: Fix the H_GET_PERF_COUNTER_INFO hcall return value checks + - drm/msm/dpu: add division of drm_display_mode's hskew parameter + - module: Add support for default value for module async_probe + - modules: wait do_free_init correctly + - powerpc/embedded6xx: Fix no previous prototype for avr_uart_send() etc. + - leds: aw2013: Unlock mutex before destroying it + - leds: sgm3140: Add missing timer cleanup and flash gpio control + - backlight: lm3630a: Initialize backlight_properties on init + - backlight: lm3630a: Don't set bl->props.brightness in get_brightness + - backlight: da9052: Fully initialize backlight_properties during probe + - backlight: lm3639: Fully initialize backlight_properties during probe + - backlight: lp8788: Fully initialize backlight_properties during probe + - sparc32: Fix section mismatch in leon_pci_grpci + - clk: Fix clk_core_get NULL dereference + - clk: zynq: Prevent null pointer dereference caused by kmalloc failure + - ALSA: hda/realtek: fix ALC285 issues on HP Envy x360 laptops + - ALSA: usb-audio: Stop parsing channels bits when all channels are found. 
+ - RDMA/srpt: Do not register event handler until srpt device is fully setup + - f2fs: multidevice: support direct IO + - f2fs: invalidate META_MAPPING before IPU/DIO write + - f2fs: replace congestion_wait() calls with io_schedule_timeout() + - f2fs: fix to invalidate META_MAPPING before DIO write + - f2fs: invalidate meta pages only for post_read required inode + - f2fs: reduce stack memory cost by using bitfield in struct f2fs_io_info + - f2fs: compress: fix to cover normal cluster write with cp_rwsem + - f2fs: compress: fix to check unreleased compressed cluster + - scsi: csiostor: Avoid function pointer casts + - RDMA/device: Fix a race between mad_client and cm_client init + - RDMA/rtrs-clt: Check strnlen return len in sysfs mpath_policy_store() + - scsi: bfa: Fix function pointer type mismatch for hcb_qe->cbfn + - net: sunrpc: Fix an off by one in rpc_sockaddr2uaddr() + - NFSv4.2: fix nfs4_listxattr kernel BUG at mm/usercopy.c:102 + - NFSv4.2: fix listxattr maximum XDR buffer size + - watchdog: stm32_iwdg: initialize default timeout + - NFS: Fix an off by one in root_nfs_cat() + - f2fs: compress: fix reserve_cblocks counting error when out of space + - afs: Revert "afs: Hide silly-rename files from userspace" + - comedi: comedi_test: Prevent timers rescheduling during deletion + - remoteproc: stm32: use correct format strings on 64-bit + - remoteproc: stm32: Fix incorrect type in assignment for va + - remoteproc: stm32: Fix incorrect type assignment returned by + stm32_rproc_get_loaded_rsc_tablef + - tty: vt: fix 20 vs 0x20 typo in EScsiignore + - serial: max310x: fix syntax error in IRQ error message + - tty: serial: samsung: fix tx_empty() to return TIOCSER_TEMT + - arm64: dts: broadcom: bcmbca: bcm4908: drop invalid switch cells + - kconfig: fix infinite loop when expanding a macro at the end of file + - rtc: mt6397: select IRQ_DOMAIN instead of depending on it + - serial: 8250_exar: Don't remove GPIO device on suspend + - staging: greybus: fix get_channel_from_mode() failure path + - usb: gadget: net2272: Use irqflags in the call to net2272_probe_fin + - io_uring: don't save/restore iowait state + - nouveau: reset the bo resource bus info after an eviction + - octeontx2-af: Use matching wake_up API variant in CGX command interface + - s390/vtime: fix average steal time calculation + - soc: fsl: dpio: fix kcalloc() argument order + - hsr: Fix uninit-value access in hsr_get_node() + - net: mtk_eth_soc: move MAC_MCR setting to mac_finish() + - net: mediatek: mtk_eth_soc: clear MAC_MCR_FORCE_LINK only when MAC is up + - net: ethernet: mtk_eth_soc: fix PPE hanging issue + - packet: annotate data-races around ignore_outgoing + - net: veth: do not manipulate GRO when using XDP + - net: dsa: mt7530: prevent possible incorrect XTAL frequency selection + - vdpa/mlx5: Allow CVQ size changes + - wireguard: receive: annotate data-race around receiving_counter.counter + - rds: introduce acquire/release ordering in acquire/release_in_xmit() + - hsr: Handle failures in module init + - net: phy: fix phy_read_poll_timeout argument type in genphy_loopback + - net/bnx2x: Prevent access to a freed page in page_pool + - octeontx2-af: Use separate handlers for interrupts + - netfilter: nf_tables: do not compare internal table flags on updates + - rcu: add a helper to report consolidated flavor QS + - net: report RCU QS on threaded NAPI repolling + - bpf: report RCU QS in cpumap kthread + - net: dsa: mt7530: fix handling of LLDP frames + - net: dsa: mt7530: fix handling of 802.1X PAE frames + - net: 
dsa: mt7530: fix link-local frames that ingress vlan filtering ports + - net: dsa: mt7530: fix handling of all link-local frames + - spi: spi-mt65xx: Fix NULL pointer access in interrupt handler + - regmap: Add missing map->bus check + - remoteproc: stm32: fix incorrect optional pointers + + * Jammy update: v5.15.152 upstream stable release (LP: #2063276) + - mmc: mmci: stm32: use a buffer for unaligned DMA requests + - mmc: mmci: stm32: fix DMA API overlapping mappings warning + - net: lan78xx: fix runtime PM count underflow on link stop + - ixgbe: {dis, en}able irqs in ixgbe_txrx_ring_{dis, en}able + - i40e: disable NAPI right after disabling irqs when handling xsk_pool + - tracing/net_sched: Fix tracepoints that save qdisc_dev() as a string + - geneve: make sure to pull inner header in geneve_rx() + - net: sparx5: Fix use after free inside sparx5_del_mact_entry + - net: ice: Fix potential NULL pointer dereference in ice_bridge_setlink() + - net/ipv6: avoid possible UAF in ip6_route_mpath_notify() + - cpumap: Zero-initialise xdp_rxq_info struct before running XDP program + - net/rds: fix WARNING in rds_conn_connect_if_down + - netfilter: nft_ct: fix l3num expectations with inet pseudo family + - netfilter: nf_conntrack_h323: Add protection for bmp length out of range + - erofs: apply proper VMA alignment for memory mapped files on THP + - netrom: Fix a data-race around sysctl_netrom_default_path_quality + - netrom: Fix a data-race around sysctl_netrom_obsolescence_count_initialiser + - netrom: Fix data-races around sysctl_netrom_network_ttl_initialiser + - netrom: Fix a data-race around sysctl_netrom_transport_timeout + - netrom: Fix a data-race around sysctl_netrom_transport_maximum_tries + - netrom: Fix a data-race around sysctl_netrom_transport_acknowledge_delay + - netrom: Fix a data-race around sysctl_netrom_transport_busy_delay + - netrom: Fix a data-race around sysctl_netrom_transport_requested_window_size + - netrom: Fix a data-race around sysctl_netrom_transport_no_activity_timeout + - netrom: Fix a data-race around sysctl_netrom_routing_control + - netrom: Fix a data-race around sysctl_netrom_link_fails_count + - netrom: Fix data-races around sysctl_net_busy_read + - ALSA: usb-audio: Refcount multiple accesses on the single clock + - ALSA: usb-audio: Clear fixed clock rate at closing EP + - ALSA: usb-audio: Split endpoint setups for hw_params and prepare (take#2) + - ALSA: usb-audio: Properly refcounting clock rate + - ALSA: usb-audio: Apply mutex around snd_usb_endpoint_set_params() + - ALSA: usb-audio: Correct the return code from snd_usb_endpoint_set_params() + - ALSA: usb-audio: Avoid superfluous endpoint setup + - ALSA: usb-audio: Add quirk for Tascam Model 12 + - ALSA: usb-audio: Add new quirk FIXED_RATE for JBL Quantum810 Wireless + - ALSA: usb-audio: Fix microphone sound on Nexigo webcam. + - ALSA: usb-audio: add quirk for RODE NT-USB+ + - drm/amd/display: Fix uninitialized variable usage in core_link_ 'read_dpcd() + & write_dpcd()' functions + - nfp: flower: add goto_chain_index for ct entry + - nfp: flower: add hardware offload check for post ct entry + - selftests/mm: switch to bash from sh + - selftests: mm: fix map_hugetlb failure on 64K page size systems + - xhci: process isoc TD properly when there was a transaction error mid TD. 
+ - xhci: handle isoc Babble and Buffer Overrun events properly + - serial: max310x: use regmap methods for SPI batch operations + - serial: max310x: use a separate regmap for each port + - serial: max310x: prevent infinite while() loop in port startup + - drm/amd/pm: do not expose the API used internally only in kv_dpm.c + - drm/amdgpu: Reset IH OVERFLOW_CLEAR bit + - selftests: mptcp: decrease BW in simult flows + - hv_netvsc: use netif_is_bond_master() instead of open code + - hv_netvsc: Register VF in netvsc_probe if NET_DEVICE_REGISTER missed + - drm/amd/display: Re-arrange FPU code structure for dcn2x + - drm/amd/display: move calcs folder into DML + - drm/amd/display: remove DML Makefile duplicate lines + - drm/amd/display: Increase frame-larger-than for all display_mode_vba files + - getrusage: add the "signal_struct *sig" local variable + - getrusage: move thread_group_cputime_adjusted() outside of + lock_task_sighand() + - getrusage: use __for_each_thread() + - getrusage: use sig->stats_lock rather than lock_task_sighand() + - proc: Use task_is_running() for wchan in /proc/$pid/stat + - fs/proc: do_task_stat: move thread_group_cputime_adjusted() outside of + lock_task_sighand() + - ALSA: usb-audio: Fix wrong kfree issue in snd_usb_endpoint_free_all + - ALSA: usb-audio: Always initialize fixed_rate in + snd_usb_find_implicit_fb_sync_format() + - ALSA: usb-audio: Add FIXED_RATE quirk for JBL Quantum610 Wireless + - ALSA: usb-audio: Sort quirk table entries + - regmap: allow to define reg_update_bits for no bus configuration + - regmap: Add bulk read/write callbacks into regmap_config + - serial: max310x: make accessing revision id interface-agnostic + - serial: max310x: fix IO data corruption in batched operations + - Linux 5.15.152 + + * CVE-2024-26809 + - netfilter: nft_set_pipapo: release elements in clone only from destroy path + + * CVE-2024-26792 + - btrfs: fix double free of anonymous device after snapshot creation failure + + * CVE-2023-52530 + - wifi: mac80211: fix potential key use-after-free + + * CVE-2023-52447 + - bpf: Defer the free of inner map when necessary + - rcu-tasks: Provide rcu_trace_implies_rcu_gp() + + * Avoid creating non-working backlight sysfs knob from ASUS board + (LP: #2060422) + - platform/x86: asus-wmi: Consider device is absent when the read is ~0 + + * [Ubuntu 22.04.4/linux-image-6.5.0-26-generic] Kernel output "UBSAN: array- + index-out-of-bounds in /build/linux-hwe-6.5-34pCLi/linux- + hwe-6.5-6.5.0/drivers/net/hyperv/netvsc.c:1445:41" multiple times, + especially during boot. 
(LP: #2058477) + - hv: hyperv.h: Replace one-element array with flexible-array member + + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) + - netfilter: nf_tables: disallow timeout for anonymous sets + - mtd: spinand: gigadevice: Fix the get ecc status issue + - netlink: Fix kernel-infoleak-after-free in __skb_datagram_iter + - net: ip_tunnel: prevent perpetual headroom growth + - tun: Fix xdp_rxq_info's queue_index when detaching + - cpufreq: intel_pstate: fix pstate limits enforcement for adjust_perf call + back + - net: veth: clear GRO when clearing XDP even when down + - ipv6: fix potential "struct net" leak in inet6_rtm_getaddr() + - lan78xx: enable auto speed configuration for LAN7850 if no EEPROM is + detected + - net: enable memcg accounting for veth queues + - veth: try harder when allocating queue memory + - net: usb: dm9601: fix wrong return value in dm9601_mdio_read + - uapi: in6: replace temporary label with rfc9486 + - stmmac: Clear variable when destroying workqueue + - Bluetooth: Avoid potential use-after-free in hci_error_reset + - Bluetooth: hci_event: Fix wrongly recorded wakeup BD_ADDR + - netfilter: nf_tables: allow NFPROTO_INET in nft_(match/target)_validate() + - netfilter: nfnetlink_queue: silence bogus compiler warning + - netfilter: core: move ip_ct_attach indirection to struct nf_ct_hook + - netfilter: make function op structures const + - netfilter: let reset rules clean out conntrack entries + - netfilter: bridge: confirm multicast packets before passing them up the + stack + - rtnetlink: fix error logic of IFLA_BRIDGE_FLAGS writing back + - igb: extend PTP timestamp adjustments to i211 + - efi/capsule-loader: fix incorrect allocation size + - power: supply: bq27xxx-i2c: Do not free non existing IRQ + - ALSA: Drop leftover snd-rtctimer stuff from Makefile + - fbcon: always restore the old font data in fbcon_do_set_font() + - afs: Fix endless loop in directory parsing + - riscv: Sparse-Memory/vmemmap out-of-bounds fix + - ALSA: firewire-lib: fix to check cycle continuity + - gtp: fix use-after-free and null-ptr-deref in gtp_newlink() + - wifi: nl80211: reject iftype change with mesh ID change + - btrfs: dev-replace: properly validate device names + - dmaengine: fsl-qdma: fix SoC may hang on 16 byte unaligned read + - dmaengine: ptdma: use consistent DMA masks + - dmaengine: fsl-qdma: init irq after reg initialization + - mmc: core: Fix eMMC initialization with 1-bit bus connection + - mmc: sdhci-xenon: add timeout for PHY init complete + - mmc: sdhci-xenon: fix PHY init clock stability + - pmdomain: qcom: rpmhpd: Fix enabled_corner aggregation + - x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers + - mptcp: move __mptcp_error_report in protocol.c + - mptcp: process pending subflow error on close + - mptcp: rename timer related helper to less confusing names + - selftests: mptcp: add missing kconfig for NF Filter + - selftests: mptcp: add missing kconfig for NF Filter in v6 + - mptcp: clean up harmless false expressions + - mptcp: add needs_id for netlink appending addr + - mptcp: push at DSS boundaries + - mptcp: fix possible deadlock in subflow diag + - cachefiles: fix memory leak in cachefiles_add_cache() + - fs,hugetlb: fix NULL pointer dereference in hugetlbs_fill_super + - Revert "drm/bridge: lt8912b: Register and attach our DSI device at probe" + - af_unix: Drop oob_skb ref before purging queue in GC. 
+ - gpio: 74x164: Enable output pins after registers are reset + - gpiolib: Fix the error path order in gpiochip_add_data_with_key() + - gpio: fix resource unwinding order in error path + - Revert "interconnect: Fix locking for runpm vs reclaim" + - Revert "interconnect: Teach lockdep about icc_bw_lock order" + - bpf: Add BPF_FIB_LOOKUP_SKIP_NEIGH for bpf_fib_lookup + - bpf: Add table ID to bpf_fib_lookup BPF helper + - bpf: Derive source IP addr via bpf_*_fib_lookup() + - Linux 5.15.151 + + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) // + CVE-2024-26782 + - mptcp: fix double-free on socket dismantle + + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) // Fix + bluetooth connections with 3.0 device (LP: #2063067) + - Bluetooth: hci_event: Fix handling of HCI_EV_IO_CAPA_REQUEST + + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) + - net/sched: Retire CBQ qdisc + - [Config] updateconfigs for NET_SCH_CBQ + - net/sched: Retire ATM qdisc + - [Config] updateconfigs for NET_SCH_ATM + - net/sched: Retire dsmark qdisc + - [Config] updateconfigs for NET_SCH_DSMARK + - smb: client: fix potential OOBs in smb2_parse_contexts() + - smb: client: fix parsing of SMB3.1.1 POSIX create context + - sched/rt: sysctl_sched_rr_timeslice show default timeslice after reset + - PCI: dwc: Fix a 64bit bug in dw_pcie_ep_raise_msix_irq() + - bpf: Merge printk and seq_printf VARARG max macros + - bpf: Add struct for bin_args arg in bpf_bprintf_prepare + - bpf: Do cleanup in bpf_bprintf_cleanup only when needed + - bpf: Remove trace_printk_lock + - userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb + - zonefs: Improve error handling + - x86/fpu: Stop relying on userspace for info to fault in xsave buffer + - sched/rt: Fix sysctl_sched_rr_timeslice intial value + - sched/rt: Disallow writing invalid values to sched_rt_period_us + - scsi: target: core: Add TMF to tmr_list handling + - dmaengine: shdma: increase size of 'dev_id' + - dmaengine: fsl-qdma: increase size of 'irq_name' + - wifi: cfg80211: fix missing interfaces when dumping + - wifi: mac80211: fix race condition on enabling fast-xmit + - fbdev: savage: Error out if pixclock equals zero + - fbdev: sis: Error out if pixclock equals zero + - spi: hisi-sfc-v3xx: Return IRQ_NONE if no interrupts were detected + - ahci: asm1166: correct count of reported ports + - ahci: add 43-bit DMA address quirk for ASMedia ASM1061 controllers + - MIPS: reserve exception vector space ONLY ONCE + - platform/x86: touchscreen_dmi: Add info for the TECLAST X16 Plus tablet + - ext4: avoid dividing by 0 in mb_update_avg_fragment_size() when block bitmap + corrupt + - ext4: avoid allocating blocks from corrupted group in + ext4_mb_try_best_found() + - ext4: avoid allocating blocks from corrupted group in ext4_mb_find_by_goal() + - dmaengine: ti: edma: Add some null pointer checks to the edma_probe + - regulator: pwm-regulator: Add validity checks in continuous .get_voltage + - nvmet-tcp: fix nvme tcp ida memory leak + - ALSA: usb-audio: Check presence of valid altsetting control + - ASoC: sunxi: sun4i-spdif: Add support for Allwinner H616 + - spi: sh-msiof: avoid integer overflow in constants + - Input: xpad - add Lenovo Legion Go controllers + - netfilter: conntrack: check SCTP_CID_SHUTDOWN_ACK for vtag setting in + sctp_new + - ALSA: usb-audio: Ignore clock selector errors for single connection + - nvme-fc: do not wait in vain when unloading module + - nvmet-fcloop: swap the list_add_tail arguments + - nvmet-fc: release 
reference on target port + - nvmet-fc: defer cleanup using RCU properly + - nvmet-fc: hold reference on hostport match + - nvmet-fc: abort command when there is no binding + - nvmet-fc: avoid deadlock on delete association path + - nvmet-fc: take ref count on tgtport before delete assoc + - ext4: correct the hole length returned by ext4_map_blocks() + - Input: i8042 - add Fujitsu Lifebook U728 to i8042 quirk table + - fs/ntfs3: Modified fix directory element type detection + - fs/ntfs3: Improve ntfs_dir_count + - fs/ntfs3: Correct hard links updating when dealing with DOS names + - fs/ntfs3: Print warning while fixing hard links count + - fs/ntfs3: Fix detected field-spanning write (size 8) of single field + "le->name" + - fs/ntfs3: Add NULL ptr dereference checking at the end of + attr_allocate_frame() + - fs/ntfs3: Disable ATTR_LIST_ENTRY size check + - fs/ntfs3: use non-movable memory for ntfs3 MFT buffer cache + - fs/ntfs3: Prevent generic message "attempt to access beyond end of device" + - fs/ntfs3: Correct function is_rst_area_valid + - fs/ntfs3: Update inode->i_size after success write into compressed file + - fs/ntfs3: Fix oob in ntfs_listxattr + - wifi: mac80211: adding missing drv_mgd_complete_tx() call + - efi: runtime: Fix potential overflow of soft-reserved region size + - efi: Don't add memblocks for soft-reserved memory + - hwmon: (coretemp) Enlarge per package core count limit + - scsi: lpfc: Use unsigned type for num_sge + - firewire: core: send bus reset promptly on gap count error + - drm/amdgpu: skip to program GFXDEC registers for suspend abort + - drm/amdgpu: reset gpu for s3 suspend abort case + - virtio-blk: Ensure no requests in virtqueues before deleting vqs. + - pmdomain: mediatek: fix race conditions with genpd + - ksmbd: free aux buffer if ksmbd_iov_pin_rsp_read fails + - pmdomain: renesas: r8a77980-sysc: CR7 must be always on + - erofs: fix lz4 inplace decompression + - IB/hfi1: Fix sdma.h tx->num_descs off-by-one error + - drm/ttm: Fix an invalid freeing on already freed page in error path + - dm-crypt: don't modify the data when using authenticated encryption + - platform/x86: intel-vbtn: Stop calling "VBDL" from notify_handler + - platform/x86: touchscreen_dmi: Allow partial (prefix) matches for ACPI names + - KVM: arm64: vgic-its: Test for valid IRQ in MOVALL handler + - KVM: arm64: vgic-its: Test for valid IRQ in its_sync_lpi_pending_table() + - gtp: fix use-after-free and null-ptr-deref in gtp_genl_dump_pdp() + - PCI/MSI: Prevent MSI hardware interrupt number truncation + - l2tp: pass correct message length to ip6_append_data + - ARM: ep93xx: Add terminator to gpiod_lookup_table + - Revert "x86/ftrace: Use alternative RET encoding" + - x86/text-patching: Make text_gen_insn() play nice with ANNOTATE_NOENDBR + - x86/ibt,paravirt: Use text_gen_insn() for paravirt_patch() + - x86/ftrace: Use alternative RET encoding + - x86/returnthunk: Allow different return thunks + - Revert "x86/alternative: Make custom return thunk unconditional" + - x86/alternative: Make custom return thunk unconditional + - serial: amba-pl011: Fix DMA transmission in RS485 mode + - usb: dwc3: gadget: Don't disconnect if not started + - usb: cdnsp: blocked some cdns3 specific code + - usb: cdnsp: fixed issue with incorrect detecting CDNSP family controllers + - usb: cdns3: fixed memory use after free at cdns3_gadget_ep_disable() + - usb: gadget: ncm: Avoid dropping datagrams of properly parsed NTBs + - usb: roles: fix NULL pointer issue when put module's reference + - usb: roles: don't 
get/set_role() when usb_role_switch is unregistered + - mptcp: fix lockless access in subflow ULP diag + - clk: imx: imx8mp: add shared clk gate for usb suspend clk + - clk: qcom: gcc-qcs404: disable gpll[04]_out_aux parents + - clk: qcom: gcc-qcs404: fix names of the DSI clocks used as parents + - mtd: rawnand: sunxi: Fix the size of the last OOB region + - RISC-V: fix funct4 definition for c.jalr in parse_asm.h + - Input: iqs269a - drop unused device node references + - Input: iqs269a - configure device with a single block write + - Input: iqs269a - increase interrupt handler return delay + - clk: renesas: cpg-mssr: Fix use after free if cpg_mssr_common_init() failed + - Input: ads7846 - don't report pressure for ads7845 + - clk: renesas: cpg-mssr: Remove superfluous check in resume code + - clk: imx: avoid memory leak + - Input: ads7846 - always set last command to PWRDOWN + - Input: ads7846 - don't check penirq immediately for 7845 + - powerpc/powernv/ioda: Skip unallocated resources when mapping to PE + - clk: qcom: gpucc-sc7180: fix clk_dis_wait being programmed for CX GDSC + - clk: qcom: gpucc-sdm845: fix clk_dis_wait being programmed for CX GDSC + - clk: Honor CLK_OPS_PARENT_ENABLE in clk_core_is_enabled() + - powerpc/pseries/lparcfg: add missing RTAS retry status handling + - powerpc/perf/hv-24x7: add missing RTAS retry status handling + - powerpc/pseries/lpar: add missing RTAS retry status handling + - MIPS: SMP-CPS: fix build error when HOTPLUG_CPU not set + - MIPS: vpe-mt: drop physical_memsize + - vdpa/mlx5: Don't clear mr struct on destroy MR + - ARM: dts: BCM53573: Drop nonexistent #usb-cells + - RDMA/siw: Balance the reference of cep->kref in the error path + - RDMA/siw: Correct wrong debug message + - clk: linux/clk-provider.h: fix kernel-doc warnings and typos + - platform/x86: asus-wmi: Document the dgpu_disable sysfs attribute + - acpi: property: Let args be NULL in __acpi_node_get_property_reference + - ARM: dts: BCM53573: Drop nonexistent "default-off" LED trigger + - tools headers UAPI: Sync linux/fscrypt.h with the kernel sources + - perf beauty: Update copy of linux/socket.h with the kernel sources + - tools/virtio: fix build + - drm/amdgpu: init iommu after amdkfd device init + - f2fs: don't set GC_FAILURE_PIN for background GC + - f2fs: write checkpoint during FG_GC + - drm/i915/dg1: Update DMC_DEBUG3 register + - kernel/sched: Remove dl_boosted flag comment + - cifs: remove useless parameter 'is_fsctl' from SMB2_ioctl() + - serial: 8250: Remove serial_rs485 sanitization from em485 + - clk: imx8mp: Add DISP2 pixel clock + - clk: imx8mp: add clkout1/2 support + - dt-bindings: clocks: imx8mp: Add ID for usb suspend clock + - net: ethernet: ti: add missing of_node_put before return + - powerpc/rtas: make all exports GPL + - powerpc/rtas: ensure 4KB alignment for rtas_data_buf + - powerpc/eeh: Small refactor of eeh_handle_normal_event() + - powerpc/eeh: Set channel state after notifying the drivers + - PM: core: Redefine pm_ptr() macro + - PM: core: Add new *_PM_OPS macros, deprecate old ones + - mmc: jz4740: Use the new PM macros + - mmc: mxc: Use the new PM macros + - PM: core: Remove static qualifier in DEFINE_SIMPLE_DEV_PM_OPS macro + - Input: iqs269a - switch to DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr() + - Input: iqs269a - do not poll during suspend or resume + - Input: iqs269a - do not poll during ATI + - net/sched: Refactor qdisc_graft() for ingress and clsact Qdiscs + - netfilter: nf_tables: add rescheduling points during loop detection walks + - 
debugobjects: Recheck debug_objects_enabled before reporting + - nbd: Add the maximum limit of allocated index in nbd_dev_add + - md: fix data corruption for raid456 when reshape restart while grow up + - md/raid10: prevent soft lockup while flush writes + - posix-timers: Ensure timer ID search-loop limit is valid + - btrfs: add xxhash to fast checksum implementations + - ACPI: button: Add lid disable DMI quirk for Nextbook Ares 8A + - ACPI: video: Add backlight=native DMI quirk for Apple iMac11,3 + - ACPI: video: Add backlight=native DMI quirk for Lenovo ThinkPad X131e (3371 + AMD version) + - arm64: set __exception_irq_entry with __irq_entry as a default + - arm64: mm: fix VA-range sanity check + - sched/fair: Don't balance task to its current running CPU + - wifi: ath11k: fix registration of 6Ghz-only phy without the full channel + range + - bpf: Address KCSAN report on bpf_lru_list + - devlink: report devlink_port_type_warn source device + - wifi: wext-core: Fix -Wstringop-overflow warning in + ioctl_standard_iw_point() + - wifi: iwlwifi: mvm: avoid baid size integer overflow + - exfat: support dynamic allocate bh for exfat_entry_set_cache + - arm64: dts: rockchip: fix regulator name on rk3399-rock-4 + - arm64: dts: rockchip: add ES8316 codec for ROCK Pi 4 + - arm64: dts: rockchip: add SPDIF node for ROCK Pi 4 + - ARM: dts: BCM53573: Describe on-SoC BCM53125 rev 4 switch + - ACPI: video: Add backlight=native DMI quirk for Apple iMac12,1 and iMac12,2 + - ACPI: resource: Skip IRQ override on Asus Vivobook S5602ZA + - ACPI: resource: Add Asus ExpertBook B2502 to Asus quirks + - ACPI: resource: Skip IRQ override on Asus Expertbook B2402CBA + - ACPI: resource: Skip IRQ override on ASUS ExpertBook B1502CBA + - xhci: cleanup xhci_hub_control port references + - xhci: move port specific items such as state completions to port structure + - xhci: rename resume_done to resume_timestamp + - xhci: clear usb2 resume related variables in one place. 
+ - xhci: decouple usb2 port resume and get_port_status request handling + - xhci: track port suspend state correctly in unsuccessful resume cases + - cifs: add a warning when the in-flight count goes negative + - IB/hfi1: Fix a memleak in init_credit_return + - RDMA/bnxt_re: Return error for SRQ resize + - RDMA/irdma: Fix KASAN issue with tasklet + - RDMA/irdma: Validate max_send_wr and max_recv_wr + - RDMA/irdma: Set the CQ read threshold for GEN 1 + - RDMA/irdma: Add AE for too many RNRS + - RDMA/srpt: Support specifying the srpt_service_guid parameter + - RDMA/qedr: Fix qedr_create_user_qp error flow + - arm64: dts: rockchip: set num-cs property for spi on px30 + - RDMA/srpt: fix function pointer cast warnings + - bpf, scripts: Correct GPL license name + - scsi: jazz_esp: Only build if SCSI core is builtin + - nouveau: fix function cast warnings + - net: stmmac: Fix incorrect dereference in interrupt handlers + - ipv4: properly combine dev_base_seq and ipv4.dev_addr_genid + - ipv6: properly combine dev_base_seq and ipv6.dev_addr_genid + - ata: libahci_platform: Convert to using devm bulk clocks API + - ata: libahci_platform: Introduce reset assertion/deassertion methods + - ata: ahci_ceva: fix error handling for Xilinx GT PHY support + - bpf: Fix racing between bpf_timer_cancel_and_free and bpf_timer_cancel + - drm/nouveau/instmem: fix uninitialized_var.cocci warning + - octeontx2-af: Consider the action set by PF + - s390: use the correct count for __iowrite64_copy() + - netfilter: nf_tables: set dormant flag on hook register failure + - netfilter: flowtable: simplify route logic + - netfilter: nft_flow_offload: reset dst in route object after setting up flow + - netfilter: nft_flow_offload: release dst in case direct xmit path is used + - drm/syncobj: call drm_syncobj_fence_add_wait when WAIT_AVAILABLE flag is set + - drm/amd/display: Fix memory leak in dm_sw_fini() + - i2c: imx: Add timer for handling the stop condition + - i2c: imx: when being a target, mark the last read as processed + - fs/aio: Restrict kiocb_set_cancel_fn() to I/O submitted via libaio + - netfilter: nf_tables: fix scheduling-while-atomic splat + - ext4: regenerate buddy after block freeing failed if under fc replay + - ext4: avoid bb_free and bb_fragments inconsistency in mb_free_blocks() + - netfilter: nf_tables: can't schedule in nft_chain_validate + - r8169: use new PM macros + - Linux 5.15.150 + + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26733 + - packet: move from strlcpy with unused retval to strscpy + - net: dev: Convert sa_data to flexible array in struct sockaddr + - arp: Prevent overflow in arp_req_get(). 
+
+  * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // CVE-2024-26735
+    - ipv6: sr: fix possible use-after-free and null-ptr-deref
+
+  * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // CVE-2024-26736
+    - afs: Increase buffer size in afs_update_volume_status()
+
+  * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // CVE-2024-26748
+    - usb: cdns3: fix memory double free when handle zero packet
+
+  * CVE-2023-47233
+    - wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach
+
+  * CVE-2024-26584
+    - net: tls: handle backlogging of crypto requests
+
+  * CVE-2024-26585
+    - tls: fix race between tx work scheduling and socket close
+
+  * CVE-2024-26583
+    - tls: rx: jump to a more appropriate label
+    - tls: rx: drop pointless else after goto
+    - tls: stop recv() if initial process_rx_list gave us non-DATA
+    - tls: rx: don't store the record type in socket context
+    - tls: rx: don't store the decryption status in socket context
+    - tls: rx: don't issue wake ups when data is decrypted
+    - tls: rx: refactor decrypt_skb_update()
+    - tls: hw: rx: use return value of tls_device_decrypted() to carry status
+    - tls: rx: drop unnecessary arguments from tls_setup_from_iter()
+    - tls: rx: don't report text length from the bowels of decrypt
+    - tls: rx: wrap decryption arguments in a structure
+    - tls: rx: factor out writing ContentType to cmsg
+    - tls: rx: don't track the async count
+    - tls: rx: move counting TlsDecryptErrors for sync
+    - tls: rx: assume crypto always calls our callback
+    - tls: rx: use async as an in-out argument
+    - tls: decrement decrypt_pending if no async completion will be called
+    - net: tls: fix async vs NIC crypto offload
+    - Revert "tls: rx: move counting TlsDecryptErrors for sync"
+    - tls: rx: simplify async wait
+    - tls: rx: return the already-copied data on crypto error
+    - tls: rx: allow only one reader at a time
+    - tls: rx: release the sock lock on locking timeout
+    - tls: extract context alloc/initialization out of tls_set_sw_offload
+    - net: tls: factor out tls_*crypt_async_wait()
+    - tls: fix race between async notify and socket close
+
+  * CVE-2024-26622
+    - tomoyo: fix UAF write bug in tomoyo_write_control()
+
+ -- Roxana Nicolescu Fri, 26 Apr 2024 13:29:50 +0200
+
 linux (5.15.0-106.116) jammy; urgency=medium

   * jammy/linux: 5.15.0-106.116 -proposed tracker (LP: #2061812)
diff -u linux-aws-5.15-5.15.0/debian.master/config/annotations linux-aws-5.15-5.15.0/debian.master/config/annotations
--- linux-aws-5.15-5.15.0/debian.master/config/annotations
+++ linux-aws-5.15-5.15.0/debian.master/config/annotations
@@ -8105,15 +8105,12 @@
 CONFIG_NET_RX_BUSY_POLL policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 's390x': 'y'}>
 CONFIG_NET_SB1000 policy<{'amd64': 'm', 'arm64': 'm'}>
 CONFIG_NET_SCHED policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 's390x': 'y'}>
-CONFIG_NET_SCH_ATM policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm'}>
 CONFIG_NET_SCH_CAKE policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'm'}>
-CONFIG_NET_SCH_CBQ policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'm'}>
 CONFIG_NET_SCH_CBS policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'm'}>
 CONFIG_NET_SCH_CHOKE policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'm'}>
 CONFIG_NET_SCH_CODEL policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'm'}>
 CONFIG_NET_SCH_DEFAULT policy<{'amd64': 'n', 'arm64': 'n', 'armhf': 'n', 'ppc64el': 'n', 's390x': 'n'}>
 CONFIG_NET_SCH_DRR policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'm'}>
-CONFIG_NET_SCH_DSMARK policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'm'}>
 CONFIG_NET_SCH_ETF policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'm'}>
 CONFIG_NET_SCH_ETS policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'm'}>
 CONFIG_NET_SCH_FIFO policy<{'amd64': 'y', 'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y', 's390x': 'y'}>
@@ -8857,6 +8854,7 @@
 CONFIG_PCI_ENDPOINT_TEST policy<{'amd64': 'n', 'arm64': 'n', 'armhf': 'n', 'ppc64el': 'n', 's390x': 'n'}>
 CONFIG_PCI_EPF_NTB policy<{'amd64': 'm', 'arm64': 'm', 'armhf': 'm', 'ppc64el': 'm', 's390x': 'n'}>
 CONFIG_PCI_EPF_TEST policy<{'amd64': 'n', 'arm64': 'n', 'armhf': 'n', 'ppc64el': 'n', 's390x': 'n'}>
+CONFIG_PCI_EPF_VNTB policy<{'amd64': 'n', 'arm64': 'n', 'armhf': 'n', 'ppc64el': 'n', 's390x': 'n'}>
 CONFIG_PCI_EXYNOS policy<{'armhf': 'm'}>
 CONFIG_PCI_FTPCI100 policy<{'arm64': 'y', 'armhf': 'y', 'ppc64el': 'y'}>
 CONFIG_PCI_HISI policy<{'arm64': 'y'}>
@@ -12329,12 +12327,12 @@
 CONFIG_TEGRA124_EMC policy<{'armhf-generic': 'y'}>
 CONFIG_TEGRA20_APB_DMA policy<{'arm64': 'y', 'armhf-generic': 'y'}>
 CONFIG_TEGRA20_EMC policy<{'armhf-generic': 'y'}>
-CONFIG_TEGRA210_ADMA policy<{'arm64': 'm'}>
+CONFIG_TEGRA210_ADMA policy<{'arm64': 'm', 'armhf-generic': 'n'}>
 CONFIG_TEGRA210_EMC policy<{'arm64': 'm'}>
 CONFIG_TEGRA210_EMC_TABLE policy<{'arm64': 'y'}>
 CONFIG_TEGRA30_EMC policy<{'armhf-generic': 'y'}>
 CONFIG_TEGRA30_TSENSOR policy<{'armhf-generic': 'm'}>
-CONFIG_TEGRA_ACONNECT policy<{'arm64': 'm'}>
+CONFIG_TEGRA_ACONNECT policy<{'arm64': 'm', 'armhf-generic': 'n'}>
 CONFIG_TEGRA_AHB policy<{'arm64': 'y', 'armhf-generic': 'y'}>
 CONFIG_TEGRA_BPMP policy<{'arm64': 'y', 'armhf-generic': 'y'}>
 CONFIG_TEGRA_BPMP_THERMAL policy<{'arm64': 'm', 'armhf-generic': 'm'}>
diff -u linux-aws-5.15-5.15.0/debian.master/reconstruct linux-aws-5.15-5.15.0/debian.master/reconstruct
--- linux-aws-5.15-5.15.0/debian.master/reconstruct
+++ linux-aws-5.15-5.15.0/debian.master/reconstruct
@@ -86,6 +86,17 @@
 rm -f 'drivers/bus/mhi/core/pm.c'
 rm -f 'drivers/bus/mhi/pci_generic.c'
 rm -f 'drivers/counter/counter.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/Makefile'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/custom_float.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c'
+rm -f 'drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h'
 rm -f 'drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h'
 rm -f 'drivers/gpu/drm/msm/hdmi/hdmi_connector.c'
 rm -f 'drivers/gpu/drm/vmwgfx/vmwgfx_thp.c'
@@ -145,6 +156,9 @@
 rm -f 'net/sched/cls_rsvp.h'
 rm -f 'net/sched/cls_rsvp6.c'
 rm -f 'net/sched/cls_tcindex.c'
+rm -f 'net/sched/sch_atm.c'
+rm -f 'net/sched/sch_cbq.c'
+rm -f 'net/sched/sch_dsmark.c'
 rm -f 'net/xfrm/xfrm_interface.c'
 rm -f 'tools/build/feature/test-libpython-version.c'
 rm -f 'tools/perf/arch/arm64/util/machine.c'
diff -u linux-aws-5.15-5.15.0/debian.master/tracking-bug linux-aws-5.15-5.15.0/debian.master/tracking-bug
--- linux-aws-5.15-5.15.0/debian.master/tracking-bug
+++ linux-aws-5.15-5.15.0/debian.master/tracking-bug
@@ -1 +1 @@
-2061812 2024.04.01-3
+2063763 2024.04.29-1
diff -u linux-aws-5.15-5.15.0/debian.master/upstream-stable linux-aws-5.15-5.15.0/debian.master/upstream-stable
--- linux-aws-5.15-5.15.0/debian.master/upstream-stable
+++ linux-aws-5.15-5.15.0/debian.master/upstream-stable
@@ -3 +3 @@
- linux-5.15.y = v5.15.149
+ linux-5.15.y = v5.15.153
diff -u linux-aws-5.15-5.15.0/debian/changelog linux-aws-5.15-5.15.0/debian/changelog
--- linux-aws-5.15-5.15.0/debian/changelog
+++ linux-aws-5.15-5.15.0/debian/changelog
@@ -1,3 +1,834 @@
+linux-aws-5.15 (5.15.0-1063.69~20.04.1) focal; urgency=medium
+
+  * focal/linux-aws-5.15: 5.15.0-1063.69~20.04.1 -proposed tracker (LP: #2063711)
+
+  [ Ubuntu: 5.15.0-1063.69 ]
+
+  * jammy/linux-aws: 5.15.0-1063.69 -proposed tracker (LP: #2063712)
+  * aws: Support hibernation on Graviton (LP: #2060992)
+    - SAUCE: PM: hibernate: Allow ACPI hardware signature to be honoured
+    - SAUCE: PM: hibernate: Honour ACPI hardware signature by default for virtual guests
+    - SAUCE: ACPICA: Detect FACS even for hardware reduced platforms
+    - SAUCE: arm64: acpi: Honour firmware_signature field of FACS, if it exists
+    - SAUCE: firmware/psci: Add definitions for PSCI v1.3 specification (ALPHA)
+    - SAUCE: arm64: Use SYSTEM_OFF2 PSCI call to power off for hibernate
+    - [Config]: Enable hibernate on arm64
+    - [Config]: Enable hibernate on arm64
+  * jammy/linux: 5.15.0-111.121 -proposed tracker (LP: #2063763)
+  * RTL8852BE fw security fail then lost WIFI function during suspend/resume cycle (LP: #2063096)
+    - wifi: rtw89: download firmware with five times retry
+  * Mount CIFS fails with Permission denied (LP: #2061986)
+    - cifs: fix ntlmssp auth when there is no key exchange
+  * USB stick can't be detected (LP: #2040948)
+    - usb: Disable USB3 LPM at shutdown
+  * Jammy update: v5.15.153 upstream stable release (LP: #2063290)
+    - io_uring/unix: drop usage of io_uring socket
+    - io_uring: drop any code related to SCM_RIGHTS
+    - selftests: tls: use exact comparison in recv_partial
+    - ASoC: rt5645: Make LattePanda board DMI match more precise
+    - x86/xen: Add some null pointer checking to smp.c
+    - MIPS: Clear Cause.BD in instruction_pointer_set
+    - HID: multitouch: Add required quirk for Synaptics 0xcddc device
+    - gen_compile_commands: fix invalid escape sequence warning
+    - RDMA/mlx5: Fix fortify source warning while accessing Eth segment
+    - RDMA/mlx5: Relax DEVX access upon modify commands
+    - riscv: dts: sifive: add missing #interrupt-cells to pmic
+    - x86/mm: Move is_vsyscall_vaddr() into asm/vsyscall.h
+    - x86/mm: Disallow vsyscall page read for copy_from_kernel_nofault()
+    - net/iucv: fix the allocation size of iucv_path_table array
+    - parisc/ftrace: add missing CONFIG_DYNAMIC_FTRACE check
+    - block: sed-opal: handle empty atoms when parsing response
+    - dm-verity, dm-crypt: align "struct bvec_iter" correctly
+    - scsi: mpt3sas: Prevent sending diag_reset when the controller is ready
+    - ALSA: hda/realtek - ALC285 reduce pop noise from Headphone port
+    - drm/amdgpu: Enable gpu reset for S3 abort cases on Raven series
+    - Bluetooth: rfcomm: Fix null-ptr-deref in rfcomm_check_security
+    - firewire: core: use long bus reset on gap count error
+    - ASoC: Intel: bytcr_rt5640: Add an extra entry for the Chuwi Vi8 tablet
+    - Input: gpio_keys_polled - suppress deferred probe error for gpio
+    - ASoC: wm8962: Enable oscillator if selecting WM8962_FLL_OSC
+    - ASoC: wm8962: Enable both SPKOUTR_ENA and SPKOUTL_ENA in mono mode
+    - ASoC: wm8962: Fix up incorrect error message in wm8962_set_fll
+    - do_sys_name_to_handle(): use kzalloc() to fix kernel-infoleak
+    - s390/dasd: put block allocation in separate function
+    - s390/dasd: add query PPRC function
+    - s390/dasd: add copy pair setup
+    - s390/dasd: add autoquiesce feature
+    - s390/dasd: Use dev_*() for device log messages
+    - s390/dasd: fix double module refcount decrement
+    - fs/select: rework stack allocation hack for clang
+    - md: Don't clear MD_CLOSING when the raid is about to stop
+    - lib/cmdline: Fix an invalid format specifier in an assertion msg
+    - time: test: Fix incorrect format specifier
+    - rtc: test: Fix invalid format specifier.
+    - aoe: fix the potential use-after-free problem in aoecmd_cfg_pkts
+    - timekeeping: Fix cross-timestamp interpolation on counter wrap
+    - timekeeping: Fix cross-timestamp interpolation corner case decision
+    - timekeeping: Fix cross-timestamp interpolation for non-x86
+    - sched/fair: Take the scheduling domain into account in select_idle_core()
+    - wifi: ath10k: fix NULL pointer dereference in ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev()
+    - wifi: b43: Stop/wake correct queue in DMA Tx path when QoS is disabled
+    - wifi: b43: Stop/wake correct queue in PIO Tx path when QoS is disabled
+    - wifi: b43: Stop correct queue in DMA worker when QoS is disabled
+    - wifi: b43: Disable QoS for bcm4331
+    - wifi: wilc1000: fix declarations ordering
+    - wifi: wilc1000: fix RCU usage in connect path
+    - wifi: rtl8xxxu: add cancel_work_sync() for c2hcmd_work
+    - wifi: wilc1000: fix multi-vif management when deleting a vif
+    - wifi: mwifiex: debugfs: Drop unnecessary error check for debugfs_create_dir()
+    - cpufreq: brcmstb-avs-cpufreq: add check for cpufreq_cpu_get's return value
+    - cpufreq: Explicitly include correct DT includes
+    - cpufreq: mediatek-hw: Wait for CPU supplies before probing
+    - sock_diag: annotate data-races around sock_diag_handlers[family]
+    - inet_diag: annotate data-races around inet_diag_table[]
+    - bpftool: Silence build warning about calloc()
+    - af_unix: Annotate data-race of gc_in_progress in wait_for_unix_gc().
+ - cpufreq: mediatek-hw: Don't error out if supply is not found + - arm64: dts: imx8mm-kontron: Disable pullups for I2C signals on SL/BL i.MX8MM + - arm64: dts: imx8mm-kontron: Disable pullups for onboard UART signals on BL + board + - arm64: dts: imx8mm-kontron: Add support for ultra high speed modes on SD + card + - arm64: dts: imx8mm-kontron: Use the VSELECT signal to switch SD card IO + voltage + - arm64: dts: imx8mm-kontron: Disable pull resistors for SD card signals on BL + board + - wifi: ath9k: delay all of ath9k_wmi_event_tasklet() until init is complete + - wifi: iwlwifi: mvm: report beacon protection failures + - wifi: iwlwifi: dbg-tlv: ensure NUL termination + - wifi: iwlwifi: fix EWRD table validity check + - arm64: dts: imx8mm-venice-gw71xx: fix USB OTG VBUS + - pwm: atmel-hlcdc: Convert to platform remove callback returning void + - pwm: atmel-hlcdc: Use consistent variable naming + - pwm: atmel-hlcdc: Fix clock imbalance related to suspend support + - net: blackhole_dev: fix build warning for ethh set but not used + - wifi: libertas: fix some memleaks in lbs_allocate_cmd_buffer() + - pwm: sti: Implement .apply() callback + - pwm: sti: Fix capture for st,pwm-num-chan < st,capture-num-chan + - wifi: iwlwifi: mvm: don't set replay counters to 0xff + - s390/vdso: drop '-fPIC' from LDFLAGS + - ipv6: mcast: remove one synchronize_net() barrier in ipv6_mc_down() + - arm64: dts: mt8183: kukui: Add Type C node + - arm64: dts: mt8183: kukui: Split out keyboard node and describe detachables + - arm64: dts: mt8183: Move CrosEC base detection node to kukui-based DTs + - arm64: dts: mediatek: mt7622: add missing "device_type" to memory nodes + - bpf: Mark bpf_spin_{lock,unlock}() helpers with notrace correctly + - wireless: Remove redundant 'flush_workqueue()' calls + - wifi: wilc1000: prevent use-after-free on vif when cleaning up all + interfaces + - ACPI: processor_idle: Fix memory leak in acpi_processor_power_exit() + - bus: tegra-aconnect: Update dependency to ARCH_TEGRA + - [Config]: update CONFIG_TEGRA_ACONNECT + - iommu/amd: Mark interrupt as managed + - wifi: brcmsmac: avoid function pointer casts + - net: ena: Remove ena_select_queue + - ARM: dts: arm: realview: Fix development chip ROM compatible value + - arm64: dts: renesas: r8a779a0: Update to R-Car Gen4 compatible values + - arm64: dts: renesas: r8a779a0: Correct avb[01] reg sizes + - ARM: dts: imx6dl-yapp4: Move phy reset into switch node + - ARM: dts: imx6dl-yapp4: Fix typo in the QCA switch register address + - ARM: dts: imx6dl-yapp4: Move the internal switch PHYs under the switch node + - arm64: dts: marvell: reorder crypto interrupts on Armada SoCs + - ACPI: resource: Add Infinity laptops to irq1_edge_low_force_override + - ACPI: resource: Do IRQ override on Lunnen Ground laptops + - ACPI: resource: Add MAIBENBEN X577 to irq1_edge_low_force_override + - ACPI: scan: Fix device check notification handling + - x86, relocs: Ignore relocations in .notes section + - SUNRPC: fix some memleaks in gssx_dec_option_array + - mmc: wmt-sdmmc: remove an incorrect release_mem_region() call in the .remove + function + - wifi: rtw88: 8821c: Fix false alarm count + - PCI: Make pci_dev_is_disconnected() helper public for other drivers + - iommu/vt-d: Don't issue ATS Invalidation request when device is disconnected + - igb: move PEROUT and EXTTS isr logic to separate functions + - igb: Fix missing time sync events + - Bluetooth: Remove superfluous call to hci_conn_check_pending() + - Bluetooth: hci_qca: Add support for QTI Bluetooth 
chip wcn6855 + - Bluetooth: hci_qca: don't use IS_ERR_OR_NULL() with gpiod_get_optional() + - Bluetooth: hci_core: Fix possible buffer overflow + - sr9800: Add check for usbnet_get_endpoints + - bpf: Fix DEVMAP_HASH overflow check on 32-bit arches + - bpf: Fix hashtab overflow check on 32-bit arches + - bpf: Fix stackmap overflow check on 32-bit arches + - ipv6: fib6_rules: flush route cache when rule is changed + - net: ip_tunnel: make sure to pull inner header in ip_tunnel_rcv() + - net: phy: fix phy_get_internal_delay accessing an empty array + - net: hns3: fix kernel crash when 1588 is received on HIP08 devices + - net: hns3: fix port duplex configure error in IMP reset + - net: phy: DP83822: enable rgmii mode if phy_interface_is_rgmii + - net: phy: dp83822: Fix RGMII TX delay configuration + - OPP: debugfs: Fix warning around icc_get_name() + - tcp: fix incorrect parameter validation in the do_tcp_getsockopt() function + - net: Change sock_getsockopt() to take the sk ptr instead of the sock ptr + - bpf: net: Change sk_getsockopt() to take the sockptr_t argument + - bpf: net: Change do_ip_getsockopt() to take the sockptr_t argument + - ipmr: fix incorrect parameter validation in the ip_mroute_getsockopt() + function + - l2tp: fix incorrect parameter validation in the pppol2tp_getsockopt() + function + - udp: fix incorrect parameter validation in the udp_lib_getsockopt() function + - net: kcm: fix incorrect parameter validation in the kcm_getsockopt) function + - net/x25: fix incorrect parameter validation in the x25_getsockopt() function + - nfp: flower: handle acti_netdevs allocation failure + - dm raid: fix false positive for requeue needed during reshape + - dm: call the resume method on internal suspend + - drm/tegra: dsi: Add missing check for of_find_device_by_node + - drm/tegra: dpaux: Populate AUX bus + - drm/tegra: dpaux: Fix PM disable depth imbalance in tegra_dpaux_probe + - drm/tegra: dsi: Make use of the helper function dev_err_probe() + - drm/tegra: dsi: Fix some error handling paths in tegra_dsi_probe() + - drm/tegra: dsi: Fix missing pm_runtime_disable() in the error handling path + of tegra_dsi_probe() + - drm/tegra: dc: rgb: Allow changing PLLD rate on Tegra30+ + - drm/tegra: rgb: Fix some error handling paths in tegra_dc_rgb_probe() + - drm/tegra: rgb: Fix missing clk_put() in the error handling paths of + tegra_dc_rgb_probe() + - drm/tegra: output: Fix missing i2c_put_adapter() in the error handling paths + of tegra_output_probe() + - drm/rockchip: inno_hdmi: Fix video timing + - drm: Don't treat 0 as -1 in drm_fixp2int_ceil + - drm/ttm: add ttm_resource_fini v2 + - drm/vmwgfx: fix a memleak in vmw_gmrid_man_get_node + - drm/rockchip: lvds: do not overwrite error code + - drm/rockchip: lvds: do not print scary message when probing defer + - drm/lima: fix a memleak in lima_heap_alloc + - dmaengine: tegra210-adma: Update dependency to ARCH_TEGRA + - [Config]: update CONFIG_TEGRA210_ADMA + - media: tc358743: register v4l2 async device only after successful setup + - PCI/DPC: Print all TLP Prefixes, not just the first + - perf record: Fix possible incorrect free in record__switch_output() + - HID: lenovo: Add middleclick_workaround sysfs knob for cptkbd + - drm/amd/display: Fix a potential buffer overflow in 'dp_dsc_clock_en_read()' + - drm/amd/display: Fix potential NULL pointer dereferences in + 'dcn10_set_output_transfer_func()' + - perf evsel: Fix duplicate initialization of data->id in + evsel__parse_sample() + - clk: meson: Add missing clocks to axg_clk_regmaps + 
- media: em28xx: annotate unchecked call to media_device_register() + - media: v4l2-tpg: fix some memleaks in tpg_alloc + - media: v4l2-mem2mem: fix a memleak in v4l2_m2m_register_entity + - media: edia: dvbdev: fix a use-after-free + - pinctrl: mediatek: Drop bogus slew rate register range for MT8192 + - clk: qcom: reset: Commonize the de/assert functions + - clk: qcom: reset: Ensure write completion on reset de/assertion + - quota: simplify drop_dquot_ref() + - quota: Fix potential NULL pointer dereference + - quota: Fix rcu annotations of inode dquot pointers + - PCI/P2PDMA: Fix a sleeping issue in a RCU read section + - PCI: switchtec: Fix an error handling path in switchtec_pci_probe() + - crypto: xilinx - call finalize with bh disabled + - perf thread_map: Free strlist on normal path in thread_map__new_by_tid_str() + - drm/radeon/ni: Fix wrong firmware size logging in ni_init_microcode() + - ALSA: seq: fix function cast warnings + - perf stat: Avoid metric-only segv + - ASoC: meson: Use dev_err_probe() helper + - ASoC: meson: aiu: fix function pointer type mismatch + - ASoC: meson: t9015: fix function pointer type mismatch + - powerpc: Force inlining of arch_vmap_p{u/m}d_supported() + - PCI: endpoint: Support NTB transfer between RC and EP + - [Config]: update CONFIG_PCI_EPF_VNTB + - NTB: EPF: fix possible memory leak in pci_vntb_probe() + - NTB: fix possible name leak in ntb_register_device() + - media: sun8i-di: Fix coefficient writes + - media: sun8i-di: Fix power on/off sequences + - media: sun8i-di: Fix chroma difference threshold + - media: imx: csc/scaler: fix v4l2_ctrl_handler memory leak + - media: go7007: add check of return value of go7007_read_addr() + - media: pvrusb2: remove redundant NULL check + - media: pvrusb2: fix pvr2_stream_callback casts + - clk: qcom: dispcc-sdm845: Adjust internal GDSC wait times + - drm/mediatek: dsi: Fix DSI RGB666 formats and definitions + - PCI: Mark 3ware-9650SE Root Port Extended Tags as broken + - clk: hisilicon: hi3519: Release the correct number of gates in + hi3519_clk_unregister() + - clk: hisilicon: hi3559a: Fix an erroneous devm_kfree() + - drm/tegra: put drm_gem_object ref on error in tegra_fb_create + - mfd: syscon: Call of_node_put() only when of_parse_phandle() takes a ref + - mfd: altera-sysmgr: Call of_node_put() only when of_parse_phandle() takes a + ref + - crypto: arm/sha - fix function cast warnings + - drm/tidss: Fix initial plane zpos values + - mtd: maps: physmap-core: fix flash size larger than 32-bit + - mtd: rawnand: lpc32xx_mlc: fix irq handler prototype + - ASoC: meson: axg-tdm-interface: fix mclk setup without mclk-fs + - ASoC: meson: axg-tdm-interface: add frame rate constraint + - HID: amd_sfh: Update HPD sensor structure elements + - drm/amdgpu: Fix missing break in ATOM_ARG_IMM Case of atom_get_src_int() + - media: pvrusb2: fix uaf in pvr2_context_set_notify + - media: dvb-frontends: avoid stack overflow warnings with clang + - media: go7007: fix a memleak in go7007_load_encoder + - media: ttpci: fix two memleaks in budget_av_attach + - media: mediatek: vcodec: avoid -Wcast-function-type-strict warning + - drm/mediatek: Fix a null pointer crash in mtk_drm_crtc_finish_page_flip + - powerpc/hv-gpci: Fix the H_GET_PERF_COUNTER_INFO hcall return value checks + - drm/msm/dpu: add division of drm_display_mode's hskew parameter + - module: Add support for default value for module async_probe + - modules: wait do_free_init correctly + - powerpc/embedded6xx: Fix no previous prototype for avr_uart_send() etc. 
+ - leds: aw2013: Unlock mutex before destroying it + - leds: sgm3140: Add missing timer cleanup and flash gpio control + - backlight: lm3630a: Initialize backlight_properties on init + - backlight: lm3630a: Don't set bl->props.brightness in get_brightness + - backlight: da9052: Fully initialize backlight_properties during probe + - backlight: lm3639: Fully initialize backlight_properties during probe + - backlight: lp8788: Fully initialize backlight_properties during probe + - sparc32: Fix section mismatch in leon_pci_grpci + - clk: Fix clk_core_get NULL dereference + - clk: zynq: Prevent null pointer dereference caused by kmalloc failure + - ALSA: hda/realtek: fix ALC285 issues on HP Envy x360 laptops + - ALSA: usb-audio: Stop parsing channels bits when all channels are found. + - RDMA/srpt: Do not register event handler until srpt device is fully setup + - f2fs: multidevice: support direct IO + - f2fs: invalidate META_MAPPING before IPU/DIO write + - f2fs: replace congestion_wait() calls with io_schedule_timeout() + - f2fs: fix to invalidate META_MAPPING before DIO write + - f2fs: invalidate meta pages only for post_read required inode + - f2fs: reduce stack memory cost by using bitfield in struct f2fs_io_info + - f2fs: compress: fix to cover normal cluster write with cp_rwsem + - f2fs: compress: fix to check unreleased compressed cluster + - scsi: csiostor: Avoid function pointer casts + - RDMA/device: Fix a race between mad_client and cm_client init + - RDMA/rtrs-clt: Check strnlen return len in sysfs mpath_policy_store() + - scsi: bfa: Fix function pointer type mismatch for hcb_qe->cbfn + - net: sunrpc: Fix an off by one in rpc_sockaddr2uaddr() + - NFSv4.2: fix nfs4_listxattr kernel BUG at mm/usercopy.c:102 + - NFSv4.2: fix listxattr maximum XDR buffer size + - watchdog: stm32_iwdg: initialize default timeout + - NFS: Fix an off by one in root_nfs_cat() + - f2fs: compress: fix reserve_cblocks counting error when out of space + - afs: Revert "afs: Hide silly-rename files from userspace" + - comedi: comedi_test: Prevent timers rescheduling during deletion + - remoteproc: stm32: use correct format strings on 64-bit + - remoteproc: stm32: Fix incorrect type in assignment for va + - remoteproc: stm32: Fix incorrect type assignment returned by + stm32_rproc_get_loaded_rsc_tablef + - tty: vt: fix 20 vs 0x20 typo in EScsiignore + - serial: max310x: fix syntax error in IRQ error message + - tty: serial: samsung: fix tx_empty() to return TIOCSER_TEMT + - arm64: dts: broadcom: bcmbca: bcm4908: drop invalid switch cells + - kconfig: fix infinite loop when expanding a macro at the end of file + - rtc: mt6397: select IRQ_DOMAIN instead of depending on it + - serial: 8250_exar: Don't remove GPIO device on suspend + - staging: greybus: fix get_channel_from_mode() failure path + - usb: gadget: net2272: Use irqflags in the call to net2272_probe_fin + - io_uring: don't save/restore iowait state + - nouveau: reset the bo resource bus info after an eviction + - octeontx2-af: Use matching wake_up API variant in CGX command interface + - s390/vtime: fix average steal time calculation + - soc: fsl: dpio: fix kcalloc() argument order + - hsr: Fix uninit-value access in hsr_get_node() + - net: mtk_eth_soc: move MAC_MCR setting to mac_finish() + - net: mediatek: mtk_eth_soc: clear MAC_MCR_FORCE_LINK only when MAC is up + - net: ethernet: mtk_eth_soc: fix PPE hanging issue + - packet: annotate data-races around ignore_outgoing + - net: veth: do not manipulate GRO when using XDP + - net: dsa: mt7530: prevent 
possible incorrect XTAL frequency selection + - vdpa/mlx5: Allow CVQ size changes + - wireguard: receive: annotate data-race around receiving_counter.counter + - rds: introduce acquire/release ordering in acquire/release_in_xmit() + - hsr: Handle failures in module init + - net: phy: fix phy_read_poll_timeout argument type in genphy_loopback + - net/bnx2x: Prevent access to a freed page in page_pool + - octeontx2-af: Use separate handlers for interrupts + - netfilter: nf_tables: do not compare internal table flags on updates + - rcu: add a helper to report consolidated flavor QS + - net: report RCU QS on threaded NAPI repolling + - bpf: report RCU QS in cpumap kthread + - net: dsa: mt7530: fix handling of LLDP frames + - net: dsa: mt7530: fix handling of 802.1X PAE frames + - net: dsa: mt7530: fix link-local frames that ingress vlan filtering ports + - net: dsa: mt7530: fix handling of all link-local frames + - spi: spi-mt65xx: Fix NULL pointer access in interrupt handler + - regmap: Add missing map->bus check + - remoteproc: stm32: fix incorrect optional pointers + * Jammy update: v5.15.152 upstream stable release (LP: #2063276) + - mmc: mmci: stm32: use a buffer for unaligned DMA requests + - mmc: mmci: stm32: fix DMA API overlapping mappings warning + - net: lan78xx: fix runtime PM count underflow on link stop + - ixgbe: {dis, en}able irqs in ixgbe_txrx_ring_{dis, en}able + - i40e: disable NAPI right after disabling irqs when handling xsk_pool + - tracing/net_sched: Fix tracepoints that save qdisc_dev() as a string + - geneve: make sure to pull inner header in geneve_rx() + - net: sparx5: Fix use after free inside sparx5_del_mact_entry + - net: ice: Fix potential NULL pointer dereference in ice_bridge_setlink() + - net/ipv6: avoid possible UAF in ip6_route_mpath_notify() + - cpumap: Zero-initialise xdp_rxq_info struct before running XDP program + - net/rds: fix WARNING in rds_conn_connect_if_down + - netfilter: nft_ct: fix l3num expectations with inet pseudo family + - netfilter: nf_conntrack_h323: Add protection for bmp length out of range + - erofs: apply proper VMA alignment for memory mapped files on THP + - netrom: Fix a data-race around sysctl_netrom_default_path_quality + - netrom: Fix a data-race around sysctl_netrom_obsolescence_count_initialiser + - netrom: Fix data-races around sysctl_netrom_network_ttl_initialiser + - netrom: Fix a data-race around sysctl_netrom_transport_timeout + - netrom: Fix a data-race around sysctl_netrom_transport_maximum_tries + - netrom: Fix a data-race around sysctl_netrom_transport_acknowledge_delay + - netrom: Fix a data-race around sysctl_netrom_transport_busy_delay + - netrom: Fix a data-race around sysctl_netrom_transport_requested_window_size + - netrom: Fix a data-race around sysctl_netrom_transport_no_activity_timeout + - netrom: Fix a data-race around sysctl_netrom_routing_control + - netrom: Fix a data-race around sysctl_netrom_link_fails_count + - netrom: Fix data-races around sysctl_net_busy_read + - ALSA: usb-audio: Refcount multiple accesses on the single clock + - ALSA: usb-audio: Clear fixed clock rate at closing EP + - ALSA: usb-audio: Split endpoint setups for hw_params and prepare (take#2) + - ALSA: usb-audio: Properly refcounting clock rate + - ALSA: usb-audio: Apply mutex around snd_usb_endpoint_set_params() + - ALSA: usb-audio: Correct the return code from snd_usb_endpoint_set_params() + - ALSA: usb-audio: Avoid superfluous endpoint setup + - ALSA: usb-audio: Add quirk for Tascam Model 12 + - ALSA: usb-audio: Add new quirk 
FIXED_RATE for JBL Quantum810 Wireless + - ALSA: usb-audio: Fix microphone sound on Nexigo webcam. + - ALSA: usb-audio: add quirk for RODE NT-USB+ + - drm/amd/display: Fix uninitialized variable usage in core_link_ 'read_dpcd() + & write_dpcd()' functions + - nfp: flower: add goto_chain_index for ct entry + - nfp: flower: add hardware offload check for post ct entry + - selftests/mm: switch to bash from sh + - selftests: mm: fix map_hugetlb failure on 64K page size systems + - xhci: process isoc TD properly when there was a transaction error mid TD. + - xhci: handle isoc Babble and Buffer Overrun events properly + - serial: max310x: use regmap methods for SPI batch operations + - serial: max310x: use a separate regmap for each port + - serial: max310x: prevent infinite while() loop in port startup + - drm/amd/pm: do not expose the API used internally only in kv_dpm.c + - drm/amdgpu: Reset IH OVERFLOW_CLEAR bit + - selftests: mptcp: decrease BW in simult flows + - hv_netvsc: use netif_is_bond_master() instead of open code + - hv_netvsc: Register VF in netvsc_probe if NET_DEVICE_REGISTER missed + - drm/amd/display: Re-arrange FPU code structure for dcn2x + - drm/amd/display: move calcs folder into DML + - drm/amd/display: remove DML Makefile duplicate lines + - drm/amd/display: Increase frame-larger-than for all display_mode_vba files + - getrusage: add the "signal_struct *sig" local variable + - getrusage: move thread_group_cputime_adjusted() outside of + lock_task_sighand() + - getrusage: use __for_each_thread() + - getrusage: use sig->stats_lock rather than lock_task_sighand() + - proc: Use task_is_running() for wchan in /proc/$pid/stat + - fs/proc: do_task_stat: move thread_group_cputime_adjusted() outside of + lock_task_sighand() + - ALSA: usb-audio: Fix wrong kfree issue in snd_usb_endpoint_free_all + - ALSA: usb-audio: Always initialize fixed_rate in + snd_usb_find_implicit_fb_sync_format() + - ALSA: usb-audio: Add FIXED_RATE quirk for JBL Quantum610 Wireless + - ALSA: usb-audio: Sort quirk table entries + - regmap: allow to define reg_update_bits for no bus configuration + - regmap: Add bulk read/write callbacks into regmap_config + - serial: max310x: make accessing revision id interface-agnostic + - serial: max310x: fix IO data corruption in batched operations + - Linux 5.15.152 + * CVE-2024-26809 + - netfilter: nft_set_pipapo: release elements in clone only from destroy path + * CVE-2024-26792 + - btrfs: fix double free of anonymous device after snapshot creation failure + * CVE-2023-52530 + - wifi: mac80211: fix potential key use-after-free + * CVE-2023-52447 + - bpf: Defer the free of inner map when necessary + - rcu-tasks: Provide rcu_trace_implies_rcu_gp() + * Avoid creating non-working backlight sysfs knob from ASUS board + (LP: #2060422) + - platform/x86: asus-wmi: Consider device is absent when the read is ~0 + * [Ubuntu 22.04.4/linux-image-6.5.0-26-generic] Kernel output "UBSAN: array- + index-out-of-bounds in /build/linux-hwe-6.5-34pCLi/linux- + hwe-6.5-6.5.0/drivers/net/hyperv/netvsc.c:1445:41" multiple times, + especially during boot. 
(LP: #2058477) + - hv: hyperv.h: Replace one-element array with flexible-array member + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) + - netfilter: nf_tables: disallow timeout for anonymous sets + - mtd: spinand: gigadevice: Fix the get ecc status issue + - netlink: Fix kernel-infoleak-after-free in __skb_datagram_iter + - net: ip_tunnel: prevent perpetual headroom growth + - tun: Fix xdp_rxq_info's queue_index when detaching + - cpufreq: intel_pstate: fix pstate limits enforcement for adjust_perf call + back + - net: veth: clear GRO when clearing XDP even when down + - ipv6: fix potential "struct net" leak in inet6_rtm_getaddr() + - lan78xx: enable auto speed configuration for LAN7850 if no EEPROM is + detected + - net: enable memcg accounting for veth queues + - veth: try harder when allocating queue memory + - net: usb: dm9601: fix wrong return value in dm9601_mdio_read + - uapi: in6: replace temporary label with rfc9486 + - stmmac: Clear variable when destroying workqueue + - Bluetooth: Avoid potential use-after-free in hci_error_reset + - Bluetooth: hci_event: Fix wrongly recorded wakeup BD_ADDR + - netfilter: nf_tables: allow NFPROTO_INET in nft_(match/target)_validate() + - netfilter: nfnetlink_queue: silence bogus compiler warning + - netfilter: core: move ip_ct_attach indirection to struct nf_ct_hook + - netfilter: make function op structures const + - netfilter: let reset rules clean out conntrack entries + - netfilter: bridge: confirm multicast packets before passing them up the + stack + - rtnetlink: fix error logic of IFLA_BRIDGE_FLAGS writing back + - igb: extend PTP timestamp adjustments to i211 + - efi/capsule-loader: fix incorrect allocation size + - power: supply: bq27xxx-i2c: Do not free non existing IRQ + - ALSA: Drop leftover snd-rtctimer stuff from Makefile + - fbcon: always restore the old font data in fbcon_do_set_font() + - afs: Fix endless loop in directory parsing + - riscv: Sparse-Memory/vmemmap out-of-bounds fix + - ALSA: firewire-lib: fix to check cycle continuity + - gtp: fix use-after-free and null-ptr-deref in gtp_newlink() + - wifi: nl80211: reject iftype change with mesh ID change + - btrfs: dev-replace: properly validate device names + - dmaengine: fsl-qdma: fix SoC may hang on 16 byte unaligned read + - dmaengine: ptdma: use consistent DMA masks + - dmaengine: fsl-qdma: init irq after reg initialization + - mmc: core: Fix eMMC initialization with 1-bit bus connection + - mmc: sdhci-xenon: add timeout for PHY init complete + - mmc: sdhci-xenon: fix PHY init clock stability + - pmdomain: qcom: rpmhpd: Fix enabled_corner aggregation + - x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers + - mptcp: move __mptcp_error_report in protocol.c + - mptcp: process pending subflow error on close + - mptcp: rename timer related helper to less confusing names + - selftests: mptcp: add missing kconfig for NF Filter + - selftests: mptcp: add missing kconfig for NF Filter in v6 + - mptcp: clean up harmless false expressions + - mptcp: add needs_id for netlink appending addr + - mptcp: push at DSS boundaries + - mptcp: fix possible deadlock in subflow diag + - cachefiles: fix memory leak in cachefiles_add_cache() + - fs,hugetlb: fix NULL pointer dereference in hugetlbs_fill_super + - Revert "drm/bridge: lt8912b: Register and attach our DSI device at probe" + - af_unix: Drop oob_skb ref before purging queue in GC. 
+ - gpio: 74x164: Enable output pins after registers are reset + - gpiolib: Fix the error path order in gpiochip_add_data_with_key() + - gpio: fix resource unwinding order in error path + - Revert "interconnect: Fix locking for runpm vs reclaim" + - Revert "interconnect: Teach lockdep about icc_bw_lock order" + - bpf: Add BPF_FIB_LOOKUP_SKIP_NEIGH for bpf_fib_lookup + - bpf: Add table ID to bpf_fib_lookup BPF helper + - bpf: Derive source IP addr via bpf_*_fib_lookup() + - Linux 5.15.151 + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) // + CVE-2024-26782 + - mptcp: fix double-free on socket dismantle + * Jammy update: v5.15.151 upstream stable release (LP: #2060209) // Fix + bluetooth connections with 3.0 device (LP: #2063067) + - Bluetooth: hci_event: Fix handling of HCI_EV_IO_CAPA_REQUEST + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) + - net/sched: Retire CBQ qdisc + - [Config] updateconfigs for NET_SCH_CBQ + - net/sched: Retire ATM qdisc + - [Config] updateconfigs for NET_SCH_ATM + - net/sched: Retire dsmark qdisc + - [Config] updateconfigs for NET_SCH_DSMARK + - smb: client: fix potential OOBs in smb2_parse_contexts() + - smb: client: fix parsing of SMB3.1.1 POSIX create context + - sched/rt: sysctl_sched_rr_timeslice show default timeslice after reset + - PCI: dwc: Fix a 64bit bug in dw_pcie_ep_raise_msix_irq() + - bpf: Merge printk and seq_printf VARARG max macros + - bpf: Add struct for bin_args arg in bpf_bprintf_prepare + - bpf: Do cleanup in bpf_bprintf_cleanup only when needed + - bpf: Remove trace_printk_lock + - userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb + - zonefs: Improve error handling + - x86/fpu: Stop relying on userspace for info to fault in xsave buffer + - sched/rt: Fix sysctl_sched_rr_timeslice intial value + - sched/rt: Disallow writing invalid values to sched_rt_period_us + - scsi: target: core: Add TMF to tmr_list handling + - dmaengine: shdma: increase size of 'dev_id' + - dmaengine: fsl-qdma: increase size of 'irq_name' + - wifi: cfg80211: fix missing interfaces when dumping + - wifi: mac80211: fix race condition on enabling fast-xmit + - fbdev: savage: Error out if pixclock equals zero + - fbdev: sis: Error out if pixclock equals zero + - spi: hisi-sfc-v3xx: Return IRQ_NONE if no interrupts were detected + - ahci: asm1166: correct count of reported ports + - ahci: add 43-bit DMA address quirk for ASMedia ASM1061 controllers + - MIPS: reserve exception vector space ONLY ONCE + - platform/x86: touchscreen_dmi: Add info for the TECLAST X16 Plus tablet + - ext4: avoid dividing by 0 in mb_update_avg_fragment_size() when block bitmap + corrupt + - ext4: avoid allocating blocks from corrupted group in + ext4_mb_try_best_found() + - ext4: avoid allocating blocks from corrupted group in ext4_mb_find_by_goal() + - dmaengine: ti: edma: Add some null pointer checks to the edma_probe + - regulator: pwm-regulator: Add validity checks in continuous .get_voltage + - nvmet-tcp: fix nvme tcp ida memory leak + - ALSA: usb-audio: Check presence of valid altsetting control + - ASoC: sunxi: sun4i-spdif: Add support for Allwinner H616 + - spi: sh-msiof: avoid integer overflow in constants + - Input: xpad - add Lenovo Legion Go controllers + - netfilter: conntrack: check SCTP_CID_SHUTDOWN_ACK for vtag setting in + sctp_new + - ALSA: usb-audio: Ignore clock selector errors for single connection + - nvme-fc: do not wait in vain when unloading module + - nvmet-fcloop: swap the list_add_tail arguments + - nvmet-fc: release 
reference on target port + - nvmet-fc: defer cleanup using RCU properly + - nvmet-fc: hold reference on hostport match + - nvmet-fc: abort command when there is no binding + - nvmet-fc: avoid deadlock on delete association path + - nvmet-fc: take ref count on tgtport before delete assoc + - ext4: correct the hole length returned by ext4_map_blocks() + - Input: i8042 - add Fujitsu Lifebook U728 to i8042 quirk table + - fs/ntfs3: Modified fix directory element type detection + - fs/ntfs3: Improve ntfs_dir_count + - fs/ntfs3: Correct hard links updating when dealing with DOS names + - fs/ntfs3: Print warning while fixing hard links count + - fs/ntfs3: Fix detected field-spanning write (size 8) of single field + "le->name" + - fs/ntfs3: Add NULL ptr dereference checking at the end of + attr_allocate_frame() + - fs/ntfs3: Disable ATTR_LIST_ENTRY size check + - fs/ntfs3: use non-movable memory for ntfs3 MFT buffer cache + - fs/ntfs3: Prevent generic message "attempt to access beyond end of device" + - fs/ntfs3: Correct function is_rst_area_valid + - fs/ntfs3: Update inode->i_size after success write into compressed file + - fs/ntfs3: Fix oob in ntfs_listxattr + - wifi: mac80211: adding missing drv_mgd_complete_tx() call + - efi: runtime: Fix potential overflow of soft-reserved region size + - efi: Don't add memblocks for soft-reserved memory + - hwmon: (coretemp) Enlarge per package core count limit + - scsi: lpfc: Use unsigned type for num_sge + - firewire: core: send bus reset promptly on gap count error + - drm/amdgpu: skip to program GFXDEC registers for suspend abort + - drm/amdgpu: reset gpu for s3 suspend abort case + - virtio-blk: Ensure no requests in virtqueues before deleting vqs. + - pmdomain: mediatek: fix race conditions with genpd + - ksmbd: free aux buffer if ksmbd_iov_pin_rsp_read fails + - pmdomain: renesas: r8a77980-sysc: CR7 must be always on + - erofs: fix lz4 inplace decompression + - IB/hfi1: Fix sdma.h tx->num_descs off-by-one error + - drm/ttm: Fix an invalid freeing on already freed page in error path + - dm-crypt: don't modify the data when using authenticated encryption + - platform/x86: intel-vbtn: Stop calling "VBDL" from notify_handler + - platform/x86: touchscreen_dmi: Allow partial (prefix) matches for ACPI names + - KVM: arm64: vgic-its: Test for valid IRQ in MOVALL handler + - KVM: arm64: vgic-its: Test for valid IRQ in its_sync_lpi_pending_table() + - gtp: fix use-after-free and null-ptr-deref in gtp_genl_dump_pdp() + - PCI/MSI: Prevent MSI hardware interrupt number truncation + - l2tp: pass correct message length to ip6_append_data + - ARM: ep93xx: Add terminator to gpiod_lookup_table + - Revert "x86/ftrace: Use alternative RET encoding" + - x86/text-patching: Make text_gen_insn() play nice with ANNOTATE_NOENDBR + - x86/ibt,paravirt: Use text_gen_insn() for paravirt_patch() + - x86/ftrace: Use alternative RET encoding + - x86/returnthunk: Allow different return thunks + - Revert "x86/alternative: Make custom return thunk unconditional" + - x86/alternative: Make custom return thunk unconditional + - serial: amba-pl011: Fix DMA transmission in RS485 mode + - usb: dwc3: gadget: Don't disconnect if not started + - usb: cdnsp: blocked some cdns3 specific code + - usb: cdnsp: fixed issue with incorrect detecting CDNSP family controllers + - usb: cdns3: fixed memory use after free at cdns3_gadget_ep_disable() + - usb: gadget: ncm: Avoid dropping datagrams of properly parsed NTBs + - usb: roles: fix NULL pointer issue when put module's reference + - usb: roles: don't 
get/set_role() when usb_role_switch is unregistered + - mptcp: fix lockless access in subflow ULP diag + - clk: imx: imx8mp: add shared clk gate for usb suspend clk + - clk: qcom: gcc-qcs404: disable gpll[04]_out_aux parents + - clk: qcom: gcc-qcs404: fix names of the DSI clocks used as parents + - mtd: rawnand: sunxi: Fix the size of the last OOB region + - RISC-V: fix funct4 definition for c.jalr in parse_asm.h + - Input: iqs269a - drop unused device node references + - Input: iqs269a - configure device with a single block write + - Input: iqs269a - increase interrupt handler return delay + - clk: renesas: cpg-mssr: Fix use after free if cpg_mssr_common_init() failed + - Input: ads7846 - don't report pressure for ads7845 + - clk: renesas: cpg-mssr: Remove superfluous check in resume code + - clk: imx: avoid memory leak + - Input: ads7846 - always set last command to PWRDOWN + - Input: ads7846 - don't check penirq immediately for 7845 + - powerpc/powernv/ioda: Skip unallocated resources when mapping to PE + - clk: qcom: gpucc-sc7180: fix clk_dis_wait being programmed for CX GDSC + - clk: qcom: gpucc-sdm845: fix clk_dis_wait being programmed for CX GDSC + - clk: Honor CLK_OPS_PARENT_ENABLE in clk_core_is_enabled() + - powerpc/pseries/lparcfg: add missing RTAS retry status handling + - powerpc/perf/hv-24x7: add missing RTAS retry status handling + - powerpc/pseries/lpar: add missing RTAS retry status handling + - MIPS: SMP-CPS: fix build error when HOTPLUG_CPU not set + - MIPS: vpe-mt: drop physical_memsize + - vdpa/mlx5: Don't clear mr struct on destroy MR + - ARM: dts: BCM53573: Drop nonexistent #usb-cells + - RDMA/siw: Balance the reference of cep->kref in the error path + - RDMA/siw: Correct wrong debug message + - clk: linux/clk-provider.h: fix kernel-doc warnings and typos + - platform/x86: asus-wmi: Document the dgpu_disable sysfs attribute + - acpi: property: Let args be NULL in __acpi_node_get_property_reference + - ARM: dts: BCM53573: Drop nonexistent "default-off" LED trigger + - tools headers UAPI: Sync linux/fscrypt.h with the kernel sources + - perf beauty: Update copy of linux/socket.h with the kernel sources + - tools/virtio: fix build + - drm/amdgpu: init iommu after amdkfd device init + - f2fs: don't set GC_FAILURE_PIN for background GC + - f2fs: write checkpoint during FG_GC + - drm/i915/dg1: Update DMC_DEBUG3 register + - kernel/sched: Remove dl_boosted flag comment + - cifs: remove useless parameter 'is_fsctl' from SMB2_ioctl() + - serial: 8250: Remove serial_rs485 sanitization from em485 + - clk: imx8mp: Add DISP2 pixel clock + - clk: imx8mp: add clkout1/2 support + - dt-bindings: clocks: imx8mp: Add ID for usb suspend clock + - net: ethernet: ti: add missing of_node_put before return + - powerpc/rtas: make all exports GPL + - powerpc/rtas: ensure 4KB alignment for rtas_data_buf + - powerpc/eeh: Small refactor of eeh_handle_normal_event() + - powerpc/eeh: Set channel state after notifying the drivers + - PM: core: Redefine pm_ptr() macro + - PM: core: Add new *_PM_OPS macros, deprecate old ones + - mmc: jz4740: Use the new PM macros + - mmc: mxc: Use the new PM macros + - PM: core: Remove static qualifier in DEFINE_SIMPLE_DEV_PM_OPS macro + - Input: iqs269a - switch to DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr() + - Input: iqs269a - do not poll during suspend or resume + - Input: iqs269a - do not poll during ATI + - net/sched: Refactor qdisc_graft() for ingress and clsact Qdiscs + - netfilter: nf_tables: add rescheduling points during loop detection walks + - 
debugobjects: Recheck debug_objects_enabled before reporting + - nbd: Add the maximum limit of allocated index in nbd_dev_add + - md: fix data corruption for raid456 when reshape restart while grow up + - md/raid10: prevent soft lockup while flush writes + - posix-timers: Ensure timer ID search-loop limit is valid + - btrfs: add xxhash to fast checksum implementations + - ACPI: button: Add lid disable DMI quirk for Nextbook Ares 8A + - ACPI: video: Add backlight=native DMI quirk for Apple iMac11,3 + - ACPI: video: Add backlight=native DMI quirk for Lenovo ThinkPad X131e (3371 + AMD version) + - arm64: set __exception_irq_entry with __irq_entry as a default + - arm64: mm: fix VA-range sanity check + - sched/fair: Don't balance task to its current running CPU + - wifi: ath11k: fix registration of 6Ghz-only phy without the full channel + range + - bpf: Address KCSAN report on bpf_lru_list + - devlink: report devlink_port_type_warn source device + - wifi: wext-core: Fix -Wstringop-overflow warning in + ioctl_standard_iw_point() + - wifi: iwlwifi: mvm: avoid baid size integer overflow + - exfat: support dynamic allocate bh for exfat_entry_set_cache + - arm64: dts: rockchip: fix regulator name on rk3399-rock-4 + - arm64: dts: rockchip: add ES8316 codec for ROCK Pi 4 + - arm64: dts: rockchip: add SPDIF node for ROCK Pi 4 + - ARM: dts: BCM53573: Describe on-SoC BCM53125 rev 4 switch + - ACPI: video: Add backlight=native DMI quirk for Apple iMac12,1 and iMac12,2 + - ACPI: resource: Skip IRQ override on Asus Vivobook S5602ZA + - ACPI: resource: Add Asus ExpertBook B2502 to Asus quirks + - ACPI: resource: Skip IRQ override on Asus Expertbook B2402CBA + - ACPI: resource: Skip IRQ override on ASUS ExpertBook B1502CBA + - xhci: cleanup xhci_hub_control port references + - xhci: move port specific items such as state completions to port structure + - xhci: rename resume_done to resume_timestamp + - xhci: clear usb2 resume related variables in one place. 
+ - xhci: decouple usb2 port resume and get_port_status request handling + - xhci: track port suspend state correctly in unsuccessful resume cases + - cifs: add a warning when the in-flight count goes negative + - IB/hfi1: Fix a memleak in init_credit_return + - RDMA/bnxt_re: Return error for SRQ resize + - RDMA/irdma: Fix KASAN issue with tasklet + - RDMA/irdma: Validate max_send_wr and max_recv_wr + - RDMA/irdma: Set the CQ read threshold for GEN 1 + - RDMA/irdma: Add AE for too many RNRS + - RDMA/srpt: Support specifying the srpt_service_guid parameter + - RDMA/qedr: Fix qedr_create_user_qp error flow + - arm64: dts: rockchip: set num-cs property for spi on px30 + - RDMA/srpt: fix function pointer cast warnings + - bpf, scripts: Correct GPL license name + - scsi: jazz_esp: Only build if SCSI core is builtin + - nouveau: fix function cast warnings + - net: stmmac: Fix incorrect dereference in interrupt handlers + - ipv4: properly combine dev_base_seq and ipv4.dev_addr_genid + - ipv6: properly combine dev_base_seq and ipv6.dev_addr_genid + - ata: libahci_platform: Convert to using devm bulk clocks API + - ata: libahci_platform: Introduce reset assertion/deassertion methods + - ata: ahci_ceva: fix error handling for Xilinx GT PHY support + - bpf: Fix racing between bpf_timer_cancel_and_free and bpf_timer_cancel + - drm/nouveau/instmem: fix uninitialized_var.cocci warning + - octeontx2-af: Consider the action set by PF + - s390: use the correct count for __iowrite64_copy() + - netfilter: nf_tables: set dormant flag on hook register failure + - netfilter: flowtable: simplify route logic + - netfilter: nft_flow_offload: reset dst in route object after setting up flow + - netfilter: nft_flow_offload: release dst in case direct xmit path is used + - drm/syncobj: call drm_syncobj_fence_add_wait when WAIT_AVAILABLE flag is set + - drm/amd/display: Fix memory leak in dm_sw_fini() + - i2c: imx: Add timer for handling the stop condition + - i2c: imx: when being a target, mark the last read as processed + - fs/aio: Restrict kiocb_set_cancel_fn() to I/O submitted via libaio + - netfilter: nf_tables: fix scheduling-while-atomic splat + - ext4: regenerate buddy after block freeing failed if under fc replay + - ext4: avoid bb_free and bb_fragments inconsistency in mb_free_blocks() + - netfilter: nf_tables: can't schedule in nft_chain_validate + - r8169: use new PM macros + - Linux 5.15.150 + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26733 + - packet: move from strlcpy with unused retval to strscpy + - net: dev: Convert sa_data to flexible array in struct sockaddr + - arp: Prevent overflow in arp_req_get(). 
+ * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26735 + - ipv6: sr: fix possible use-after-free and null-ptr-deref + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26736 + - afs: Increase buffer size in afs_update_volume_status() + * Jammy update: v5.15.150 upstream stable release (LP: #2060142) // + CVE-2024-26748 + - usb: cdns3: fix memory double free when handle zero packet + * CVE-2023-47233 + - wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach + * CVE-2024-26584 + - net: tls: handle backlogging of crypto requests + * CVE-2024-26585 + - tls: fix race between tx work scheduling and socket close + * CVE-2024-26583 + - tls: rx: jump to a more appropriate label + - tls: rx: drop pointless else after goto + - tls: stop recv() if initial process_rx_list gave us non-DATA + - tls: rx: don't store the record type in socket context + - tls: rx: don't store the decryption status in socket context + - tls: rx: don't issue wake ups when data is decrypted + - tls: rx: refactor decrypt_skb_update() + - tls: hw: rx: use return value of tls_device_decrypted() to carry status + - tls: rx: drop unnecessary arguments from tls_setup_from_iter() + - tls: rx: don't report text length from the bowels of decrypt + - tls: rx: wrap decryption arguments in a structure + - tls: rx: factor out writing ContentType to cmsg + - tls: rx: don't track the async count + - tls: rx: move counting TlsDecryptErrors for sync + - tls: rx: assume crypto always calls our callback + - tls: rx: use async as an in-out argument + - tls: decrement decrypt_pending if no async completion will be called + - net: tls: fix async vs NIC crypto offload + - Revert "tls: rx: move counting TlsDecryptErrors for sync" + - tls: rx: simplify async wait + - tls: rx: return the already-copied data on crypto error + - tls: rx: allow only one reader at a time + - tls: rx: release the sock lock on locking timeout + - tls: extract context alloc/initialization out of tls_set_sw_offload + - net: tls: factor out tls_*crypt_async_wait() + - tls: fix race between async notify and socket close + * CVE-2024-26622 + - tomoyo: fix UAF write bug in tomoyo_write_control() + + -- Philip Cox Fri, 10 May 2024 12:43:52 -0400 + +linux-aws-5.15 (5.15.0-1062.68~20.04.1) focal; urgency=medium + + * focal/linux-aws-5.15: 5.15.0-1062.68~20.04.1 -proposed tracker + (LP: #2063583) + + [ Ubuntu: 5.15.0-1062.68 ] + + * jammy/linux-aws: 5.15.0-1062.68 -proposed tracker (LP: #2063584) + * jammy/linux: 5.15.0-107.117 -proposed tracker (LP: #2063635) + * CVE-2023-52530 + - wifi: mac80211: fix potential key use-after-free + * CVE-2024-26622 + - tomoyo: fix UAF write bug in tomoyo_write_control() + * CVE-2023-47233 + - wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach + + -- Philip Cox Wed, 01 May 2024 10:57:03 -0400 + linux-aws-5.15 (5.15.0-1061.67~20.04.1) focal; urgency=medium * focal/linux-aws-5.15: 5.15.0-1061.67~20.04.1 -proposed tracker diff -u linux-aws-5.15-5.15.0/debian/control linux-aws-5.15-5.15.0/debian/control --- linux-aws-5.15-5.15.0/debian/control +++ linux-aws-5.15-5.15.0/debian/control @@ -63,7 +63,7 @@ XS-Testsuite: autopkgtest #XS-Testsuite-Depends: gcc-4.7 binutils -Package: linux-aws-5.15-headers-5.15.0-1061 +Package: linux-aws-5.15-headers-5.15.0-1063 Build-Profiles: Architecture: all Multi-Arch: foreign @@ -74,46 +74,46 @@ Description: Header files related to Linux kernel version 5.15.0 This package provides kernel header files for version 5.15.0, for sites that want 
the latest kernel headers. Please read - /usr/share/doc/linux-aws-5.15-headers-5.15.0-1061/debian.README.gz for details + /usr/share/doc/linux-aws-5.15-headers-5.15.0-1063/debian.README.gz for details -Package: linux-aws-5.15-tools-5.15.0-1061 +Package: linux-aws-5.15-tools-5.15.0-1063 Build-Profiles: Architecture: amd64 arm64 Section: devel Priority: optional Depends: ${misc:Depends}, ${shlibs:Depends}, linux-tools-common -Description: Linux kernel version specific tools for version 5.15.0-1061 +Description: Linux kernel version specific tools for version 5.15.0-1063 This package provides the architecture dependant parts for kernel version locked tools (such as perf and x86_energy_perf_policy) for - version 5.15.0-1061 on + version 5.15.0-1063 on 64 bit x86. - You probably want to install linux-tools-5.15.0-1061-. + You probably want to install linux-tools-5.15.0-1063-. -Package: linux-aws-5.15-cloud-tools-5.15.0-1061 +Package: linux-aws-5.15-cloud-tools-5.15.0-1063 Build-Profiles: Architecture: amd64 arm64 Section: devel Priority: optional Depends: ${misc:Depends}, ${shlibs:Depends}, linux-cloud-tools-common -Description: Linux kernel version specific cloud tools for version 5.15.0-1061 +Description: Linux kernel version specific cloud tools for version 5.15.0-1063 This package provides the architecture dependant parts for kernel - version locked tools for cloud tools for version 5.15.0-1061 on + version locked tools for cloud tools for version 5.15.0-1063 on 64 bit x86. - You probably want to install linux-cloud-tools-5.15.0-1061-. + You probably want to install linux-cloud-tools-5.15.0-1063-. -Package: linux-image-unsigned-5.15.0-1061-aws +Package: linux-image-unsigned-5.15.0-1063-aws Build-Profiles: Architecture: amd64 arm64 Section: kernel Priority: optional Provides: linux-image, fuse-module, ${linux:rprovides} -Depends: ${misc:Depends}, ${shlibs:Depends}, kmod, linux-base (>= 4.5ubuntu1~16.04.1), linux-modules-5.15.0-1061-aws +Depends: ${misc:Depends}, ${shlibs:Depends}, kmod, linux-base (>= 4.5ubuntu1~16.04.1), linux-modules-5.15.0-1063-aws Recommends: grub-pc [amd64] | grub-efi-amd64 [amd64] | grub-efi-ia32 [amd64] | grub [amd64] | lilo [amd64] | grub-efi-arm64 [arm64], initramfs-tools | linux-initramfs-tool Breaks: flash-kernel (<< 3.90ubuntu2) [arm64 armhf], s390-tools (<< 2.3.0-0ubuntu3) [s390x] -Conflicts: linux-image-5.15.0-1061-aws -Suggests: fdutils, linux-aws-5.15-doc-5.15.0 | linux-aws-5.15-source-5.15.0, linux-aws-5.15-tools, linux-headers-5.15.0-1061-aws +Conflicts: linux-image-5.15.0-1063-aws +Suggests: fdutils, linux-aws-5.15-doc-5.15.0 | linux-aws-5.15-source-5.15.0, linux-aws-5.15-tools, linux-headers-5.15.0-1063-aws Description: Linux kernel image for version 5.15.0 on 64 bit x86 SMP This package contains the unsigned Linux kernel image for version 5.15.0 on 64 bit x86 SMP. @@ -126,12 +126,12 @@ the linux-aws meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. 
-Package: linux-modules-5.15.0-1061-aws +Package: linux-modules-5.15.0-1063-aws Build-Profiles: Architecture: amd64 arm64 Section: kernel Priority: optional -Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-5.15.0-1061-aws | linux-image-unsigned-5.15.0-1061-aws +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-5.15.0-1063-aws | linux-image-unsigned-5.15.0-1063-aws Built-Using: ${linux:BuiltUsing} Description: Linux kernel extra modules for version 5.15.0 on 64 bit x86 SMP Contains the corresponding System.map file, the modules built by the @@ -146,12 +146,12 @@ the linux-aws meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. -Package: linux-modules-extra-5.15.0-1061-aws +Package: linux-modules-extra-5.15.0-1063-aws Build-Profiles: Architecture: amd64 arm64 Section: kernel Priority: optional -Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-5.15.0-1061-aws | linux-image-unsigned-5.15.0-1061-aws, crda | wireless-crda +Depends: ${misc:Depends}, ${shlibs:Depends}, linux-image-5.15.0-1063-aws | linux-image-unsigned-5.15.0-1063-aws, crda | wireless-crda Description: Linux kernel extra modules for version 5.15.0 on 64 bit x86 SMP This package contains the Linux kernel extra modules for version 5.15.0 on 64 bit x86 SMP. @@ -164,21 +164,21 @@ the linux-modules-extra-aws meta-package, which will ensure that upgrades work correctly, and that supporting packages are also installed. -Package: linux-headers-5.15.0-1061-aws +Package: linux-headers-5.15.0-1063-aws Build-Profiles: Architecture: amd64 arm64 Section: devel Priority: optional -Depends: ${misc:Depends}, linux-aws-5.15-headers-5.15.0-1061, ${shlibs:Depends} +Depends: ${misc:Depends}, linux-aws-5.15-headers-5.15.0-1063, ${shlibs:Depends} Provides: linux-headers, linux-headers-3.0 Description: Linux kernel headers for version 5.15.0 on 64 bit x86 SMP This package provides kernel header files for version 5.15.0 on 64 bit x86 SMP. . This is for sites that want the latest kernel headers. Please read - /usr/share/doc/linux-headers-5.15.0-1061/debian.README.gz for details. + /usr/share/doc/linux-headers-5.15.0-1063/debian.README.gz for details. -Package: linux-image-unsigned-5.15.0-1061-aws-dbgsym +Package: linux-image-unsigned-5.15.0-1063-aws-dbgsym Build-Profiles: Architecture: amd64 arm64 Section: devel @@ -195,27 +195,27 @@ is uncompressed, and unstripped. This package also includes the unstripped modules. -Package: linux-tools-5.15.0-1061-aws +Package: linux-tools-5.15.0-1063-aws Build-Profiles: Architecture: amd64 arm64 Section: devel Priority: optional -Depends: ${misc:Depends}, linux-aws-5.15-tools-5.15.0-1061 -Description: Linux kernel version specific tools for version 5.15.0-1061 +Depends: ${misc:Depends}, linux-aws-5.15-tools-5.15.0-1063 +Description: Linux kernel version specific tools for version 5.15.0-1063 This package provides the architecture dependant parts for kernel version locked tools (such as perf and x86_energy_perf_policy) for - version 5.15.0-1061 on + version 5.15.0-1063 on 64 bit x86. 
-Package: linux-cloud-tools-5.15.0-1061-aws +Package: linux-cloud-tools-5.15.0-1063-aws Build-Profiles: Architecture: amd64 arm64 Section: devel Priority: optional -Depends: ${misc:Depends}, linux-aws-5.15-cloud-tools-5.15.0-1061 -Description: Linux kernel version specific cloud tools for version 5.15.0-1061 +Depends: ${misc:Depends}, linux-aws-5.15-cloud-tools-5.15.0-1063 +Description: Linux kernel version specific cloud tools for version 5.15.0-1063 This package provides the architecture dependant parts for kernel - version locked tools for cloud for version 5.15.0-1061 on + version locked tools for cloud for version 5.15.0-1063 on 64 bit x86. Package: linux-udebs-aws @@ -229,7 +229,7 @@ for easier version and migration tracking. -Package: linux-buildinfo-5.15.0-1061-aws +Package: linux-buildinfo-5.15.0-1063-aws Build-Profiles: Architecture: amd64 arm64 Section: kernel diff -u linux-aws-5.15-5.15.0/drivers/acpi/processor_idle.c linux-aws-5.15-5.15.0/drivers/acpi/processor_idle.c --- linux-aws-5.15-5.15.0/drivers/acpi/processor_idle.c +++ linux-aws-5.15-5.15.0/drivers/acpi/processor_idle.c @@ -1424,6 +1424,8 @@ acpi_processor_registered--; if (acpi_processor_registered == 0) cpuidle_unregister_driver(&acpi_idle_driver); + + kfree(dev); } pr->flags.power_setup_done = 0; diff -u linux-aws-5.15-5.15.0/drivers/acpi/property.c linux-aws-5.15-5.15.0/drivers/acpi/property.c --- linux-aws-5.15-5.15.0/drivers/acpi/property.c +++ linux-aws-5.15-5.15.0/drivers/acpi/property.c @@ -639,6 +639,7 @@ * @index: Index of the reference to return * @num_args: Maximum number of arguments after each reference * @args: Location to store the returned reference with optional arguments + * (may be NULL) * * Find property with @name, verifify that it is a package containing at least * one object reference and if so, store the ACPI device object pointer to the @@ -697,6 +698,9 @@ if (ret) return ret == -ENODEV ? 
-EINVAL : ret; + if (!args) + return 0; + args->fwnode = acpi_fwnode_handle(device); args->nargs = 0; return 0; diff -u linux-aws-5.15-5.15.0/drivers/acpi/resource.c linux-aws-5.15-5.15.0/drivers/acpi/resource.c --- linux-aws-5.15-5.15.0/drivers/acpi/resource.c +++ linux-aws-5.15-5.15.0/drivers/acpi/resource.c @@ -428,6 +428,34 @@ DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"), }, }, + { + .ident = "Asus Vivobook S5602ZA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), + }, + }, + { + .ident = "Asus ExpertBook B1502CBA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B1502CBA"), + }, + }, + { + .ident = "Asus ExpertBook B2402CBA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"), + }, + }, + { + .ident = "Asus ExpertBook B2502", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"), + }, + }, { } }; @@ -518,6 +546,39 @@ DMI_MATCH(DMI_BOARD_NAME, "17U70P"), }, }, + { + /* Infinity E15-5A165-BM */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM5RG1E0009COM"), + }, + }, + { + /* Infinity E15-5A305-1M */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"), + }, + }, + { + /* Lunnen Ground 15 / AMD Ryzen 5 5500U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"), + DMI_MATCH(DMI_BOARD_NAME, "LLL5DAW"), + }, + }, + { + /* Lunnen Ground 16 / AMD Ryzen 7 5800U */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"), + DMI_MATCH(DMI_BOARD_NAME, "LL6FA"), + }, + }, + { + /* MAIBENBEN X577 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"), + DMI_MATCH(DMI_BOARD_NAME, "X577"), + }, + }, { } }; diff -u linux-aws-5.15-5.15.0/drivers/acpi/scan.c linux-aws-5.15-5.15.0/drivers/acpi/scan.c --- linux-aws-5.15-5.15.0/drivers/acpi/scan.c +++ linux-aws-5.15-5.15.0/drivers/acpi/scan.c @@ -315,18 +315,14 @@ * again). 
*/ if (adev->handler) { - dev_warn(&adev->dev, "Already enumerated\n"); - return -EALREADY; + dev_dbg(&adev->dev, "Already enumerated\n"); + return 0; } error = acpi_bus_scan(adev->handle); if (error) { dev_warn(&adev->dev, "Namespace scan failure\n"); return error; } - if (!adev->handler) { - dev_warn(&adev->dev, "Enumeration failure\n"); - error = -ENODEV; - } } else { error = acpi_scan_device_not_present(adev); } diff -u linux-aws-5.15-5.15.0/drivers/acpi/video_detect.c linux-aws-5.15-5.15.0/drivers/acpi/video_detect.c --- linux-aws-5.15-5.15.0/drivers/acpi/video_detect.c +++ linux-aws-5.15-5.15.0/drivers/acpi/video_detect.c @@ -342,6 +342,40 @@ }, }, { + .callback = video_detect_force_native, + /* Lenovo ThinkPad X131e (3371 AMD version) */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "3371"), + }, + }, + { + .callback = video_detect_force_native, + /* Apple iMac11,3 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac11,3"), + }, + }, + { + /* https://gitlab.freedesktop.org/drm/amd/-/issues/1838 */ + .callback = video_detect_force_native, + /* Apple iMac12,1 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,1"), + }, + }, + { + /* https://gitlab.freedesktop.org/drm/amd/-/issues/2753 */ + .callback = video_detect_force_native, + /* Apple iMac12,2 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "iMac12,2"), + }, + }, + { /* https://bugzilla.redhat.com/show_bug.cgi?id=1217249 */ .callback = video_detect_force_native, .ident = "Apple MacBook Pro 12,1", diff -u linux-aws-5.15-5.15.0/drivers/ata/ahci.c linux-aws-5.15-5.15.0/drivers/ata/ahci.c --- linux-aws-5.15-5.15.0/drivers/ata/ahci.c +++ linux-aws-5.15-5.15.0/drivers/ata/ahci.c @@ -49,6 +49,7 @@ enum board_ids { /* board IDs by feature in alphabetical order */ board_ahci, + board_ahci_43bit_dma, board_ahci_ign_iferr, board_ahci_low_power, board_ahci_no_debounce_delay, @@ -129,6 +130,13 @@ .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, + [board_ahci_43bit_dma] = { + AHCI_HFLAGS (AHCI_HFLAG_43BIT_ONLY), + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, [board_ahci_ign_iferr] = { AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR), .flags = AHCI_FLAG_COMMON, @@ -599,11 +607,11 @@ { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */ - /* Asmedia */ + /* ASMedia */ { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ - { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ - { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ + { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */ { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */ { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */ @@ -659,6 +667,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev, struct ahci_host_priv *hpriv) { + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) { + dev_info(&pdev->dev, "ASM1166 has only six ports\n"); + hpriv->saved_port_map = 0x3f; + } + if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { dev_info(&pdev->dev, "JMB361 has only one port\n"); hpriv->force_port_map = 1; @@ -951,11 +964,20 @@ #endif /* CONFIG_PM 
*/ -static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac) +static int ahci_configure_dma_masks(struct pci_dev *pdev, + struct ahci_host_priv *hpriv) { - const int dma_bits = using_dac ? 64 : 32; + int dma_bits; int rc; + if (hpriv->cap & HOST_CAP_64) { + dma_bits = 64; + if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY) + dma_bits = 43; + } else { + dma_bits = 32; + } + /* * If the device fixup already set the dma_mask to some non-standard * value, don't extend it here. This happens on STA2X11, for example. @@ -1933,7 +1955,7 @@ ahci_gtf_filter_workaround(host); /* initialize adapter */ - rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64); + rc = ahci_configure_dma_masks(pdev, hpriv); if (rc) return rc; diff -u linux-aws-5.15-5.15.0/drivers/ata/ahci.h linux-aws-5.15-5.15.0/drivers/ata/ahci.h --- linux-aws-5.15-5.15.0/drivers/ata/ahci.h +++ linux-aws-5.15-5.15.0/drivers/ata/ahci.h @@ -39,7 +39,6 @@ enum { AHCI_MAX_PORTS = 32, - AHCI_MAX_CLKS = 5, AHCI_MAX_SG = 168, /* hardware max is 64K */ AHCI_DMA_BOUNDARY = 0xffffffff, AHCI_MAX_CMDS = 32, @@ -244,6 +243,7 @@ AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = BIT(27), /* ignore -EOPNOTSUPP from phy_power_on() */ AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */ + AHCI_HFLAG_43BIT_ONLY = BIT(29), /* 43bit DMA addr limit */ /* ap->flags bits */ @@ -342,7 +342,9 @@ u32 em_msg_type; /* EM message type */ u32 remapped_nvme; /* NVMe remapped device count */ bool got_runtime_pm; /* Did we do pm_runtime_get? */ - struct clk *clks[AHCI_MAX_CLKS]; /* Optional */ + unsigned int n_clks; + struct clk_bulk_data *clks; /* Optional */ + unsigned int f_rsts; struct reset_control *rsts; /* Optional */ struct regulator **target_pwrs; /* Optional */ struct regulator *ahci_regulator;/* Optional */ diff -u linux-aws-5.15-5.15.0/drivers/ata/libahci_platform.c linux-aws-5.15-5.15.0/drivers/ata/libahci_platform.c --- linux-aws-5.15-5.15.0/drivers/ata/libahci_platform.c +++ linux-aws-5.15-5.15.0/drivers/ata/libahci_platform.c @@ -97,28 +97,14 @@ * ahci_platform_enable_clks - Enable platform clocks * @hpriv: host private area to store config values * - * This function enables all the clks found in hpriv->clks, starting at - * index 0. If any clk fails to enable it disables all the clks already - * enabled in reverse order, and then returns an error. + * This function enables all the clks found for the AHCI device. * * RETURNS: * 0 on success otherwise a negative error code */ int ahci_platform_enable_clks(struct ahci_host_priv *hpriv) { - int c, rc; - - for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) { - rc = clk_prepare_enable(hpriv->clks[c]); - if (rc) - goto disable_unprepare_clk; - } - return 0; - -disable_unprepare_clk: - while (--c >= 0) - clk_disable_unprepare(hpriv->clks[c]); - return rc; + return clk_bulk_prepare_enable(hpriv->n_clks, hpriv->clks); } EXPORT_SYMBOL_GPL(ahci_platform_enable_clks); @@ -126,20 +112,55 @@ * ahci_platform_disable_clks - Disable platform clocks * @hpriv: host private area to store config values * - * This function disables all the clks found in hpriv->clks, in reverse - * order of ahci_platform_enable_clks (starting at the end of the array). + * This function disables all the clocks enabled before + * (bulk-clocks-disable function is supposed to do that in reverse + * from the enabling procedure order). 
*/ void ahci_platform_disable_clks(struct ahci_host_priv *hpriv) { - int c; - - for (c = AHCI_MAX_CLKS - 1; c >= 0; c--) - if (hpriv->clks[c]) - clk_disable_unprepare(hpriv->clks[c]); + clk_bulk_disable_unprepare(hpriv->n_clks, hpriv->clks); } EXPORT_SYMBOL_GPL(ahci_platform_disable_clks); /** + * ahci_platform_deassert_rsts - Deassert/trigger platform resets + * @hpriv: host private area to store config values + * + * This function deasserts or triggers all the reset lines found for + * the AHCI device. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv) +{ + if (hpriv->f_rsts & AHCI_PLATFORM_RST_TRIGGER) + return reset_control_reset(hpriv->rsts); + + return reset_control_deassert(hpriv->rsts); +} +EXPORT_SYMBOL_GPL(ahci_platform_deassert_rsts); + +/** + * ahci_platform_assert_rsts - Assert/rearm platform resets + * @hpriv: host private area to store config values + * + * This function asserts or rearms (for self-deasserting resets) all + * the reset controls found for the AHCI device. + * + * RETURNS: + * 0 on success otherwise a negative error code + */ +int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv) +{ + if (hpriv->f_rsts & AHCI_PLATFORM_RST_TRIGGER) + return reset_control_rearm(hpriv->rsts); + + return reset_control_assert(hpriv->rsts); +} +EXPORT_SYMBOL_GPL(ahci_platform_assert_rsts); + +/** * ahci_platform_enable_regulators - Enable regulators * @hpriv: host private area to store config values * @@ -236,18 +257,18 @@ if (rc) goto disable_regulator; - rc = reset_control_deassert(hpriv->rsts); + rc = ahci_platform_deassert_rsts(hpriv); if (rc) goto disable_clks; rc = ahci_platform_enable_phys(hpriv); if (rc) - goto disable_resets; + goto disable_rsts; return 0; -disable_resets: - reset_control_assert(hpriv->rsts); +disable_rsts: + ahci_platform_assert_rsts(hpriv); disable_clks: ahci_platform_disable_clks(hpriv); @@ -274,7 +295,7 @@ { ahci_platform_disable_phys(hpriv); - reset_control_assert(hpriv->rsts); + ahci_platform_assert_rsts(hpriv); ahci_platform_disable_clks(hpriv); @@ -292,8 +313,6 @@ pm_runtime_disable(dev); } - for (c = 0; c < AHCI_MAX_CLKS && hpriv->clks[c]; c++) - clk_put(hpriv->clks[c]); /* * The regulators are tied to child node device and not to the * SATA device itself. 
So we can't use devm for automatically @@ -374,8 +393,8 @@ * 1) mmio registers (IORESOURCE_MEM 0, mandatory) * 2) regulator for controlling the targets power (optional) * regulator for controlling the AHCI controller (optional) - * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node, - * or for non devicetree enabled platforms a single clock + * 3) all clocks specified in the devicetree node, or a single + * clock for non-OF platforms (optional) * 4) resets, if flags has AHCI_PLATFORM_GET_RESETS (optional) * 5) phys (optional) * @@ -385,11 +404,10 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, unsigned int flags) { + int child_nodes, rc = -ENOMEM, enabled_ports = 0; struct device *dev = &pdev->dev; struct ahci_host_priv *hpriv; - struct clk *clk; struct device_node *child; - int i, enabled_ports = 0, rc = -ENOMEM, child_nodes; u32 mask_port_map = 0; if (!devres_open_group(dev, NULL, GFP_KERNEL)) @@ -409,25 +427,38 @@ goto err_out; } - for (i = 0; i < AHCI_MAX_CLKS; i++) { + /* + * Bulk clocks getting procedure can fail to find any clock due to + * running on a non-OF platform or due to the clocks being defined in + * bypass of the DT firmware (like da850, spear13xx). In that case we + * fallback to getting a single clock source right from the dev clocks + * list. + */ + rc = devm_clk_bulk_get_all(dev, &hpriv->clks); + if (rc < 0) + goto err_out; + + if (rc > 0) { + /* Got clocks in bulk */ + hpriv->n_clks = rc; + } else { /* - * For now we must use clk_get(dev, NULL) for the first clock, - * because some platforms (da850, spear13xx) are not yet - * converted to use devicetree for clocks. For new platforms - * this is equivalent to of_clk_get(dev->of_node, 0). + * No clock bulk found: fallback to manually getting + * the optional clock. 
*/ - if (i == 0) - clk = clk_get(dev, NULL); - else - clk = of_clk_get(dev->of_node, i); - - if (IS_ERR(clk)) { - rc = PTR_ERR(clk); - if (rc == -EPROBE_DEFER) - goto err_out; - break; + hpriv->clks = devm_kzalloc(dev, sizeof(*hpriv->clks), GFP_KERNEL); + if (!hpriv->clks) { + rc = -ENOMEM; + goto err_out; + } + hpriv->clks->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(hpriv->clks->clk)) { + rc = PTR_ERR(hpriv->clks->clk); + goto err_out; + } else if (hpriv->clks->clk) { + hpriv->clks->id = "ahci"; + hpriv->n_clks = 1; } - hpriv->clks[i] = clk; } hpriv->ahci_regulator = devm_regulator_get(dev, "ahci"); @@ -449,6 +480,8 @@ rc = PTR_ERR(hpriv->rsts); goto err_out; } + + hpriv->f_rsts = flags & AHCI_PLATFORM_RST_TRIGGER; } /* diff -u linux-aws-5.15-5.15.0/drivers/base/regmap/regmap.c linux-aws-5.15-5.15.0/drivers/base/regmap/regmap.c --- linux-aws-5.15-5.15.0/drivers/base/regmap/regmap.c +++ linux-aws-5.15-5.15.0/drivers/base/regmap/regmap.c @@ -835,12 +835,15 @@ map->reg_stride_order = ilog2(map->reg_stride); else map->reg_stride_order = -1; - map->use_single_read = config->use_single_read || !bus || !bus->read; - map->use_single_write = config->use_single_write || !bus || !bus->write; - map->can_multi_write = config->can_multi_write && bus && bus->write; + map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read)); + map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write)); + map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write)); if (bus) { map->max_raw_read = bus->max_raw_read; map->max_raw_write = bus->max_raw_write; + } else if (config->max_raw_read && config->max_raw_write) { + map->max_raw_read = config->max_raw_read; + map->max_raw_write = config->max_raw_write; } map->dev = dev; map->bus = bus; @@ -874,9 +877,19 @@ map->read_flag_mask = bus->read_flag_mask; } - if (!bus) { + if (config && config->read && config->write) { + map->reg_read = _regmap_bus_read; + + /* Bulk read/write */ + map->read = config->read; + map->write = config->write; + + reg_endian = REGMAP_ENDIAN_NATIVE; + val_endian = REGMAP_ENDIAN_NATIVE; + } else if (!bus) { map->reg_read = config->reg_read; map->reg_write = config->reg_write; + map->reg_update_bits = config->reg_update_bits; map->defer_caching = false; goto skip_format_initialization; @@ -890,10 +903,13 @@ } else { map->reg_read = _regmap_bus_read; map->reg_update_bits = bus->reg_update_bits; - } + /* Bulk read/write */ + map->read = bus->read; + map->write = bus->write; - reg_endian = regmap_get_reg_endian(bus, config); - val_endian = regmap_get_val_endian(dev, bus, config); + reg_endian = regmap_get_reg_endian(bus, config); + val_endian = regmap_get_val_endian(dev, bus, config); + } switch (config->reg_bits + map->reg_shift) { case 2: @@ -1667,8 +1683,6 @@ size_t len; int i; - WARN_ON(!map->bus); - /* Check for unwritable or noinc registers in range * before we start */ @@ -1750,7 +1764,7 @@ val = work_val; } - if (map->async && map->bus->async_write) { + if (map->async && map->bus && map->bus->async_write) { struct regmap_async *async; trace_regmap_async_write_start(map, reg, val_len); @@ -1818,11 +1832,11 @@ * write. 
*/ if (val == work_val) - ret = map->bus->write(map->bus_context, map->work_buf, - map->format.reg_bytes + - map->format.pad_bytes + - val_len); - else if (map->bus->gather_write) + ret = map->write(map->bus_context, map->work_buf, + map->format.reg_bytes + + map->format.pad_bytes + + val_len); + else if (map->bus && map->bus->gather_write) ret = map->bus->gather_write(map->bus_context, map->work_buf, map->format.reg_bytes + map->format.pad_bytes, @@ -1840,7 +1854,7 @@ memcpy(buf, map->work_buf, map->format.reg_bytes); memcpy(buf + map->format.reg_bytes + map->format.pad_bytes, val, val_len); - ret = map->bus->write(map->bus_context, buf, len); + ret = map->write(map->bus_context, buf, len); kfree(buf); } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) { @@ -1897,7 +1911,7 @@ struct regmap_range_node *range; struct regmap *map = context; - WARN_ON(!map->bus || !map->format.format_write); + WARN_ON(!map->format.format_write); range = _regmap_range_lookup(map, reg); if (range) { @@ -1910,8 +1924,7 @@ trace_regmap_hw_write_start(map, reg, 1); - ret = map->bus->write(map->bus_context, map->work_buf, - map->format.buf_size); + ret = map->write(map->bus_context, map->work_buf, map->format.buf_size); trace_regmap_hw_write_done(map, reg, 1); @@ -1931,7 +1944,7 @@ { struct regmap *map = context; - WARN_ON(!map->bus || !map->format.format_val); + WARN_ON(!map->format.format_val); map->format.format_val(map->work_buf + map->format.reg_bytes + map->format.pad_bytes, val, 0); @@ -1945,7 +1958,7 @@ static inline void *_regmap_map_get_context(struct regmap *map) { - return (map->bus) ? map : map->bus_context; + return (map->bus || (!map->bus && map->read)) ? map : map->bus_context; } int _regmap_write(struct regmap *map, unsigned int reg, @@ -2355,7 +2368,7 @@ u8 = buf; *u8 |= map->write_flag_mask; - ret = map->bus->write(map->bus_context, buf, len); + ret = map->write(map->bus_context, buf, len); kfree(buf); @@ -2661,9 +2674,7 @@ struct regmap_range_node *range; int ret; - WARN_ON(!map->bus); - - if (!map->bus || !map->bus->read) + if (!map->read) return -EINVAL; range = _regmap_range_lookup(map, reg); @@ -2679,9 +2690,9 @@ map->read_flag_mask); trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); - ret = map->bus->read(map->bus_context, map->work_buf, - map->format.reg_bytes + map->format.pad_bytes, - val, val_len); + ret = map->read(map->bus_context, map->work_buf, + map->format.reg_bytes + map->format.pad_bytes, + val, val_len); trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); @@ -2792,8 +2803,6 @@ unsigned int v; int ret, i; - if (!map->bus) - return -EINVAL; if (val_len % map->format.val_bytes) return -EINVAL; if (!IS_ALIGNED(reg, map->reg_stride)) @@ -2808,7 +2817,7 @@ size_t chunk_count, chunk_bytes; size_t chunk_regs = val_count; - if (!map->bus->read) { + if (!map->read) { ret = -ENOTSUPP; goto out; } @@ -2868,7 +2877,7 @@ * @val: Pointer to data buffer * @val_len: Length of output buffer in bytes. * - * The regmap API usually assumes that bulk bus read operations will read a + * The regmap API usually assumes that bulk read operations will read a * range of registers. Some devices have certain registers for which a read * operation read will read from an internal FIFO. 
* @@ -2886,10 +2895,6 @@ size_t read_len; int ret; - if (!map->bus) - return -EINVAL; - if (!map->bus->read) - return -ENOTSUPP; if (val_len % map->format.val_bytes) return -EINVAL; if (!IS_ALIGNED(reg, map->reg_stride)) @@ -3003,7 +3008,7 @@ if (val_count == 0) return -EINVAL; - if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { + if (map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { ret = regmap_raw_read(map, reg, val, val_bytes * val_count); if (ret != 0) return ret; diff -u linux-aws-5.15-5.15.0/drivers/block/nbd.c linux-aws-5.15-5.15.0/drivers/block/nbd.c --- linux-aws-5.15-5.15.0/drivers/block/nbd.c +++ linux-aws-5.15-5.15.0/drivers/block/nbd.c @@ -1752,7 +1752,8 @@ if (err == -ENOSPC) err = -EEXIST; } else { - err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); + err = idr_alloc(&nbd_index_idr, nbd, 0, + (MINORMASK >> part_shift) + 1, GFP_KERNEL); if (err >= 0) index = err; } diff -u linux-aws-5.15-5.15.0/drivers/block/virtio_blk.c linux-aws-5.15-5.15.0/drivers/block/virtio_blk.c --- linux-aws-5.15-5.15.0/drivers/block/virtio_blk.c +++ linux-aws-5.15-5.15.0/drivers/block/virtio_blk.c @@ -989,14 +989,15 @@ { struct virtio_blk *vblk = vdev->priv; + /* Ensure no requests in virtqueues before deleting vqs. */ + blk_mq_freeze_queue(vblk->disk->queue); + /* Ensure we don't receive any more interrupts */ vdev->config->reset(vdev); /* Make sure no work handler is accessing the device. */ flush_work(&vblk->config_work); - blk_mq_quiesce_queue(vblk->disk->queue); - vdev->config->del_vqs(vdev); kfree(vblk->vqs); @@ -1014,7 +1015,7 @@ virtio_device_ready(vdev); - blk_mq_unquiesce_queue(vblk->disk->queue); + blk_mq_unfreeze_queue(vblk->disk->queue); return 0; } #endif diff -u linux-aws-5.15-5.15.0/drivers/bluetooth/hci_qca.c linux-aws-5.15-5.15.0/drivers/bluetooth/hci_qca.c --- linux-aws-5.15-5.15.0/drivers/bluetooth/hci_qca.c +++ linux-aws-5.15-5.15.0/drivers/bluetooth/hci_qca.c @@ -1315,7 +1315,8 @@ /* Give the controller time to process the request */ if (qca_is_wcn399x(qca_soc_type(hu)) || - qca_is_wcn6750(qca_soc_type(hu))) + qca_is_wcn6750(qca_soc_type(hu)) || + qca_is_wcn6855(qca_soc_type(hu))) usleep_range(1000, 10000); else msleep(300); @@ -1392,7 +1393,8 @@ static int qca_check_speeds(struct hci_uart *hu) { if (qca_is_wcn399x(qca_soc_type(hu)) || - qca_is_wcn6750(qca_soc_type(hu))) { + qca_is_wcn6750(qca_soc_type(hu)) || + qca_is_wcn6855(qca_soc_type(hu))) { if (!qca_get_speed(hu, QCA_INIT_SPEED) && !qca_get_speed(hu, QCA_OPER_SPEED)) return -EINVAL; @@ -1426,7 +1428,8 @@ * changing the baudrate of chip and host. */ if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type)) + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type)) hci_uart_set_flow_control(hu, true); if (soc_type == QCA_WCN3990) { @@ -1444,7 +1447,8 @@ error: if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type)) + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type)) hci_uart_set_flow_control(hu, false); if (soc_type == QCA_WCN3990) { @@ -1680,7 +1684,8 @@ return 0; if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type)) { + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type)) { ret = qca_regulator_init(hu); } else { qcadev = serdev_device_get_drvdata(hu->serdev); @@ -1721,7 +1726,8 @@ bt_dev_info(hdev, "setting up %s", qca_is_wcn399x(soc_type) ? "wcn399x" : - (soc_type == QCA_WCN6750) ? "wcn6750" : "ROME/QCA6390"); + (soc_type == QCA_WCN6750) ? "wcn6750" : + (soc_type == QCA_WCN6855) ? 
"wcn6855" : "ROME/QCA6390"); qca->memdump_state = QCA_MEMDUMP_IDLE; @@ -1733,7 +1739,8 @@ clear_bit(QCA_SSR_TRIGGERED, &qca->flags); if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type)) { + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type)) { set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); ret = qca_read_soc_version(hdev, &ver, soc_type); @@ -1754,7 +1761,8 @@ } if (!(qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type))) { + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type))) { /* Get QCA version information */ ret = qca_read_soc_version(hdev, &ver, soc_type); if (ret) @@ -1880,6 +1888,20 @@ .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, }; +static const struct qca_device_data qca_soc_data_wcn6855 = { + .soc_type = QCA_WCN6855, + .vregs = (struct qca_vreg []) { + { "vddio", 5000 }, + { "vddbtcxmx", 126000 }, + { "vddrfacmn", 12500 }, + { "vddrfa0p8", 102000 }, + { "vddrfa1p7", 302000 }, + { "vddrfa1p2", 257000 }, + }, + .num_vregs = 6, + .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, +}; + static void qca_power_shutdown(struct hci_uart *hu) { struct qca_serdev *qcadev; @@ -1909,7 +1931,7 @@ host_set_baudrate(hu, 2400); qca_send_power_pulse(hu, false); qca_regulator_disable(qcadev); - } else if (soc_type == QCA_WCN6750) { + } else if (soc_type == QCA_WCN6750 || soc_type == QCA_WCN6855) { gpiod_set_value_cansleep(qcadev->bt_en, 0); msleep(100); qca_regulator_disable(qcadev); @@ -2044,7 +2066,8 @@ if (data && (qca_is_wcn399x(data->soc_type) || - qca_is_wcn6750(data->soc_type))) { + qca_is_wcn6750(data->soc_type) || + qca_is_wcn6855(data->soc_type))) { qcadev->btsoc_type = data->soc_type; qcadev->bt_power = devm_kzalloc(&serdev->dev, sizeof(struct qca_power), @@ -2064,14 +2087,18 @@ qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR_OR_NULL(qcadev->bt_en) && data->soc_type == QCA_WCN6750) { + if (IS_ERR(qcadev->bt_en) && + (data->soc_type == QCA_WCN6750 || + data->soc_type == QCA_WCN6855)) { dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n"); power_ctrl_enabled = false; } qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl", GPIOD_IN); - if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && data->soc_type == QCA_WCN6750) + if (IS_ERR(qcadev->sw_ctrl) && + (data->soc_type == QCA_WCN6750 || + data->soc_type == QCA_WCN6855)) dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n"); qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); @@ -2093,7 +2120,7 @@ qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR_OR_NULL(qcadev->bt_en)) { + if (IS_ERR(qcadev->bt_en)) { dev_warn(&serdev->dev, "failed to acquire enable gpio\n"); power_ctrl_enabled = false; } @@ -2147,8 +2174,9 @@ struct qca_power *power = qcadev->bt_power; if ((qca_is_wcn399x(qcadev->btsoc_type) || - qca_is_wcn6750(qcadev->btsoc_type)) && - power->vregs_on) + qca_is_wcn6750(qcadev->btsoc_type) || + qca_is_wcn6855(qcadev->btsoc_type)) && + power->vregs_on) qca_power_shutdown(&qcadev->serdev_hu); else if (qcadev->susclk) clk_disable_unprepare(qcadev->susclk); @@ -2332,6 +2360,7 @@ { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991}, { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998}, { .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750}, + { .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match); diff -u linux-aws-5.15-5.15.0/drivers/clk/clk.c 
linux-aws-5.15-5.15.0/drivers/clk/clk.c --- linux-aws-5.15-5.15.0/drivers/clk/clk.c +++ linux-aws-5.15-5.15.0/drivers/clk/clk.c @@ -251,6 +251,17 @@ } } + /* + * This could be called with the enable lock held, or from atomic + * context. If the parent isn't enabled already, we can't do + * anything here. We can also assume this clock isn't enabled. + */ + if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent) + if (!clk_core_is_enabled(core->parent)) { + ret = false; + goto done; + } + ret = core->ops->is_enabled(core->hw); done: if (core->rpm_enabled) @@ -414,6 +425,9 @@ if (IS_ERR(hw)) return ERR_CAST(hw); + if (!hw) + return NULL; + return hw->core; } diff -u linux-aws-5.15-5.15.0/drivers/clk/hisilicon/clk-hi3559a.c linux-aws-5.15-5.15.0/drivers/clk/hisilicon/clk-hi3559a.c --- linux-aws-5.15-5.15.0/drivers/clk/hisilicon/clk-hi3559a.c +++ linux-aws-5.15-5.15.0/drivers/clk/hisilicon/clk-hi3559a.c @@ -491,7 +491,6 @@ clk = clk_register(NULL, &p_clk->hw); if (IS_ERR(clk)) { - devm_kfree(dev, p_clk); dev_err(dev, "%s: failed to register clock %s\n", __func__, clks[i].name); continue; diff -u linux-aws-5.15-5.15.0/drivers/clk/imx/clk-imx8mp.c linux-aws-5.15-5.15.0/drivers/clk/imx/clk-imx8mp.c --- linux-aws-5.15-5.15.0/drivers/clk/imx/clk-imx8mp.c +++ linux-aws-5.15-5.15.0/drivers/clk/imx/clk-imx8mp.c @@ -17,6 +17,7 @@ static u32 share_count_nand; static u32 share_count_media; +static u32 share_count_usb; static const char * const pll_ref_sels[] = { "osc_24m", "dummy", "dummy", "dummy", }; static const char * const audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", }; @@ -354,7 +355,7 @@ "clk_ext2", "audio_pll2_out", "video_pll1_out", }; -static const char * const imx8mp_media_disp1_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", +static const char * const imx8mp_media_disp_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", }; @@ -395,6 +396,11 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; +static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out", + "dummy", "dummy", "gpu_pll_out", "vpu_pll_out", + "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3", + "dummy", "dummy", "osc_24m", "dummy", "osc_32k"}; + static struct clk_hw **hws; static struct clk_hw_onecell_data *clk_hw_data; @@ -513,6 +519,15 @@ hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_500m_cg", 1, 2); hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1); + hws[IMX8MP_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", anatop_base + 0x128, 4, 4, + imx8mp_clkout_sels, ARRAY_SIZE(imx8mp_clkout_sels)); + hws[IMX8MP_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", anatop_base + 0x128, 0, 4); + hws[IMX8MP_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", anatop_base + 0x128, 8); + hws[IMX8MP_CLK_CLKOUT2_SEL] = imx_clk_hw_mux2("clkout2_sel", anatop_base + 0x128, 20, 4, + imx8mp_clkout_sels, ARRAY_SIZE(imx8mp_clkout_sels)); + hws[IMX8MP_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", anatop_base + 0x128, 16, 4); + hws[IMX8MP_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", anatop_base + 0x128, 24); + hws[IMX8MP_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mp_a53_sels, ccm_base + 0x8000); hws[IMX8MP_CLK_A53_SRC] = hws[IMX8MP_CLK_A53_DIV]; hws[IMX8MP_CLK_A53_CG] = hws[IMX8MP_CLK_A53_DIV]; @@ -547,6 
+562,7 @@ hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000); hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100); hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200); + hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300); hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1); @@ -608,7 +624,7 @@ hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80); hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00); hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80); - hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp1_pix_sels, ccm_base + 0xbe00); + hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00); hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80); hws[IMX8MP_CLK_MEDIA_LDB] = imx8m_clk_hw_composite("media_ldb", imx8mp_media_ldb_sels, ccm_base + 0xbf00); hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite_critical("mem_repair", imx8mp_memrepair_sels, ccm_base + 0xbf80); @@ -667,7 +683,8 @@ hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0); hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0); hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0); - hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0); + hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate2_shared2("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0, &share_count_usb); + hws[IMX8MP_CLK_USB_SUSP] = imx_clk_hw_gate2_shared2("usb_suspend_clk", "osc_32k", ccm_base + 0x44d0, 0, &share_count_usb); hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0); hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0); hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0); diff -u linux-aws-5.15-5.15.0/drivers/clk/qcom/reset.c linux-aws-5.15-5.15.0/drivers/clk/qcom/reset.c --- linux-aws-5.15-5.15.0/drivers/clk/qcom/reset.c +++ linux-aws-5.15-5.15.0/drivers/clk/qcom/reset.c @@ -22,8 +22,8 @@ return 0; } -static int -qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) +static int qcom_reset_set_assert(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) { struct qcom_reset_controller *rst; const struct qcom_reset_map *map; @@ -33,21 +33,22 @@ map = &rst->reset_map[id]; mask = map->bitmask ? map->bitmask : BIT(map->bit); - return regmap_update_bits(rst->regmap, map->reg, mask, mask); + regmap_update_bits(rst->regmap, map->reg, mask, assert ? 
mask : 0); + + /* Read back the register to ensure write completion, ignore the value */ + regmap_read(rst->regmap, map->reg, &mask); + + return 0; } -static int -qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) +static int qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id) { - struct qcom_reset_controller *rst; - const struct qcom_reset_map *map; - u32 mask; - - rst = to_qcom_reset_controller(rcdev); - map = &rst->reset_map[id]; - mask = map->bitmask ? map->bitmask : BIT(map->bit); + return qcom_reset_set_assert(rcdev, id, true); +} - return regmap_update_bits(rst->regmap, map->reg, mask, 0); +static int qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id) +{ + return qcom_reset_set_assert(rcdev, id, false); } const struct reset_control_ops qcom_reset_ops = { diff -u linux-aws-5.15-5.15.0/drivers/cpufreq/armada-37xx-cpufreq.c linux-aws-5.15-5.15.0/drivers/cpufreq/armada-37xx-cpufreq.c --- linux-aws-5.15-5.15.0/drivers/cpufreq/armada-37xx-cpufreq.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/armada-37xx-cpufreq.c @@ -14,10 +14,8 @@ #include #include #include +#include #include -#include -#include -#include #include #include #include diff -u linux-aws-5.15-5.15.0/drivers/cpufreq/brcmstb-avs-cpufreq.c linux-aws-5.15-5.15.0/drivers/cpufreq/brcmstb-avs-cpufreq.c --- linux-aws-5.15-5.15.0/drivers/cpufreq/brcmstb-avs-cpufreq.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/brcmstb-avs-cpufreq.c @@ -481,6 +481,8 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu) { struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + if (!policy) + return 0; struct private_data *priv = policy->driver_data; cpufreq_cpu_put(policy); diff -u linux-aws-5.15-5.15.0/drivers/cpufreq/intel_pstate.c linux-aws-5.15-5.15.0/drivers/cpufreq/intel_pstate.c --- linux-aws-5.15-5.15.0/drivers/cpufreq/intel_pstate.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/intel_pstate.c @@ -2835,6 +2835,9 @@ if (min_pstate < cpu->min_perf_ratio) min_pstate = cpu->min_perf_ratio; + if (min_pstate > cpu->max_perf_ratio) + min_pstate = cpu->max_perf_ratio; + max_pstate = min(cap_pstate, cpu->max_perf_ratio); if (max_pstate < min_pstate) max_pstate = min_pstate; diff -u linux-aws-5.15-5.15.0/drivers/cpufreq/qcom-cpufreq-nvmem.c linux-aws-5.15-5.15.0/drivers/cpufreq/qcom-cpufreq-nvmem.c --- linux-aws-5.15-5.15.0/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff -u linux-aws-5.15-5.15.0/drivers/dma/Kconfig linux-aws-5.15-5.15.0/drivers/dma/Kconfig --- linux-aws-5.15-5.15.0/drivers/dma/Kconfig +++ linux-aws-5.15-5.15.0/drivers/dma/Kconfig @@ -645,16 +645,16 @@ config TEGRA210_ADMA tristate "NVIDIA Tegra210 ADMA support" - depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) + depends on (ARCH_TEGRA || COMPILE_TEST) select DMA_ENGINE select DMA_VIRTUAL_CHANNELS help - Support for the NVIDIA Tegra210 ADMA controller driver. The - DMA controller has multiple DMA channels and is used to service - various audio clients in the Tegra210 audio processing engine - (APE). This DMA controller transfers data from memory to - peripheral and vice versa. It does not support memory to - memory data transfer. + Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA + controller driver. The DMA controller has multiple DMA channels + and is used to service various audio clients in the Tegra210 + audio processing engine (APE). 
This DMA controller transfers + data from memory to peripheral and vice versa. It does not + support memory to memory data transfer. config TIMB_DMA tristate "Timberdale FPGA DMA support" diff -u linux-aws-5.15-5.15.0/drivers/dma/fsl-qdma.c linux-aws-5.15-5.15.0/drivers/dma/fsl-qdma.c --- linux-aws-5.15-5.15.0/drivers/dma/fsl-qdma.c +++ linux-aws-5.15-5.15.0/drivers/dma/fsl-qdma.c @@ -109,6 +109,7 @@ #define FSL_QDMA_CMD_WTHROTL_OFFSET 20 #define FSL_QDMA_CMD_DSEN_OFFSET 19 #define FSL_QDMA_CMD_LWC_OFFSET 16 +#define FSL_QDMA_CMD_PF BIT(17) /* Field definition for Descriptor status */ #define QDMA_CCDF_STATUS_RTE BIT(5) @@ -384,7 +385,8 @@ qdma_csgf_set_f(csgf_dest, len); /* Descriptor Buffer */ cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << - FSL_QDMA_CMD_RWTTYPE_OFFSET); + FSL_QDMA_CMD_RWTTYPE_OFFSET) | + FSL_QDMA_CMD_PF; sdf->data = QDMA_SDDF_CMD(cmd); cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE << @@ -805,7 +807,7 @@ int i; int cpu; int ret; - char irq_name[20]; + char irq_name[32]; fsl_qdma->error_irq = platform_get_irq_byname(pdev, "qdma-error"); @@ -1201,10 +1203,6 @@ if (!fsl_qdma->queue) return -ENOMEM; - ret = fsl_qdma_irq_init(pdev, fsl_qdma); - if (ret) - return ret; - fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0"); if (fsl_qdma->irq_base < 0) return fsl_qdma->irq_base; @@ -1243,16 +1241,19 @@ platform_set_drvdata(pdev, fsl_qdma); - ret = dma_async_device_register(&fsl_qdma->dma_dev); + ret = fsl_qdma_reg_init(fsl_qdma); if (ret) { - dev_err(&pdev->dev, - "Can't register NXP Layerscape qDMA engine.\n"); + dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n"); return ret; } - ret = fsl_qdma_reg_init(fsl_qdma); + ret = fsl_qdma_irq_init(pdev, fsl_qdma); + if (ret) + return ret; + + ret = dma_async_device_register(&fsl_qdma->dma_dev); if (ret) { - dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n"); + dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n"); return ret; } diff -u linux-aws-5.15-5.15.0/drivers/dma/ti/edma.c linux-aws-5.15-5.15.0/drivers/dma/ti/edma.c --- linux-aws-5.15-5.15.0/drivers/dma/ti/edma.c +++ linux-aws-5.15-5.15.0/drivers/dma/ti/edma.c @@ -2462,6 +2462,11 @@ if (irq > 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", dev_name(dev)); + if (!irq_name) { + ret = -ENOMEM; + goto err_disable_pm; + } + ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, ecc); if (ret) { @@ -2478,6 +2483,11 @@ if (irq > 0) { irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", dev_name(dev)); + if (!irq_name) { + ret = -ENOMEM; + goto err_disable_pm; + } + ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, ecc); if (ret) { diff -u linux-aws-5.15-5.15.0/drivers/firewire/core-card.c linux-aws-5.15-5.15.0/drivers/firewire/core-card.c --- linux-aws-5.15-5.15.0/drivers/firewire/core-card.c +++ linux-aws-5.15-5.15.0/drivers/firewire/core-card.c @@ -429,7 +429,23 @@ */ card->bm_generation = generation; - if (root_device == NULL) { + if (card->gap_count == 0) { + /* + * If self IDs have inconsistent gap counts, do a + * bus reset ASAP. The config rom read might never + * complete, so don't wait for it. However, still + * send a PHY configuration packet prior to the + * bus reset. The PHY configuration packet might + * fail, but 1394-2008 8.4.5.2 explicitly permits + * it in this case, so it should be safe to try. + */ + new_root_id = local_id; + /* + * We must always send a bus reset if the gap count + * is inconsistent, so bypass the 5-reset limit. 
+ */ + card->bm_retries = 0; + } else if (root_device == NULL) { /* * Either link_on is false, or we failed to read the * config rom. In either case, pick another root. @@ -484,7 +500,19 @@ fw_notice(card, "phy config: new root=%x, gap_count=%d\n", new_root_id, gap_count); fw_send_phy_config(card, new_root_id, generation, gap_count); - reset_bus(card, true); + /* + * Where possible, use a short bus reset to minimize + * disruption to isochronous transfers. But in the event + * of a gap count inconsistency, use a long bus reset. + * + * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus + * may set different gap counts after a bus reset. On a mixed + * 1394/1394a bus, a short bus reset can get doubled. Some + * nodes may treat the double reset as one bus reset and others + * may treat it as two, causing a gap count inconsistency + * again. Using a long bus reset prevents this. + */ + reset_bus(card, card->gap_count != 0); /* Will allocate broadcast channel after the reset. */ goto out; } diff -u linux-aws-5.15-5.15.0/drivers/firmware/efi/capsule-loader.c linux-aws-5.15-5.15.0/drivers/firmware/efi/capsule-loader.c --- linux-aws-5.15-5.15.0/drivers/firmware/efi/capsule-loader.c +++ linux-aws-5.15-5.15.0/drivers/firmware/efi/capsule-loader.c @@ -292,7 +292,7 @@ return -ENOMEM; } - cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL); + cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL); if (!cap_info->phys) { kfree(cap_info->pages); kfree(cap_info); diff -u linux-aws-5.15-5.15.0/drivers/firmware/efi/efi-init.c linux-aws-5.15-5.15.0/drivers/firmware/efi/efi-init.c --- linux-aws-5.15-5.15.0/drivers/firmware/efi/efi-init.c +++ linux-aws-5.15-5.15.0/drivers/firmware/efi/efi-init.c @@ -143,15 +143,6 @@ case EFI_CONVENTIONAL_MEMORY: case EFI_PERSISTENT_MEMORY: /* - * Special purpose memory is 'soft reserved', which means it - * is set aside initially, but can be hotplugged back in or - * be assigned to the dax driver after boot. - */ - if (efi_soft_reserve_enabled() && - (md->attribute & EFI_MEMORY_SP)) - return false; - - /* * According to the spec, these regions are no longer reserved * after calling ExitBootServices(). However, we can only use * them as System RAM if they can be mapped writeback cacheable. @@ -195,6 +186,16 @@ size = npages << PAGE_SHIFT; if (is_memory(md)) { + /* + * Special purpose memory is 'soft reserved', which + * means it is set aside initially. Don't add a memblock + * for it now so that it can be hotplugged back in or + * be assigned to the dax driver after boot. 
+ */ + if (efi_soft_reserve_enabled() && + (md->attribute & EFI_MEMORY_SP)) + continue; + early_init_dt_add_memory_arch(paddr, size); if (!is_usable_memory(md)) diff -u linux-aws-5.15-5.15.0/drivers/gpio/gpiolib.c linux-aws-5.15-5.15.0/drivers/gpio/gpiolib.c --- linux-aws-5.15-5.15.0/drivers/gpio/gpiolib.c +++ linux-aws-5.15-5.15.0/drivers/gpio/gpiolib.c @@ -774,11 +774,11 @@ ret = gpiochip_irqchip_init_valid_mask(gc); if (ret) - goto err_remove_acpi_chip; + goto err_free_hogs; ret = gpiochip_irqchip_init_hw(gc); if (ret) - goto err_remove_acpi_chip; + goto err_remove_irqchip_mask; ret = gpiochip_add_irqchip(gc, lock_key, request_key); if (ret) @@ -803,13 +803,13 @@ gpiochip_irqchip_remove(gc); err_remove_irqchip_mask: gpiochip_irqchip_free_valid_mask(gc); -err_remove_acpi_chip: +err_free_hogs: + gpiochip_free_hogs(gc); acpi_gpiochip_remove(gc); + gpiochip_remove_pin_ranges(gc); err_remove_of_chip: - gpiochip_free_hogs(gc); of_gpiochip_remove(gc); err_free_gpiochip_mask: - gpiochip_remove_pin_ranges(gc); gpiochip_free_valid_mask(gc); if (gdev->dev.release) { /* release() has been registered by gpiochip_setup_dev() */ diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu.h linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu.h --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1045,6 +1045,8 @@ bool in_s3; bool in_s4; bool in_s0ix; + /* indicate amdgpu suspension status */ + bool suspend_complete; atomic_t in_gpu_reset; enum pp_mp1_state mp1_state; diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2487,10 +2487,6 @@ if (r) goto init_failed; - r = amdgpu_amdkfd_resume_iommu(adev); - if (r) - goto init_failed; - r = amdgpu_device_ip_hw_init_phase1(adev); if (r) goto init_failed; @@ -2529,6 +2525,10 @@ if (!adev->gmc.xgmi.pending_reset) amdgpu_amdkfd_device_init(adev); + r = amdgpu_amdkfd_resume_iommu(adev); + if (r) + goto init_failed; + amdgpu_fru_get_product_info(adev); init_failed: diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2249,6 +2249,7 @@ struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = false; if (amdgpu_acpi_is_s0ix_active(adev)) adev->in_s0ix = true; else @@ -2261,6 +2262,7 @@ struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + adev->suspend_complete = true; if (amdgpu_acpi_should_gpu_reset(adev)) return amdgpu_asic_reset(adev); diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3268,6 +3268,14 @@ gfx_v9_0_cp_gfx_enable(adev, true); + /* Now only limit the quirk on the APU gfx9 series and already + * confirmed that the APU gfx10/gfx11 needn't such update. 
+ */ + if (adev->flags & AMD_IS_APU && + adev->in_s3 && !adev->suspend_complete) { + DRM_INFO(" Will skip the CSB packet resubmit\n"); + return 0; + } r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/soc15.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/soc15.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/soc15.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -605,11 +605,34 @@ return AMD_RESET_METHOD_MODE1; } +static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + /* Will reset for the following suspend abort cases. + * 1) Only reset limit on APU side, dGPU hasn't checked yet. + * 2) S3 suspend abort and TOS already launched. + */ + if (adev->flags & AMD_IS_APU && adev->in_s3 && + !adev->suspend_complete && + sol_reg) + return true; + + return false; +} + static int soc15_asic_reset(struct amdgpu_device *adev) { /* original raven doesn't have full asic reset */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN) || - (adev->apu_flags & AMD_APU_IS_RAVEN2)) + /* On the latest Raven, the GPU reset can be performed + * successfully. So now, temporarily enable it for the + * S3 suspend abort case. + */ + if (((adev->apu_flags & AMD_APU_IS_RAVEN) || + (adev->apu_flags & AMD_APU_IS_RAVEN2)) && + !soc15_need_reset_on_resume(adev)) return 0; switch (soc15_asic_reset_method(adev)) { @@ -1494,6 +1517,10 @@ { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (soc15_need_reset_on_resume(adev)) { + dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); + soc15_asic_reset(adev); + } return soc15_common_hw_init(adev); } diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/vega10_ih.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/vega10_ih.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -371,6 +371,12 @@ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + out: return (wptr & ih->ptr_mask); } diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/vega20_ih.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/vega20_ih.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -422,6 +422,12 @@ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. 
+ */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + out: return (wptr & ih->ptr_mask); } diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2139,6 +2139,7 @@ if (adev->dm.dmub_srv) { dmub_srv_destroy(adev->dm.dmub_srv); + kfree(adev->dm.dmub_srv); adev->dm.dmub_srv = NULL; } diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1304,7 +1304,7 @@ const uint32_t rd_buf_size = 10; struct pipe_ctx *pipe_ctx; ssize_t result = 0; - int i, r, str_len = 30; + int i, r, str_len = 10; rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/Makefile linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/Makefile --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/Makefile +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/Makefile @@ -23,12 +23,12 @@ # Makefile for Display Core (dc) component. # -DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual +DC_LIBS = basics bios clk_mgr dce dml gpio irq virtual ifdef CONFIG_DRM_AMD_DC_DCN DC_LIBS += dcn20 DC_LIBS += dsc -DC_LIBS += dcn10 dml +DC_LIBS += dcn10 DC_LIBS += dcn21 DC_LIBS += dcn30 DC_LIBS += dcn301 reverted: --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c +++ linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c @@ -26,12 +26,12 @@ #include "bw_fixed.h" +#define MIN_I64 \ + (int64_t)(-(1LL << 63)) + #define MAX_I64 \ (int64_t)((1ULL << 63) - 1) -#define MIN_I64 \ - (-MAX_I64 - 1) - #define FRACTIONAL_PART_MASK \ ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1) diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1706,6 +1706,9 @@ { struct dpp *dpp = pipe_ctx->plane_res.dpp; + if (!stream) + return false; + if (dpp == NULL) return false; @@ -1728,8 +1731,8 @@ } else dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS); - if (stream != NULL && stream->ctx != NULL && - stream->out_transfer_func != NULL) { + if (stream->ctx && + stream->out_transfer_func) { log_tf(stream->ctx, stream->out_transfer_func, dpp->regamma_params.hw_points_num); diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -35,7 +35,7 @@ #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" -#include "dml/dcn2x/dcn2x.h" +#include "dml/dcn20/dcn20_fpu.h" #include "dcn10/dcn10_hubp.h" #include "dcn10/dcn10_ipp.h" diff -u 
linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -35,7 +35,7 @@ #include "include/irq_service_interface.h" #include "dcn20/dcn20_resource.h" -#include "dml/dcn2x/dcn2x.h" +#include "dml/dcn20/dcn20_fpu.h" #include "clk_mgr.h" #include "dcn10/dcn10_hubp.h" diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/Makefile linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/Makefile --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -58,20 +58,21 @@ ifdef CONFIG_DRM_AMD_DC_DCN CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags) @@ -91,17 +92,22 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags) -DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ +DML = calcs/dce_calcs.o calcs/custom_float.o calcs/bw_fixed.o ifdef CONFIG_DRM_AMD_DC_DCN +DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o +DML += dcn20/dcn20_fpu.o DML += display_mode_vba.o 
dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o -DML += dcn2x/dcn2x.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o DML += dsc/rc_calc_fpu.o +DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o endif AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/amdgpu_dpm.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/amdgpu_dpm.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -188,29 +188,6 @@ return vrefresh; } -bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) -{ - switch (sensor) { - case THERMAL_TYPE_RV6XX: - case THERMAL_TYPE_RV770: - case THERMAL_TYPE_EVERGREEN: - case THERMAL_TYPE_SUMO: - case THERMAL_TYPE_NI: - case THERMAL_TYPE_SI: - case THERMAL_TYPE_CI: - case THERMAL_TYPE_KV: - return true; - case THERMAL_TYPE_ADT7473_WITH_INTERNAL: - case THERMAL_TYPE_EMC2103_WITH_INTERNAL: - return false; /* need special handling */ - case THERMAL_TYPE_NONE: - case THERMAL_TYPE_EXTERNAL: - case THERMAL_TYPE_EXTERNAL_GPIO: - default: - return false; - } -} - union power_info { struct _ATOM_POWERPLAY_INFO info; struct _ATOM_POWERPLAY_INFO_V2 info_2; diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -501,8 +501,6 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, void *data, uint32_t *size); -bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor); - int amdgpu_get_platform_caps(struct amdgpu_device *adev); int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c @@ -1256,6 +1256,29 @@ } } +static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) +{ + switch (sensor) { + case THERMAL_TYPE_RV6XX: + case THERMAL_TYPE_RV770: + case THERMAL_TYPE_EVERGREEN: + case THERMAL_TYPE_SUMO: + case THERMAL_TYPE_NI: + case THERMAL_TYPE_SI: + case THERMAL_TYPE_CI: + case THERMAL_TYPE_KV: + return true; + case THERMAL_TYPE_ADT7473_WITH_INTERNAL: + case THERMAL_TYPE_EMC2103_WITH_INTERNAL: + return false; /* need special handling */ + case THERMAL_TYPE_NONE: + case THERMAL_TYPE_EXTERNAL: + case THERMAL_TYPE_EXTERNAL_GPIO: + default: + return false; + } +} + static int kv_dpm_enable(struct amdgpu_device *adev) { struct kv_power_info *pi = kv_get_pi(adev); @@ -1352,7 +1375,7 @@ } if (adev->irq.installed && - amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { + kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); if (ret) { DRM_ERROR("kv_set_thermal_temperature_range failed\n"); diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/bridge/lontium-lt8912b.c linux-aws-5.15-5.15.0/drivers/gpu/drm/bridge/lontium-lt8912b.c --- 
linux-aws-5.15-5.15.0/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -571,6 +571,10 @@ if (ret) goto error; + ret = lt8912_attach_dsi(lt); + if (ret) + goto error; + return 0; error: @@ -726,15 +730,8 @@ drm_bridge_add(<->bridge); - ret = lt8912_attach_dsi(lt); - if (ret) - goto err_attach; - return 0; -err_attach: - drm_bridge_remove(<->bridge); - lt8912_free_i2c(lt); err_i2c: lt8912_put_dt(lt); err_dt_parse: diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/drm_syncobj.c linux-aws-5.15-5.15.0/drivers/gpu/drm/drm_syncobj.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/drm_syncobj.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/drm_syncobj.c @@ -1021,7 +1021,8 @@ uint64_t *points; uint32_t signaled_count, i; - if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) + if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) lockdep_assert_none_held_once(); points = kmalloc_array(count, sizeof(*points), GFP_KERNEL); @@ -1090,7 +1091,8 @@ * fallthough and try a 0 timeout wait! */ - if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { + if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT | + DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) { for (i = 0; i < count; ++i) drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]); } diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/i915/display/intel_display_debugfs.c linux-aws-5.15-5.15.0/drivers/gpu/drm/i915/display/intel_display_debugfs.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -571,8 +571,8 @@ * reg for DC3CO debugging and validation, * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter. */ - seq_printf(m, "DC3CO count: %d\n", - intel_de_read(dev_priv, DMC_DEBUG3)); + seq_printf(m, "DC3CO count: %d\n", intel_de_read(dev_priv, IS_DGFX(dev_priv) ? + DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3)); } else { dc5_reg = IS_BROXTON(dev_priv) ? 
BXT_DMC_DC3_DC5_COUNT : SKL_DMC_DC3_DC5_COUNT; diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/i915/i915_reg.h linux-aws-5.15-5.15.0/drivers/gpu/drm/i915/i915_reg.h --- linux-aws-5.15-5.15.0/drivers/gpu/drm/i915/i915_reg.h +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/i915/i915_reg.h @@ -7830,7 +7830,8 @@ #define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088) #define DG1_DMC_DEBUG_DC5_COUNT _MMIO(0x134154) -#define DMC_DEBUG3 _MMIO(0x101090) +#define TGL_DMC_DEBUG3 _MMIO(0x101090) +#define DG1_DMC_DEBUG3 _MMIO(0x13415c) /* Display Internal Timeout Register */ #define RM_TIMEOUT _MMIO(0x42060) diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/lima/lima_gem.c linux-aws-5.15-5.15.0/drivers/gpu/drm/lima/lima_gem.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/lima/lima_gem.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/lima/lima_gem.c @@ -74,29 +74,34 @@ } else { bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL); if (!bo->base.sgt) { - sg_free_table(&sgt); - return -ENOMEM; + ret = -ENOMEM; + goto err_out0; } } ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0); - if (ret) { - sg_free_table(&sgt); - kfree(bo->base.sgt); - bo->base.sgt = NULL; - return ret; - } + if (ret) + goto err_out1; *bo->base.sgt = sgt; if (vm) { ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT); if (ret) - return ret; + goto err_out2; } bo->heap_size = new_size; return 0; + +err_out2: + dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0); +err_out1: + kfree(bo->base.sgt); + bo->base.sgt = NULL; +err_out0: + sg_free_table(&sgt); + return ret; } int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/mediatek/mtk_drm_crtc.c linux-aws-5.15-5.15.0/drivers/gpu/drm/mediatek/mtk_drm_crtc.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -92,11 +92,13 @@ struct drm_crtc *crtc = &mtk_crtc->base; unsigned long flags; - spin_lock_irqsave(&crtc->dev->event_lock, flags); - drm_crtc_send_vblank_event(crtc, mtk_crtc->event); - drm_crtc_vblank_put(crtc); - mtk_crtc->event = NULL; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + if (mtk_crtc->event) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, mtk_crtc->event); + drm_crtc_vblank_put(crtc); + mtk_crtc->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } } static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc) diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/mediatek/mtk_dsi.c linux-aws-5.15-5.15.0/drivers/gpu/drm/mediatek/mtk_dsi.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/mediatek/mtk_dsi.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -70,8 +70,8 @@ #define DSI_PS_WC 0x3fff #define DSI_PS_SEL (3 << 16) #define PACKED_PS_16BIT_RGB565 (0 << 16) -#define LOOSELY_PS_18BIT_RGB666 (1 << 16) -#define PACKED_PS_18BIT_RGB666 (2 << 16) +#define PACKED_PS_18BIT_RGB666 (1 << 16) +#define LOOSELY_PS_24BIT_RGB666 (2 << 16) #define PACKED_PS_24BIT_RGB888 (3 << 16) #define DSI_VSA_NL 0x20 @@ -366,10 +366,10 @@ ps_bpp_mode |= PACKED_PS_24BIT_RGB888; break; case MIPI_DSI_FMT_RGB666: - ps_bpp_mode |= PACKED_PS_18BIT_RGB666; + ps_bpp_mode |= LOOSELY_PS_24BIT_RGB666; break; case MIPI_DSI_FMT_RGB666_PACKED: - ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666; + ps_bpp_mode |= PACKED_PS_18BIT_RGB666; break; case MIPI_DSI_FMT_RGB565: ps_bpp_mode |= PACKED_PS_16BIT_RGB565; @@ -423,7 +423,7 @@ dsi_tmp_buf_bpp = 3; break; case MIPI_DSI_FMT_RGB666: - tmp_reg = 
LOOSELY_PS_18BIT_RGB666; + tmp_reg = LOOSELY_PS_24BIT_RGB666; dsi_tmp_buf_bpp = 3; break; case MIPI_DSI_FMT_RGB666_PACKED: diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/nouveau/nouveau_bo.c linux-aws-5.15-5.15.0/drivers/gpu/drm/nouveau/nouveau_bo.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/nouveau/nouveau_bo.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1186,6 +1186,8 @@ drm_vma_node_unmap(&nvbo->bo.base.vma_node, bdev->dev_mapping); nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); + nvbo->bo.resource->bus.offset = 0; + nvbo->bo.resource->bus.addr = NULL; goto retry; } diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/rockchip/inno_hdmi.c linux-aws-5.15-5.15.0/drivers/gpu/drm/rockchip/inno_hdmi.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/rockchip/inno_hdmi.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -402,7 +402,7 @@ hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF); hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF); - value = mode->hsync_start - mode->hdisplay; + value = mode->htotal - mode->hsync_start; hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF); hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF); @@ -417,7 +417,7 @@ value = mode->vtotal - mode->vdisplay; hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF); - value = mode->vsync_start - mode->vdisplay; + value = mode->vtotal - mode->vsync_start; hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF); value = mode->vsync_end - mode->vsync_start; diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/rockchip/rockchip_lvds.c linux-aws-5.15-5.15.0/drivers/gpu/drm/rockchip/rockchip_lvds.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -572,8 +572,7 @@ ret = -EINVAL; goto err_put_port; } else if (ret) { - DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n"); - ret = -EPROBE_DEFER; + dev_err_probe(dev, ret, "failed to find panel and bridge node\n"); goto err_put_port; } if (lvds->panel) diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dc.c linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dc.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dc.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dc.c @@ -1762,10 +1762,9 @@ return 0; } -static void tegra_dc_commit_state(struct tegra_dc *dc, - struct tegra_dc_state *state) +static void tegra_dc_set_clock_rate(struct tegra_dc *dc, + struct tegra_dc_state *state) { - u32 value; int err; err = clk_set_parent(dc->clk, state->clk); @@ -1796,11 +1795,6 @@ DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk), state->div); DRM_DEBUG_KMS("pclk: %lu\n", state->pclk); - - if (!dc->soc->has_nvdisplay) { - value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1; - tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); - } } static void tegra_dc_stop(struct tegra_dc *dc) @@ -2002,6 +1996,9 @@ u32 value; int err; + /* apply PLL changes */ + tegra_dc_set_clock_rate(dc, crtc_state); + err = host1x_client_resume(&dc->client); if (err < 0) { dev_err(dc->dev, "failed to resume: %d\n", err); @@ -2076,8 +2073,11 @@ else tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); - /* apply PLL and pixel clock changes */ - tegra_dc_commit_state(dc, crtc_state); + /* apply pixel clock changes */ + if (!dc->soc->has_nvdisplay) { + value = SHIFT_CLK_DIVIDER(crtc_state->div) | PIXEL_CLK_DIVIDER_PCD1; + tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); + } /* program display mode */ tegra_dc_set_timings(dc, mode); @@ -2685,6 +2685,7 @@ 
.has_win_b_vfilter_mem_client = true, .has_win_c_without_vert_filter = true, .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = false, }; static const struct tegra_dc_soc_info tegra30_dc_soc_info = { @@ -2707,6 +2708,7 @@ .has_win_b_vfilter_mem_client = true, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = true, + .has_pll_d2_out0 = true, }; static const struct tegra_dc_soc_info tegra114_dc_soc_info = { @@ -2729,6 +2731,7 @@ .has_win_b_vfilter_mem_client = false, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = true, + .has_pll_d2_out0 = true, }; static const struct tegra_dc_soc_info tegra124_dc_soc_info = { @@ -2751,6 +2754,7 @@ .has_win_b_vfilter_mem_client = false, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = true, }; static const struct tegra_dc_soc_info tegra210_dc_soc_info = { @@ -2773,6 +2777,7 @@ .has_win_b_vfilter_mem_client = false, .has_win_c_without_vert_filter = false, .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = true, }; static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = { @@ -2823,6 +2828,7 @@ .wgrps = tegra186_dc_wgrps, .num_wgrps = ARRAY_SIZE(tegra186_dc_wgrps), .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = false, }; static const struct tegra_windowgroup_soc tegra194_dc_wgrps[] = { @@ -2873,6 +2879,7 @@ .wgrps = tegra194_dc_wgrps, .num_wgrps = ARRAY_SIZE(tegra194_dc_wgrps), .plane_tiled_memory_bandwidth_x2 = false, + .has_pll_d2_out0 = false, }; static const struct of_device_id tegra_dc_of_match[] = { diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dpaux.c linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dpaux.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dpaux.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dpaux.c @@ -19,6 +19,7 @@ #include #include +#include #include #include "dp.h" @@ -524,7 +525,7 @@ if (err < 0) { dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n", dpaux->irq, err); - return err; + goto err_pm_disable; } disable_irq(dpaux->irq); @@ -544,7 +545,7 @@ */ err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C); if (err < 0) - return err; + goto err_pm_disable; #ifdef CONFIG_GENERIC_PINCONF dpaux->desc.name = dev_name(&pdev->dev); @@ -557,7 +558,8 @@ dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux); if (IS_ERR(dpaux->pinctrl)) { dev_err(&pdev->dev, "failed to register pincontrol\n"); - return PTR_ERR(dpaux->pinctrl); + err = PTR_ERR(dpaux->pinctrl); + goto err_pm_disable; } #endif /* enable and clear all interrupts */ @@ -570,7 +572,18 @@ list_add_tail(&dpaux->list, &dpaux_list); mutex_unlock(&dpaux_lock); + err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux); + if (err < 0) { + dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err); + goto err_pm_disable; + } + return 0; + +err_pm_disable: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return err; } static int tegra_dpaux_remove(struct platform_device *pdev) diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dsi.c linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dsi.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dsi.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dsi.c @@ -1534,9 +1534,11 @@ np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0); if (np) { struct platform_device *gangster = of_find_device_by_node(np); + of_node_put(np); + if (!gangster) + return -EPROBE_DEFER; dsi->slave = platform_get_drvdata(gangster); - of_node_put(np); if (!dsi->slave) { 
put_device(&gangster->dev); @@ -1584,48 +1586,58 @@ if (!pdev->dev.pm_domain) { dsi->rst = devm_reset_control_get(&pdev->dev, "dsi"); - if (IS_ERR(dsi->rst)) - return PTR_ERR(dsi->rst); + if (IS_ERR(dsi->rst)) { + err = PTR_ERR(dsi->rst); + goto remove; + } } dsi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dsi->clk)) { - dev_err(&pdev->dev, "cannot get DSI clock\n"); - return PTR_ERR(dsi->clk); + err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk), + "cannot get DSI clock\n"); + goto remove; } dsi->clk_lp = devm_clk_get(&pdev->dev, "lp"); if (IS_ERR(dsi->clk_lp)) { - dev_err(&pdev->dev, "cannot get low-power clock\n"); - return PTR_ERR(dsi->clk_lp); + err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp), + "cannot get low-power clock\n"); + goto remove; } dsi->clk_parent = devm_clk_get(&pdev->dev, "parent"); if (IS_ERR(dsi->clk_parent)) { - dev_err(&pdev->dev, "cannot get parent clock\n"); - return PTR_ERR(dsi->clk_parent); + err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent), + "cannot get parent clock\n"); + goto remove; } dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi"); if (IS_ERR(dsi->vdd)) { - dev_err(&pdev->dev, "cannot get VDD supply\n"); - return PTR_ERR(dsi->vdd); + err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd), + "cannot get VDD supply\n"); + goto remove; } err = tegra_dsi_setup_clocks(dsi); if (err < 0) { dev_err(&pdev->dev, "cannot setup clocks\n"); - return err; + goto remove; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); dsi->regs = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(dsi->regs)) - return PTR_ERR(dsi->regs); + if (IS_ERR(dsi->regs)) { + err = PTR_ERR(dsi->regs); + goto remove; + } dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node); - if (IS_ERR(dsi->mipi)) - return PTR_ERR(dsi->mipi); + if (IS_ERR(dsi->mipi)) { + err = PTR_ERR(dsi->mipi); + goto remove; + } dsi->host.ops = &tegra_dsi_host_ops; dsi->host.dev = &pdev->dev; @@ -1653,9 +1665,12 @@ return 0; unregister: + pm_runtime_disable(&pdev->dev); mipi_dsi_host_unregister(&dsi->host); mipi_free: tegra_mipi_free(dsi->mipi); +remove: + tegra_output_remove(&dsi->output); return err; } diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/ttm/ttm_pool.c linux-aws-5.15-5.15.0/drivers/gpu/drm/ttm/ttm_pool.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/ttm/ttm_pool.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/ttm/ttm_pool.c @@ -384,7 +384,7 @@ enum ttm_caching caching, pgoff_t start_page, pgoff_t end_page) { - struct page **pages = tt->pages; + struct page **pages = &tt->pages[start_page]; unsigned int order; pgoff_t i, nr; diff -u linux-aws-5.15-5.15.0/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c linux-aws-5.15-5.15.0/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c --- linux-aws-5.15-5.15.0/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c @@ -49,6 +49,7 @@ static void vmw_sys_man_free(struct ttm_resource_manager *man, struct ttm_resource *res) { + ttm_resource_fini(man, res); kfree(res); } diff -u linux-aws-5.15-5.15.0/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h linux-aws-5.15-5.15.0/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h --- linux-aws-5.15-5.15.0/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h +++ linux-aws-5.15-5.15.0/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h @@ -119,10 +119,10 @@ struct hpd_status { union { struct { - u32 human_presence_report : 4; - u32 human_presence_actual : 4; - u32 probablity : 8; u32 object_distance : 16; + u32 probablity : 8; + u32 human_presence_actual : 4; + u32 human_presence_report : 4; } shpd; 
u32 val; }; diff -u linux-aws-5.15-5.15.0/drivers/hid/hid-lenovo.c linux-aws-5.15-5.15.0/drivers/hid/hid-lenovo.c --- linux-aws-5.15-5.15.0/drivers/hid/hid-lenovo.c +++ linux-aws-5.15-5.15.0/drivers/hid/hid-lenovo.c @@ -53,10 +53,10 @@ /* 0: Up * 1: Down (undecided) * 2: Scrolling - * 3: Patched firmware, disable workaround */ u8 middlebutton_state; bool fn_lock; + bool middleclick_workaround_cptkbd; }; #define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) @@ -469,6 +469,36 @@ return count; } +static ssize_t attr_middleclick_workaround_show_cptkbd(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct hid_device *hdev = to_hid_device(dev); + struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); + + return snprintf(buf, PAGE_SIZE, "%u\n", + cptkbd_data->middleclick_workaround_cptkbd); +} + +static ssize_t attr_middleclick_workaround_store_cptkbd(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct hid_device *hdev = to_hid_device(dev); + struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); + int value; + + if (kstrtoint(buf, 10, &value)) + return -EINVAL; + if (value < 0 || value > 1) + return -EINVAL; + + cptkbd_data->middleclick_workaround_cptkbd = !!value; + + return count; +} + static struct device_attribute dev_attr_fn_lock = __ATTR(fn_lock, S_IWUSR | S_IRUGO, @@ -480,10 +510,16 @@ attr_sensitivity_show_cptkbd, attr_sensitivity_store_cptkbd); +static struct device_attribute dev_attr_middleclick_workaround_cptkbd = + __ATTR(middleclick_workaround, S_IWUSR | S_IRUGO, + attr_middleclick_workaround_show_cptkbd, + attr_middleclick_workaround_store_cptkbd); + static struct attribute *lenovo_attributes_cptkbd[] = { &dev_attr_fn_lock.attr, &dev_attr_sensitivity_cptkbd.attr, + &dev_attr_middleclick_workaround_cptkbd.attr, NULL }; @@ -534,23 +570,7 @@ { struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); - if (cptkbd_data->middlebutton_state != 3) { - /* REL_X and REL_Y events during middle button pressed - * are only possible on patched, bug-free firmware - * so set middlebutton_state to 3 - * to never apply workaround anymore - */ - if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD && - cptkbd_data->middlebutton_state == 1 && - usage->type == EV_REL && - (usage->code == REL_X || usage->code == REL_Y)) { - cptkbd_data->middlebutton_state = 3; - /* send middle button press which was hold before */ - input_event(field->hidinput->input, - EV_KEY, BTN_MIDDLE, 1); - input_sync(field->hidinput->input); - } - + if (cptkbd_data->middleclick_workaround_cptkbd) { /* "wheel" scroll events */ if (usage->type == EV_REL && (usage->code == REL_WHEEL || usage->code == REL_HWHEEL)) { @@ -1015,6 +1035,7 @@ cptkbd_data->middlebutton_state = 0; cptkbd_data->fn_lock = true; cptkbd_data->sensitivity = 0x05; + cptkbd_data->middleclick_workaround_cptkbd = true; lenovo_features_set_cptkbd(hdev); ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd); diff -u linux-aws-5.15-5.15.0/drivers/hid/hid-multitouch.c linux-aws-5.15-5.15.0/drivers/hid/hid-multitouch.c --- linux-aws-5.15-5.15.0/drivers/hid/hid-multitouch.c +++ linux-aws-5.15-5.15.0/drivers/hid/hid-multitouch.c @@ -2153,6 +2153,10 @@ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_SYNAPTICS, 0xcddc) }, + + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_SYNAPTICS, 0xce08) }, { .driver_data = 
MT_CLS_WIN_8_FORCE_MULTI_INPUT, diff -u linux-aws-5.15-5.15.0/drivers/hwmon/coretemp.c linux-aws-5.15-5.15.0/drivers/hwmon/coretemp.c --- linux-aws-5.15-5.15.0/drivers/hwmon/coretemp.c +++ linux-aws-5.15-5.15.0/drivers/hwmon/coretemp.c @@ -40,7 +40,7 @@ #define PKG_SYSFS_ATTR_NO 1 /* Sysfs attribute for package temp */ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ -#define NUM_REAL_CORES 128 /* Number of Real cores per cpu */ +#define NUM_REAL_CORES 512 /* Number of Real cores per cpu */ #define CORETEMP_NAME_LENGTH 28 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) diff -u linux-aws-5.15-5.15.0/drivers/i2c/busses/i2c-imx.c linux-aws-5.15-5.15.0/drivers/i2c/busses/i2c-imx.c --- linux-aws-5.15-5.15.0/drivers/i2c/busses/i2c-imx.c +++ linux-aws-5.15-5.15.0/drivers/i2c/busses/i2c-imx.c @@ -37,6 +37,8 @@ #include #include #include +#include +#include #include #include #include @@ -51,6 +53,8 @@ /* This will be the driver name the kernel reports */ #define DRIVER_NAME "imx-i2c" +#define I2C_IMX_CHECK_DELAY 30000 /* Time to check for bus idle, in NS */ + /* * Enable DMA if transfer byte size is bigger than this threshold. * As the hardware request, it must bigger than 4 bytes.\ @@ -210,6 +214,10 @@ struct imx_i2c_dma *dma; struct i2c_client *slave; enum i2c_slave_event last_slave_event; + + /* For checking slave events. */ + spinlock_t slave_lock; + struct hrtimer slave_timer; }; static const struct imx_i2c_hwdata imx1_i2c_hwdata = { @@ -680,7 +688,7 @@ static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx) { - u8 val; + u8 val = 0; while (i2c_imx->last_slave_event != I2C_SLAVE_STOP) { switch (i2c_imx->last_slave_event) { @@ -701,10 +709,11 @@ } } -static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx, - unsigned int status, unsigned int ctl) +/* Returns true if the timer should be restarted, false if not. */ +static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx, + unsigned int status, unsigned int ctl) { - u8 value; + u8 value = 0; if (status & I2SR_IAL) { /* Arbitration lost */ i2c_imx_clear_irq(i2c_imx, I2SR_IAL); @@ -712,6 +721,16 @@ return IRQ_HANDLED; } + if (!(status & I2SR_IBB)) { + /* No master on the bus, that could mean a stop condition. */ + i2c_imx_slave_finish_op(i2c_imx); + return IRQ_HANDLED; + } + + if (!(status & I2SR_ICF)) + /* Data transfer still in progress, ignore this. 
*/ + goto out; + if (status & I2SR_IAAS) { /* Addressed as a slave */ i2c_imx_slave_finish_op(i2c_imx); if (status & I2SR_SRW) { /* Master wants to read from us*/ @@ -737,16 +756,9 @@ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); } } else if (!(ctl & I2CR_MTX)) { /* Receive mode */ - if (status & I2SR_IBB) { /* No STOP signal detected */ - value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); - i2c_imx_slave_event(i2c_imx, - I2C_SLAVE_WRITE_RECEIVED, &value); - } else { /* STOP signal is detected */ - dev_dbg(&i2c_imx->adapter.dev, - "STOP signal detected"); - i2c_imx_slave_event(i2c_imx, - I2C_SLAVE_STOP, &value); - } + value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); + i2c_imx_slave_event(i2c_imx, + I2C_SLAVE_WRITE_RECEIVED, &value); } else if (!(status & I2SR_RXAK)) { /* Transmit mode received ACK */ ctl |= I2CR_MTX; imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR); @@ -755,15 +767,48 @@ I2C_SLAVE_READ_PROCESSED, &value); imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR); - } else { /* Transmit mode received NAK */ + } else { /* Transmit mode received NAK, operation is done */ ctl &= ~I2CR_MTX; imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR); imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); + + /* flag the last byte as processed */ + i2c_imx_slave_event(i2c_imx, + I2C_SLAVE_READ_PROCESSED, &value); + + i2c_imx_slave_finish_op(i2c_imx); + return IRQ_HANDLED; } +out: + /* + * No need to check the return value here. If it returns 0 or + * 1, then everything is fine. If it returns -1, then the + * timer is running in the handler. This will still work, + * though it may be redone (or already have been done) by the + * timer function. + */ + hrtimer_try_to_cancel(&i2c_imx->slave_timer); + hrtimer_forward_now(&i2c_imx->slave_timer, I2C_IMX_CHECK_DELAY); + hrtimer_restart(&i2c_imx->slave_timer); return IRQ_HANDLED; } +static enum hrtimer_restart i2c_imx_slave_timeout(struct hrtimer *t) +{ + struct imx_i2c_struct *i2c_imx = container_of(t, struct imx_i2c_struct, + slave_timer); + unsigned int ctl, status; + unsigned long flags; + + spin_lock_irqsave(&i2c_imx->slave_lock, flags); + status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); + ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); + i2c_imx_slave_handle(i2c_imx, status, ctl); + spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); + return HRTIMER_NORESTART; +} + static void i2c_imx_slave_init(struct imx_i2c_struct *i2c_imx) { int temp; @@ -843,7 +888,9 @@ { struct imx_i2c_struct *i2c_imx = dev_id; unsigned int ctl, status; + unsigned long flags; + spin_lock_irqsave(&i2c_imx->slave_lock, flags); status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); @@ -851,14 +898,20 @@ i2c_imx_clear_irq(i2c_imx, I2SR_IIF); if (i2c_imx->slave) { if (!(ctl & I2CR_MSTA)) { - return i2c_imx_slave_isr(i2c_imx, status, ctl); - } else if (i2c_imx->last_slave_event != - I2C_SLAVE_STOP) { - i2c_imx_slave_finish_op(i2c_imx); + irqreturn_t ret; + + ret = i2c_imx_slave_handle(i2c_imx, + status, ctl); + spin_unlock_irqrestore(&i2c_imx->slave_lock, + flags); + return ret; } + i2c_imx_slave_finish_op(i2c_imx); } + spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); return i2c_imx_master_isr(i2c_imx, status); } + spin_unlock_irqrestore(&i2c_imx->slave_lock, flags); return IRQ_NONE; } @@ -1380,6 +1433,10 @@ if (!i2c_imx) return -ENOMEM; + spin_lock_init(&i2c_imx->slave_lock); + hrtimer_init(&i2c_imx->slave_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + i2c_imx->slave_timer.function = i2c_imx_slave_timeout; + match = device_get_match_data(&pdev->dev); if (match) 
i2c_imx->hwdata = match; @@ -1492,4 +1549,6 @@ ret = pm_runtime_get_sync(&pdev->dev); + hrtimer_cancel(&i2c_imx->slave_timer); + /* remove adapter */ dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); diff -u linux-aws-5.15-5.15.0/drivers/infiniband/core/device.c linux-aws-5.15-5.15.0/drivers/infiniband/core/device.c --- linux-aws-5.15-5.15.0/drivers/infiniband/core/device.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/core/device.c @@ -1729,7 +1729,7 @@ { int ret; - down_write(&clients_rwsem); + lockdep_assert_held(&clients_rwsem); /* * The add/remove callbacks must be called in FIFO/LIFO order. To * achieve this we assign client_ids so they are sorted in @@ -1738,14 +1738,11 @@ client->client_id = highest_client_id; ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL); if (ret) - goto out; + return ret; highest_client_id++; xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED); - -out: - up_write(&clients_rwsem); - return ret; + return 0; } static void remove_client_id(struct ib_client *client) @@ -1775,25 +1772,35 @@ { struct ib_device *device; unsigned long index; + bool need_unreg = false; int ret; refcount_set(&client->uses, 1); init_completion(&client->uses_zero); + + /* + * The devices_rwsem is held in write mode to ensure that a racing + * ib_register_device() sees a consisent view of clients and devices. + */ + down_write(&devices_rwsem); + down_write(&clients_rwsem); ret = assign_client_id(client); if (ret) - return ret; + goto out; - down_read(&devices_rwsem); + need_unreg = true; xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) { ret = add_client_context(device, client); - if (ret) { - up_read(&devices_rwsem); - ib_unregister_client(client); - return ret; - } + if (ret) + goto out; } - up_read(&devices_rwsem); - return 0; + ret = 0; +out: + up_write(&clients_rwsem); + up_write(&devices_rwsem); + if (need_unreg && ret) + ib_unregister_client(client); + return ret; } EXPORT_SYMBOL(ib_register_client); diff -u linux-aws-5.15-5.15.0/drivers/infiniband/hw/bnxt_re/ib_verbs.c linux-aws-5.15-5.15.0/drivers/infiniband/hw/bnxt_re/ib_verbs.c --- linux-aws-5.15-5.15.0/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1705,7 +1705,7 @@ switch (srq_attr_mask) { case IB_SRQ_MAX_WR: /* SRQ resize is not supported */ - break; + return -EINVAL; case IB_SRQ_LIMIT: /* Change the SRQ threshold */ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) @@ -1720,13 +1720,12 @@ /* On success, update the shadow */ srq->srq_limit = srq_attr->srq_limit; /* No need to Build and send response back to udata */ - break; + return 0; default: ibdev_err(&rdev->ibdev, "Unsupported srq_attr_mask 0x%x", srq_attr_mask); return -EINVAL; } - return 0; } int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) diff -u linux-aws-5.15-5.15.0/drivers/infiniband/hw/hfi1/pio.c linux-aws-5.15-5.15.0/drivers/infiniband/hw/hfi1/pio.c --- linux-aws-5.15-5.15.0/drivers/infiniband/hw/hfi1/pio.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/hw/hfi1/pio.c @@ -2089,7 +2089,7 @@ "Unable to allocate credit return DMA range for NUMA %d\n", i); ret = -ENOMEM; - goto done; + goto free_cr_base; } } set_dev_node(&dd->pcidev->dev, dd->node); @@ -2097,6 +2097,10 @@ ret = 0; done: return ret; + +free_cr_base: + free_credit_return(dd); + goto done; } void free_credit_return(struct hfi1_devdata *dd) diff -u linux-aws-5.15-5.15.0/drivers/infiniband/hw/hfi1/sdma.c linux-aws-5.15-5.15.0/drivers/infiniband/hw/hfi1/sdma.c --- 
linux-aws-5.15-5.15.0/drivers/infiniband/hw/hfi1/sdma.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/hw/hfi1/sdma.c @@ -3158,7 +3158,7 @@ { int rval = 0; - if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) { + if ((unlikely(tx->num_desc == tx->desc_limit))) { rval = _extend_sdma_tx_descs(dd, tx); if (rval) { __sdma_txclean(dd, tx); diff -u linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/defs.h linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/defs.h --- linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/defs.h +++ linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/defs.h @@ -345,6 +345,7 @@ #define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b #define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c #define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e +#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f #define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520 #define IRDMA_AE_RESET_SENT 0x0601 #define IRDMA_AE_TERMINATE_SENT 0x0602 diff -u linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/hw.c linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/hw.c --- linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/hw.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/hw.c @@ -379,6 +379,7 @@ case IRDMA_AE_LLP_TOO_MANY_RETRIES: case IRDMA_AE_LCE_QP_CATASTROPHIC: case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC: + case IRDMA_AE_LLP_TOO_MANY_RNRS: case IRDMA_AE_LCE_CQ_CATASTROPHIC: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: default: @@ -562,6 +563,13 @@ dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx); irq_set_affinity_hint(msix_vec->irq, NULL); free_irq(msix_vec->irq, dev_id); + if (rf == dev_id) { + tasklet_kill(&rf->dpc_tasklet); + } else { + struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id; + + tasklet_kill(&iwceq->dpc_tasklet); + } } /** diff -u linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/verbs.c linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/verbs.c --- linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/verbs.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/hw/irdma/verbs.c @@ -749,7 +749,9 @@ if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || - init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags) + init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags || + init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta || + init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta) return -EINVAL; if (rdma_protocol_roce(&iwdev->ibdev, 1)) { @@ -2068,9 +2070,8 @@ info.cq_base_pa = iwcq->kmem.pa; } - if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) - info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, - (u32)IRDMA_MAX_CQ_READ_THRESH); + info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, + (u32)IRDMA_MAX_CQ_READ_THRESH); if (irdma_sc_cq_init(cq, &info)) { ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n"); diff -u linux-aws-5.15-5.15.0/drivers/infiniband/hw/mlx5/devx.c linux-aws-5.15-5.15.0/drivers/infiniband/hw/mlx5/devx.c --- linux-aws-5.15-5.15.0/drivers/infiniband/hw/mlx5/devx.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/hw/mlx5/devx.c @@ -2924,7 +2924,7 @@ MLX5_IB_METHOD_DEVX_OBJ_MODIFY, UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE, UVERBS_IDR_ANY_OBJECT, - UVERBS_ACCESS_WRITE, + UVERBS_ACCESS_READ, UA_MANDATORY), UVERBS_ATTR_PTR_IN( MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN, diff -u linux-aws-5.15-5.15.0/drivers/infiniband/hw/qedr/verbs.c linux-aws-5.15-5.15.0/drivers/infiniband/hw/qedr/verbs.c --- linux-aws-5.15-5.15.0/drivers/infiniband/hw/qedr/verbs.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/hw/qedr/verbs.c @@ -1888,8 
+1888,17 @@ /* RQ - read access only (0) */ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr, ureq.rq_len, true, 0, alloc_and_init); - if (rc) + if (rc) { + ib_umem_release(qp->usq.umem); + qp->usq.umem = NULL; + if (rdma_protocol_roce(&dev->ibdev, 1)) { + qedr_free_pbl(dev, &qp->usq.pbl_info, + qp->usq.pbl_tbl); + } else { + kfree(qp->usq.pbl_tbl); + } return rc; + } } memset(&in_params, 0, sizeof(in_params)); diff -u linux-aws-5.15-5.15.0/drivers/infiniband/sw/siw/siw_cm.c linux-aws-5.15-5.15.0/drivers/infiniband/sw/siw/siw_cm.c --- linux-aws-5.15-5.15.0/drivers/infiniband/sw/siw/siw_cm.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/sw/siw/siw_cm.c @@ -1504,7 +1504,6 @@ cep->cm_id = NULL; id->rem_ref(id); - siw_cep_put(cep); qp->cep = NULL; siw_cep_put(cep); diff -u linux-aws-5.15-5.15.0/drivers/infiniband/sw/siw/siw_verbs.c linux-aws-5.15-5.15.0/drivers/infiniband/sw/siw/siw_verbs.c --- linux-aws-5.15-5.15.0/drivers/infiniband/sw/siw/siw_verbs.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/sw/siw/siw_verbs.c @@ -1492,7 +1492,7 @@ if (pbl->max_buf < num_sle) { siw_dbg_mem(mem, "too many SGE's: %d > %d\n", - mem->pbl->max_buf, num_sle); + num_sle, pbl->max_buf); return -ENOMEM; } for_each_sg(sl, slp, num_sle, i) { diff -u linux-aws-5.15-5.15.0/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c linux-aws-5.15-5.15.0/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c --- linux-aws-5.15-5.15.0/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c @@ -131,7 +131,7 @@ /* distinguish "mi" and "min-latency" with length */ len = strnlen(buf, NAME_MAX); - if (buf[len - 1] == '\n') + if (len && buf[len - 1] == '\n') len--; if (!strncasecmp(buf, "round-robin", 11) || diff -u linux-aws-5.15-5.15.0/drivers/infiniband/ulp/srpt/ib_srpt.c linux-aws-5.15-5.15.0/drivers/infiniband/ulp/srpt/ib_srpt.c --- linux-aws-5.15-5.15.0/drivers/infiniband/ulp/srpt/ib_srpt.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -79,12 +79,16 @@ MODULE_PARM_DESC(srpt_srq_size, "Shared receive queue (SRQ) size."); +static int srpt_set_u64_x(const char *buffer, const struct kernel_param *kp) +{ + return kstrtou64(buffer, 16, (u64 *)kp->arg); +} static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp) { return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg); } -module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid, - 0444); +module_param_call(srpt_service_guid, srpt_set_u64_x, srpt_get_u64_x, + &srpt_service_guid, 0444); MODULE_PARM_DESC(srpt_service_guid, "Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA."); @@ -210,10 +214,12 @@ /** * srpt_qp_event - QP event callback function * @event: Description of the event that occurred. - * @ch: SRPT RDMA channel. + * @ptr: SRPT RDMA channel. 
*/ -static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch) +static void srpt_qp_event(struct ib_event *event, void *ptr) { + struct srpt_rdma_ch *ch = ptr; + pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n", event->event, ch, ch->sess_name, ch->qp->qp_num, get_ch_state_name(ch->state)); @@ -1807,8 +1813,7 @@ ch->cq_size = ch->rq_size + sq_size; qp_init->qp_context = (void *)ch; - qp_init->event_handler - = (void(*)(struct ib_event *, void*))srpt_qp_event; + qp_init->event_handler = srpt_qp_event; qp_init->send_cq = ch->cq; qp_init->recv_cq = ch->cq; qp_init->sq_sig_type = IB_SIGNAL_REQ_WR; @@ -3204,7 +3209,6 @@ INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device, srpt_event_handler); - ib_register_event_handler(&sdev->event_handler); for (i = 1; i <= sdev->device->phys_port_cnt; i++) { sport = &sdev->port[i - 1]; @@ -3227,6 +3231,7 @@ } } + ib_register_event_handler(&sdev->event_handler); spin_lock(&srpt_dev_lock); list_add_tail(&sdev->list, &srpt_dev_list); spin_unlock(&srpt_dev_lock); @@ -3237,7 +3242,6 @@ err_port: srpt_unregister_mad_agent(sdev, i); - ib_unregister_event_handler(&sdev->event_handler); err_cm: if (sdev->cm_id) ib_destroy_cm_id(sdev->cm_id); diff -u linux-aws-5.15-5.15.0/drivers/input/joystick/xpad.c linux-aws-5.15-5.15.0/drivers/input/joystick/xpad.c --- linux-aws-5.15-5.15.0/drivers/input/joystick/xpad.c +++ linux-aws-5.15-5.15.0/drivers/input/joystick/xpad.c @@ -276,6 +276,7 @@ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 }, { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 }, { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 }, + { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 }, { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, @@ -464,6 +465,7 @@ XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ + XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */ XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */ diff -u linux-aws-5.15-5.15.0/drivers/input/serio/i8042-acpipnpio.h linux-aws-5.15-5.15.0/drivers/input/serio/i8042-acpipnpio.h --- linux-aws-5.15-5.15.0/drivers/input/serio/i8042-acpipnpio.h +++ linux-aws-5.15-5.15.0/drivers/input/serio/i8042-acpipnpio.h @@ -626,6 +626,14 @@ .driver_data = (void *)(SERIO_QUIRK_NOAUX) }, { + /* Fujitsu Lifebook U728 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U728"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOAUX) + }, + { /* Gigabyte M912 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), diff -u linux-aws-5.15-5.15.0/drivers/interconnect/core.c linux-aws-5.15-5.15.0/drivers/interconnect/core.c --- linux-aws-5.15-5.15.0/drivers/interconnect/core.c +++ linux-aws-5.15-5.15.0/drivers/interconnect/core.c @@ -30,7 +30,6 @@ static int providers_count; static bool synced_state; static DEFINE_MUTEX(icc_lock); -static DEFINE_MUTEX(icc_bw_lock); static struct dentry *icc_debugfs_dir; static void icc_summary_show_one(struct seq_file *s, struct icc_node *n) @@ -637,7 +636,7 @@ if (WARN_ON(IS_ERR(path) || !path->num_nodes)) return -EINVAL; - mutex_lock(&icc_bw_lock); + 
mutex_lock(&icc_lock); old_avg = path->reqs[0].avg_bw; old_peak = path->reqs[0].peak_bw; @@ -669,7 +668,7 @@ apply_constraints(path); } - mutex_unlock(&icc_bw_lock); + mutex_unlock(&icc_lock); trace_icc_set_bw_end(path, ret); @@ -972,7 +971,6 @@ return; mutex_lock(&icc_lock); - mutex_lock(&icc_bw_lock); node->provider = provider; list_add_tail(&node->node_list, &provider->nodes); @@ -998,7 +996,6 @@ node->avg_bw = 0; node->peak_bw = 0; - mutex_unlock(&icc_bw_lock); mutex_unlock(&icc_lock); } EXPORT_SYMBOL_GPL(icc_node_add); @@ -1126,7 +1123,6 @@ return; mutex_lock(&icc_lock); - mutex_lock(&icc_bw_lock); synced_state = true; list_for_each_entry(p, &icc_providers, provider_list) { dev_dbg(p->dev, "interconnect provider is in synced state\n"); @@ -1139,21 +1135,13 @@ } } } - mutex_unlock(&icc_bw_lock); mutex_unlock(&icc_lock); } EXPORT_SYMBOL_GPL(icc_sync_state); static int __init icc_init(void) { - struct device_node *root; - - /* Teach lockdep about lock ordering wrt. shrinker: */ - fs_reclaim_acquire(GFP_KERNEL); - might_lock(&icc_bw_lock); - fs_reclaim_release(GFP_KERNEL); - - root = of_find_node_by_path("/"); + struct device_node *root = of_find_node_by_path("/"); providers_count = of_count_icc_providers(root); of_node_put(root); diff -u linux-aws-5.15-5.15.0/drivers/iommu/amd/init.c linux-aws-5.15-5.15.0/drivers/iommu/amd/init.c --- linux-aws-5.15-5.15.0/drivers/iommu/amd/init.c +++ linux-aws-5.15-5.15.0/drivers/iommu/amd/init.c @@ -1821,6 +1821,9 @@ /* Prevent binding other PCI device drivers to IOMMU devices */ iommu->dev->match_driver = false; + /* ACPI _PRT won't have an IRQ for IOMMU */ + iommu->dev->irq_managed = 1; + pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, &iommu->cap); diff -u linux-aws-5.15-5.15.0/drivers/iommu/intel/pasid.c linux-aws-5.15-5.15.0/drivers/iommu/intel/pasid.c --- linux-aws-5.15-5.15.0/drivers/iommu/intel/pasid.c +++ linux-aws-5.15-5.15.0/drivers/iommu/intel/pasid.c @@ -445,6 +445,9 @@ if (!info || !info->ats_enabled) return; + if (pci_dev_is_disconnected(to_pci_dev(dev))) + return; + sid = info->bus << 8 | info->devfn; qdep = info->ats_qdep; pfsid = info->pfsid; diff -u linux-aws-5.15-5.15.0/drivers/md/dm-crypt.c linux-aws-5.15-5.15.0/drivers/md/dm-crypt.c --- linux-aws-5.15-5.15.0/drivers/md/dm-crypt.c +++ linux-aws-5.15-5.15.0/drivers/md/dm-crypt.c @@ -49,11 +49,11 @@ struct convert_context { struct completion restart; struct bio *bio_in; - struct bio *bio_out; struct bvec_iter iter_in; + struct bio *bio_out; struct bvec_iter iter_out; - u64 cc_sector; atomic_t cc_pending; + u64 cc_sector; union { struct skcipher_request *req; struct aead_request *req_aead; @@ -2065,6 +2065,12 @@ io->ctx.bio_out = clone; io->ctx.iter_out = clone->bi_iter; + if (crypt_integrity_aead(cc)) { + bio_copy_data(clone, io->base_bio); + io->ctx.bio_in = clone; + io->ctx.iter_in = clone->bi_iter; + } + sector += bio_sectors(clone); crypt_inc_pending(io); diff -u linux-aws-5.15-5.15.0/drivers/md/dm-raid.c linux-aws-5.15-5.15.0/drivers/md/dm-raid.c --- linux-aws-5.15-5.15.0/drivers/md/dm-raid.c +++ linux-aws-5.15-5.15.0/drivers/md/dm-raid.c @@ -3329,14 +3329,14 @@ struct mddev *mddev = &rs->md; /* - * If we're reshaping to add disk(s)), ti->len and + * If we're reshaping to add disk(s), ti->len and * mddev->array_sectors will differ during the process * (ti->len > mddev->array_sectors), so we have to requeue * bios with addresses > mddev->array_sectors here or * there will occur accesses past EOD of the component * data images thus erroring the raid set. 
*/ - if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) + if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors)) return DM_MAPIO_REQUEUE; md_handle_request(mddev, bio); diff -u linux-aws-5.15-5.15.0/drivers/md/dm-verity.h linux-aws-5.15-5.15.0/drivers/md/dm-verity.h --- linux-aws-5.15-5.15.0/drivers/md/dm-verity.h +++ linux-aws-5.15-5.15.0/drivers/md/dm-verity.h @@ -74,11 +74,11 @@ /* original value of bio->bi_end_io */ bio_end_io_t *orig_bi_end_io; + struct bvec_iter iter; + sector_t block; unsigned n_blocks; - struct bvec_iter iter; - struct work_struct work; /* diff -u linux-aws-5.15-5.15.0/drivers/md/dm.c linux-aws-5.15-5.15.0/drivers/md/dm.c --- linux-aws-5.15-5.15.0/drivers/md/dm.c +++ linux-aws-5.15-5.15.0/drivers/md/dm.c @@ -2641,6 +2641,9 @@ static void __dm_internal_resume(struct mapped_device *md) { + int r; + struct dm_table *map; + BUG_ON(!md->internal_suspend_count); if (--md->internal_suspend_count) @@ -2649,12 +2652,23 @@ if (dm_suspended_md(md)) goto done; /* resume from nested suspend */ - /* - * NOTE: existing callers don't need to call dm_table_resume_targets - * (which may fail -- so best to avoid it for now by passing NULL map) - */ - (void) __dm_resume(md, NULL); - + map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); + r = __dm_resume(md, map); + if (r) { + /* + * If a preresume method of some target failed, we are in a + * tricky situation. We can't return an error to the caller. We + * can't fake success because then the "resume" and + * "postsuspend" methods would not be paired correctly, and it + * would break various targets, for example it would cause list + * corruption in the "origin" target. + * + * So, we fake normal suspend here, to make sure that the + * "resume" and "postsuspend" methods will be paired correctly. + */ + DMERR("Preresume method failed: %d", r); + set_bit(DMF_SUSPENDED, &md->flags); + } done: clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); smp_mb__after_atomic(); diff -u linux-aws-5.15-5.15.0/drivers/md/md.c linux-aws-5.15-5.15.0/drivers/md/md.c --- linux-aws-5.15-5.15.0/drivers/md/md.c +++ linux-aws-5.15-5.15.0/drivers/md/md.c @@ -4919,11 +4919,21 @@ return -EINVAL; err = mddev_lock(mddev); if (!err) { - if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { err = -EBUSY; - else { + } else if (mddev->reshape_position == MaxSector || + mddev->pers->check_reshape == NULL || + mddev->pers->check_reshape(mddev)) { clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); err = mddev->pers->start_reshape(mddev); + } else { + /* + * If reshape is still in progress, and + * md_check_recovery() can continue to reshape, + * don't restart reshape because data can be + * corrupted for raid456. + */ + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); } mddev_unlock(mddev); } @@ -6220,7 +6230,15 @@ mddev->persistent = 0; mddev->level = LEVEL_NONE; mddev->clevel[0] = 0; - mddev->flags = 0; + /* + * Don't clear MD_CLOSING, or mddev can be opened again. + * 'hold_active != 0' means mddev is still in the creation + * process and will be used later. 
+ */ + if (mddev->hold_active) + mddev->flags = 0; + else + mddev->flags &= BIT_ULL_MASK(MD_CLOSING); mddev->sb_flags = 0; mddev->ro = MD_RDWR; mddev->metadata_type[0] = 0; @@ -7540,7 +7558,6 @@ int err = 0; void __user *argp = (void __user *)arg; struct mddev *mddev = NULL; - bool did_set_md_closing = false; if (!md_ioctl_valid(cmd)) return -ENOTTY; @@ -7627,7 +7644,6 @@ err = -EBUSY; goto out; } - did_set_md_closing = true; mutex_unlock(&mddev->open_mutex); sync_blockdev(bdev); } @@ -7790,7 +7806,7 @@ mddev->hold_active = 0; mddev_unlock(mddev); out: - if(did_set_md_closing) + if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY)) clear_bit(MD_CLOSING, &mddev->flags); return err; } diff -u linux-aws-5.15-5.15.0/drivers/md/raid10.c linux-aws-5.15-5.15.0/drivers/md/raid10.c --- linux-aws-5.15-5.15.0/drivers/md/raid10.c +++ linux-aws-5.15-5.15.0/drivers/md/raid10.c @@ -903,6 +903,7 @@ else submit_bio_noacct(bio); bio = next; + cond_resched(); } blk_finish_plug(&plug); } else @@ -1116,6 +1117,7 @@ else submit_bio_noacct(bio); bio = next; + cond_resched(); } kfree(plug); } diff -u linux-aws-5.15-5.15.0/drivers/media/dvb-core/dvbdev.c linux-aws-5.15-5.15.0/drivers/media/dvb-core/dvbdev.c --- linux-aws-5.15-5.15.0/drivers/media/dvb-core/dvbdev.c +++ linux-aws-5.15-5.15.0/drivers/media/dvb-core/dvbdev.c @@ -504,6 +504,7 @@ dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL); if (!dvbdevfops) { kfree(dvbdev); + *pdvbdev = NULL; mutex_unlock(&dvbdev_register_lock); return -ENOMEM; } @@ -512,6 +513,7 @@ if (!new_node) { kfree(dvbdevfops); kfree(dvbdev); + *pdvbdev = NULL; mutex_unlock(&dvbdev_register_lock); return -ENOMEM; } @@ -545,6 +547,7 @@ } list_del (&dvbdev->list_head); kfree(dvbdev); + *pdvbdev = NULL; up_write(&minor_rwsem); mutex_unlock(&dvbdev_register_lock); return -EINVAL; @@ -567,6 +570,7 @@ dvb_media_device_free(dvbdev); list_del (&dvbdev->list_head); kfree(dvbdev); + *pdvbdev = NULL; mutex_unlock(&dvbdev_register_lock); return ret; } @@ -585,6 +589,7 @@ dvb_media_device_free(dvbdev); list_del (&dvbdev->list_head); kfree(dvbdev); + *pdvbdev = NULL; mutex_unlock(&dvbdev_register_lock); return PTR_ERR(clsdev); } diff -u linux-aws-5.15-5.15.0/drivers/media/dvb-frontends/stv0367.c linux-aws-5.15-5.15.0/drivers/media/dvb-frontends/stv0367.c --- linux-aws-5.15-5.15.0/drivers/media/dvb-frontends/stv0367.c +++ linux-aws-5.15-5.15.0/drivers/media/dvb-frontends/stv0367.c @@ -118,50 +118,32 @@ } }; -static -int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len) +static noinline_for_stack +int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) { - u8 buf[MAX_XFER_SIZE]; + u8 buf[3] = { MSB(reg), LSB(reg), data }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, - .len = len + 2 + .len = 3, }; int ret; - if (2 + len > sizeof(buf)) { - printk(KERN_WARNING - "%s: i2c wr reg=%04x: len=%d is too big!\n", - KBUILD_MODNAME, reg, len); - return -EINVAL; - } - - - buf[0] = MSB(reg); - buf[1] = LSB(reg); - memcpy(buf + 2, data, len); - if (i2cdebug) printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__, - state->config->demod_address, reg, buf[2]); + state->config->demod_address, reg, data); ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n", - __func__, state->config->demod_address, reg, buf[2]); + __func__, state->config->demod_address, reg, data); return (ret != 1) ? 
-EREMOTEIO : 0; } -static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data) -{ - u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */ - - return stv0367_writeregs(state, reg, &tmp, 1); -} - -static u8 stv0367_readreg(struct stv0367_state *state, u16 reg) +static noinline_for_stack +u8 stv0367_readreg(struct stv0367_state *state, u16 reg) { u8 b0[] = { 0, 0 }; u8 b1[] = { 0 }; diff -u linux-aws-5.15-5.15.0/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c linux-aws-5.15-5.15.0/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c --- linux-aws-5.15-5.15.0/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c +++ linux-aws-5.15-5.15.0/drivers/media/platform/mtk-vcodec/mtk_vcodec_fw_vpu.c @@ -29,15 +29,7 @@ mtk_vcodec_ipi_handler handler, const char *name, void *priv) { - /* - * The handler we receive takes a void * as its first argument. We - * cannot change this because it needs to be passed down to the rproc - * subsystem when SCP is used. VPU takes a const argument, which is - * more constrained, so the conversion below is safe. - */ - ipi_handler_t handler_const = (ipi_handler_t)handler; - - return vpu_ipi_register(fw->pdev, id, handler_const, name, priv); + return vpu_ipi_register(fw->pdev, id, handler, name, priv); } static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf, diff -u linux-aws-5.15-5.15.0/drivers/media/platform/mtk-vpu/mtk_vpu.c linux-aws-5.15-5.15.0/drivers/media/platform/mtk-vpu/mtk_vpu.c --- linux-aws-5.15-5.15.0/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ linux-aws-5.15-5.15.0/drivers/media/platform/mtk-vpu/mtk_vpu.c @@ -635,7 +635,7 @@ } EXPORT_SYMBOL_GPL(vpu_load_firmware); -static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv) +static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv) { struct mtk_vpu *vpu = priv; const struct vpu_run *run = data; diff -u linux-aws-5.15-5.15.0/drivers/media/usb/em28xx/em28xx-cards.c linux-aws-5.15-5.15.0/drivers/media/usb/em28xx/em28xx-cards.c --- linux-aws-5.15-5.15.0/drivers/media/usb/em28xx/em28xx-cards.c +++ linux-aws-5.15-5.15.0/drivers/media/usb/em28xx/em28xx-cards.c @@ -4099,6 +4099,10 @@ * topology will likely change after the load of the em28xx subdrivers. */ #ifdef CONFIG_MEDIA_CONTROLLER + /* + * No need to check the return value, the device will still be + * usable without media controller API. + */ retval = media_device_register(dev->media_dev); #endif diff -u linux-aws-5.15-5.15.0/drivers/media/usb/pvrusb2/pvrusb2-context.c linux-aws-5.15-5.15.0/drivers/media/usb/pvrusb2/pvrusb2-context.c --- linux-aws-5.15-5.15.0/drivers/media/usb/pvrusb2/pvrusb2-context.c +++ linux-aws-5.15-5.15.0/drivers/media/usb/pvrusb2/pvrusb2-context.c @@ -90,8 +90,10 @@ } -static void pvr2_context_notify(struct pvr2_context *mp) +static void pvr2_context_notify(void *ptr) { + struct pvr2_context *mp = ptr; + pvr2_context_set_notify(mp,!0); } @@ -106,9 +108,7 @@ pvr2_trace(PVR2_TRACE_CTXT, "pvr2_context %p (initialize)", mp); /* Finish hardware initialization */ - if (pvr2_hdw_initialize(mp->hdw, - (void (*)(void *))pvr2_context_notify, - mp)) { + if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) { mp->video_stream.stream = pvr2_hdw_get_video_stream(mp->hdw); /* Trigger interface initialization. 
By doing this @@ -267,9 +267,9 @@ void pvr2_context_disconnect(struct pvr2_context *mp) { pvr2_hdw_disconnect(mp->hdw); - mp->disconnect_flag = !0; if (!pvr2_context_shutok()) pvr2_context_notify(mp); + mp->disconnect_flag = !0; } diff -u linux-aws-5.15-5.15.0/drivers/media/v4l2-core/v4l2-mem2mem.c linux-aws-5.15-5.15.0/drivers/media/v4l2-core/v4l2-mem2mem.c --- linux-aws-5.15-5.15.0/drivers/media/v4l2-core/v4l2-mem2mem.c +++ linux-aws-5.15-5.15.0/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -1062,11 +1062,17 @@ entity->function = function; ret = media_entity_pads_init(entity, num_pads, pads); - if (ret) + if (ret) { + kfree(entity->name); + entity->name = NULL; return ret; + } ret = media_device_register_entity(mdev, entity); - if (ret) + if (ret) { + kfree(entity->name); + entity->name = NULL; return ret; + } return 0; } diff -u linux-aws-5.15-5.15.0/drivers/mfd/altera-sysmgr.c linux-aws-5.15-5.15.0/drivers/mfd/altera-sysmgr.c --- linux-aws-5.15-5.15.0/drivers/mfd/altera-sysmgr.c +++ linux-aws-5.15-5.15.0/drivers/mfd/altera-sysmgr.c @@ -110,7 +110,9 @@ dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver, (void *)sysmgr_np); - of_node_put(sysmgr_np); + if (property) + of_node_put(sysmgr_np); + if (!dev) return ERR_PTR(-EPROBE_DEFER); diff -u linux-aws-5.15-5.15.0/drivers/mfd/syscon.c linux-aws-5.15-5.15.0/drivers/mfd/syscon.c --- linux-aws-5.15-5.15.0/drivers/mfd/syscon.c +++ linux-aws-5.15-5.15.0/drivers/mfd/syscon.c @@ -224,7 +224,9 @@ return ERR_PTR(-ENODEV); regmap = syscon_node_to_regmap(syscon_np); - of_node_put(syscon_np); + + if (property) + of_node_put(syscon_np); return regmap; } diff -u linux-aws-5.15-5.15.0/drivers/mmc/core/mmc.c linux-aws-5.15-5.15.0/drivers/mmc/core/mmc.c --- linux-aws-5.15-5.15.0/drivers/mmc/core/mmc.c +++ linux-aws-5.15-5.15.0/drivers/mmc/core/mmc.c @@ -1000,10 +1000,12 @@ static unsigned ext_csd_bits[] = { EXT_CSD_BUS_WIDTH_8, EXT_CSD_BUS_WIDTH_4, + EXT_CSD_BUS_WIDTH_1, }; static unsigned bus_widths[] = { MMC_BUS_WIDTH_8, MMC_BUS_WIDTH_4, + MMC_BUS_WIDTH_1, }; struct mmc_host *host = card->host; unsigned idx, bus_width = 0; diff -u linux-aws-5.15-5.15.0/drivers/mmc/host/jz4740_mmc.c linux-aws-5.15-5.15.0/drivers/mmc/host/jz4740_mmc.c --- linux-aws-5.15-5.15.0/drivers/mmc/host/jz4740_mmc.c +++ linux-aws-5.15-5.15.0/drivers/mmc/host/jz4740_mmc.c @@ -1133,18 +1133,18 @@ return 0; } -static int __maybe_unused jz4740_mmc_suspend(struct device *dev) +static int jz4740_mmc_suspend(struct device *dev) { return pinctrl_pm_select_sleep_state(dev); } -static int __maybe_unused jz4740_mmc_resume(struct device *dev) +static int jz4740_mmc_resume(struct device *dev) { return pinctrl_select_default_state(dev); } -static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, - jz4740_mmc_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, + jz4740_mmc_resume); static struct platform_driver jz4740_mmc_driver = { .probe = jz4740_mmc_probe, @@ -1153,7 +1153,7 @@ .name = "jz4740-mmc", .probe_type = PROBE_PREFER_ASYNCHRONOUS, .of_match_table = of_match_ptr(jz4740_mmc_of_match), - .pm = pm_ptr(&jz4740_mmc_pm_ops), + .pm = pm_sleep_ptr(&jz4740_mmc_pm_ops), }, }; diff -u linux-aws-5.15-5.15.0/drivers/mmc/host/mmci_stm32_sdmmc.c linux-aws-5.15-5.15.0/drivers/mmc/host/mmci_stm32_sdmmc.c --- linux-aws-5.15-5.15.0/drivers/mmc/host/mmci_stm32_sdmmc.c +++ linux-aws-5.15-5.15.0/drivers/mmc/host/mmci_stm32_sdmmc.c @@ -43,6 +43,9 @@ struct sdmmc_idma { dma_addr_t sg_dma; void *sg_cpu; + dma_addr_t bounce_dma_addr; + void *bounce_buf; + bool 
use_bounce_buffer; }; struct sdmmc_dlyb { @@ -54,6 +57,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host, struct mmc_data *data) { + struct sdmmc_idma *idma = host->dma_priv; + struct device *dev = mmc_dev(host->mmc); struct scatterlist *sg; int i; @@ -61,41 +66,69 @@ * idma has constraints on idmabase & idmasize for each element * excepted the last element which has no constraint on idmasize */ + idma->use_bounce_buffer = false; for_each_sg(data->sg, sg, data->sg_len - 1, i) { if (!IS_ALIGNED(sg->offset, sizeof(u32)) || !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) { - dev_err(mmc_dev(host->mmc), + dev_dbg(mmc_dev(host->mmc), "unaligned scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); - return -EINVAL; + goto use_bounce_buffer; } } if (!IS_ALIGNED(sg->offset, sizeof(u32))) { - dev_err(mmc_dev(host->mmc), + dev_dbg(mmc_dev(host->mmc), "unaligned last scatterlist: ofst:%x length:%d\n", data->sg->offset, data->sg->length); - return -EINVAL; + goto use_bounce_buffer; + } + + return 0; + +use_bounce_buffer: + if (!idma->bounce_buf) { + idma->bounce_buf = dmam_alloc_coherent(dev, + host->mmc->max_req_size, + &idma->bounce_dma_addr, + GFP_KERNEL); + if (!idma->bounce_buf) { + dev_err(dev, "Unable to map allocate DMA bounce buffer.\n"); + return -ENOMEM; + } } + idma->use_bounce_buffer = true; + return 0; } static int _sdmmc_idma_prep_data(struct mmci_host *host, struct mmc_data *data) { - int n_elem; + struct sdmmc_idma *idma = host->dma_priv; - n_elem = dma_map_sg(mmc_dev(host->mmc), - data->sg, - data->sg_len, - mmc_get_dma_dir(data)); + if (idma->use_bounce_buffer) { + if (data->flags & MMC_DATA_WRITE) { + unsigned int xfer_bytes = data->blksz * data->blocks; + + sg_copy_to_buffer(data->sg, data->sg_len, + idma->bounce_buf, xfer_bytes); + dma_wmb(); + } + } else { + int n_elem; - if (!n_elem) { - dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n"); - return -EINVAL; - } + n_elem = dma_map_sg(mmc_dev(host->mmc), + data->sg, + data->sg_len, + mmc_get_dma_dir(data)); + if (!n_elem) { + dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n"); + return -EINVAL; + } + } return 0; } @@ -112,8 +145,19 @@ static void sdmmc_idma_unprep_data(struct mmci_host *host, struct mmc_data *data, int err) { - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, - mmc_get_dma_dir(data)); + struct sdmmc_idma *idma = host->dma_priv; + + if (idma->use_bounce_buffer) { + if (data->flags & MMC_DATA_READ) { + unsigned int xfer_bytes = data->blksz * data->blocks; + + sg_copy_from_buffer(data->sg, data->sg_len, + idma->bounce_buf, xfer_bytes); + } + } else { + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + } } static int sdmmc_idma_setup(struct mmci_host *host) @@ -137,6 +181,8 @@ host->mmc->max_segs = SDMMC_LLI_BUF_LEN / sizeof(struct sdmmc_lli_desc); host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask; + + host->mmc->max_req_size = SZ_1M; } else { host->mmc->max_segs = 1; host->mmc->max_seg_size = host->mmc->max_req_size; @@ -154,8 +200,18 @@ struct scatterlist *sg; int i; - if (!host->variant->dma_lli || data->sg_len == 1) { - writel_relaxed(sg_dma_address(data->sg), + host->dma_in_progress = true; + + if (!host->variant->dma_lli || data->sg_len == 1 || + idma->use_bounce_buffer) { + u32 dma_addr; + + if (idma->use_bounce_buffer) + dma_addr = idma->bounce_dma_addr; + else + dma_addr = sg_dma_address(data->sg); + + writel_relaxed(dma_addr, host->base + MMCI_STM32_IDMABASE0R); writel_relaxed(MMCI_STM32_IDMAEN, host->base + 
MMCI_STM32_IDMACTRLR); @@ -184,9 +240,30 @@ return 0; } +static void sdmmc_idma_error(struct mmci_host *host) +{ + struct mmc_data *data = host->data; + struct sdmmc_idma *idma = host->dma_priv; + + if (!dma_inprogress(host)) + return; + + writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR); + host->dma_in_progress = false; + data->host_cookie = 0; + + if (!idma->use_bounce_buffer) + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); +} + static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data) { + if (!dma_inprogress(host)) + return; + writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR); + host->dma_in_progress = false; if (!data->host_cookie) sdmmc_idma_unprep_data(host, data, 0); @@ -512,6 +589,7 @@ .dma_setup = sdmmc_idma_setup, .dma_start = sdmmc_idma_start, .dma_finalize = sdmmc_idma_finalize, + .dma_error = sdmmc_idma_error, .set_clkreg = mmci_sdmmc_set_clkreg, .set_pwrreg = mmci_sdmmc_set_pwrreg, .busy_complete = sdmmc_busy_complete, diff -u linux-aws-5.15-5.15.0/drivers/mmc/host/mxcmmc.c linux-aws-5.15-5.15.0/drivers/mmc/host/mxcmmc.c --- linux-aws-5.15-5.15.0/drivers/mmc/host/mxcmmc.c +++ linux-aws-5.15-5.15.0/drivers/mmc/host/mxcmmc.c @@ -1185,7 +1185,6 @@ return 0; } -#ifdef CONFIG_PM_SLEEP static int mxcmci_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); @@ -1212,9 +1211,8 @@ return ret; } -#endif -static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); static struct platform_driver mxcmci_driver = { .probe = mxcmci_probe, @@ -1222,7 +1220,7 @@ .driver = { .name = DRIVER_NAME, .probe_type = PROBE_PREFER_ASYNCHRONOUS, - .pm = &mxcmci_pm_ops, + .pm = pm_sleep_ptr(&mxcmci_pm_ops), .of_match_table = mxcmci_of_match, } }; diff -u linux-aws-5.15-5.15.0/drivers/mmc/host/wmt-sdmmc.c linux-aws-5.15-5.15.0/drivers/mmc/host/wmt-sdmmc.c --- linux-aws-5.15-5.15.0/drivers/mmc/host/wmt-sdmmc.c +++ linux-aws-5.15-5.15.0/drivers/mmc/host/wmt-sdmmc.c @@ -889,7 +889,6 @@ { struct mmc_host *mmc; struct wmt_mci_priv *priv; - struct resource *res; u32 reg_tmp; mmc = platform_get_drvdata(pdev); @@ -917,9 +916,6 @@ clk_disable_unprepare(priv->clk_sdmmc); clk_put(priv->clk_sdmmc); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - mmc_free_host(mmc); dev_info(&pdev->dev, "WMT MCI device removed\n"); diff -u linux-aws-5.15-5.15.0/drivers/mtd/maps/physmap-core.c linux-aws-5.15-5.15.0/drivers/mtd/maps/physmap-core.c --- linux-aws-5.15-5.15.0/drivers/mtd/maps/physmap-core.c +++ linux-aws-5.15-5.15.0/drivers/mtd/maps/physmap-core.c @@ -528,7 +528,7 @@ if (!info->maps[i].phys) info->maps[i].phys = res->start; - info->win_order = get_bitmask_order(resource_size(res)) - 1; + info->win_order = fls64(resource_size(res)) - 1; info->maps[i].size = BIT(info->win_order + (info->gpios ? 
info->gpios->ndescs : 0)); diff -u linux-aws-5.15-5.15.0/drivers/mtd/nand/spi/gigadevice.c linux-aws-5.15-5.15.0/drivers/mtd/nand/spi/gigadevice.c --- linux-aws-5.15-5.15.0/drivers/mtd/nand/spi/gigadevice.c +++ linux-aws-5.15-5.15.0/drivers/mtd/nand/spi/gigadevice.c @@ -178,7 +178,7 @@ { u8 status2; struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, - &status2); + spinand->scratchbuf); int ret; switch (status & STATUS_ECC_MASK) { @@ -199,6 +199,7 @@ * report the maximum of 4 in this case */ /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */ + status2 = *(spinand->scratchbuf); return ((status & STATUS_ECC_MASK) >> 2) | ((status2 & STATUS_ECC_MASK) >> 4); @@ -220,7 +221,7 @@ { u8 status2; struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2, - &status2); + spinand->scratchbuf); int ret; switch (status & STATUS_ECC_MASK) { @@ -240,6 +241,7 @@ * 1 ... 4 bits are flipped (and corrected) */ /* bits sorted this way (1...0): ECCSE1, ECCSE0 */ + status2 = *(spinand->scratchbuf); return ((status2 & STATUS_ECC_MASK) >> 4) + 1; case STATUS_ECC_UNCOR_ERROR: diff -u linux-aws-5.15-5.15.0/drivers/net/dsa/mt7530.c linux-aws-5.15-5.15.0/drivers/net/dsa/mt7530.c --- linux-aws-5.15-5.15.0/drivers/net/dsa/mt7530.c +++ linux-aws-5.15-5.15.0/drivers/net/dsa/mt7530.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -993,11 +994,55 @@ mutex_unlock(&priv->reg_mutex); } +/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std + * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA + * must only be propagated to C-VLAN and MAC Bridge components. That means + * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports, + * these frames are supposed to be processed by the CPU (software). So we make + * the switch only forward them to the CPU port. And if received from a CPU + * port, forward to a single port. The software is responsible of making the + * switch conform to the latter by setting a single port as destination port on + * the special tag. + * + * This switch intellectual property cannot conform to this part of the standard + * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC + * DAs, it also includes :22-FF which the scope of propagation is not supposed + * to be restricted for these MAC DAs. + */ static void mt753x_trap_frames(struct mt7530_priv *priv) { - /* Trap BPDUs to the CPU port(s) */ - mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, + /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them + * VLAN-untagged. + */ + mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK | + MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK | + MT753X_BPDU_PORT_FW_MASK, + MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) | + MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_BPDU_CPU_ONLY); + + /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress + * them VLAN-untagged. + */ + mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK | + MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK | + MT753X_R01_PORT_FW_MASK, + MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) | + MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_BPDU_CPU_ONLY); + + /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress + * them VLAN-untagged.
+ */ + mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK | + MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK | + MT753X_R03_PORT_FW_MASK, + MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | + MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) | + MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) | MT753X_BPDU_CPU_ONLY); } @@ -2154,11 +2199,11 @@ */ if (priv->mcm) { reset_control_assert(priv->rstc); - usleep_range(1000, 1100); + usleep_range(5000, 5100); reset_control_deassert(priv->rstc); } else { gpiod_set_value_cansleep(priv->reset, 0); - usleep_range(1000, 1100); + usleep_range(5000, 5100); gpiod_set_value_cansleep(priv->reset, 1); } @@ -2368,11 +2413,11 @@ */ if (priv->mcm) { reset_control_assert(priv->rstc); - usleep_range(1000, 1100); + usleep_range(5000, 5100); reset_control_deassert(priv->rstc); } else { gpiod_set_value_cansleep(priv->reset, 0); - usleep_range(1000, 1100); + usleep_range(5000, 5100); gpiod_set_value_cansleep(priv->reset, 1); } diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/amazon/ena/ena_netdev.c linux-aws-5.15-5.15.0/drivers/net/ethernet/amazon/ena/ena_netdev.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3193,22 +3193,6 @@ return NETDEV_TX_OK; } -static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev) -{ - u16 qid; - /* we suspect that this is good for in--kernel network services that - * want to loop incoming skb rx to tx in normal user generated traffic, - * most probably we will not get to this - */ - if (skb_rx_queue_recorded(skb)) - qid = skb_get_rx_queue(skb); - else - qid = netdev_pick_tx(dev, skb, NULL); - - return qid; -} - static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { struct device *dev = &pdev->dev; @@ -3378,7 +3362,6 @@ .ndo_open = ena_open, .ndo_stop = ena_close, .ndo_start_xmit = ena_start_xmit, - .ndo_select_queue = ena_select_queue, .ndo_get_stats64 = ena_get_stats64, .ndo_tx_timeout = ena_tx_timeout, .ndo_change_mtu = ena_change_mtu, diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c linux-aws-5.15-5.15.0/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2803,7 +2803,10 @@ int ret; hdev->support_sfp_query = true; - hdev->hw.mac.duplex = HCLGE_MAC_FULL; + + if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, hdev->hw.mac.duplex); if (ret) diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/i40e/i40e_main.c linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/i40e/i40e_main.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/i40e/i40e_main.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -13558,9 +13558,9 @@ return err; i40e_queue_pair_disable_irq(vsi, queue_pair); + i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); - i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/ice/ice_main.c linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/ice/ice_main.c --- 
linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/ice/ice_main.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/ice/ice_main.c @@ -7008,6 +7008,8 @@ pf_sw = pf->first_sw; /* find the attribute in the netlink message */ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/igb/igb_main.c linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/igb/igb_main.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/igb/igb_main.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/igb/igb_main.c @@ -6756,77 +6756,75 @@ } } +static void igb_perout(struct igb_adapter *adapter, int tsintr_tt) +{ + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt); + struct e1000_hw *hw = &adapter->hw; + struct timespec64 ts; + u32 tsauxc; + + if (pin < 0 || pin >= IGB_N_PEROUT) + return; + + spin_lock(&adapter->tmreg_lock); + ts = timespec64_add(adapter->perout[pin].start, + adapter->perout[pin].period); + /* u32 conversion of tv_sec is safe until y2106 */ + wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec); + wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec); + tsauxc = rd32(E1000_TSAUXC); + tsauxc |= TSAUXC_EN_TT0; + wr32(E1000_TSAUXC, tsauxc); + adapter->perout[pin].start = ts; + spin_unlock(&adapter->tmreg_lock); +} + +static void igb_extts(struct igb_adapter *adapter, int tsintr_tt) +{ + int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt); + struct e1000_hw *hw = &adapter->hw; + struct ptp_clock_event event; + u32 sec, nsec; + + if (pin < 0 || pin >= IGB_N_EXTTS) + return; + + nsec = rd32((tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0); + sec = rd32((tsintr_tt == 1) ? 
E1000_AUXSTMPH1 : E1000_AUXSTMPH0); + event.type = PTP_CLOCK_EXTTS; + event.index = tsintr_tt; + event.timestamp = sec * 1000000000ULL + nsec; + ptp_clock_event(adapter->ptp_clock, &event); +} + static void igb_tsync_interrupt(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + u32 tsicr = rd32(E1000_TSICR); struct ptp_clock_event event; - struct timespec64 ts; - u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR); if (tsicr & TSINTR_SYS_WRAP) { event.type = PTP_CLOCK_PPS; if (adapter->ptp_caps.pps) ptp_clock_event(adapter->ptp_clock, &event); - ack |= TSINTR_SYS_WRAP; } if (tsicr & E1000_TSICR_TXTS) { /* retrieve hardware timestamp */ schedule_work(&adapter->ptp_tx_work); - ack |= E1000_TSICR_TXTS; } - if (tsicr & TSINTR_TT0) { - spin_lock(&adapter->tmreg_lock); - ts = timespec64_add(adapter->perout[0].start, - adapter->perout[0].period); - /* u32 conversion of tv_sec is safe until y2106 */ - wr32(E1000_TRGTTIML0, ts.tv_nsec); - wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec); - tsauxc = rd32(E1000_TSAUXC); - tsauxc |= TSAUXC_EN_TT0; - wr32(E1000_TSAUXC, tsauxc); - adapter->perout[0].start = ts; - spin_unlock(&adapter->tmreg_lock); - ack |= TSINTR_TT0; - } + if (tsicr & TSINTR_TT0) + igb_perout(adapter, 0); - if (tsicr & TSINTR_TT1) { - spin_lock(&adapter->tmreg_lock); - ts = timespec64_add(adapter->perout[1].start, - adapter->perout[1].period); - wr32(E1000_TRGTTIML1, ts.tv_nsec); - wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec); - tsauxc = rd32(E1000_TSAUXC); - tsauxc |= TSAUXC_EN_TT1; - wr32(E1000_TSAUXC, tsauxc); - adapter->perout[1].start = ts; - spin_unlock(&adapter->tmreg_lock); - ack |= TSINTR_TT1; - } + if (tsicr & TSINTR_TT1) + igb_perout(adapter, 1); - if (tsicr & TSINTR_AUTT0) { - nsec = rd32(E1000_AUXSTMPL0); - sec = rd32(E1000_AUXSTMPH0); - event.type = PTP_CLOCK_EXTTS; - event.index = 0; - event.timestamp = sec * 1000000000ULL + nsec; - ptp_clock_event(adapter->ptp_clock, &event); - ack |= TSINTR_AUTT0; - } - - if (tsicr & TSINTR_AUTT1) { - nsec = rd32(E1000_AUXSTMPL1); - sec = rd32(E1000_AUXSTMPH1); - event.type = PTP_CLOCK_EXTTS; - event.index = 1; - event.timestamp = sec * 1000000000ULL + nsec; - ptp_clock_event(adapter->ptp_clock, &event); - ack |= TSINTR_AUTT1; - } + if (tsicr & TSINTR_AUTT0) + igb_extts(adapter, 0); - /* acknowledge the interrupts */ - wr32(E1000_TSICR, ack); + if (tsicr & TSINTR_AUTT1) + igb_extts(adapter, 1); } static irqreturn_t igb_msix_other(int irq, void *data) diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/igb/igb_ptp.c linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/igb/igb_ptp.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/igb/igb_ptp.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -826,7 +826,7 @@ igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); /* adjust timestamp for the TX latency based on link speed */ - if (adapter->hw.mac.type == e1000_i210) { + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { switch (adapter->link_speed) { case SPEED_10: adjust = IGB_I210_TX_LATENCY_10; @@ -872,6 +872,7 @@ ktime_t *timestamp) { struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; struct skb_shared_hwtstamps ts; __le64 *regval = (__le64 *)va; int adjust = 0; @@ -891,7 +892,7 @@ igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1])); /* adjust timestamp for the RX latency based on link speed */ - if (adapter->hw.mac.type == e1000_i210) { + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { switch (adapter->link_speed) { case 
SPEED_10: adjust = IGB_I210_RX_LATENCY_10; diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2941,8 +2941,8 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) { - u32 mask; struct ixgbe_hw *hw = &adapter->hw; + u32 mask; switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -10405,6 +10405,44 @@ } /** + * ixgbe_irq_disable_single - Disable single IRQ vector + * @adapter: adapter structure + * @ring: ring index + **/ +static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 qmask = BIT_ULL(ring); + u32 mask; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + mask = qmask & IXGBE_EIMC_RTX_QUEUE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + break; + default: + break; + } + IXGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + synchronize_irq(adapter->msix_entries[ring].vector); + else + synchronize_irq(adapter->pdev->irq); +} + +/** * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings * @adapter: adapter structure * @ring: ring index @@ -10420,6 +10458,11 @@ tx_ring = adapter->tx_ring[ring]; xdp_ring = adapter->xdp_ring[ring]; + ixgbe_irq_disable_single(adapter, ring); + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + ixgbe_disable_txr(adapter, tx_ring); if (xdp_ring) ixgbe_disable_txr(adapter, xdp_ring); @@ -10428,9 +10471,6 @@ if (xdp_ring) synchronize_rcu(); - /* Rx/Tx/XDP Tx share the same napi context. */ - napi_disable(&rx_ring->q_vector->napi); - ixgbe_clean_tx_ring(tx_ring); if (xdp_ring) ixgbe_clean_tx_ring(xdp_ring); @@ -10458,9 +10498,6 @@ tx_ring = adapter->tx_ring[ring]; xdp_ring = adapter->xdp_ring[ring]; - /* Rx/Tx/XDP Tx share the same napi context. */ - napi_enable(&rx_ring->q_vector->napi); - ixgbe_configure_tx_ring(adapter, tx_ring); if (xdp_ring) ixgbe_configure_tx_ring(adapter, xdp_ring); @@ -10469,6 +10506,11 @@ clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); if (xdp_ring) clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); + + /* Rx/Tx/XDP Tx share the same napi context. 
*/ + napi_enable(&rx_ring->q_vector->napi); + ixgbe_irq_enable_queues(adapter, BIT_ULL(ring)); + IXGBE_WRITE_FLUSH(&adapter->hw); } /** diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/cgx.c linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/cgx.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -1205,7 +1205,7 @@ /* Release thread waiting for completion */ lmac->cmd_pend = false; - wake_up_interruptible(&lmac->wq_cmd_cmplt); + wake_up(&lmac->wq_cmd_cmplt); break; case CGX_EVT_ASYNC: if (cgx_event_is_linkevent(event)) diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/rvu.c linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/rvu.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2435,10 +2435,9 @@ } } -static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq) { struct rvu *rvu = (struct rvu *)rvu_irq; - int vfs = rvu->vfs; u64 intr; intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); @@ -2452,6 +2451,18 @@ rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); + return IRQ_HANDLED; +} + +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + int vfs = rvu->vfs; + u64 intr; + + /* Sync with mbox memory region */ + rmb(); + /* Handle VF interrupts */ if (vfs > 64) { intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); @@ -2779,7 +2790,7 @@ /* Register mailbox interrupt handler */ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox"); ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX), - rvu_mbox_intr_handler, 0, + rvu_mbox_pf_intr_handler, 0, &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu); if (ret) { dev_err(rvu->dev, diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -436,6 +436,10 @@ return; } + /* AF modifies given action iff PF/VF has requested for it */ + if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT) + return; + /* copy VF default entry action to the VF mcam entry */ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr, target_func); diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/mediatek/mtk_eth_soc.c linux-aws-5.15-5.15.0/drivers/net/ethernet/mediatek/mtk_eth_soc.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -223,8 +223,8 @@ struct mtk_mac *mac = container_of(config, struct mtk_mac, phylink_config); struct mtk_eth *eth = mac->hw; - u32 mcr_cur, mcr_new, sid, i; int val, ge_mode, err = 0; + u32 sid, i; /* MT76x8 has no hardware settings between for the MAC */ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && @@ -359,17 +359,6 @@ return; } - /* Setup gmac */ - mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr_new = mcr_cur; - mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | - MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK | - MAC_MCR_RX_FIFO_CLR_DIS; - - /* Only update control register when needed! 
*/ - if (mcr_new != mcr_cur) - mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); - return; err_phy: @@ -382,6 +371,26 @@ mac->id, phy_modes(state->interface), err); } +static int mtk_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr_cur, mcr_new; + + /* Setup gmac */ + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr_new = mcr_cur; + mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS; + + /* Only update control register when needed! */ + if (mcr_new != mcr_cur) + mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); + + return 0; +} + static void mtk_mac_pcs_get_state(struct phylink_config *config, struct phylink_link_state *state) { @@ -429,7 +438,7 @@ phylink_config); u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); - mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN); + mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK); mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } @@ -467,7 +476,7 @@ if (rx_pause) mcr |= MAC_MCR_FORCE_RX_FC; - mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN; + mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK; mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } @@ -562,6 +571,7 @@ .mac_pcs_get_state = mtk_mac_pcs_get_state, .mac_an_restart = mtk_mac_an_restart, .mac_config = mtk_mac_config, + .mac_finish = mtk_mac_finish, .mac_link_down = mtk_mac_link_down, .mac_link_up = mtk_mac_link_up, }; diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/mediatek/mtk_ppe.c linux-aws-5.15-5.15.0/drivers/net/ethernet/mediatek/mtk_ppe.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/mediatek/mtk_ppe.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -425,7 +425,7 @@ MTK_PPE_KEEPALIVE_DISABLE) | FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) | FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE, - MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) | + MTK_PPE_SCAN_MODE_CHECK_AGE) | FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, MTK_PPE_ENTRIES_SHIFT); ppe_w32(ppe, MTK_PPE_TB_CFG, val); @@ -495,15 +495,19 @@ - /* disable offload engine */ - ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN); - ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0); - /* disable aging */ val = MTK_PPE_TB_CFG_AGE_NON_L4 | MTK_PPE_TB_CFG_AGE_UNBIND | MTK_PPE_TB_CFG_AGE_TCP | MTK_PPE_TB_CFG_AGE_UDP | - MTK_PPE_TB_CFG_AGE_TCP_FIN; + MTK_PPE_TB_CFG_AGE_TCP_FIN | + MTK_PPE_TB_CFG_SCAN_MODE; ppe_clear(ppe, MTK_PPE_TB_CFG, val); - return mtk_ppe_wait_busy(ppe); + if (mtk_ppe_wait_busy(ppe)) + return -ETIMEDOUT; + + /* disable offload engine */ + ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN); + ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0); + + return 0; } diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/netronome/nfp/flower/conntrack.c linux-aws-5.15-5.15.0/drivers/net/ethernet/netronome/nfp/flower/conntrack.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -1004,7 +1004,7 @@ /* Checks that the chain_index of the filter matches the * chain_index of the GOTO action. 
*/ - if (post_ct_entry->chain_index != pre_ct_entry->chain_index) + if (post_ct_entry->chain_index != pre_ct_entry->goto_chain_index) return -EINVAL; err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry); @@ -1450,7 +1450,8 @@ if (IS_ERR(ct_entry)) return PTR_ERR(ct_entry); ct_entry->type = CT_TYPE_PRE_CT; - ct_entry->chain_index = ct_goto->chain_index; + ct_entry->chain_index = flow->common.chain_index; + ct_entry->goto_chain_index = ct_goto->chain_index; list_add(&ct_entry->list_node, &zt->pre_ct_list); zt->pre_ct_count++; @@ -1470,9 +1471,30 @@ { struct flow_rule *rule = flow_cls_offload_flow_rule(flow); struct nfp_fl_ct_flow_entry *ct_entry; + struct flow_action_entry *ct_goto; struct nfp_fl_ct_zone_entry *zt; + struct flow_action_entry *act; bool wildcarded = false; struct flow_match_ct ct; + int i; + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_REDIRECT_INGRESS: + case FLOW_ACTION_MIRRED: + case FLOW_ACTION_MIRRED_INGRESS: + if (act->dev->rtnl_link_ops && + !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: out port is openvswitch internal port"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + } flow_rule_match_ct(rule, &ct); if (!ct.mask->ct_zone) { @@ -1497,6 +1519,8 @@ ct_entry->type = CT_TYPE_POST_CT; ct_entry->chain_index = flow->common.chain_index; + ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO); + ct_entry->goto_chain_index = ct_goto ? ct_goto->chain_index : 0; list_add(&ct_entry->list_node, &zt->post_ct_list); zt->post_ct_count++; diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/realtek/r8169_main.c linux-aws-5.15-5.15.0/drivers/net/ethernet/realtek/r8169_main.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/realtek/r8169_main.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/realtek/r8169_main.c @@ -4949,8 +4949,6 @@ rtl8169_down(tp); } -#ifdef CONFIG_PM - static int rtl8169_runtime_resume(struct device *dev) { struct rtl8169_private *tp = dev_get_drvdata(dev); @@ -4966,7 +4964,7 @@ return 0; } -static int __maybe_unused rtl8169_suspend(struct device *device) +static int rtl8169_suspend(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); @@ -4979,7 +4977,7 @@ return 0; } -static int __maybe_unused rtl8169_resume(struct device *device) +static int rtl8169_resume(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); @@ -5021,13 +5019,11 @@ } static const struct dev_pm_ops rtl8169_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) - SET_RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, - rtl8169_runtime_idle) + SYSTEM_SLEEP_PM_OPS(rtl8169_suspend, rtl8169_resume) + RUNTIME_PM_OPS(rtl8169_runtime_suspend, rtl8169_runtime_resume, + rtl8169_runtime_idle) }; -#endif /* CONFIG_PM */ - static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) { /* WoL fails with 8168b when the receiver is disabled. 
*/ diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c linux-aws-5.15-5.15.0/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3825,8 +3825,10 @@ { set_bit(__FPE_REMOVING, &priv->fpe_task_state); - if (priv->fpe_wq) + if (priv->fpe_wq) { destroy_workqueue(priv->fpe_wq); + priv->fpe_wq = NULL; + } netdev_info(priv->dev, "FPE workqueue stop"); } @@ -5737,11 +5739,6 @@ struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -5757,11 +5754,6 @@ struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -5781,11 +5773,6 @@ priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]); - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -5824,11 +5811,6 @@ priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]); - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; diff -u linux-aws-5.15-5.15.0/drivers/net/ethernet/ti/am65-cpsw-nuss.c linux-aws-5.15-5.15.0/drivers/net/ethernet/ti/am65-cpsw-nuss.c --- linux-aws-5.15-5.15.0/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1856,13 +1856,14 @@ if (ret < 0) { dev_err(dev, "%pOF error reading port_id %d\n", port_np, ret); - return ret; + goto of_node_put; } if (!port_id || port_id > common->port_num) { dev_err(dev, "%pOF has invalid port_id %u %s\n", port_np, port_id, port_np->name); - return -EINVAL; + ret = -EINVAL; + goto of_node_put; } port = am65_common_get_port(common, port_id); @@ -1878,8 +1879,10 @@ (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1)); port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base); - if (IS_ERR(port->slave.mac_sl)) - return PTR_ERR(port->slave.mac_sl); + if (IS_ERR(port->slave.mac_sl)) { + ret = PTR_ERR(port->slave.mac_sl); + goto of_node_put; + } port->disabled = !of_device_is_available(port_np); if (port->disabled) { @@ -1892,7 +1895,7 @@ ret = PTR_ERR(port->slave.ifphy); dev_err(dev, "%pOF error retrieving port phy: %d\n", port_np, ret); - return ret; + goto of_node_put; } port->slave.mac_only = @@ -1901,10 +1904,12 @@ /* get phy/link info */ if (of_phy_is_fixed_link(port_np)) { ret = of_phy_register_fixed_link(port_np); - if (ret) - return dev_err_probe(dev, ret, + if (ret) { + ret = dev_err_probe(dev, ret, "failed to register fixed-link phy %pOF\n", port_np); + goto of_node_put; + } port->slave.phy_node = of_node_get(port_np); } else { port->slave.phy_node = @@ -1914,14 +1919,15 @@ if (!port->slave.phy_node) { dev_err(dev, "slave[%d] no phy found\n", port_id); - return -ENODEV; + ret = -ENODEV; + goto of_node_put; } ret = of_get_phy_mode(port_np, &port->slave.phy_if); if (ret) 
{ dev_err(dev, "%pOF read phy-mode err %d\n", port_np, ret); - return ret; + goto of_node_put; } ret = of_get_mac_address(port_np, port->slave.mac_addr); @@ -1944,6 +1950,11 @@ } return 0; + +of_node_put: + of_node_put(port_np); + of_node_put(node); + return ret; } static void am65_cpsw_pcpu_stats_free(void *data) diff -u linux-aws-5.15-5.15.0/drivers/net/geneve.c linux-aws-5.15-5.15.0/drivers/net/geneve.c --- linux-aws-5.15-5.15.0/drivers/net/geneve.c +++ linux-aws-5.15-5.15.0/drivers/net/geneve.c @@ -219,7 +219,7 @@ struct genevehdr *gnvh = geneve_hdr(skb); struct metadata_dst *tun_dst = NULL; unsigned int len; - int err = 0; + int nh, err = 0; void *oiph; if (ip_tunnel_collect_metadata() || gs->collect_md) { @@ -263,9 +263,23 @@ goto drop; } - oiph = skb_network_header(skb); + /* Save offset of outer header relative to skb->head, + * because we are going to reset the network header to the inner header + * and might change skb->head. + */ + nh = skb_network_header(skb) - skb->head; + skb_reset_network_header(skb); + if (!pskb_inet_may_pull(skb)) { + DEV_STATS_INC(geneve->dev, rx_length_errors); + DEV_STATS_INC(geneve->dev, rx_errors); + goto drop; + } + + /* Get the outer header. */ + oiph = skb->head + nh; + if (geneve_get_sk_family(gs) == AF_INET) err = IP_ECN_decapsulate(oiph, skb); #if IS_ENABLED(CONFIG_IPV6) diff -u linux-aws-5.15-5.15.0/drivers/net/gtp.c linux-aws-5.15-5.15.0/drivers/net/gtp.c --- linux-aws-5.15-5.15.0/drivers/net/gtp.c +++ linux-aws-5.15-5.15.0/drivers/net/gtp.c @@ -1422,26 +1422,26 @@ get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval)); - err = rtnl_link_register(&gtp_link_ops); + err = register_pernet_subsys(&gtp_net_ops); if (err < 0) goto error_out; - err = genl_register_family(&gtp_genl_family); + err = rtnl_link_register(&gtp_link_ops); if (err < 0) - goto unreg_rtnl_link; + goto unreg_pernet_subsys; - err = register_pernet_subsys(&gtp_net_ops); + err = genl_register_family(&gtp_genl_family); if (err < 0) - goto unreg_genl_family; + goto unreg_rtnl_link; pr_info("GTP module loaded (pdp ctx size %zd bytes)\n", sizeof(struct pdp_ctx)); return 0; -unreg_genl_family: - genl_unregister_family(&gtp_genl_family); unreg_rtnl_link: rtnl_link_unregister(&gtp_link_ops); +unreg_pernet_subsys: + unregister_pernet_subsys(&gtp_net_ops); error_out: pr_err("error loading GTP module loaded\n"); return err; } diff -u linux-aws-5.15-5.15.0/drivers/net/hyperv/netvsc_drv.c linux-aws-5.15-5.15.0/drivers/net/hyperv/netvsc_drv.c --- linux-aws-5.15-5.15.0/drivers/net/hyperv/netvsc_drv.c +++ linux-aws-5.15-5.15.0/drivers/net/hyperv/netvsc_drv.c @@ -42,6 +42,10 @@ #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) +/* Macros to define the context of vf registration */ +#define VF_REG_IN_PROBE 1 +#define VF_REG_IN_NOTIFIER 2 + static unsigned int ring_size __ro_after_init = 128; module_param(ring_size, uint, 0444); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)"); @@ -2204,7 +2208,7 @@ } static int netvsc_vf_join(struct net_device *vf_netdev, - struct net_device *ndev) + struct net_device *ndev, int context) { struct net_device_context *ndev_ctx = netdev_priv(ndev); int ret; @@ -2227,7 +2231,11 @@ goto upper_link_failed; } - schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); + /* If this registration is called from probe context vf_takeover + * is taken care of later in probe itself.
+ */ + if (context == VF_REG_IN_NOTIFIER) + schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); @@ -2365,7 +2373,7 @@ return NOTIFY_DONE; } -static int netvsc_register_vf(struct net_device *vf_netdev) +static int netvsc_register_vf(struct net_device *vf_netdev, int context) { struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; @@ -2405,7 +2413,7 @@ netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); - if (netvsc_vf_join(vf_netdev, ndev) != 0) + if (netvsc_vf_join(vf_netdev, ndev, context) != 0) return NOTIFY_DONE; dev_hold(vf_netdev); @@ -2503,10 +2511,31 @@ return NOTIFY_OK; } +static int check_dev_is_matching_vf(struct net_device *event_ndev) +{ + /* Skip NetVSC interfaces */ + if (event_ndev->netdev_ops == &device_ops) + return -ENODEV; + + /* Avoid non-Ethernet type devices */ + if (event_ndev->type != ARPHRD_ETHER) + return -ENODEV; + + /* Avoid Vlan dev with same MAC registering as VF */ + if (is_vlan_dev(event_ndev)) + return -ENODEV; + + /* Avoid Bonding master dev with same MAC registering as VF */ + if (netif_is_bond_master(event_ndev)) + return -ENODEV; + + return 0; +} + static int netvsc_probe(struct hv_device *dev, const struct hv_vmbus_device_id *dev_id) { - struct net_device *net = NULL; + struct net_device *net = NULL, *vf_netdev; struct net_device_context *net_device_ctx; struct netvsc_device_info *device_info = NULL; struct netvsc_device *nvdev; @@ -2614,6 +2643,30 @@ } list_add(&net_device_ctx->list, &netvsc_dev_list); + + /* When the hv_netvsc driver is unloaded and reloaded, the + * NET_DEVICE_REGISTER for the vf device is replayed before probe + * is complete. This is because register_netdevice_notifier() gets + * registered before vmbus_driver_register() so that callback func + * is set before probe and we don't miss events like NETDEV_POST_INIT + * So, in this section we try to register the matching vf device that + * is present as a netdevice, knowing that its register call is not + * processed in the netvsc_netdev_notifier(as probing is progress and + * get_netvsc_byslot fails). 
+ */ + for_each_netdev(dev_net(net), vf_netdev) { + ret = check_dev_is_matching_vf(vf_netdev); + if (ret != 0) + continue; + + if (net != get_netvsc_byslot(vf_netdev)) + continue; + + netvsc_prepare_bonding(vf_netdev); + netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE); + __netvsc_vf_setup(net, vf_netdev); + break; + } rtnl_unlock(); netvsc_devinfo_put(device_info); @@ -2770,29 +2823,17 @@ unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + int ret = 0; - /* Skip our own events */ - if (event_dev->netdev_ops == &device_ops) - return NOTIFY_DONE; - - /* Avoid non-Ethernet type devices */ - if (event_dev->type != ARPHRD_ETHER) - return NOTIFY_DONE; - - /* Avoid Vlan dev with same MAC registering as VF */ - if (is_vlan_dev(event_dev)) - return NOTIFY_DONE; - - /* Avoid Bonding master dev with same MAC registering as VF */ - if ((event_dev->priv_flags & IFF_BONDING) && - (event_dev->flags & IFF_MASTER)) + ret = check_dev_is_matching_vf(event_dev); + if (ret != 0) return NOTIFY_DONE; switch (event) { case NETDEV_POST_INIT: return netvsc_prepare_bonding(event_dev); case NETDEV_REGISTER: - return netvsc_register_vf(event_dev); + return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER); case NETDEV_UNREGISTER: return netvsc_unregister_vf(event_dev); case NETDEV_UP: diff -u linux-aws-5.15-5.15.0/drivers/net/phy/dp83822.c linux-aws-5.15-5.15.0/drivers/net/phy/dp83822.c --- linux-aws-5.15-5.15.0/drivers/net/phy/dp83822.c +++ linux-aws-5.15-5.15.0/drivers/net/phy/dp83822.c @@ -94,7 +94,8 @@ #define DP83822_WOL_INDICATION_SEL BIT(8) #define DP83822_WOL_CLR_INDICATION BIT(11) -/* RSCR bits */ +/* RCSR bits */ +#define DP83822_RGMII_MODE_EN BIT(9) #define DP83822_RX_CLK_SHIFT BIT(12) #define DP83822_TX_CLK_SHIFT BIT(11) @@ -379,7 +380,7 @@ { struct dp83822_private *dp83822 = phydev->priv; struct device *dev = &phydev->mdio.dev; - int rgmii_delay; + int rgmii_delay = 0; s32 rx_int_delay; s32 tx_int_delay; int err = 0; @@ -389,24 +390,33 @@ rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0, true); - if (rx_int_delay <= 0) - rgmii_delay = 0; - else - rgmii_delay = DP83822_RX_CLK_SHIFT; + /* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */ + if (rx_int_delay > 0) + rgmii_delay |= DP83822_RX_CLK_SHIFT; tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0, false); + + /* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */ if (tx_int_delay <= 0) - rgmii_delay &= ~DP83822_TX_CLK_SHIFT; - else rgmii_delay |= DP83822_TX_CLK_SHIFT; - if (rgmii_delay) { - err = phy_set_bits_mmd(phydev, DP83822_DEVADDR, - MII_DP83822_RCSR, rgmii_delay); - if (err) - return err; - } + err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR, + DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay); + if (err) + return err; + + err = phy_set_bits_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); + + if (err) + return err; + } else { + err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_RCSR, DP83822_RGMII_MODE_EN); + + if (err) + return err; } if (dp83822->fx_enabled) { diff -u linux-aws-5.15-5.15.0/drivers/net/phy/phy_device.c linux-aws-5.15-5.15.0/drivers/net/phy/phy_device.c --- linux-aws-5.15-5.15.0/drivers/net/phy/phy_device.c +++ linux-aws-5.15-5.15.0/drivers/net/phy/phy_device.c @@ -2645,8 +2645,8 @@ int genphy_loopback(struct phy_device *phydev, bool enable) { if (enable) { - u16 val, ctl = BMCR_LOOPBACK; - int ret; + u16 ctl = BMCR_LOOPBACK; + int ret, val; if (phydev->speed == SPEED_1000) ctl |= 
BMCR_SPEED1000; @@ -2904,7 +2904,7 @@ if (delay < 0) return delay; - if (delay && size == 0) + if (size == 0) return delay; if (delay < delay_values[0] || delay > delay_values[size - 1]) { diff -u linux-aws-5.15-5.15.0/drivers/net/tun.c linux-aws-5.15-5.15.0/drivers/net/tun.c --- linux-aws-5.15-5.15.0/drivers/net/tun.c +++ linux-aws-5.15-5.15.0/drivers/net/tun.c @@ -654,6 +654,7 @@ tun->tfiles[tun->numqueues - 1]); ntfile = rtnl_dereference(tun->tfiles[index]); ntfile->queue_index = index; + ntfile->xdp_rxq.queue_index = index; rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], NULL); diff -u linux-aws-5.15-5.15.0/drivers/net/usb/dm9601.c linux-aws-5.15-5.15.0/drivers/net/usb/dm9601.c --- linux-aws-5.15-5.15.0/drivers/net/usb/dm9601.c +++ linux-aws-5.15-5.15.0/drivers/net/usb/dm9601.c @@ -232,7 +232,7 @@ err = dm_read_shared_word(dev, 1, loc, &res); if (err < 0) { netdev_err(dev->net, "MDIO read error: %d\n", err); - return err; + return 0; } netdev_dbg(dev->net, diff -u linux-aws-5.15-5.15.0/drivers/net/usb/lan78xx.c linux-aws-5.15-5.15.0/drivers/net/usb/lan78xx.c --- linux-aws-5.15-5.15.0/drivers/net/usb/lan78xx.c +++ linux-aws-5.15-5.15.0/drivers/net/usb/lan78xx.c @@ -2862,7 +2862,8 @@ if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; - if (dev->chipid == ID_REV_CHIP_ID_7800_) { + if (dev->chipid == ID_REV_CHIP_ID_7800_ || + dev->chipid == ID_REV_CHIP_ID_7850_) { ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); if (!ret && sig != EEPROM_INDICATOR) { /* Implies there is no external eeprom. Set mac speed */ @@ -2959,7 +2960,8 @@ done: mutex_unlock(&dev->dev_mutex); - usb_autopm_put_interface(dev->intf); + if (ret < 0) + usb_autopm_put_interface(dev->intf); return ret; } diff -u linux-aws-5.15-5.15.0/drivers/net/veth.c linux-aws-5.15-5.15.0/drivers/net/veth.c --- linux-aws-5.15-5.15.0/drivers/net/veth.c +++ linux-aws-5.15-5.15.0/drivers/net/veth.c @@ -1079,14 +1079,6 @@ veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true); return err; } - - if (!veth_gro_requested(dev)) { - /* user-space did not require GRO, but adding XDP - * is supposed to get GRO working - */ - dev->features |= NETIF_F_GRO; - netdev_features_change(dev); - } } } @@ -1106,18 +1098,9 @@ for (i = 0; i < dev->real_num_rx_queues; i++) rcu_assign_pointer(priv->rq[i].xdp_prog, NULL); - if (!netif_running(dev) || !veth_gro_requested(dev)) { + if (!netif_running(dev) || !veth_gro_requested(dev)) veth_napi_del(dev); - /* if user-space did not require GRO, since adding XDP - * enabled it, clear it now - */ - if (!veth_gro_requested(dev) && netif_running(dev)) { - dev->features &= ~NETIF_F_GRO; - netdev_features_change(dev); - } - } - veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false); } @@ -1320,7 +1303,8 @@ struct veth_priv *priv = netdev_priv(dev); int i; - priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL); + priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq), + GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); if (!priv->rq) return -ENOMEM; @@ -1336,7 +1320,7 @@ { struct veth_priv *priv = netdev_priv(dev); - kfree(priv->rq); + kvfree(priv->rq); } static int veth_dev_init(struct net_device *dev) @@ -1404,8 +1388,6 @@ if (peer_priv->_xdp_prog) features &= ~NETIF_F_GSO_SOFTWARE; } - if (priv->_xdp_prog) - features |= NETIF_F_GRO; return features; } diff -u linux-aws-5.15-5.15.0/drivers/net/wireguard/receive.c linux-aws-5.15-5.15.0/drivers/net/wireguard/receive.c --- linux-aws-5.15-5.15.0/drivers/net/wireguard/receive.c +++ 
linux-aws-5.15-5.15.0/drivers/net/wireguard/receive.c @@ -258,7 +258,7 @@ if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || - keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { + READ_ONCE(keypair->receiving_counter.counter) >= REJECT_AFTER_MESSAGES)) { WRITE_ONCE(keypair->receiving.is_valid, false); return false; } @@ -325,7 +325,7 @@ for (i = 1; i <= top; ++i) counter->backtrack[(i + index_current) & ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; - counter->counter = their_counter; + WRITE_ONCE(counter->counter, their_counter); } index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; @@ -470,7 +470,7 @@ net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", peer->device->dev->name, PACKET_CB(skb)->nonce, - keypair->receiving_counter.counter); + READ_ONCE(keypair->receiving_counter.counter)); goto next; } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath10k/core.c linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath10k/core.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath10k/core.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath10k/core.c @@ -3544,13 +3544,10 @@ void ath10k_core_destroy(struct ath10k *ar) { - flush_workqueue(ar->workqueue); destroy_workqueue(ar->workqueue); - flush_workqueue(ar->workqueue_aux); destroy_workqueue(ar->workqueue_aux); - flush_workqueue(ar->workqueue_tx_complete); destroy_workqueue(ar->workqueue_tx_complete); ath10k_debug_destroy(ar); diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath10k/sdio.c linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath10k/sdio.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath10k/sdio.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath10k/sdio.c @@ -2650,7 +2650,6 @@ ath10k_core_destroy(ar); - flush_workqueue(ar_sdio->workqueue); destroy_workqueue(ar_sdio->workqueue); } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath11k/mac.c linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath11k/mac.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath11k/mac.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath11k/mac.c @@ -6322,7 +6322,7 @@ } if (supported_bands & WMI_HOST_WLAN_5G_CAP) { - if (reg_cap->high_5ghz_chan >= ATH11K_MAX_6G_FREQ) { + if (reg_cap->high_5ghz_chan >= ATH11K_MIN_6G_FREQ) { channels = kmemdup(ath11k_6ghz_channels, sizeof(ath11k_6ghz_channels), GFP_KERNEL); if (!channels) { diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc.h linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc.h --- linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc.h +++ linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc.h @@ -306,7 +306,6 @@ DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM); struct timer_list cleanup_timer; spinlock_t tx_lock; - bool initialized; }; struct ath9k_htc_tx_ctl { @@ -515,6 +514,7 @@ unsigned long ps_usecount; bool ps_enabled; bool ps_idle; + bool initialized; #ifdef CONFIG_MAC80211_LEDS enum led_brightness brightness; diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc_drv_init.c linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc_drv_init.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -966,6 +966,10 @@ htc_handle->drv_priv = priv; + /* Allow ath9k_wmi_event_tasklet() to operate. 
*/ + smp_wmb(); + priv->initialized = true; + return 0; err_init: diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -815,10 +815,6 @@ skb_queue_head_init(&priv->tx.data_vo_queue); skb_queue_head_init(&priv->tx.tx_failed); - /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */ - smp_wmb(); - priv->tx.initialized = true; - return 0; } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/wmi.c linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/wmi.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/wmi.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath9k/wmi.c @@ -155,6 +155,12 @@ } spin_unlock_irqrestore(&wmi->wmi_lock, flags); + /* Check if ath9k_htc_probe_device() completed. */ + if (!data_race(priv->initialized)) { + kfree_skb(skb); + continue; + } + hdr = (struct wmi_cmd_hdr *) skb->data; cmd_id = be16_to_cpu(hdr->command_id); wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr)); @@ -169,10 +175,6 @@ &wmi->drv_priv->fatal_work); break; case WMI_TXSTATUS_EVENTID: - /* Check if ath9k_tx_init() completed. */ - if (!data_race(priv->tx.initialized)) - break; - spin_lock_bh(&priv->tx.tx_lock); if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) { spin_unlock_bh(&priv->tx.tx_lock); diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/b43/b43.h linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/b43/b43.h --- linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/b43/b43.h +++ linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/b43/b43.h @@ -1082,6 +1082,22 @@ return dev->__using_pio_transfers; } +static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio) +{ + if (dev->qos_enabled) + ieee80211_wake_queue(dev->wl->hw, queue_prio); + else + ieee80211_wake_queue(dev->wl->hw, 0); +} + +static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio) +{ + if (dev->qos_enabled) + ieee80211_stop_queue(dev->wl->hw, queue_prio); + else + ieee80211_stop_queue(dev->wl->hw, 0); +} + /* Message printing */ __printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...); __printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...); diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -790,8 +790,7 @@ scan_request = cfg->scan_request; cfg->scan_request = NULL; - if (timer_pending(&cfg->escan_timeout)) - del_timer_sync(&cfg->escan_timeout); + timer_delete_sync(&cfg->escan_timeout); if (fw_abort) { /* Do a scan abort to stop the driver's scan engine */ @@ -7781,6 +7780,7 @@ brcmf_btcoex_detach(cfg); wiphy_unregister(cfg->wiphy); wl_deinit_priv(cfg); + cancel_work_sync(&cfg->escan_timeout_work); brcmf_free_wiphy(cfg->wiphy); kfree(cfg); } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlegacy/3945-mac.c linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlegacy/3945-mac.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3827,7 +3827,6 @@ il3945_unset_hw_params(il); /*netif_stop_queue(dev); */ - 
flush_workqueue(il->workqueue); /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes * il->workqueue... so we can't take down the workqueue diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlegacy/4965-mac.c linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlegacy/4965-mac.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -6739,7 +6739,6 @@ il_eeprom_free(il); /*netif_stop_queue(dev); */ - flush_workqueue(il->workqueue); /* ieee80211_unregister_hw calls il_mac_stop, which flushes * il->workqueue... so we can't take down the workqueue diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/fw/acpi.c linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/fw/acpi.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -674,7 +674,7 @@ * from index 1, so the maximum value allowed here is * ACPI_SAR_PROFILES_NUM - 1. */ - if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) { + if (n_profiles >= ACPI_SAR_PROFILE_NUM) { ret = -EINVAL; goto out_free; } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -99,6 +99,12 @@ if (le32_to_cpu(tlv->length) != sizeof(*debug_info)) return -EINVAL; + /* we use this as a string, ensure input was NUL terminated */ + if (strnlen(debug_info->debug_cfg_name, + sizeof(debug_info->debug_cfg_name)) == + sizeof(debug_info->debug_cfg_name)) + return -EINVAL; + IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n", debug_info->debug_cfg_name); diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/d3.c linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/d3.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -462,12 +462,10 @@ struct wowlan_key_rsc_v5_data data = {}; int i; - data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL); + data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL); if (!data.rsc) return -ENOMEM; - memset(data.rsc, 0xff, sizeof(*data.rsc)); - for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++) data.rsc->mcast_key_id_map[i] = IWL_MCAST_KEY_MAP_INVALID; diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -305,6 +305,7 @@ u32 status, struct ieee80211_rx_status *stats) { + struct wireless_dev *wdev; struct iwl_mvm_sta *mvmsta; struct iwl_mvm_vif *mvmvif; u8 keyid; @@ -326,9 +327,15 @@ if (!ieee80211_is_beacon(hdr->frame_control)) return 0; + if (!sta) + return -1; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); + /* key mismatch - will also report !MIC_OK but we shouldn't count it */ if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID)) - return -1; + goto report; /* good cases */ if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK && @@ -338,11 +345,4 @@ } - if (!sta) - return -1; - - mvmsta = iwl_mvm_sta_from_mac80211(sta); - - mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); - /* * both keys will 
have the same cipher and MIC length, use @@ -352,11 +352,11 @@ if (!key) { key = rcu_dereference(mvmvif->bcn_prot.keys[1]); if (!key) - return -1; + goto report; } if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2) - return -1; + goto report; /* get the real key ID */ keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2]; @@ -370,7 +370,7 @@ return -1; key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]); if (!key) - return -1; + goto report; } /* Report status to mac80211 */ @@ -378,6 +378,10 @@ ieee80211_key_mic_failure(key); else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR) ieee80211_key_replay(key); +report: + wdev = ieee80211_vif_to_wdev(mvmsta->vif); + if (wdev->netdev) + cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len); return -1; } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/sta.c linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/sta.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2544,7 +2544,7 @@ } if (iwl_mvm_has_new_rx_api(mvm) && start) { - u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); + u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); /* sparse doesn't like the __align() so don't check */ #ifndef __CHECKER__ diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/cfg80211.c linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/cfg80211.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3169,13 +3169,11 @@ cfg80211_unregister_netdevice(wdev->netdev); if (priv->dfs_cac_workqueue) { - flush_workqueue(priv->dfs_cac_workqueue); destroy_workqueue(priv->dfs_cac_workqueue); priv->dfs_cac_workqueue = NULL; } if (priv->dfs_chan_sw_workqueue) { - flush_workqueue(priv->dfs_chan_sw_workqueue); destroy_workqueue(priv->dfs_chan_sw_workqueue); priv->dfs_chan_sw_workqueue = NULL; } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/debugfs.c linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/debugfs.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -976,9 +976,6 @@ priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name, mwifiex_dfs_dir); - if (!priv->dfs_dev_dir) - return; - MWIFIEX_DFS_ADD_FILE(info); MWIFIEX_DFS_ADD_FILE(debug); MWIFIEX_DFS_ADD_FILE(getlog); diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/main.c linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/main.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/main.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/mwifiex/main.c @@ -498,13 +498,11 @@ static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter) { if (adapter->workqueue) { - flush_workqueue(adapter->workqueue); destroy_workqueue(adapter->workqueue); adapter->workqueue = NULL; } if (adapter->rx_workqueue) { - flush_workqueue(adapter->rx_workqueue); destroy_workqueue(adapter->rx_workqueue); adapter->rx_workqueue = NULL; } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/cfg80211.c linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/cfg80211.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -1562,7 +1562,6 @@ 
cfg80211_unregister_netdevice(vif->ndev); vif->monitor_flag = 0; - wilc_set_operation_mode(vif, 0, 0, 0); mutex_lock(&wl->vif_mutex); list_del_rcu(&vif->list); wl->vif_num--; diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/hif.c linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/hif.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/hif.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/hif.c @@ -359,38 +359,49 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, struct cfg80211_crypto_settings *crypto) { - struct wilc_join_bss_param *param; - struct ieee80211_p2p_noa_attr noa_attr; - u8 rates_len = 0; - const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie; + const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie; const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie; + struct ieee80211_p2p_noa_attr noa_attr; + const struct cfg80211_bss_ies *ies; + struct wilc_join_bss_param *param; + u8 rates_len = 0, ies_len; int ret; - const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies); param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) return NULL; + rcu_read_lock(); + ies = rcu_dereference(bss->ies); + ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC); + if (!ies_data) { + rcu_read_unlock(); + kfree(param); + return NULL; + } + ies_len = ies->len; + rcu_read_unlock(); + param->beacon_period = cpu_to_le16(bss->beacon_interval); param->cap_info = cpu_to_le16(bss->capability); param->bss_type = WILC_FW_BSS_TYPE_INFRA; param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq); ether_addr_copy(param->bssid, bss->bssid); - ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); + ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len); if (ssid_elm) { if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN) memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]); } - tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len); + tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len); if (tim_elm && tim_elm[1] >= 2) param->dtim_period = tim_elm[3]; memset(param->p_suites, 0xFF, 3); memset(param->akm_suites, 0xFF, 3); - rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len); + rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len); if (rates_ie) { rates_len = rates_ie[1]; if (rates_len > WILC_MAX_RATES_SUPPORTED) @@ -401,7 +412,7 @@ if (rates_len < WILC_MAX_RATES_SUPPORTED) { supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, - ies->data, ies->len); + ies_data, ies_len); if (supp_rates_ie) { u8 ext_rates = supp_rates_ie[1]; @@ -416,11 +427,11 @@ } } - ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len); + ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len); if (ht_ie) param->ht_capable = true; - ret = cfg80211_get_p2p_attr(ies->data, ies->len, + ret = cfg80211_get_p2p_attr(ies_data, ies_len, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, (u8 *)&noa_attr, sizeof(noa_attr)); if (ret > 0) { @@ -444,7 +455,7 @@ } wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, - ies->data, ies->len); + ies_data, ies_len); if (wmm_ie) { struct ieee80211_wmm_param_ie *ie; @@ -459,13 +470,13 @@ wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, - ies->data, ies->len); + ies_data, ies_len); if (wpa_ie) { param->mode_802_11i = 1; param->rsn_found = true; } - rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len); + rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len); if (rsn_ie) { int 
rsn_ie_len = sizeof(struct element) + rsn_ie[1]; int offset = 8; @@ -499,6 +510,7 @@ param->akm_suites[i] = crypto->akm_suites[i] & 0xFF; } + kfree(ies_data); return (void *)param; } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/netdev.c linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/netdev.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/netdev.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -862,8 +862,7 @@ void wilc_netdev_cleanup(struct wilc *wilc) { - struct wilc_vif *vif; - int srcu_idx, ifc_cnt = 0; + struct wilc_vif *vif, *vif_tmp; if (!wilc) return; @@ -873,33 +872,19 @@ wilc->firmware = NULL; } - srcu_idx = srcu_read_lock(&wilc->srcu); - list_for_each_entry_rcu(vif, &wilc->vif_list, list) { + list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) { + mutex_lock(&wilc->vif_mutex); + list_del_rcu(&vif->list); + wilc->vif_num--; + mutex_unlock(&wilc->vif_mutex); + synchronize_srcu(&wilc->srcu); if (vif->ndev) unregister_netdev(vif->ndev); } - srcu_read_unlock(&wilc->srcu, srcu_idx); wilc_wfi_deinit_mon_interface(wilc, false); - flush_workqueue(wilc->hif_workqueue); destroy_workqueue(wilc->hif_workqueue); - while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) { - mutex_lock(&wilc->vif_mutex); - if (wilc->vif_num <= 0) { - mutex_unlock(&wilc->vif_mutex); - break; - } - vif = wilc_get_wl_to_vif(wilc); - if (!IS_ERR(vif)) - list_del_rcu(&vif->list); - - wilc->vif_num--; - mutex_unlock(&wilc->vif_mutex); - synchronize_srcu(&wilc->srcu); - ifc_cnt++; - } - wilc_wlan_cfg_deinit(wilc); wlan_deinit_locks(wilc); wiphy_unregister(wilc->wiphy); diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -6484,6 +6484,7 @@ if (priv->usb_interrupts) rtl8xxxu_write32(priv, REG_USB_HIMR, 0); + cancel_work_sync(&priv->c2hcmd_work); cancel_delayed_work_sync(&priv->ra_watchdog); rtl8xxxu_free_rx_resources(priv); diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtlwifi/pci.c linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtlwifi/pci.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtlwifi/pci.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1704,7 +1704,6 @@ tasklet_kill(&rtlpriv->works.irq_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); - flush_workqueue(rtlpriv->works.rtl_wq); destroy_workqueue(rtlpriv->works.rtl_wq); } diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtw88/rtw8821c.c linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtw88/rtw8821c.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -667,9 +667,9 @@ dm_info->cck_fa_cnt = cck_fa_cnt; dm_info->ofdm_fa_cnt = ofdm_fa_cnt; + dm_info->total_fa_cnt = ofdm_fa_cnt; if (cck_enable) dm_info->total_fa_cnt += cck_fa_cnt; - dm_info->total_fa_cnt = ofdm_fa_cnt; crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK); dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt); diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtw89/fw.c linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtw89/fw.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtw89/fw.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/realtek/rtw89/fw.c @@ -403,18 
+403,18 @@ static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) { u32 val32; - u16 val16; val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); - val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2); - rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16); + val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); + rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); rtw89_fw_prog_cnt_dump(rtwdev); } -int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) +static +int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) { struct rtw89_fw_info *fw_info = &rtwdev->fw; struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); @@ -467,6 +467,20 @@ return ret; } +int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type) +{ + int retry; + int ret; + + for (retry = 0; retry < 5; retry++) { + ret = __rtw89_fw_download(rtwdev, type); + if (!ret) + return 0; + } + + return ret; +} + int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) { struct rtw89_fw_info *fw = &rtwdev->fw; diff -u linux-aws-5.15-5.15.0/drivers/net/wireless/rndis_wlan.c linux-aws-5.15-5.15.0/drivers/net/wireless/rndis_wlan.c --- linux-aws-5.15-5.15.0/drivers/net/wireless/rndis_wlan.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/rndis_wlan.c @@ -3494,7 +3494,6 @@ cancel_delayed_work_sync(&priv->dev_poller_work); cancel_delayed_work_sync(&priv->scan_work); cancel_work_sync(&priv->work); - flush_workqueue(priv->workqueue); destroy_workqueue(priv->workqueue); wiphy_free(wiphy); @@ -3511,7 +3510,6 @@ cancel_delayed_work_sync(&priv->dev_poller_work); cancel_delayed_work_sync(&priv->scan_work); cancel_work_sync(&priv->work); - flush_workqueue(priv->workqueue); destroy_workqueue(priv->workqueue); rndis_unbind(usbdev, intf); diff -u linux-aws-5.15-5.15.0/drivers/nvme/target/fc.c linux-aws-5.15-5.15.0/drivers/nvme/target/fc.c --- linux-aws-5.15-5.15.0/drivers/nvme/target/fc.c +++ linux-aws-5.15-5.15.0/drivers/nvme/target/fc.c @@ -111,6 +111,8 @@ struct nvmet_fc_port_entry *pe; struct kref ref; u32 max_sg_cnt; + + struct work_struct put_work; }; struct nvmet_fc_port_entry { @@ -165,7 +167,7 @@ struct nvmet_fc_hostport *hostport; struct nvmet_fc_ls_iod *rcv_disconn; struct list_head a_list; - struct nvmet_fc_tgt_queue __rcu *queues[NVMET_NR_QUEUES + 1]; + struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1]; struct kref ref; struct work_struct del_work; struct rcu_head rcu; @@ -248,6 +250,13 @@ static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); +static void nvmet_fc_put_tgtport_work(struct work_struct *work) +{ + struct nvmet_fc_tgtport *tgtport = + container_of(work, struct nvmet_fc_tgtport, put_work); + + nvmet_fc_tgtport_put(tgtport); +} static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_fcp_iod *fod); @@ -359,7 +368,7 @@ if (!lsop->req_queued) { spin_unlock_irqrestore(&tgtport->lock, flags); - return; + goto out_putwork; } list_del(&lsop->lsreq_list); @@ -372,7 +381,8 @@ (lsreq->rqstlen + lsreq->rsplen), DMA_BIDIRECTIONAL); - nvmet_fc_tgtport_put(tgtport); +out_putwork: + queue_work(nvmet_wq, &tgtport->put_work); } static int @@ -801,14 +811,11 @@ if (!queue) return NULL; - if (!nvmet_fc_tgt_a_get(assoc)) - goto out_free_queue; - queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, 
assoc->tgtport->fc_target_port.port_num, assoc->a_id, qid); if (!queue->work_q) - goto out_a_put; + goto out_free_queue; queue->qid = qid; queue->sqsize = sqsize; @@ -830,15 +837,13 @@ goto out_fail_iodlist; WARN_ON(assoc->queues[qid]); - rcu_assign_pointer(assoc->queues[qid], queue); + assoc->queues[qid] = queue; return queue; out_fail_iodlist: nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); destroy_workqueue(queue->work_q); -out_a_put: - nvmet_fc_tgt_a_put(assoc); out_free_queue: kfree(queue); return NULL; @@ -851,12 +856,8 @@ struct nvmet_fc_tgt_queue *queue = container_of(ref, struct nvmet_fc_tgt_queue, ref); - rcu_assign_pointer(queue->assoc->queues[queue->qid], NULL); - nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); - nvmet_fc_tgt_a_put(queue->assoc); - destroy_workqueue(queue->work_q); kfree_rcu(queue, rcu); @@ -968,7 +969,7 @@ rcu_read_lock(); list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (association_id == assoc->association_id) { - queue = rcu_dereference(assoc->queues[qid]); + queue = assoc->queues[qid]; if (queue && (!atomic_read(&queue->connected) || !nvmet_fc_tgt_q_get(queue))) @@ -1077,8 +1078,6 @@ /* new allocation not needed */ kfree(newhost); newhost = match; - /* no new allocation - release reference */ - nvmet_fc_tgtport_put(tgtport); } else { newhost->tgtport = tgtport; newhost->hosthandle = hosthandle; @@ -1093,13 +1092,28 @@ } static void -nvmet_fc_delete_assoc(struct work_struct *work) +nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) +{ + nvmet_fc_delete_target_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); +} + +static void +nvmet_fc_delete_assoc_work(struct work_struct *work) { struct nvmet_fc_tgt_assoc *assoc = container_of(work, struct nvmet_fc_tgt_assoc, del_work); + struct nvmet_fc_tgtport *tgtport = assoc->tgtport; - nvmet_fc_delete_target_assoc(assoc); - nvmet_fc_tgt_a_put(assoc); + nvmet_fc_delete_assoc(assoc); + nvmet_fc_tgtport_put(tgtport); +} + +static void +nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) +{ + nvmet_fc_tgtport_get(assoc->tgtport); + queue_work(nvmet_wq, &assoc->del_work); } static struct nvmet_fc_tgt_assoc * @@ -1111,6 +1125,9 @@ int idx; bool needrandom = true; + if (!tgtport->pe) + return NULL; + assoc = kzalloc(sizeof(*assoc), GFP_KERNEL); if (!assoc) return NULL; @@ -1130,7 +1147,7 @@ assoc->a_id = idx; INIT_LIST_HEAD(&assoc->a_list); kref_init(&assoc->ref); - INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc); + INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); atomic_set(&assoc->terminating, 0); while (needrandom) { @@ -1171,13 +1188,18 @@ struct nvmet_fc_tgtport *tgtport = assoc->tgtport; struct nvmet_fc_ls_iod *oldls; unsigned long flags; + int i; + + for (i = NVMET_NR_QUEUES; i >= 0; i--) { + if (assoc->queues[i]) + nvmet_fc_delete_target_queue(assoc->queues[i]); + } /* Send Disconnect now that all i/o has completed */ nvmet_fc_xmt_disconnect_assoc(assoc); nvmet_fc_free_hostport(assoc->hostport); spin_lock_irqsave(&tgtport->lock, flags); - list_del_rcu(&assoc->a_list); oldls = assoc->rcv_disconn; spin_unlock_irqrestore(&tgtport->lock, flags); /* if pending Rcv Disconnect Association LS, send rsp now */ @@ -1207,7 +1229,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) { struct nvmet_fc_tgtport *tgtport = assoc->tgtport; - struct nvmet_fc_tgt_queue *queue; + unsigned long flags; int i, terminating; terminating = atomic_xchg(&assoc->terminating, 1); @@ -1216,29 +1238,21 @@ if (terminating) return; + spin_lock_irqsave(&tgtport->lock, flags); + 
list_del_rcu(&assoc->a_list); + spin_unlock_irqrestore(&tgtport->lock, flags); - for (i = NVMET_NR_QUEUES; i >= 0; i--) { - rcu_read_lock(); - queue = rcu_dereference(assoc->queues[i]); - if (!queue) { - rcu_read_unlock(); - continue; - } + synchronize_rcu(); - if (!nvmet_fc_tgt_q_get(queue)) { - rcu_read_unlock(); - continue; - } - rcu_read_unlock(); - nvmet_fc_delete_target_queue(queue); - nvmet_fc_tgt_q_put(queue); + /* ensure all in-flight I/Os have been processed */ + for (i = NVMET_NR_QUEUES; i >= 0; i--) { + if (assoc->queues[i]) + flush_workqueue(assoc->queues[i]->work_q); } dev_info(tgtport->dev, "{%d:%d} Association deleted\n", tgtport->fc_target_port.port_num, assoc->a_id); - - nvmet_fc_tgt_a_put(assoc); } static struct nvmet_fc_tgt_assoc * @@ -1414,6 +1428,7 @@ kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); newrec->max_sg_cnt = template->max_sgl_segments; + INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); ret = nvmet_fc_alloc_ls_iodlist(newrec); if (ret) { @@ -1491,9 +1506,8 @@ list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { if (!nvmet_fc_tgt_a_get(assoc)) continue; - if (!queue_work(nvmet_wq, &assoc->del_work)) - /* already deleting - release local reference */ - nvmet_fc_tgt_a_put(assoc); + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); } rcu_read_unlock(); } @@ -1546,9 +1560,8 @@ continue; assoc->hostport->invalid = 1; noassoc = false; - if (!queue_work(nvmet_wq, &assoc->del_work)) - /* already deleting - release local reference */ - nvmet_fc_tgt_a_put(assoc); + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); } spin_unlock_irqrestore(&tgtport->lock, flags); @@ -1580,7 +1593,7 @@ rcu_read_lock(); list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { - queue = rcu_dereference(assoc->queues[0]); + queue = assoc->queues[0]; if (queue && queue->nvme_sq.ctrl == ctrl) { if (nvmet_fc_tgt_a_get(assoc)) found_ctrl = true; @@ -1592,9 +1605,8 @@ nvmet_fc_tgtport_put(tgtport); if (found_ctrl) { - if (!queue_work(nvmet_wq, &assoc->del_work)) - /* already deleting - release local reference */ - nvmet_fc_tgt_a_put(assoc); + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); return; } @@ -1624,6 +1636,8 @@ /* terminate any outstanding associations */ __nvmet_fc_free_assocs(tgtport); + flush_workqueue(nvmet_wq); + /* * should terminate LS's as well. 
However, LS's will be generated * at the tail end of association termination, so they likely don't @@ -1869,9 +1883,6 @@ sizeof(struct fcnvme_ls_disconnect_assoc_acc)), FCNVME_LS_DISCONNECT_ASSOC); - /* release get taken in nvmet_fc_find_target_assoc */ - nvmet_fc_tgt_a_put(assoc); - /* * The rules for LS response says the response cannot * go back until ABTS's have been sent for all outstanding @@ -1886,8 +1897,6 @@ assoc->rcv_disconn = iod; spin_unlock_irqrestore(&tgtport->lock, flags); - nvmet_fc_delete_target_assoc(assoc); - if (oldls) { dev_info(tgtport->dev, "{%d:%d} Multiple Disconnect Association LS's " @@ -1903,6 +1912,9 @@ nvmet_fc_xmt_ls_rsp(tgtport, oldls); } + nvmet_fc_schedule_delete_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); + return false; } @@ -2539,8 +2551,9 @@ fod->req.cmd = &fod->cmdiubuf.sqe; fod->req.cqe = &fod->rspiubuf.cqe; - if (tgtport->pe) - fod->req.port = tgtport->pe->port; + if (!tgtport->pe) + goto transport_error; + fod->req.port = tgtport->pe->port; /* clear any response payload */ memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); @@ -2901,6 +2914,9 @@ nvmet_fc_portentry_unbind(pe); + /* terminate any outstanding associations */ + __nvmet_fc_free_assocs(pe->tgtport); + kfree(pe); } @@ -2932,6 +2948,9 @@ static void __exit nvmet_fc_exit_module(void) { + /* ensure any shutdown operation, e.g. delete ctrls have finished */ + flush_workqueue(nvmet_wq); + /* sanity check - all lports should be removed */ if (!list_empty(&nvmet_fc_target_list)) pr_warn("%s: targetport list not empty\n", __func__); diff -u linux-aws-5.15-5.15.0/drivers/nvme/target/fcloop.c linux-aws-5.15-5.15.0/drivers/nvme/target/fcloop.c --- linux-aws-5.15-5.15.0/drivers/nvme/target/fcloop.c +++ linux-aws-5.15-5.15.0/drivers/nvme/target/fcloop.c @@ -358,7 +358,7 @@ if (!rport->targetport) { tls_req->status = -ECONNREFUSED; spin_lock(&rport->lock); - list_add_tail(&rport->ls_list, &tls_req->ls_list); + list_add_tail(&tls_req->ls_list, &rport->ls_list); spin_unlock(&rport->lock); queue_work(nvmet_wq, &rport->ls_work); return ret; @@ -391,7 +391,7 @@ if (remoteport) { rport = remoteport->private; spin_lock(&rport->lock); - list_add_tail(&rport->ls_list, &tls_req->ls_list); + list_add_tail(&tls_req->ls_list, &rport->ls_list); spin_unlock(&rport->lock); queue_work(nvmet_wq, &rport->ls_work); } @@ -446,7 +446,7 @@ if (!tport->remoteport) { tls_req->status = -ECONNREFUSED; spin_lock(&tport->lock); - list_add_tail(&tport->ls_list, &tls_req->ls_list); + list_add_tail(&tls_req->ls_list, &tport->ls_list); spin_unlock(&tport->lock); queue_work(nvmet_wq, &tport->ls_work); return ret; diff -u linux-aws-5.15-5.15.0/drivers/nvme/target/tcp.c linux-aws-5.15-5.15.0/drivers/nvme/target/tcp.c --- linux-aws-5.15-5.15.0/drivers/nvme/target/tcp.c +++ linux-aws-5.15-5.15.0/drivers/nvme/target/tcp.c @@ -1884,6 +1884,7 @@ flush_workqueue(nvmet_wq); destroy_workqueue(nvmet_tcp_wq); + ida_destroy(&nvmet_tcp_queue_ida); } module_init(nvmet_tcp_init); diff -u linux-aws-5.15-5.15.0/drivers/opp/debugfs.c linux-aws-5.15-5.15.0/drivers/opp/debugfs.c --- linux-aws-5.15-5.15.0/drivers/opp/debugfs.c +++ linux-aws-5.15-5.15.0/drivers/opp/debugfs.c @@ -37,10 +37,12 @@ size_t count, loff_t *ppos) { struct icc_path *path = fp->private_data; + const char *name = icc_get_name(path); char buf[64]; - int i; + int i = 0; - i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path)); + if (name) + i = scnprintf(buf, sizeof(buf), "%.62s\n", name); return simple_read_from_buffer(userbuf, count, ppos, buf, i); } diff -u 
linux-aws-5.15-5.15.0/drivers/pci/controller/dwc/pcie-designware-ep.c linux-aws-5.15-5.15.0/drivers/pci/controller/dwc/pcie-designware-ep.c --- linux-aws-5.15-5.15.0/drivers/pci/controller/dwc/pcie-designware-ep.c +++ linux-aws-5.15-5.15.0/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -6,6 +6,7 @@ * Author: Kishon Vijay Abraham I */ +#include #include #include @@ -589,7 +590,7 @@ } aligned_offset = msg_addr & (epc->mem->window.page_size - 1); - msg_addr &= ~aligned_offset; + msg_addr = ALIGN_DOWN(msg_addr, epc->mem->window.page_size); ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) diff -u linux-aws-5.15-5.15.0/drivers/pci/msi.c linux-aws-5.15-5.15.0/drivers/pci/msi.c --- linux-aws-5.15-5.15.0/drivers/pci/msi.c +++ linux-aws-5.15-5.15.0/drivers/pci/msi.c @@ -1304,7 +1304,7 @@ return (irq_hw_number_t)desc->msi_attrib.entry_nr | pci_dev_id(dev) << 11 | - (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; + ((irq_hw_number_t)(pci_domain_nr(dev->bus) & 0xFFFFFFFF)) << 27; } static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc) diff -u linux-aws-5.15-5.15.0/drivers/pci/p2pdma.c linux-aws-5.15-5.15.0/drivers/pci/p2pdma.c --- linux-aws-5.15-5.15.0/drivers/pci/p2pdma.c +++ linux-aws-5.15-5.15.0/drivers/pci/p2pdma.c @@ -536,7 +536,7 @@ p2pdma = rcu_dereference(provider->p2pdma); if (p2pdma) xa_store(&p2pdma->map_types, map_types_idx(client), - xa_mk_value(map_type), GFP_KERNEL); + xa_mk_value(map_type), GFP_ATOMIC); rcu_read_unlock(); return map_type; } diff -u linux-aws-5.15-5.15.0/drivers/pci/pci.h linux-aws-5.15-5.15.0/drivers/pci/pci.h --- linux-aws-5.15-5.15.0/drivers/pci/pci.h +++ linux-aws-5.15-5.15.0/drivers/pci/pci.h @@ -396,11 +396,6 @@ return 0; } -static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) -{ - return dev->error_state == pci_channel_io_perm_failure; -} - /* pci_dev priv_flags */ #define PCI_DEV_ADDED 0 #define PCI_DPC_RECOVERED 1 diff -u linux-aws-5.15-5.15.0/drivers/pci/pcie/dpc.c linux-aws-5.15-5.15.0/drivers/pci/pcie/dpc.c --- linux-aws-5.15-5.15.0/drivers/pci/pcie/dpc.c +++ linux-aws-5.15-5.15.0/drivers/pci/pcie/dpc.c @@ -231,7 +231,7 @@ for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) { pci_read_config_dword(pdev, - cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix); + cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix); pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); } clear_status: diff -u linux-aws-5.15-5.15.0/drivers/pci/quirks.c linux-aws-5.15-5.15.0/drivers/pci/quirks.c --- linux-aws-5.15-5.15.0/drivers/pci/quirks.c +++ linux-aws-5.15-5.15.0/drivers/pci/quirks.c @@ -5405,6 +5405,7 @@ pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL); } +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags); diff -u linux-aws-5.15-5.15.0/drivers/pci/switch/switchtec.c linux-aws-5.15-5.15.0/drivers/pci/switch/switchtec.c --- linux-aws-5.15-5.15.0/drivers/pci/switch/switchtec.c +++ linux-aws-5.15-5.15.0/drivers/pci/switch/switchtec.c @@ -1614,7 +1614,7 @@ rc = switchtec_init_isr(stdev); if (rc) { dev_err(&stdev->dev, "failed to init isr.\n"); - goto err_put; + goto err_exit_pci; } iowrite32(SWITCHTEC_EVENT_CLEAR | @@ -1635,6 +1635,8 @@ err_devadd: stdev_kill(stdev); +err_exit_pci: + switchtec_exit_pci(stdev); err_put: 
ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt)); put_device(&stdev->dev); diff -u linux-aws-5.15-5.15.0/drivers/platform/x86/asus-wmi.c linux-aws-5.15-5.15.0/drivers/platform/x86/asus-wmi.c --- linux-aws-5.15-5.15.0/drivers/platform/x86/asus-wmi.c +++ linux-aws-5.15-5.15.0/drivers/platform/x86/asus-wmi.c @@ -321,7 +321,17 @@ static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval) { - return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval); + int err; + + err = asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval); + + if (err) + return err; + + if (*retval == ~0) + return -ENODEV; + + return 0; } static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, diff -u linux-aws-5.15-5.15.0/drivers/platform/x86/intel/vbtn.c linux-aws-5.15-5.15.0/drivers/platform/x86/intel/vbtn.c --- linux-aws-5.15-5.15.0/drivers/platform/x86/intel/vbtn.c +++ linux-aws-5.15-5.15.0/drivers/platform/x86/intel/vbtn.c @@ -200,9 +200,6 @@ autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE); sparse_keymap_report_event(input_dev, event, val, autorelease); - - /* Some devices need this to report further events */ - acpi_evaluate_object(handle, "VBDL", NULL, NULL); } /* diff -u linux-aws-5.15-5.15.0/drivers/platform/x86/touchscreen_dmi.c linux-aws-5.15-5.15.0/drivers/platform/x86/touchscreen_dmi.c --- linux-aws-5.15-5.15.0/drivers/platform/x86/touchscreen_dmi.c +++ linux-aws-5.15-5.15.0/drivers/platform/x86/touchscreen_dmi.c @@ -50,7 +50,7 @@ }; static const struct ts_dmi_data chuwi_hi8_air_data = { - .acpi_name = "MSSL1680:00", + .acpi_name = "MSSL1680", .properties = chuwi_hi8_air_props, }; @@ -916,6 +916,32 @@ .properties = teclast_tbook11_props, }; +static const struct property_entry teclast_x16_plus_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 8), + PROPERTY_ENTRY_U32("touchscreen-min-y", 14), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1916), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1264), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct ts_dmi_data teclast_x16_plus_data = { + .embedded_fw = { + .name = "silead/gsl3692-teclast-x16-plus.fw", + .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 }, + .length = 43560, + .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25, + 0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75, + 0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f, + 0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 }, + }, + .acpi_name = "MSSL1680:00", + .properties = teclast_x16_plus_props, +}; + static const struct property_entry teclast_x3_plus_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), @@ -1553,6 +1579,15 @@ }, }, { + /* Teclast X16 Plus */ + .driver_data = (void *)&teclast_x16_plus_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"), + DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), + DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"), + }, + }, + { /* Teclast X3 Plus */ .driver_data = (void *)&teclast_x3_plus_data, .matches = { @@ -1710,7 +1745,7 @@ int error; if (has_acpi_companion(dev) && - !strncmp(ts_data->acpi_name, client->name, I2C_NAME_SIZE)) { + strstarts(client->name, ts_data->acpi_name)) { error = device_create_managed_software_node(dev, ts_data->properties, NULL); if (error) dev_err(dev, "failed to add properties: %d\n", error); diff -u 
linux-aws-5.15-5.15.0/drivers/power/supply/bq27xxx_battery_i2c.c linux-aws-5.15-5.15.0/drivers/power/supply/bq27xxx_battery_i2c.c --- linux-aws-5.15-5.15.0/drivers/power/supply/bq27xxx_battery_i2c.c +++ linux-aws-5.15-5.15.0/drivers/power/supply/bq27xxx_battery_i2c.c @@ -209,7 +209,9 @@ { struct bq27xxx_device_info *di = i2c_get_clientdata(client); - free_irq(client->irq, di); + if (client->irq) + free_irq(client->irq, di); + bq27xxx_battery_teardown(di); mutex_lock(&battery_mutex); diff -u linux-aws-5.15-5.15.0/drivers/pwm/pwm-sti.c linux-aws-5.15-5.15.0/drivers/pwm/pwm-sti.c --- linux-aws-5.15-5.15.0/drivers/pwm/pwm-sti.c +++ linux-aws-5.15-5.15.0/drivers/pwm/pwm-sti.c @@ -392,11 +392,43 @@ return ret; } +static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct sti_pwm_chip *pc = to_sti_pwmchip(chip); + struct sti_pwm_compat_data *cdata = pc->cdata; + struct device *dev = pc->dev; + int err; + + if (pwm->hwpwm >= cdata->pwm_num_devs) { + dev_err(dev, "device %u is not valid for pwm mode\n", + pwm->hwpwm); + return -EINVAL; + } + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + sti_pwm_disable(chip, pwm); + + return 0; + } + + err = sti_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = sti_pwm_enable(chip, pwm); + + return err; +} + static const struct pwm_ops sti_pwm_ops = { .capture = sti_pwm_capture, - .config = sti_pwm_config, - .enable = sti_pwm_enable, - .disable = sti_pwm_disable, + .apply = sti_pwm_apply, .free = sti_pwm_free, .owner = THIS_MODULE, }; @@ -624,7 +656,7 @@ pc->chip.dev = dev; pc->chip.ops = &sti_pwm_ops; - pc->chip.npwm = pc->cdata->pwm_num_devs; + pc->chip.npwm = max(cdata->pwm_num_devs, cdata->cpt_num_devs); for (i = 0; i < cdata->cpt_num_devs; i++) { struct sti_cpt_ddata *ddata = &cdata->ddata[i]; diff -u linux-aws-5.15-5.15.0/drivers/remoteproc/stm32_rproc.c linux-aws-5.15-5.15.0/drivers/remoteproc/stm32_rproc.c --- linux-aws-5.15-5.15.0/drivers/remoteproc/stm32_rproc.c +++ linux-aws-5.15-5.15.0/drivers/remoteproc/stm32_rproc.c @@ -118,10 +118,10 @@ struct device *dev = rproc->dev.parent; void *va; - dev_dbg(dev, "map memory: %pa+%x\n", &mem->dma, mem->len); - va = ioremap_wc(mem->dma, mem->len); + dev_dbg(dev, "map memory: %pad+%zx\n", &mem->dma, mem->len); + va = (__force void *)ioremap_wc(mem->dma, mem->len); if (IS_ERR_OR_NULL(va)) { - dev_err(dev, "Unable to map memory region: %pa+%x\n", + dev_err(dev, "Unable to map memory region: %pad+0x%zx\n", &mem->dma, mem->len); return -ENOMEM; } @@ -136,7 +136,7 @@ struct rproc_mem_entry *mem) { dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma); - iounmap(mem->va); + iounmap((__force __iomem void *)mem->va); return 0; } @@ -627,7 +627,7 @@ ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE); if (IS_ERR_OR_NULL(ddata->rsc_va)) { - dev_err(dev, "Unable to map memory region: %pa+%zx\n", + dev_err(dev, "Unable to map memory region: %pa+%x\n", &rsc_pa, RSC_TBL_SIZE); ddata->rsc_va = NULL; return ERR_PTR(-ENOMEM); @@ -641,7 +641,7 @@ * entire area by overwriting it with the initial values stored in rproc->clean_table. 
*/ *table_sz = RSC_TBL_SIZE; - return (struct resource_table *)ddata->rsc_va; + return (__force struct resource_table *)ddata->rsc_va; } static const struct rproc_ops st_rproc_ops = { @@ -889,7 +889,7 @@ return 0; } -static int __maybe_unused stm32_rproc_suspend(struct device *dev) +static int stm32_rproc_suspend(struct device *dev) { struct rproc *rproc = dev_get_drvdata(dev); struct stm32_rproc *ddata = rproc->priv; @@ -900,7 +900,7 @@ return 0; } -static int __maybe_unused stm32_rproc_resume(struct device *dev) +static int stm32_rproc_resume(struct device *dev) { struct rproc *rproc = dev_get_drvdata(dev); struct stm32_rproc *ddata = rproc->priv; @@ -911,16 +911,16 @@ return 0; } -static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops, - stm32_rproc_suspend, stm32_rproc_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops, + stm32_rproc_suspend, stm32_rproc_resume); static struct platform_driver stm32_rproc_driver = { .probe = stm32_rproc_probe, .remove = stm32_rproc_remove, .driver = { .name = "stm32-rproc", - .pm = &stm32_rproc_pm_ops, - .of_match_table = of_match_ptr(stm32_rproc_match), + .pm = pm_ptr(&stm32_rproc_pm_ops), + .of_match_table = stm32_rproc_match, }, }; module_platform_driver(stm32_rproc_driver); diff -u linux-aws-5.15-5.15.0/drivers/rtc/Kconfig linux-aws-5.15-5.15.0/drivers/rtc/Kconfig --- linux-aws-5.15-5.15.0/drivers/rtc/Kconfig +++ linux-aws-5.15-5.15.0/drivers/rtc/Kconfig @@ -1824,7 +1824,8 @@ config RTC_DRV_MT6397 tristate "MediaTek PMIC based RTC" - depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN) + depends on MFD_MT6397 || COMPILE_TEST + select IRQ_DOMAIN help This selects the MediaTek(R) RTC driver. RTC is part of MediaTek MT6397 PMIC. You should enable MT6397 PMIC MFD before select diff -u linux-aws-5.15-5.15.0/drivers/s390/block/dasd.c linux-aws-5.15-5.15.0/drivers/s390/block/dasd.c --- linux-aws-5.15-5.15.0/drivers/s390/block/dasd.c +++ linux-aws-5.15-5.15.0/drivers/s390/block/dasd.c @@ -8,9 +8,6 @@ * Copyright IBM Corp. 1999, 2009 */ -#define KMSG_COMPONENT "dasd" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - #include #include #include @@ -84,7 +81,8 @@ static void dasd_profile_exit(struct dasd_profile *); static void dasd_hosts_init(struct dentry *, struct dasd_device *); static void dasd_hosts_exit(struct dasd_device *); - +static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *, + unsigned int); /* * SECTION: Operations on the device structure. */ @@ -2349,7 +2347,7 @@ /* Non-temporary stop condition will trigger fail fast */ if (device->stopped & ~DASD_STOPPED_PENDING && test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && - (!dasd_eer_enabled(device))) { + !dasd_eer_enabled(device) && device->aq_mask == 0) { cqr->status = DASD_CQR_FAILED; cqr->intrc = -ENOLINK; continue; @@ -2825,20 +2823,18 @@ dasd_log_sense(cqr, &cqr->irb); } - /* First of all call extended error reporting. 
*/ - if (dasd_eer_enabled(base) && - cqr->status == DASD_CQR_FAILED) { - dasd_eer_write(base, cqr, DASD_EER_FATALERROR); - - /* restart request */ + /* + * First call extended error reporting and check for autoquiesce + */ + spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); + if (cqr->status == DASD_CQR_FAILED && + dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) { cqr->status = DASD_CQR_FILLED; cqr->retries = 255; - spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); - dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); - spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), - flags); + spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); goto restart; } + spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); /* Process finished ERP request. */ if (cqr->refers) { @@ -2880,7 +2876,7 @@ /* Non-temporary stop condition will trigger fail fast */ if (block->base->stopped & ~DASD_STOPPED_PENDING && test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && - (!dasd_eer_enabled(block->base))) { + !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) { cqr->status = DASD_CQR_FAILED; cqr->intrc = -ENOLINK; dasd_schedule_block_bh(block); @@ -3452,8 +3448,7 @@ ret = ccw_device_set_online(cdev); if (ret) - pr_warn("%s: Setting the DASD online failed with rc=%d\n", - dev_name(&cdev->dev), ret); + dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret); } /* @@ -3540,8 +3535,11 @@ { struct dasd_discipline *discipline; struct dasd_device *device; + struct device *dev; int rc; + dev = &cdev->dev; + /* first online clears initial online feature flag */ dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); device = dasd_create_device(cdev); @@ -3554,11 +3552,10 @@ /* Try to load the required module. */ rc = request_module(DASD_DIAG_MOD); if (rc) { - pr_warn("%s Setting the DASD online failed " - "because the required module %s " - "could not be loaded (rc=%d)\n", - dev_name(&cdev->dev), DASD_DIAG_MOD, - rc); + dev_warn(dev, "Setting the DASD online failed " + "because the required module %s " + "could not be loaded (rc=%d)\n", + DASD_DIAG_MOD, rc); dasd_delete_device(device); return -ENODEV; } @@ -3566,8 +3563,7 @@ /* Module init could have failed, so check again here after * request_module(). 
*/ if (!dasd_diag_discipline_pointer) { - pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", - dev_name(&cdev->dev)); + dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n"); dasd_delete_device(device); return -ENODEV; } @@ -3577,37 +3573,33 @@ dasd_delete_device(device); return -EINVAL; } + device->base_discipline = base_discipline; if (!try_module_get(discipline->owner)) { - module_put(base_discipline->owner); dasd_delete_device(device); return -EINVAL; } - device->base_discipline = base_discipline; device->discipline = discipline; /* check_device will allocate block device if necessary */ rc = discipline->check_device(device); if (rc) { - pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", - dev_name(&cdev->dev), discipline->name, rc); - module_put(discipline->owner); - module_put(base_discipline->owner); + dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n", + discipline->name, rc); dasd_delete_device(device); return rc; } dasd_set_target_state(device, DASD_STATE_ONLINE); if (device->state <= DASD_STATE_KNOWN) { - pr_warn("%s Setting the DASD online failed because of a missing discipline\n", - dev_name(&cdev->dev)); + dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n"); rc = -ENODEV; dasd_set_target_state(device, DASD_STATE_NEW); if (device->block) dasd_free_block(device->block); dasd_delete_device(device); - } else - pr_debug("dasd_generic device %s found\n", - dev_name(&cdev->dev)); + } else { + dev_dbg(dev, "dasd_generic device found\n"); + } wait_event(dasd_init_waitq, _wait_for_device(device)); @@ -3618,10 +3610,13 @@ int dasd_generic_set_offline(struct ccw_device *cdev) { + int max_count, open_count, rc; struct dasd_device *device; struct dasd_block *block; - int max_count, open_count, rc; unsigned long flags; + struct device *dev; + + dev = &cdev->dev; rc = 0; spin_lock_irqsave(get_ccwdev_lock(cdev), flags); @@ -3642,11 +3637,10 @@ open_count = atomic_read(&device->block->open_count); if (open_count > max_count) { if (open_count > 0) - pr_warn("%s: The DASD cannot be set offline with open count %i\n", - dev_name(&cdev->dev), open_count); + dev_warn(dev, "The DASD cannot be set offline with open count %i\n", + open_count); else - pr_warn("%s: The DASD cannot be set offline while it is in use\n", - dev_name(&cdev->dev)); + dev_warn(dev, "The DASD cannot be set offline while it is in use\n"); rc = -EBUSY; goto out_err; } @@ -3743,8 +3737,8 @@ dev_warn(&device->cdev->dev, "No operational channel path is left " "for the device\n"); DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); - /* First of all call extended error reporting. */ - dasd_eer_write(device, NULL, DASD_EER_NOPATH); + /* First call extended error reporting and check for autoquiesce. */ + dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); if (device->state < DASD_STATE_BASIC) return 0; @@ -3877,7 +3871,8 @@ "No verified channel paths remain for the device\n"); DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last verified path gone"); - dasd_eer_write(device, NULL, DASD_EER_NOPATH); + /* First call extended error reporting and check for autoquiesce. 
*/ + dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); } @@ -3899,7 +3894,8 @@ void dasd_generic_space_exhaust(struct dasd_device *device, struct dasd_ccw_req *cqr) { - dasd_eer_write(device, NULL, DASD_EER_NOSPC); + /* First call extended error reporting and check for autoquiesce. */ + dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC); if (device->state < DASD_STATE_BASIC) return; @@ -3992,6 +3988,31 @@ } EXPORT_SYMBOL(dasd_schedule_requeue); +static int dasd_handle_autoquiesce(struct dasd_device *device, + struct dasd_ccw_req *cqr, + unsigned int reason) +{ + /* in any case write eer message with reason */ + if (dasd_eer_enabled(device)) + dasd_eer_write(device, cqr, reason); + + if (!test_bit(reason, &device->aq_mask)) + return 0; + + /* notify eer about autoquiesce */ + if (dasd_eer_enabled(device)) + dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE); + + dev_info(&device->cdev->dev, + "The DASD has been put in the quiesce state\n"); + dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE); + + if (device->features & DASD_FEATURE_REQUEUEQUIESCE) + dasd_schedule_requeue(device); + + return 1; +} + static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, int rdc_buffer_size, int magic) diff -u linux-aws-5.15-5.15.0/drivers/s390/block/dasd_eckd.c linux-aws-5.15-5.15.0/drivers/s390/block/dasd_eckd.c --- linux-aws-5.15-5.15.0/drivers/s390/block/dasd_eckd.c +++ linux-aws-5.15-5.15.0/drivers/s390/block/dasd_eckd.c @@ -2029,6 +2029,49 @@ } /* + * return if the device is the copy relation primary if a copy relation is active + */ +static int dasd_device_is_primary(struct dasd_device *device) +{ + if (!device->copy) + return 1; + + if (device->copy->active->device == device) + return 1; + + return 0; +} + +static int dasd_eckd_alloc_block(struct dasd_device *device) +{ + struct dasd_block *block; + struct dasd_uid temp_uid; + + if (!dasd_device_is_primary(device)) + return 0; + + dasd_eckd_get_uid(device, &temp_uid); + if (temp_uid.type == UA_BASE_DEVICE) { + block = dasd_alloc_block(); + if (IS_ERR(block)) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "could not allocate dasd block structure"); + return PTR_ERR(block); + } + device->block = block; + block->base = device; + } + return 0; +} + +static bool dasd_eckd_pprc_enabled(struct dasd_device *device) +{ + struct dasd_eckd_private *private = device->private; + + return private->rdc_data.facilities.PPRC_enabled; +} + +/* * Check device characteristics. * If the device is accessible using ECKD discipline, the device is enabled. 
*/ @@ -2036,8 +2079,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device) { struct dasd_eckd_private *private = device->private; - struct dasd_block *block; - struct dasd_uid temp_uid; int rc, i; int readonly; unsigned long value; @@ -2095,20 +2136,29 @@ device->default_expires = value; } - dasd_eckd_get_uid(device, &temp_uid); - if (temp_uid.type == UA_BASE_DEVICE) { - block = dasd_alloc_block(); - if (IS_ERR(block)) { - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", - "could not allocate dasd " - "block structure"); - rc = PTR_ERR(block); - goto out_err1; - } - device->block = block; - block->base = device; + /* Read Device Characteristics */ + rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, + &private->rdc_data, 64); + if (rc) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, + "Read device characteristic failed, rc=%d", rc); + goto out_err1; + } + + /* setup PPRC for device from devmap */ + rc = dasd_devmap_set_device_copy_relation(device->cdev, + dasd_eckd_pprc_enabled(device)); + if (rc) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, + "copy relation setup failed, rc=%d", rc); + goto out_err1; } + /* check if block device is needed and allocate in case */ + rc = dasd_eckd_alloc_block(device); + if (rc) + goto out_err1; + /* register lcu with alias handling, enable PAV */ rc = dasd_alias_make_device_known_to_lcu(device); if (rc) @@ -2132,15 +2182,6 @@ /* Read Extent Pool Information */ dasd_eckd_read_ext_pool_info(device); - /* Read Device Characteristics */ - rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, - &private->rdc_data, 64); - if (rc) { - DBF_EVENT_DEVID(DBF_WARNING, device->cdev, - "Read device characteristic failed, rc=%d", rc); - goto out_err3; - } - if ((device->features & DASD_FEATURE_USERAW) && !(private->rdc_data.facilities.RT_in_LR)) { dev_err(&device->cdev->dev, "The storage server does not " @@ -6098,6 +6139,71 @@ } /* + * Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query + */ +static int dasd_eckd_query_pprc_status(struct dasd_device *device, + struct dasd_pprc_data_sc4 *data) +{ + struct dasd_pprc_data_sc4 *pprc_data; + struct dasd_psf_prssd_data *prssdp; + struct dasd_ccw_req *cqr; + struct ccw1 *ccw; + int rc; + + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, + sizeof(*prssdp) + sizeof(*pprc_data) + 1, + device, NULL); + if (IS_ERR(cqr)) { + DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", + "Could not allocate query PPRC status request"); + return PTR_ERR(cqr); + } + cqr->startdev = device; + cqr->memdev = device; + cqr->block = NULL; + cqr->retries = 256; + cqr->expires = 10 * HZ; + + /* Prepare for Read Subsystem Data */ + prssdp = (struct dasd_psf_prssd_data *)cqr->data; + memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); + prssdp->order = PSF_ORDER_PRSSD; + prssdp->suborder = PSF_SUBORDER_PPRCEQ; + prssdp->varies[0] = PPRCEQ_SCOPE_4; + pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1); + + ccw = cqr->cpaddr; + ccw->cmd_code = DASD_ECKD_CCW_PSF; + ccw->count = sizeof(struct dasd_psf_prssd_data); + ccw->flags |= CCW_FLAG_CC; + ccw->flags |= CCW_FLAG_SLI; + ccw->cda = (__u32)(addr_t)prssdp; + + /* Read Subsystem Data - query host access */ + ccw++; + ccw->cmd_code = DASD_ECKD_CCW_RSSD; + ccw->count = sizeof(*pprc_data); + ccw->flags |= CCW_FLAG_SLI; + ccw->cda = (__u32)(addr_t)pprc_data; + + cqr->buildclk = get_tod_clock(); + cqr->status = DASD_CQR_FILLED; + + rc = dasd_sleep_on_interruptible(cqr); + if (rc == 0) { + *data = *pprc_data; + } else { + DBF_EVENT_DEVID(DBF_WARNING, 
device->cdev, + "PPRC Extended Query failed with rc=%d\n", + rc); + rc = -EOPNOTSUPP; + } + + dasd_sfree_request(cqr, cqr->memdev); + return rc; +} + +/* * Perform Subsystem Function - CUIR response */ static int @@ -6715,6 +6821,8 @@ .ext_pool_exhaust = dasd_eckd_ext_pool_exhaust, .ese_format = dasd_eckd_ese_format, .ese_read = dasd_eckd_ese_read, + .pprc_status = dasd_eckd_query_pprc_status, + .pprc_enabled = dasd_eckd_pprc_enabled, }; static int __init diff -u linux-aws-5.15-5.15.0/drivers/s390/block/dasd_int.h linux-aws-5.15-5.15.0/drivers/s390/block/dasd_int.h --- linux-aws-5.15-5.15.0/drivers/s390/block/dasd_int.h +++ linux-aws-5.15-5.15.0/drivers/s390/block/dasd_int.h @@ -261,6 +261,55 @@ }; /* + * PPRC Status data + */ +struct dasd_pprc_header { + __u8 entries; /* 0 Number of device entries */ + __u8 unused; /* 1 unused */ + __u16 entry_length; /* 2-3 Length of device entry */ + __u32 unused2; /* 4-7 unused */ +} __packed; + +struct dasd_pprc_dev_info { + __u8 state; /* 0 Copy State */ + __u8 flags; /* 1 Flags */ + __u8 reserved1[2]; /* 2-3 reserved */ + __u8 prim_lss; /* 4 Primary device LSS */ + __u8 primary; /* 5 Primary device address */ + __u8 sec_lss; /* 6 Secondary device LSS */ + __u8 secondary; /* 7 Secondary device address */ + __u16 pprc_id; /* 8-9 Peer-to-Peer Remote Copy ID */ + __u8 reserved2[12]; /* 10-21 reserved */ + __u16 prim_cu_ssid; /* 22-23 Pimary Control Unit SSID */ + __u8 reserved3[12]; /* 24-35 reserved */ + __u16 sec_cu_ssid; /* 36-37 Secondary Control Unit SSID */ + __u8 reserved4[90]; /* 38-127 reserved */ +} __packed; + +struct dasd_pprc_data_sc4 { + struct dasd_pprc_header header; + struct dasd_pprc_dev_info dev_info[5]; +} __packed; + +#define DASD_BUS_ID_SIZE 20 +#define DASD_CP_ENTRIES 5 + +struct dasd_copy_entry { + char busid[DASD_BUS_ID_SIZE]; + struct dasd_device *device; + bool primary; + bool configured; +}; + +struct dasd_copy_relation { + struct dasd_copy_entry entry[DASD_CP_ENTRIES]; + struct dasd_copy_entry *active; +}; + +int dasd_devmap_set_device_copy_relation(struct ccw_device *, + bool pprc_enabled); + +/* * the struct dasd_discipline is * sth like a table of virtual functions, if you think of dasd_eckd * inheriting dasd... 
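Editor's note: the hunks above add both the PPRC Extended Query channel program and the struct dasd_pprc_data_sc4 layout it fills in, where header.entries says how many of the dev_info slots are valid. Below is a minimal sketch of how a consumer could walk that result, assuming only the dasd_int.h definitions shown above; the helper name and the lss/devno calling convention are illustrative and not part of the patch.

/*
 * Illustrative only: scan a PPRC Extended Query result for a
 * primary-side match. The lss/devno parameters are an assumed way to
 * identify the local device; the real driver keeps that state in its
 * own structures.
 */
static bool dasd_pprc_example_is_primary(const struct dasd_pprc_data_sc4 *data,
                                         __u8 lss, __u8 devno)
{
        unsigned int i;

        for (i = 0; i < data->header.entries && i < ARRAY_SIZE(data->dev_info); i++) {
                const struct dasd_pprc_dev_info *info = &data->dev_info[i];

                if (info->prim_lss == lss && info->primary == devno)
                        return true;
        }
        return false;
}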
@@ -388,6 +437,8 @@ struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *, struct irb *); int (*ese_read)(struct dasd_ccw_req *, struct irb *); + int (*pprc_status)(struct dasd_device *, struct dasd_pprc_data_sc4 *); + bool (*pprc_enabled)(struct dasd_device *); }; extern struct dasd_discipline *dasd_diag_discipline_pointer; @@ -408,6 +459,7 @@ #define DASD_EER_STATECHANGE 3 #define DASD_EER_PPRCSUSPEND 4 #define DASD_EER_NOSPC 5 +#define DASD_EER_AUTOQUIESCE 31 /* DASD path handling */ @@ -584,6 +636,8 @@ struct dasd_profile profile; struct dasd_format_entry format_entry; struct kset *paths_info; + struct dasd_copy_relation *copy; + unsigned long aq_mask; }; struct dasd_block { diff -u linux-aws-5.15-5.15.0/drivers/scsi/Kconfig linux-aws-5.15-5.15.0/drivers/scsi/Kconfig --- linux-aws-5.15-5.15.0/drivers/scsi/Kconfig +++ linux-aws-5.15-5.15.0/drivers/scsi/Kconfig @@ -1296,7 +1296,7 @@ config JAZZ_ESP bool "MIPS JAZZ FAS216 SCSI support" - depends on MACH_JAZZ && SCSI + depends on MACH_JAZZ && SCSI=y select SCSI_SPI_ATTRS help This is the driver for the onboard SCSI host adapter of MIPS Magnum diff -u linux-aws-5.15-5.15.0/drivers/scsi/csiostor/csio_lnode.c linux-aws-5.15-5.15.0/drivers/scsi/csiostor/csio_lnode.c --- linux-aws-5.15-5.15.0/drivers/scsi/csiostor/csio_lnode.c +++ linux-aws-5.15-5.15.0/drivers/scsi/csiostor/csio_lnode.c @@ -1095,7 +1095,7 @@ int csio_is_lnode_ready(struct csio_lnode *ln) { - return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)); + return (csio_get_state(ln) == csio_lns_ready); } /*****************************************************************************/ @@ -1366,15 +1366,15 @@ void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str) { - if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) { + if (csio_get_state(ln) == csio_lns_uninit) { strcpy(str, "UNINIT"); return; } - if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) { + if (csio_get_state(ln) == csio_lns_ready) { strcpy(str, "READY"); return; } - if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) { + if (csio_get_state(ln) == csio_lns_offline) { strcpy(str, "OFFLINE"); return; } diff -u linux-aws-5.15-5.15.0/drivers/scsi/lpfc/lpfc_scsi.c linux-aws-5.15-5.15.0/drivers/scsi/lpfc/lpfc_scsi.c --- linux-aws-5.15-5.15.0/drivers/scsi/lpfc/lpfc_scsi.c +++ linux-aws-5.15-5.15.0/drivers/scsi/lpfc/lpfc_scsi.c @@ -1983,7 +1983,7 @@ * * Returns the number of SGEs added to the SGL. **/ -static int +static uint32_t lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datasegcnt, struct lpfc_io_buf *lpfc_cmd) @@ -1991,8 +1991,8 @@ struct scatterlist *sgde = NULL; /* s/g data entry */ struct sli4_sge_diseed *diseed = NULL; dma_addr_t physaddr; - int i = 0, num_sge = 0, status; - uint32_t reftag; + int i = 0, status; + uint32_t reftag, num_sge = 0; uint8_t txop, rxop; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint32_t rc; @@ -2164,7 +2164,7 @@ * * Returns the number of SGEs added to the SGL. 
**/ -static int +static uint32_t lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge *sgl, int datacnt, int protcnt, struct lpfc_io_buf *lpfc_cmd) @@ -2188,8 +2188,8 @@ uint32_t rc; #endif uint32_t checking = 1; - uint32_t dma_offset = 0; - int num_sge = 0, j = 2; + uint32_t dma_offset = 0, num_sge = 0; + int j = 2; struct sli4_hybrid_sgl *sgl_xtra = NULL; sgpe = scsi_prot_sglist(sc); diff -u linux-aws-5.15-5.15.0/drivers/scsi/mpt3sas/mpt3sas_base.c linux-aws-5.15-5.15.0/drivers/scsi/mpt3sas/mpt3sas_base.c --- linux-aws-5.15-5.15.0/drivers/scsi/mpt3sas/mpt3sas_base.c +++ linux-aws-5.15-5.15.0/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -7238,7 +7238,9 @@ return -EFAULT; } - issue_diag_reset: + return 0; + +issue_diag_reset: rc = _base_diag_reset(ioc); return rc; } diff -u linux-aws-5.15-5.15.0/drivers/soc/fsl/dpio/dpio-service.c linux-aws-5.15-5.15.0/drivers/soc/fsl/dpio/dpio-service.c --- linux-aws-5.15-5.15.0/drivers/soc/fsl/dpio/dpio-service.c +++ linux-aws-5.15-5.15.0/drivers/soc/fsl/dpio/dpio-service.c @@ -485,7 +485,7 @@ struct qbman_eq_desc *ed; int i, ret; - ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL); + ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL); if (!ed) return -ENOMEM; diff -u linux-aws-5.15-5.15.0/drivers/soc/mediatek/mtk-pm-domains.c linux-aws-5.15-5.15.0/drivers/soc/mediatek/mtk-pm-domains.c --- linux-aws-5.15-5.15.0/drivers/soc/mediatek/mtk-pm-domains.c +++ linux-aws-5.15-5.15.0/drivers/soc/mediatek/mtk-pm-domains.c @@ -493,6 +493,11 @@ goto err_put_node; } + /* recursive call to add all subdomains */ + ret = scpsys_add_subdomain(scpsys, child); + if (ret) + goto err_put_node; + ret = pm_genpd_add_subdomain(parent_pd, child_pd); if (ret) { dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n", @@ -502,11 +507,6 @@ dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name, child_pd->name); } - - /* recursive call to add all subdomains */ - ret = scpsys_add_subdomain(scpsys, child); - if (ret) - goto err_put_node; } return 0; @@ -520,9 +520,6 @@ { int ret; - if (scpsys_domain_is_on(pd)) - scpsys_power_off(&pd->genpd); - /* * We're in the error cleanup already, so we only complain, * but won't emit another error on top of the original one. 
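Editor's note: the dpio-service hunk above swaps the first two kcalloc() arguments into their documented order — element count first, element size second — so the allocator's overflow check operates on the intended count. A minimal sketch of that calling convention follows; the example_desc type and helper are hypothetical and only illustrate the call shape.

/*
 * Illustrative only: kcalloc(count, size, flags) zero-initializes an
 * array and returns NULL on overflow or allocation failure. The type
 * and helper below are made up for the example.
 */
struct example_desc {
        u32 words[8];
};

static struct example_desc *example_alloc_descs(unsigned int count)
{
        /* count first, element size second, then the GFP flags */
        return kcalloc(count, sizeof(struct example_desc), GFP_KERNEL);
}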
@@ -532,6 +529,8 @@ dev_err(pd->scpsys->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n", pd->genpd.name, ret); + if (scpsys_domain_is_on(pd)) + scpsys_power_off(&pd->genpd); clk_bulk_put(pd->num_clks, pd->clks); clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks); diff -u linux-aws-5.15-5.15.0/drivers/soc/qcom/rpmhpd.c linux-aws-5.15-5.15.0/drivers/soc/qcom/rpmhpd.c --- linux-aws-5.15-5.15.0/drivers/soc/qcom/rpmhpd.c +++ linux-aws-5.15-5.15.0/drivers/soc/qcom/rpmhpd.c @@ -351,12 +351,15 @@ unsigned int active_corner, sleep_corner; unsigned int this_active_corner = 0, this_sleep_corner = 0; unsigned int peer_active_corner = 0, peer_sleep_corner = 0; + unsigned int peer_enabled_corner; to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner); - if (peer && peer->enabled) - to_active_sleep(peer, peer->corner, &peer_active_corner, + if (peer && peer->enabled) { + peer_enabled_corner = max(peer->corner, peer->enable_corner); + to_active_sleep(peer, peer_enabled_corner, &peer_active_corner, &peer_sleep_corner); + } active_corner = max(this_active_corner, peer_active_corner); diff -u linux-aws-5.15-5.15.0/drivers/spi/spi-mt65xx.c linux-aws-5.15-5.15.0/drivers/spi/spi-mt65xx.c --- linux-aws-5.15-5.15.0/drivers/spi/spi-mt65xx.c +++ linux-aws-5.15-5.15.0/drivers/spi/spi-mt65xx.c @@ -659,17 +659,19 @@ mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len); mtk_spi_setup_packet(master); - cnt = mdata->xfer_len / 4; - iowrite32_rep(mdata->base + SPI_TX_DATA_REG, - trans->tx_buf + mdata->num_xfered, cnt); + if (trans->tx_buf) { + cnt = mdata->xfer_len / 4; + iowrite32_rep(mdata->base + SPI_TX_DATA_REG, + trans->tx_buf + mdata->num_xfered, cnt); - remainder = mdata->xfer_len % 4; - if (remainder > 0) { - reg_val = 0; - memcpy(®_val, - trans->tx_buf + (cnt * 4) + mdata->num_xfered, - remainder); - writel(reg_val, mdata->base + SPI_TX_DATA_REG); + remainder = mdata->xfer_len % 4; + if (remainder > 0) { + reg_val = 0; + memcpy(®_val, + trans->tx_buf + (cnt * 4) + mdata->num_xfered, + remainder); + writel(reg_val, mdata->base + SPI_TX_DATA_REG); + } } mtk_spi_enable_transfer(master); diff -u linux-aws-5.15-5.15.0/drivers/spi/spi-sh-msiof.c linux-aws-5.15-5.15.0/drivers/spi/spi-sh-msiof.c --- linux-aws-5.15-5.15.0/drivers/spi/spi-sh-msiof.c +++ linux-aws-5.15-5.15.0/drivers/spi/spi-sh-msiof.c @@ -137,14 +137,14 @@ /* SIFCTR */ #define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ -#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ -#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ -#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ -#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ -#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ -#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ -#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */ -#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ +#define SIFCTR_TFWM_64 (0UL << 29) /* Transfer Request when 64 empty stages */ +#define SIFCTR_TFWM_32 (1UL << 29) /* Transfer Request when 32 empty stages */ +#define SIFCTR_TFWM_24 (2UL << 29) /* Transfer Request when 24 empty stages */ +#define SIFCTR_TFWM_16 (3UL << 29) /* Transfer Request when 16 empty stages */ +#define SIFCTR_TFWM_12 (4UL << 29) /* Transfer Request when 12 empty stages */ +#define SIFCTR_TFWM_8 (5UL << 29) /* Transfer Request when 8 empty stages */ +#define 
SIFCTR_TFWM_4 (6UL << 29) /* Transfer Request when 4 empty stages */ +#define SIFCTR_TFWM_1 (7UL << 29) /* Transfer Request when 1 empty stage */ #define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ #define SIFCTR_TFUA_SHIFT 20 #define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT) diff -u linux-aws-5.15-5.15.0/drivers/target/target_core_device.c linux-aws-5.15-5.15.0/drivers/target/target_core_device.c --- linux-aws-5.15-5.15.0/drivers/target/target_core_device.c +++ linux-aws-5.15-5.15.0/drivers/target/target_core_device.c @@ -147,7 +147,6 @@ struct se_session *se_sess = se_cmd->se_sess; struct se_node_acl *nacl = se_sess->se_node_acl; struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; - unsigned long flags; rcu_read_lock(); deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun); @@ -178,10 +177,6 @@ se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev); - spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags); - list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); - spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags); - return 0; } EXPORT_SYMBOL(transport_lookup_tmr_lun); diff -u linux-aws-5.15-5.15.0/drivers/target/target_core_transport.c linux-aws-5.15-5.15.0/drivers/target/target_core_transport.c --- linux-aws-5.15-5.15.0/drivers/target/target_core_transport.c +++ linux-aws-5.15-5.15.0/drivers/target/target_core_transport.c @@ -3568,6 +3568,10 @@ unsigned long flags; bool aborted = false; + spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags); + list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list); + spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->transport_state & CMD_T_ABORTED) { aborted = true; diff -u linux-aws-5.15-5.15.0/drivers/tty/serial/8250/8250_exar.c linux-aws-5.15-5.15.0/drivers/tty/serial/8250/8250_exar.c --- linux-aws-5.15-5.15.0/drivers/tty/serial/8250/8250_exar.c +++ linux-aws-5.15-5.15.0/drivers/tty/serial/8250/8250_exar.c @@ -715,6 +715,7 @@ for (i = 0; i < priv->nr; i++) serial8250_unregister_port(priv->line[i]); + /* Ensure that every init quirk is properly torn down */ if (priv->board->exit) priv->board->exit(pcidev); } @@ -729,10 +730,6 @@ if (priv->line[i] >= 0) serial8250_suspend_port(priv->line[i]); - /* Ensure that every init quirk is properly torn down */ - if (priv->board->exit) - priv->board->exit(pcidev); - return 0; } diff -u linux-aws-5.15-5.15.0/drivers/tty/serial/8250/8250_port.c linux-aws-5.15-5.15.0/drivers/tty/serial/8250/8250_port.c --- linux-aws-5.15-5.15.0/drivers/tty/serial/8250/8250_port.c +++ linux-aws-5.15-5.15.0/drivers/tty/serial/8250/8250_port.c @@ -670,13 +670,6 @@ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; } - /* clamp the delays to [0, 100ms] */ - rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U); - rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U); - - memset(rs485->padding, 0, sizeof(rs485->padding)); - port->rs485 = *rs485; - gpiod_set_value(port->rs485_term_gpio, rs485->flags & SER_RS485_TERMINATE_BUS); @@ -684,15 +677,8 @@ * Both serial8250_em485_init() and serial8250_em485_destroy() * are idempotent. 
*/ - if (rs485->flags & SER_RS485_ENABLED) { - int ret = serial8250_em485_init(up); - - if (ret) { - rs485->flags &= ~SER_RS485_ENABLED; - port->rs485.flags &= ~SER_RS485_ENABLED; - } - return ret; - } + if (rs485->flags & SER_RS485_ENABLED) + return serial8250_em485_init(up); serial8250_em485_destroy(up); return 0; diff -u linux-aws-5.15-5.15.0/drivers/tty/serial/amba-pl011.c linux-aws-5.15-5.15.0/drivers/tty/serial/amba-pl011.c --- linux-aws-5.15-5.15.0/drivers/tty/serial/amba-pl011.c +++ linux-aws-5.15-5.15.0/drivers/tty/serial/amba-pl011.c @@ -1350,11 +1350,41 @@ } } +static void pl011_rs485_tx_start(struct uart_amba_port *uap) +{ + struct uart_port *port = &uap->port; + u32 cr; + + /* Enable transmitter */ + cr = pl011_read(uap, REG_CR); + cr |= UART011_CR_TXE; + + /* Disable receiver if half-duplex */ + if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) + cr &= ~UART011_CR_RXE; + + if (port->rs485.flags & SER_RS485_RTS_ON_SEND) + cr &= ~UART011_CR_RTS; + else + cr |= UART011_CR_RTS; + + pl011_write(cr, uap, REG_CR); + + if (port->rs485.delay_rts_before_send) + mdelay(port->rs485.delay_rts_before_send); + + uap->rs485_tx_started = true; +} + static void pl011_start_tx(struct uart_port *port) { struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); + if ((uap->port.rs485.flags & SER_RS485_ENABLED) && + !uap->rs485_tx_started) + pl011_rs485_tx_start(uap); + if (!pl011_dma_tx_start(uap)) pl011_start_tx_pio(uap); } @@ -1436,42 +1466,12 @@ return true; } -static void pl011_rs485_tx_start(struct uart_amba_port *uap) -{ - struct uart_port *port = &uap->port; - u32 cr; - - /* Enable transmitter */ - cr = pl011_read(uap, REG_CR); - cr |= UART011_CR_TXE; - - /* Disable receiver if half-duplex */ - if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) - cr &= ~UART011_CR_RXE; - - if (port->rs485.flags & SER_RS485_RTS_ON_SEND) - cr &= ~UART011_CR_RTS; - else - cr |= UART011_CR_RTS; - - pl011_write(cr, uap, REG_CR); - - if (port->rs485.delay_rts_before_send) - mdelay(port->rs485.delay_rts_before_send); - - uap->rs485_tx_started = true; -} - /* Returns true if tx interrupts have to be (kept) enabled */ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) { struct circ_buf *xmit = &uap->port.state->xmit; int count = uap->fifosize >> 1; - if ((uap->port.rs485.flags & SER_RS485_ENABLED) && - !uap->rs485_tx_started) - pl011_rs485_tx_start(uap); - if (uap->port.x_char) { if (!pl011_tx_char(uap, uap->port.x_char, from_irq)) return true; diff -u linux-aws-5.15-5.15.0/drivers/tty/serial/max310x.c linux-aws-5.15-5.15.0/drivers/tty/serial/max310x.c --- linux-aws-5.15-5.15.0/drivers/tty/serial/max310x.c +++ linux-aws-5.15-5.15.0/drivers/tty/serial/max310x.c @@ -72,7 +72,7 @@ #define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */ /* Extended registers */ -#define MAX310X_REVID_EXTREG MAX310X_REG_05 /* Revision ID */ +#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */ /* IRQ register bits */ #define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */ @@ -235,6 +235,10 @@ #define MAX310x_REV_MASK (0xf8) #define MAX310X_WRITE_BIT 0x80 +/* Port startup definitions */ +#define MAX310X_PORT_STARTUP_WAIT_RETRIES 20 /* Number of retries */ +#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS 10 /* Delay between retries */ + /* Crystal-related definitions */ #define MAX310X_XTAL_WAIT_RETRIES 20 /* Number of retries */ #define MAX310X_XTAL_WAIT_DELAY_MS 10 /* Delay between retries */ @@ -249,6 +253,12 @@ #define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable 
*/ #define MAX14830_REV_ID (0xb0) +struct max310x_if_cfg { + int (*extended_reg_enable)(struct device *dev, bool enable); + + unsigned int rev_id_reg; +}; + struct max310x_devtype { char name[9]; int nr; @@ -262,9 +272,8 @@ struct work_struct tx_work; struct work_struct md_work; struct work_struct rs_work; + struct regmap *regmap; - u8 wr_header; - u8 rd_header; u8 rx_buf[MAX310X_FIFO_SIZE]; }; #define to_max310x_port(_port) \ @@ -272,6 +281,7 @@ struct max310x_port { const struct max310x_devtype *devtype; + const struct max310x_if_cfg *if_cfg; struct regmap *regmap; struct clk *clk; #ifdef CONFIG_GPIOLIB @@ -293,26 +303,26 @@ static u8 max310x_port_read(struct uart_port *port, u8 reg) { - struct max310x_port *s = dev_get_drvdata(port->dev); + struct max310x_one *one = to_max310x_port(port); unsigned int val = 0; - regmap_read(s->regmap, port->iobase + reg, &val); + regmap_read(one->regmap, reg, &val); return val; } static void max310x_port_write(struct uart_port *port, u8 reg, u8 val) { - struct max310x_port *s = dev_get_drvdata(port->dev); + struct max310x_one *one = to_max310x_port(port); - regmap_write(s->regmap, port->iobase + reg, val); + regmap_write(one->regmap, reg, val); } static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val) { - struct max310x_port *s = dev_get_drvdata(port->dev); + struct max310x_one *one = to_max310x_port(port); - regmap_update_bits(s->regmap, port->iobase + reg, mask, val); + regmap_update_bits(one->regmap, reg, mask, val); } static int max3107_detect(struct device *dev) @@ -361,13 +371,12 @@ unsigned int val = 0; int ret; - ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, - MAX310X_EXTREG_ENBL); + ret = s->if_cfg->extended_reg_enable(dev, true); if (ret) return ret; - regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val); - regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL); + regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val); + s->if_cfg->extended_reg_enable(dev, false); if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) { dev_err(dev, "%s ID 0x%02x does not match\n", s->devtype->name, val); @@ -392,13 +401,12 @@ unsigned int val = 0; int ret; - ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, - MAX310X_EXTREG_ENBL); + ret = s->if_cfg->extended_reg_enable(dev, true); if (ret) return ret; - regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val); - regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL); + regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val); + s->if_cfg->extended_reg_enable(dev, false); if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) { dev_err(dev, "%s ID 0x%02x does not match\n", s->devtype->name, val); @@ -451,7 +459,7 @@ static bool max310x_reg_writeable(struct device *dev, unsigned int reg) { - switch (reg & 0x1f) { + switch (reg) { case MAX310X_IRQSTS_REG: case MAX310X_LSR_IRQSTS_REG: case MAX310X_SPCHR_IRQSTS_REG: @@ -468,7 +476,7 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg) { - switch (reg & 0x1f) { + switch (reg) { case MAX310X_RHR_REG: case MAX310X_IRQSTS_REG: case MAX310X_LSR_IRQSTS_REG: @@ -490,7 +498,7 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg) { - switch (reg & 0x1f) { + switch (reg) { case MAX310X_RHR_REG: case MAX310X_IRQSTS_REG: case MAX310X_SPCHR_IRQSTS_REG: @@ -503,6 +511,11 @@ return false; } +static bool max310x_reg_noinc(struct device *dev, unsigned int reg) +{ + return reg == MAX310X_RHR_REG; +} + static int max310x_set_baud(struct uart_port *port, int baud) { unsigned int mode = 0, div = 0, frac = 0, 
c = 0, F = 0; @@ -636,31 +649,15 @@ static void max310x_batch_write(struct uart_port *port, u8 *txbuf, unsigned int len) { struct max310x_one *one = to_max310x_port(port); - struct spi_transfer xfer[] = { - { - .tx_buf = &one->wr_header, - .len = sizeof(one->wr_header), - }, { - .tx_buf = txbuf, - .len = len, - } - }; - spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer)); + + regmap_noinc_write(one->regmap, MAX310X_THR_REG, txbuf, len); } static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int len) { struct max310x_one *one = to_max310x_port(port); - struct spi_transfer xfer[] = { - { - .tx_buf = &one->rd_header, - .len = sizeof(one->rd_header), - }, { - .rx_buf = rxbuf, - .len = len, - } - }; - spi_sync_transfer(to_spi_device(port->dev), xfer, ARRAY_SIZE(xfer)); + + regmap_noinc_read(one->regmap, MAX310X_RHR_REG, rxbuf, len); } static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen) @@ -1263,15 +1260,17 @@ #endif static int max310x_probe(struct device *dev, const struct max310x_devtype *devtype, - struct regmap *regmap, int irq) + const struct max310x_if_cfg *if_cfg, + struct regmap *regmaps[], int irq) { int i, ret, fmin, fmax, freq; struct max310x_port *s; s32 uartclk = 0; bool xtal; - if (IS_ERR(regmap)) - return PTR_ERR(regmap); + for (i = 0; i < devtype->nr; i++) + if (IS_ERR(regmaps[i])) + return PTR_ERR(regmaps[i]); /* Alloc port structure */ s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL); @@ -1318,8 +1317,9 @@ goto out_clk; } - s->regmap = regmap; + s->regmap = regmaps[0]; s->devtype = devtype; + s->if_cfg = if_cfg; dev_set_drvdata(dev, s); /* Check device to ensure we are talking to what we expect */ @@ -1328,22 +1328,30 @@ goto out_clk; for (i = 0; i < devtype->nr; i++) { - unsigned int offs = i << 5; + bool started = false; + unsigned int try = 0, val = 0; /* Reset port */ - regmap_write(s->regmap, MAX310X_MODE2_REG + offs, + regmap_write(regmaps[i], MAX310X_MODE2_REG, MAX310X_MODE2_RST_BIT); /* Clear port reset */ - regmap_write(s->regmap, MAX310X_MODE2_REG + offs, 0); + regmap_write(regmaps[i], MAX310X_MODE2_REG, 0); /* Wait for port startup */ do { - regmap_read(s->regmap, - MAX310X_BRGDIVLSB_REG + offs, &ret); - } while (ret != 0x01); + msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS); + regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &val); + + if (val == 0x01) + started = true; + } while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES)); + + if (!started) { + ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n"); + goto out_uart; + } - regmap_write(s->regmap, MAX310X_MODE1_REG + offs, - devtype->mode1); + regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1); } uartclk = max310x_set_ref_clk(dev, s, freq, xtal); @@ -1371,11 +1379,13 @@ s->p[i].port.fifosize = MAX310X_FIFO_SIZE; s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY; s->p[i].port.iotype = UPIO_PORT; - s->p[i].port.iobase = i * 0x20; + s->p[i].port.iobase = i; s->p[i].port.membase = (void __iomem *)~0; s->p[i].port.uartclk = uartclk; s->p[i].port.rs485_config = max310x_rs485_config; s->p[i].port.ops = &max310x_ops; + s->p[i].regmap = regmaps[i]; + /* Disable all interrupts */ max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0); /* Clear IRQ status register */ @@ -1386,10 +1396,6 @@ INIT_WORK(&s->p[i].md_work, max310x_md_proc); /* Initialize queue for changing RS485 mode */ INIT_WORK(&s->p[i].rs_work, max310x_rs_proc); - /* Initialize SPI-transfer buffers */ - s->p[i].wr_header = (s->p[i].port.iobase + MAX310X_THR_REG) | 
- MAX310X_WRITE_BIT; - s->p[i].rd_header = (s->p[i].port.iobase + MAX310X_RHR_REG); /* Register port */ ret = uart_add_one_port(&max310x_uart, &s->p[i].port); @@ -1427,7 +1433,7 @@ if (!ret) return 0; - dev_err(dev, "Unable to reguest IRQ %i\n", irq); + dev_err(dev, "Unable to request IRQ %i\n", irq); out_uart: for (i = 0; i < devtype->nr; i++) { @@ -1476,16 +1482,35 @@ .val_bits = 8, .write_flag_mask = MAX310X_WRITE_BIT, .cache_type = REGCACHE_RBTREE, + .max_register = MAX310X_REG_1F, .writeable_reg = max310x_reg_writeable, .volatile_reg = max310x_reg_volatile, .precious_reg = max310x_reg_precious, + .writeable_noinc_reg = max310x_reg_noinc, + .readable_noinc_reg = max310x_reg_noinc, + .max_raw_read = MAX310X_FIFO_SIZE, + .max_raw_write = MAX310X_FIFO_SIZE, }; #ifdef CONFIG_SPI_MASTER +static int max310x_spi_extended_reg_enable(struct device *dev, bool enable) +{ + struct max310x_port *s = dev_get_drvdata(dev); + + return regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, + enable ? MAX310X_EXTREG_ENBL : MAX310X_EXTREG_DSBL); +} + +static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = { + .extended_reg_enable = max310x_spi_extended_reg_enable, + .rev_id_reg = MAX310X_SPI_REVID_EXTREG, +}; + static int max310x_spi_probe(struct spi_device *spi) { const struct max310x_devtype *devtype; - struct regmap *regmap; + struct regmap *regmaps[4]; + unsigned int i; int ret; /* Setup SPI bus */ @@ -1500,10 +1525,14 @@ if (!devtype) devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data; - regcfg.max_register = devtype->nr * 0x20 - 1; - regmap = devm_regmap_init_spi(spi, ®cfg); + for (i = 0; i < devtype->nr; i++) { + u8 port_mask = i * 0x20; + regcfg.read_flag_mask = port_mask; + regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT; + regmaps[i] = devm_regmap_init_spi(spi, ®cfg); + } - return max310x_probe(&spi->dev, devtype, regmap, spi->irq); + return max310x_probe(&spi->dev, devtype, &max310x_spi_if_cfg, regmaps, spi->irq); } static int max310x_spi_remove(struct spi_device *spi) diff -u linux-aws-5.15-5.15.0/drivers/tty/serial/samsung_tty.c linux-aws-5.15-5.15.0/drivers/tty/serial/samsung_tty.c --- linux-aws-5.15-5.15.0/drivers/tty/serial/samsung_tty.c +++ linux-aws-5.15-5.15.0/drivers/tty/serial/samsung_tty.c @@ -991,11 +991,10 @@ if ((ufstat & info->tx_fifomask) != 0 || (ufstat & info->tx_fifofull)) return 0; - - return 1; + return TIOCSER_TEMT; } - return s3c24xx_serial_txempty_nofifo(port); + return s3c24xx_serial_txempty_nofifo(port) ? TIOCSER_TEMT : 0; } /* no modem control lines */ diff -u linux-aws-5.15-5.15.0/drivers/tty/vt/vt.c linux-aws-5.15-5.15.0/drivers/tty/vt/vt.c --- linux-aws-5.15-5.15.0/drivers/tty/vt/vt.c +++ linux-aws-5.15-5.15.0/drivers/tty/vt/vt.c @@ -2515,7 +2515,7 @@ } return; case EScsiignore: - if (c >= 20 && c <= 0x3f) + if (c >= 0x20 && c <= 0x3f) return; vc->vc_state = ESnormal; return; diff -u linux-aws-5.15-5.15.0/drivers/usb/cdns3/cdns3-gadget.c linux-aws-5.15-5.15.0/drivers/usb/cdns3/cdns3-gadget.c --- linux-aws-5.15-5.15.0/drivers/usb/cdns3/cdns3-gadget.c +++ linux-aws-5.15-5.15.0/drivers/usb/cdns3/cdns3-gadget.c @@ -826,7 +826,11 @@ return; } - if (request->complete) { + /* + * zlp request is appended by driver, needn't call usb_gadget_giveback_request() to notify + * gadget composite driver. 
+ */ + if (request->complete && request->buf != priv_dev->zlp_buf) { spin_unlock(&priv_dev->lock); usb_gadget_giveback_request(&priv_ep->endpoint, request); @@ -2537,11 +2541,11 @@ while (!list_empty(&priv_ep->wa2_descmiss_req_list)) { priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list); + list_del_init(&priv_req->list); kfree(priv_req->request.buf); cdns3_gadget_ep_free_request(&priv_ep->endpoint, &priv_req->request); - list_del_init(&priv_req->list); --priv_ep->wa2_counter; } diff -u linux-aws-5.15-5.15.0/drivers/usb/cdns3/core.c linux-aws-5.15-5.15.0/drivers/usb/cdns3/core.c --- linux-aws-5.15-5.15.0/drivers/usb/cdns3/core.c +++ linux-aws-5.15-5.15.0/drivers/usb/cdns3/core.c @@ -394,7 +394,6 @@ return ret; } - /** * cdns_wakeup_irq - interrupt handler for wakeup events * @irq: irq number for cdns3/cdnsp core device diff -u linux-aws-5.15-5.15.0/drivers/usb/cdns3/drd.c linux-aws-5.15-5.15.0/drivers/usb/cdns3/drd.c --- linux-aws-5.15-5.15.0/drivers/usb/cdns3/drd.c +++ linux-aws-5.15-5.15.0/drivers/usb/cdns3/drd.c @@ -156,7 +156,8 @@ */ static void cdns_otg_disable_irq(struct cdns *cdns) { - writel(0, &cdns->otg_irq_regs->ien); + if (cdns->version) + writel(0, &cdns->otg_irq_regs->ien); } /** @@ -418,15 +419,20 @@ cdns->otg_regs = (void __iomem *)&cdns->otg_v1_regs->cmd; - if (readl(&cdns->otg_cdnsp_regs->did) == OTG_CDNSP_DID) { + state = readl(&cdns->otg_cdnsp_regs->did); + + if (OTG_CDNSP_CHECK_DID(state)) { cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *) &cdns->otg_cdnsp_regs->ien; cdns->version = CDNSP_CONTROLLER_V2; - } else { + } else if (OTG_CDNS3_CHECK_DID(state)) { cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *) &cdns->otg_v1_regs->ien; writel(1, &cdns->otg_v1_regs->simulate); cdns->version = CDNS3_CONTROLLER_V1; + } else { + dev_err(cdns->dev, "not supporte DID=0x%08x\n", state); + return -EINVAL; } dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n", @@ -479,7 +485,6 @@ return 0; } - /* Indicate the cdns3 core was power lost before */ bool cdns_power_is_lost(struct cdns *cdns) { diff -u linux-aws-5.15-5.15.0/drivers/usb/cdns3/host.c linux-aws-5.15-5.15.0/drivers/usb/cdns3/host.c --- linux-aws-5.15-5.15.0/drivers/usb/cdns3/host.c +++ linux-aws-5.15-5.15.0/drivers/usb/cdns3/host.c @@ -17,6 +17,11 @@ #include "../host/xhci.h" #include "../host/xhci-plat.h" +/* + * The XECP_PORT_CAP_REG and XECP_AUX_CTRL_REG1 exist only + * in Cadence USB3 dual-role controller, so it can't be used + * with Cadence CDNSP dual-role controller. 
+ */ #define XECP_PORT_CAP_REG 0x8000 #define XECP_AUX_CTRL_REG1 0x8120 @@ -56,6 +61,8 @@ .resume_quirk = xhci_cdns3_resume_quirk, }; +static const struct xhci_plat_priv xhci_plat_cdnsp_xhci; + static int __cdns_host_init(struct cdns *cdns) { struct platform_device *xhci; @@ -80,8 +87,13 @@ goto err1; } - cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci, - sizeof(struct xhci_plat_priv), GFP_KERNEL); + if (cdns->version < CDNSP_CONTROLLER_V2) + cdns->xhci_plat_data = kmemdup(&xhci_plat_cdns3_xhci, + sizeof(struct xhci_plat_priv), GFP_KERNEL); + else + cdns->xhci_plat_data = kmemdup(&xhci_plat_cdnsp_xhci, + sizeof(struct xhci_plat_priv), GFP_KERNEL); + if (!cdns->xhci_plat_data) { ret = -ENOMEM; goto err1; diff -u linux-aws-5.15-5.15.0/drivers/usb/core/port.c linux-aws-5.15-5.15.0/drivers/usb/core/port.c --- linux-aws-5.15-5.15.0/drivers/usb/core/port.c +++ linux-aws-5.15-5.15.0/drivers/usb/core/port.c @@ -295,8 +295,10 @@ { struct usb_port *port_dev = to_usb_port(dev); - if (port_dev->child) + if (port_dev->child) { usb_disable_usb2_hardware_lpm(port_dev->child); + usb_unlocked_disable_lpm(port_dev->child); + } } static const struct dev_pm_ops usb_port_pm_ops = { diff -u linux-aws-5.15-5.15.0/drivers/usb/dwc3/gadget.c linux-aws-5.15-5.15.0/drivers/usb/dwc3/gadget.c --- linux-aws-5.15-5.15.0/drivers/usb/dwc3/gadget.c +++ linux-aws-5.15-5.15.0/drivers/usb/dwc3/gadget.c @@ -2501,6 +2501,11 @@ int ret; spin_lock_irqsave(&dwc->lock, flags); + if (!dwc->pullups_connected) { + spin_unlock_irqrestore(&dwc->lock, flags); + return 0; + } + dwc->connected = false; /* diff -u linux-aws-5.15-5.15.0/drivers/usb/gadget/function/f_ncm.c linux-aws-5.15-5.15.0/drivers/usb/gadget/function/f_ncm.c --- linux-aws-5.15-5.15.0/drivers/usb/gadget/function/f_ncm.c +++ linux-aws-5.15-5.15.0/drivers/usb/gadget/function/f_ncm.c @@ -1344,7 +1344,15 @@ "Parsed NTB with %d frames\n", dgram_counter); to_process -= block_len; - if (to_process != 0) { + + /* + * Windows NCM driver avoids USB ZLPs by adding a 1-byte + * zero pad as needed. + */ + if (to_process == 1 && + (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) { + to_process--; + } else if (to_process > 0) { ntb_ptr = (unsigned char *)(ntb_ptr + block_len); goto parse_ntb; } diff -u linux-aws-5.15-5.15.0/drivers/usb/host/xhci-hub.c linux-aws-5.15-5.15.0/drivers/usb/host/xhci-hub.c --- linux-aws-5.15-5.15.0/drivers/usb/host/xhci-hub.c +++ linux-aws-5.15-5.15.0/drivers/usb/host/xhci-hub.c @@ -905,7 +905,7 @@ } static int xhci_handle_usb2_port_link_resume(struct xhci_port *port, - u32 *status, u32 portsc, + u32 portsc, unsigned long *flags) { struct xhci_bus_state *bus_state; @@ -920,11 +920,10 @@ wIndex = port->hcd_portnum; if ((portsc & PORT_RESET) || !(portsc & PORT_PE)) { - *status = 0xffffffff; return -EINVAL; } /* did port event handler already start resume timing? */ - if (!bus_state->resume_done[wIndex]) { + if (!port->resume_timestamp) { /* If not, maybe we are in a host initated resume? */ if (test_bit(wIndex, &bus_state->resuming_ports)) { /* Host initated resume doesn't time the resume @@ -941,28 +940,29 @@ msecs_to_jiffies(USB_RESUME_TIMEOUT); set_bit(wIndex, &bus_state->resuming_ports); - bus_state->resume_done[wIndex] = timeout; + port->resume_timestamp = timeout; mod_timer(&hcd->rh_timer, timeout); usb_hcd_start_port_resume(&hcd->self, wIndex); } /* Has resume been signalled for USB_RESUME_TIME yet? 
*/ - } else if (time_after_eq(jiffies, bus_state->resume_done[wIndex])) { + } else if (time_after_eq(jiffies, port->resume_timestamp)) { int time_left; xhci_dbg(xhci, "resume USB2 port %d-%d\n", hcd->self.busnum, wIndex + 1); - bus_state->resume_done[wIndex] = 0; + port->resume_timestamp = 0; clear_bit(wIndex, &bus_state->resuming_ports); - set_bit(wIndex, &bus_state->rexit_ports); + reinit_completion(&port->rexit_done); + port->rexit_active = true; xhci_test_and_clear_bit(xhci, port, PORT_PLC); xhci_set_link_state(xhci, port, XDEV_U0); spin_unlock_irqrestore(&xhci->lock, *flags); time_left = wait_for_completion_timeout( - &bus_state->rexit_done[wIndex], + &port->rexit_done, msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT_MS)); spin_lock_irqsave(&xhci->lock, *flags); @@ -971,7 +971,6 @@ wIndex + 1); if (!slot_id) { xhci_dbg(xhci, "slot_id is zero\n"); - *status = 0xffffffff; return -ENODEV; } xhci_ring_device(xhci, slot_id); @@ -980,22 +979,19 @@ xhci_warn(xhci, "Port resume timed out, port %d-%d: 0x%x\n", hcd->self.busnum, wIndex + 1, port_status); - *status |= USB_PORT_STAT_SUSPEND; - clear_bit(wIndex, &bus_state->rexit_ports); + /* + * keep rexit_active set if U0 transition failed so we + * know to report PORT_STAT_SUSPEND status back to + * usbcore. It will be cleared later once the port is + * out of RESUME/U3 state + */ } usb_hcd_end_port_resume(&hcd->self, wIndex); bus_state->port_c_suspend |= 1 << wIndex; bus_state->suspended_ports &= ~(1 << wIndex); - } else { - /* - * The resume has been signaling for less than - * USB_RESUME_TIME. Report the port status as SUSPEND, - * let the usbcore check port status again and clear - * resume signaling later. - */ - *status |= USB_PORT_STAT_SUSPEND; } + return 0; } @@ -1047,19 +1043,19 @@ *status |= USB_PORT_STAT_C_CONFIG_ERROR << 16; /* USB3 specific wPortStatus bits */ - if (portsc & PORT_POWER) { + if (portsc & PORT_POWER) *status |= USB_SS_PORT_STAT_POWER; - /* link state handling */ - if (link_state == XDEV_U0) - bus_state->suspended_ports &= ~(1 << portnum); - } - /* remote wake resume signaling complete */ - if (bus_state->port_remote_wakeup & (1 << portnum) && + /* no longer suspended or resuming */ + if (link_state != XDEV_U3 && link_state != XDEV_RESUME && link_state != XDEV_RECOVERY) { - bus_state->port_remote_wakeup &= ~(1 << portnum); - usb_hcd_end_port_resume(&hcd->self, portnum); + /* remote wake resume signaling complete */ + if (bus_state->port_remote_wakeup & (1 << portnum)) { + bus_state->port_remote_wakeup &= ~(1 << portnum); + usb_hcd_end_port_resume(&hcd->self, portnum); + } + bus_state->suspended_ports &= ~(1 << portnum); } xhci_hub_report_usb3_link_state(xhci, status, portsc); @@ -1072,7 +1068,7 @@ struct xhci_bus_state *bus_state; u32 link_state; u32 portnum; - int ret; + int err; bus_state = &port->rhub->bus_state; link_state = portsc & PORT_PLS_MASK; @@ -1088,23 +1084,36 @@ if (link_state == XDEV_U2) *status |= USB_PORT_STAT_L1; if (link_state == XDEV_U0) { - if (bus_state->resume_done[portnum]) - usb_hcd_end_port_resume(&port->rhub->hcd->self, - portnum); - bus_state->resume_done[portnum] = 0; - clear_bit(portnum, &bus_state->resuming_ports); if (bus_state->suspended_ports & (1 << portnum)) { bus_state->suspended_ports &= ~(1 << portnum); bus_state->port_c_suspend |= 1 << portnum; } } if (link_state == XDEV_RESUME) { - ret = xhci_handle_usb2_port_link_resume(port, status, - portsc, flags); - if (ret) - return; + err = xhci_handle_usb2_port_link_resume(port, portsc, + flags); + if (err < 0) + *status = 0xffffffff; + else if 
(port->resume_timestamp || port->rexit_active) + *status |= USB_PORT_STAT_SUSPEND; } } + + /* + * Clear usb2 resume signalling variables if port is no longer suspended + * or resuming. Port either resumed to U0/U1/U2, disconnected, or in a + * error state. Resume related variables should be cleared in all those cases. + */ + if (link_state != XDEV_U3 && link_state != XDEV_RESUME) { + if (port->resume_timestamp || + test_bit(portnum, &bus_state->resuming_ports)) { + port->resume_timestamp = 0; + clear_bit(portnum, &bus_state->resuming_ports); + usb_hcd_end_port_resume(&port->rhub->hcd->self, portnum); + } + port->rexit_active = 0; + bus_state->suspended_ports &= ~(1 << portnum); + } } /* @@ -1159,18 +1168,6 @@ else xhci_get_usb2_port_status(port, &status, raw_port_status, flags); - /* - * Clear stale usb2 resume signalling variables in case port changed - * state during resume signalling. For example on error - */ - if ((bus_state->resume_done[wIndex] || - test_bit(wIndex, &bus_state->resuming_ports)) && - (raw_port_status & PORT_PLS_MASK) != XDEV_U3 && - (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) { - bus_state->resume_done[wIndex] = 0; - clear_bit(wIndex, &bus_state->resuming_ports); - usb_hcd_end_port_resume(&hcd->self, wIndex); - } if (bus_state->port_c_suspend & (1 << wIndex)) status |= USB_PORT_STAT_C_SUSPEND << 16; @@ -1194,11 +1191,14 @@ u16 test_mode = 0; struct xhci_hub *rhub; struct xhci_port **ports; + struct xhci_port *port; + int portnum1; rhub = xhci_get_rhub(hcd); ports = rhub->ports; max_ports = rhub->num_ports; bus_state = &rhub->bus_state; + portnum1 = wIndex & 0xff; spin_lock_irqsave(&xhci->lock, flags); switch (typeReq) { @@ -1232,10 +1232,12 @@ spin_unlock_irqrestore(&xhci->lock, flags); return retval; case GetPortStatus: - if (!wIndex || wIndex > max_ports) + if (!portnum1 || portnum1 > max_ports) goto error; + wIndex--; - temp = readl(ports[wIndex]->addr); + port = ports[portnum1 - 1]; + temp = readl(port->addr); if (temp == ~(u32)0) { xhci_hc_died(xhci); retval = -ENODEV; @@ -1248,7 +1250,7 @@ goto error; xhci_dbg(xhci, "Get port status %d-%d read: 0x%x, return 0x%x", - hcd->self.busnum, wIndex + 1, temp, status); + hcd->self.busnum, portnum1, temp, status); put_unaligned(cpu_to_le32(status), (__le32 *) buf); /* if USB 3.1 extended port status return additional 4 bytes */ @@ -1260,7 +1262,7 @@ retval = -EINVAL; break; } - port_li = readl(ports[wIndex]->addr + PORTLI); + port_li = readl(port->addr + PORTLI); status = xhci_get_ext_port_status(temp, port_li); put_unaligned_le32(status, &buf[4]); } @@ -1274,11 +1276,14 @@ test_mode = (wIndex & 0xff00) >> 8; /* The MSB of wIndex is the U1/U2 timeout */ timeout = (wIndex & 0xff00) >> 8; + wIndex &= 0xff; - if (!wIndex || wIndex > max_ports) + if (!portnum1 || portnum1 > max_ports) goto error; + + port = ports[portnum1 - 1]; wIndex--; - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); if (temp == ~(u32)0) { xhci_hc_died(xhci); retval = -ENODEV; @@ -1288,11 +1293,10 @@ /* FIXME: What new port features do we need to support? 
*/ switch (wValue) { case USB_PORT_FEAT_SUSPEND: - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); if ((temp & PORT_PLS_MASK) != XDEV_U0) { /* Resume the port to U0 first */ - xhci_set_link_state(xhci, ports[wIndex], - XDEV_U0); + xhci_set_link_state(xhci, port, XDEV_U0); spin_unlock_irqrestore(&xhci->lock, flags); msleep(10); spin_lock_irqsave(&xhci->lock, flags); @@ -1301,16 +1305,16 @@ * a port unless the port reports that it is in the * enabled (PED = ‘1’,PLS < ‘3’) state. */ - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) || (temp & PORT_PLS_MASK) >= XDEV_U3) { xhci_warn(xhci, "USB core suspending port %d-%d not in U0/U1/U2\n", - hcd->self.busnum, wIndex + 1); + hcd->self.busnum, portnum1); goto error; } slot_id = xhci_find_slot_id_by_port(hcd, xhci, - wIndex + 1); + portnum1); if (!slot_id) { xhci_warn(xhci, "slot_id is zero\n"); goto error; @@ -1320,21 +1324,21 @@ xhci_stop_device(xhci, slot_id, 1); spin_lock_irqsave(&xhci->lock, flags); - xhci_set_link_state(xhci, ports[wIndex], XDEV_U3); + xhci_set_link_state(xhci, port, XDEV_U3); spin_unlock_irqrestore(&xhci->lock, flags); msleep(10); /* wait device to enter */ spin_lock_irqsave(&xhci->lock, flags); - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); bus_state->suspended_ports |= 1 << wIndex; break; case USB_PORT_FEAT_LINK_STATE: - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); /* Disable port */ if (link_state == USB_SS_PORT_LS_SS_DISABLED) { xhci_dbg(xhci, "Disable port %d-%d\n", - hcd->self.busnum, wIndex + 1); + hcd->self.busnum, portnum1); temp = xhci_port_state_to_neutral(temp); /* * Clear all change bits, so that we get a new @@ -1343,18 +1347,17 @@ temp |= PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | PORT_RC | PORT_PLC | PORT_CEC; - writel(temp | PORT_PE, ports[wIndex]->addr); - temp = readl(ports[wIndex]->addr); + writel(temp | PORT_PE, port->addr); + temp = readl(port->addr); break; } /* Put link in RxDetect (enable port) */ if (link_state == USB_SS_PORT_LS_RX_DETECT) { xhci_dbg(xhci, "Enable port %d-%d\n", - hcd->self.busnum, wIndex + 1); - xhci_set_link_state(xhci, ports[wIndex], - link_state); - temp = readl(ports[wIndex]->addr); + hcd->self.busnum, portnum1); + xhci_set_link_state(xhci, port, link_state); + temp = readl(port->addr); break; } @@ -1384,11 +1387,10 @@ } xhci_dbg(xhci, "Enable compliance mode transition for port %d-%d\n", - hcd->self.busnum, wIndex + 1); - xhci_set_link_state(xhci, ports[wIndex], - link_state); + hcd->self.busnum, portnum1); + xhci_set_link_state(xhci, port, link_state); - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); break; } /* Port must be enabled */ @@ -1399,8 +1401,7 @@ /* Can't set port link state above '3' (U3) */ if (link_state > USB_SS_PORT_LS_U3) { xhci_warn(xhci, "Cannot set port %d-%d link state %d\n", - hcd->self.busnum, wIndex + 1, - link_state); + hcd->self.busnum, portnum1, link_state); goto error; } @@ -1422,30 +1423,29 @@ pls == XDEV_RESUME || pls == XDEV_RECOVERY) { wait_u0 = true; - reinit_completion(&bus_state->u3exit_done[wIndex]); + reinit_completion(&port->u3exit_done); } if (pls <= XDEV_U3) /* U1, U2, U3 */ - xhci_set_link_state(xhci, ports[wIndex], - USB_SS_PORT_LS_U0); + xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U0); if (!wait_u0) { if (pls > XDEV_U3) goto error; break; } spin_unlock_irqrestore(&xhci->lock, flags); - if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex], + if 
(!wait_for_completion_timeout(&port->u3exit_done, msecs_to_jiffies(500))) xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n", - hcd->self.busnum, wIndex + 1); + hcd->self.busnum, portnum1); spin_lock_irqsave(&xhci->lock, flags); - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); break; } if (link_state == USB_SS_PORT_LS_U3) { int retries = 16; slot_id = xhci_find_slot_id_by_port(hcd, xhci, - wIndex + 1); + portnum1); if (slot_id) { /* unlock to execute stop endpoint * commands */ @@ -1454,16 +1454,16 @@ xhci_stop_device(xhci, slot_id, 1); spin_lock_irqsave(&xhci->lock, flags); } - xhci_set_link_state(xhci, ports[wIndex], USB_SS_PORT_LS_U3); + xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U3); spin_unlock_irqrestore(&xhci->lock, flags); while (retries--) { usleep_range(4000, 8000); - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); if ((temp & PORT_PLS_MASK) == XDEV_U3) break; } spin_lock_irqsave(&xhci->lock, flags); - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); bus_state->suspended_ports |= 1 << wIndex; } break; @@ -1478,39 +1478,38 @@ break; case USB_PORT_FEAT_RESET: temp = (temp | PORT_RESET); - writel(temp, ports[wIndex]->addr); + writel(temp, port->addr); - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); xhci_dbg(xhci, "set port reset, actual port %d-%d status = 0x%x\n", - hcd->self.busnum, wIndex + 1, temp); + hcd->self.busnum, portnum1, temp); break; case USB_PORT_FEAT_REMOTE_WAKE_MASK: - xhci_set_remote_wake_mask(xhci, ports[wIndex], - wake_mask); - temp = readl(ports[wIndex]->addr); + xhci_set_remote_wake_mask(xhci, port, wake_mask); + temp = readl(port->addr); xhci_dbg(xhci, "set port remote wake mask, actual port %d-%d status = 0x%x\n", - hcd->self.busnum, wIndex + 1, temp); + hcd->self.busnum, portnum1, temp); break; case USB_PORT_FEAT_BH_PORT_RESET: temp |= PORT_WR; - writel(temp, ports[wIndex]->addr); - temp = readl(ports[wIndex]->addr); + writel(temp, port->addr); + temp = readl(port->addr); break; case USB_PORT_FEAT_U1_TIMEOUT: if (hcd->speed < HCD_USB3) goto error; - temp = readl(ports[wIndex]->addr + PORTPMSC); + temp = readl(port->addr + PORTPMSC); temp &= ~PORT_U1_TIMEOUT_MASK; temp |= PORT_U1_TIMEOUT(timeout); - writel(temp, ports[wIndex]->addr + PORTPMSC); + writel(temp, port->addr + PORTPMSC); break; case USB_PORT_FEAT_U2_TIMEOUT: if (hcd->speed < HCD_USB3) goto error; - temp = readl(ports[wIndex]->addr + PORTPMSC); + temp = readl(port->addr + PORTPMSC); temp &= ~PORT_U2_TIMEOUT_MASK; temp |= PORT_U2_TIMEOUT(timeout); - writel(temp, ports[wIndex]->addr + PORTPMSC); + writel(temp, port->addr + PORTPMSC); break; case USB_PORT_FEAT_TEST: /* 4.19.6 Port Test Modes (USB2 Test Mode) */ @@ -1526,13 +1525,16 @@ goto error; } /* unblock any posted writes */ - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); break; case ClearPortFeature: - if (!wIndex || wIndex > max_ports) + if (!portnum1 || portnum1 > max_ports) goto error; + + port = ports[portnum1 - 1]; + wIndex--; - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); if (temp == ~(u32)0) { xhci_hc_died(xhci); retval = -ENODEV; @@ -1542,7 +1544,7 @@ temp = xhci_port_state_to_neutral(temp); switch (wValue) { case USB_PORT_FEAT_SUSPEND: - temp = readl(ports[wIndex]->addr); + temp = readl(port->addr); xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n"); xhci_dbg(xhci, "PORTSC %04x\n", temp); if (temp & PORT_RESET) @@ -1553,20 +1555,18 @@ set_bit(wIndex, &bus_state->resuming_ports); usb_hcd_start_port_resume(&hcd->self, wIndex); - 
xhci_set_link_state(xhci, ports[wIndex], - XDEV_RESUME); + xhci_set_link_state(xhci, port, XDEV_RESUME); spin_unlock_irqrestore(&xhci->lock, flags); msleep(USB_RESUME_TIMEOUT); spin_lock_irqsave(&xhci->lock, flags); - xhci_set_link_state(xhci, ports[wIndex], - XDEV_U0); + xhci_set_link_state(xhci, port, XDEV_U0); clear_bit(wIndex, &bus_state->resuming_ports); usb_hcd_end_port_resume(&hcd->self, wIndex); } bus_state->port_c_suspend |= 1 << wIndex; slot_id = xhci_find_slot_id_by_port(hcd, xhci, - wIndex + 1); + portnum1); if (!slot_id) { xhci_dbg(xhci, "slot_id is zero\n"); goto error; @@ -1584,11 +1584,11 @@ case USB_PORT_FEAT_C_PORT_LINK_STATE: case USB_PORT_FEAT_C_PORT_CONFIG_ERROR: xhci_clear_port_change_bit(xhci, wValue, wIndex, - ports[wIndex]->addr, temp); + port->addr, temp); break; case USB_PORT_FEAT_ENABLE: xhci_disable_port(hcd, xhci, wIndex, - ports[wIndex]->addr, temp); + port->addr, temp); break; case USB_PORT_FEAT_POWER: xhci_set_port_power(xhci, hcd, wIndex, false, &flags); @@ -1672,8 +1672,8 @@ if ((temp & mask) != 0 || (bus_state->port_c_suspend & 1 << i) || - (bus_state->resume_done[i] && time_after_eq( - jiffies, bus_state->resume_done[i]))) { + (ports[i]->resume_timestamp && time_after_eq( + jiffies, ports[i]->resume_timestamp))) { buf[(i + 1) / 8] |= 1 << (i + 1) % 8; status = 1; } diff -u linux-aws-5.15-5.15.0/drivers/usb/host/xhci-mem.c linux-aws-5.15-5.15.0/drivers/usb/host/xhci-mem.c --- linux-aws-5.15-5.15.0/drivers/usb/host/xhci-mem.c +++ linux-aws-5.15-5.15.0/drivers/usb/host/xhci-mem.c @@ -2318,6 +2318,9 @@ xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * i; xhci->hw_ports[i].hw_portnum = i; + + init_completion(&xhci->hw_ports[i].rexit_done); + init_completion(&xhci->hw_ports[i].u3exit_done); } xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags, @@ -2587,13 +2590,6 @@ */ for (i = 0; i < MAX_HC_SLOTS; i++) xhci->devs[i] = NULL; - for (i = 0; i < USB_MAXCHILDREN; i++) { - xhci->usb2_rhub.bus_state.resume_done[i] = 0; - xhci->usb3_rhub.bus_state.resume_done[i] = 0; - /* Only the USB 2.0 completions will ever be used. */ - init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]); - init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]); - } if (scratchpad_alloc(xhci, flags)) goto fail; diff -u linux-aws-5.15-5.15.0/drivers/usb/host/xhci-ring.c linux-aws-5.15-5.15.0/drivers/usb/host/xhci-ring.c --- linux-aws-5.15-5.15.0/drivers/usb/host/xhci-ring.c +++ linux-aws-5.15-5.15.0/drivers/usb/host/xhci-ring.c @@ -1987,7 +1987,7 @@ goto cleanup; } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) { xhci_dbg(xhci, "resume HS port %d\n", port_id); - bus_state->resume_done[hcd_portnum] = jiffies + + port->resume_timestamp = jiffies + msecs_to_jiffies(USB_RESUME_TIMEOUT); set_bit(hcd_portnum, &bus_state->resuming_ports); /* Do the rest in GetPortStatus after resume time delay. @@ -1996,7 +1996,7 @@ */ set_bit(HCD_FLAG_POLL_RH, &hcd->flags); mod_timer(&hcd->rh_timer, - bus_state->resume_done[hcd_portnum]); + port->resume_timestamp); usb_hcd_start_port_resume(&hcd->self, hcd_portnum); bogus_port_status = true; } @@ -2008,7 +2008,7 @@ (portsc & PORT_PLS_MASK) == XDEV_U1 || (portsc & PORT_PLS_MASK) == XDEV_U2)) { xhci_dbg(xhci, "resume SS port %d finished\n", port_id); - complete(&bus_state->u3exit_done[hcd_portnum]); + complete(&port->u3exit_done); /* We've just brought the device into U0/1/2 through either the * Resume state after a device remote wakeup, or through the * U3Exit state after a host-initiated resume. 
If it's a device @@ -2033,10 +2033,9 @@ * RExit to a disconnect state). If so, let the driver know it's * out of the RExit state. */ - if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 && - test_and_clear_bit(hcd_portnum, - &bus_state->rexit_ports)) { - complete(&bus_state->rexit_done[hcd_portnum]); + if (hcd->speed < HCD_USB3 && port->rexit_active) { + complete(&port->rexit_done); + port->rexit_active = false; bogus_port_status = true; goto cleanup; } @@ -2429,6 +2428,9 @@ /* handle completion code */ switch (trb_comp_code) { case COMP_SUCCESS: + /* Don't overwrite status if TD had an error, see xHCI 4.9.1 */ + if (td->error_mid_td) + break; if (remaining) { frame->status = short_framestatus; if (xhci->quirks & XHCI_TRUST_TX_LENGTH) @@ -2444,9 +2446,13 @@ case COMP_BANDWIDTH_OVERRUN_ERROR: frame->status = -ECOMM; break; - case COMP_ISOCH_BUFFER_OVERRUN: case COMP_BABBLE_DETECTED_ERROR: + sum_trbs_for_length = true; + fallthrough; + case COMP_ISOCH_BUFFER_OVERRUN: frame->status = -EOVERFLOW; + if (ep_trb != td->last_trb) + td->error_mid_td = true; break; case COMP_INCOMPATIBLE_DEVICE_ERROR: case COMP_STALL_ERROR: @@ -2454,8 +2460,9 @@ break; case COMP_USB_TRANSACTION_ERROR: frame->status = -EPROTO; + sum_trbs_for_length = true; if (ep_trb != td->last_trb) - return 0; + td->error_mid_td = true; break; case COMP_STOPPED: sum_trbs_for_length = true; @@ -2475,6 +2482,9 @@ break; } + if (td->urb_length_set) + goto finish_td; + if (sum_trbs_for_length) frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) + ep_trb_len - remaining; @@ -2483,6 +2493,14 @@ td->urb->actual_length += frame->actual_length; +finish_td: + /* Don't give back TD yet if we encountered an error mid TD */ + if (td->error_mid_td && ep_trb != td->last_trb) { + xhci_dbg(xhci, "Error mid isoc TD, wait for final completion event\n"); + td->urb_length_set = true; + return 0; + } + return finish_td(xhci, ep, ep_ring, td, trb_comp_code); } @@ -2867,17 +2885,51 @@ } if (!ep_seg) { - if (!ep->skip || - !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { - /* Some host controllers give a spurious - * successful event after a short transfer. - * Ignore it. - */ - if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && - ep_ring->last_td_was_short) { - ep_ring->last_td_was_short = false; - goto cleanup; + + if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) { + skip_isoc_td(xhci, td, ep, status); + goto cleanup; + } + + /* + * Some hosts give a spurious success event after a short + * transfer. Ignore it. + */ + if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && + ep_ring->last_td_was_short) { + ep_ring->last_td_was_short = false; + goto cleanup; + } + + /* + * xhci 4.10.2 states isoc endpoints should continue + * processing the next TD if there was an error mid TD. + * So hosts like NEC don't generate an event for the last + * isoc TRB even if the IOC flag is set. + * xhci 4.9.1 states that if there are errors in multi-TRB + * TDs xHC should generate an error for that TRB, and if xHC + * proceeds to the next TD it should generate an event for + * any TRB with IOC flag on the way. Other hosts follow this. + * So this event might be for the next TD. 
+ */ + if (td->error_mid_td && + !list_is_last(&td->td_list, &ep_ring->td_list)) { + struct xhci_td *td_next = list_next_entry(td, td_list); + + ep_seg = trb_in_td(xhci, td_next->start_seg, td_next->first_trb, + td_next->last_trb, ep_trb_dma, false); + if (ep_seg) { + /* give back previous TD, start handling new */ + xhci_dbg(xhci, "Missing TD completion event after mid TD error\n"); + ep_ring->dequeue = td->last_trb; + ep_ring->deq_seg = td->last_trb_seg; + inc_deq(xhci, ep_ring); + xhci_td_cleanup(xhci, td, ep_ring, td->status); + td = td_next; } + } + + if (!ep_seg) { /* HC is busted, give up! */ xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not " @@ -2889,9 +2941,6 @@ ep_trb_dma, true); return -ESHUTDOWN; } - - skip_isoc_td(xhci, td, ep, status); - goto cleanup; } if (trb_comp_code == COMP_SHORT_PACKET) ep_ring->last_td_was_short = true; diff -u linux-aws-5.15-5.15.0/drivers/usb/host/xhci.h linux-aws-5.15-5.15.0/drivers/usb/host/xhci.h --- linux-aws-5.15-5.15.0/drivers/usb/host/xhci.h +++ linux-aws-5.15-5.15.0/drivers/usb/host/xhci.h @@ -1572,6 +1572,7 @@ struct xhci_segment *bounce_seg; /* actual_length of the URB has already been set */ bool urb_length_set; + bool error_mid_td; unsigned int num_trbs; }; @@ -1708,13 +1709,8 @@ u32 port_c_suspend; u32 suspended_ports; u32 port_remote_wakeup; - unsigned long resume_done[USB_MAXCHILDREN]; /* which ports have started to resume */ unsigned long resuming_ports; - /* Which ports are waiting on RExit to U0 transition. */ - unsigned long rexit_ports; - struct completion rexit_done[USB_MAXCHILDREN]; - struct completion u3exit_done[USB_MAXCHILDREN]; }; @@ -1738,6 +1734,10 @@ struct xhci_hub *rhub; struct xhci_port_cap *port_cap; unsigned int lpm_incapable:1; + unsigned long resume_timestamp; + bool rexit_active; + struct completion rexit_done; + struct completion u3exit_done; }; struct xhci_hub { diff -u linux-aws-5.15-5.15.0/drivers/usb/roles/class.c linux-aws-5.15-5.15.0/drivers/usb/roles/class.c --- linux-aws-5.15-5.15.0/drivers/usb/roles/class.c +++ linux-aws-5.15-5.15.0/drivers/usb/roles/class.c @@ -19,7 +19,9 @@ struct usb_role_switch { struct device dev; struct mutex lock; /* device lock*/ + struct module *module; /* the module this device depends on */ enum usb_role role; + bool registered; /* From descriptor */ struct device *usb2_port; @@ -46,6 +48,9 @@ if (IS_ERR_OR_NULL(sw)) return 0; + if (!sw->registered) + return -EOPNOTSUPP; + mutex_lock(&sw->lock); ret = sw->set(sw, role); @@ -71,7 +76,7 @@ { enum usb_role role; - if (IS_ERR_OR_NULL(sw)) + if (IS_ERR_OR_NULL(sw) || !sw->registered) return USB_ROLE_NONE; mutex_lock(&sw->lock); @@ -133,7 +138,7 @@ usb_role_switch_match); if (!IS_ERR_OR_NULL(sw)) - WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); + WARN_ON(!try_module_get(sw->module)); return sw; } @@ -155,7 +160,7 @@ sw = fwnode_connection_find_match(fwnode, "usb-role-switch", NULL, usb_role_switch_match); if (!IS_ERR_OR_NULL(sw)) - WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); + WARN_ON(!try_module_get(sw->module)); return sw; } @@ -170,7 +175,7 @@ void usb_role_switch_put(struct usb_role_switch *sw) { if (!IS_ERR_OR_NULL(sw)) { - module_put(sw->dev.parent->driver->owner); + module_put(sw->module); put_device(&sw->dev); } } @@ -187,15 +192,18 @@ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode) { struct device *dev; + struct usb_role_switch *sw = NULL; if (!fwnode) return NULL; dev = class_find_device_by_fwnode(role_class, fwnode); - if (dev) - 
WARN_ON(!try_module_get(dev->parent->driver->owner)); + if (dev) { + sw = to_role_switch(dev); + WARN_ON(!try_module_get(sw->module)); + } - return dev ? to_role_switch(dev) : NULL; + return sw; } EXPORT_SYMBOL_GPL(usb_role_switch_find_by_fwnode); @@ -337,6 +345,7 @@ sw->set = desc->set; sw->get = desc->get; + sw->module = parent->driver->owner; sw->dev.parent = parent; sw->dev.fwnode = desc->fwnode; sw->dev.class = role_class; @@ -351,6 +360,8 @@ return ERR_PTR(ret); } + sw->registered = true; + /* TODO: Symlinks for the host port and the device controller. */ return sw; @@ -365,8 +376,10 @@ */ void usb_role_switch_unregister(struct usb_role_switch *sw) { - if (!IS_ERR_OR_NULL(sw)) + if (!IS_ERR_OR_NULL(sw)) { + sw->registered = false; device_unregister(&sw->dev); + } } EXPORT_SYMBOL_GPL(usb_role_switch_unregister); diff -u linux-aws-5.15-5.15.0/drivers/vdpa/mlx5/net/mlx5_vnet.c linux-aws-5.15-5.15.0/drivers/vdpa/mlx5/net/mlx5_vnet.c --- linux-aws-5.15-5.15.0/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ linux-aws-5.15-5.15.0/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -173,8 +173,6 @@ static bool mlx5_vdpa_debug; -#define MLX5_CVQ_MAX_ENT 16 - #define MLX5_LOG_VIO_FLAG(_feature) \ do { \ if (features & BIT_ULL(_feature)) \ @@ -1685,9 +1683,16 @@ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); struct mlx5_vdpa_virtqueue *mvq; - if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx)) + if (!is_index_valid(mvdev, idx)) return; + if (is_ctrl_vq_idx(mvdev, idx)) { + struct mlx5_control_vq *cvq = &mvdev->cvq; + + cvq->vring.vring.num = num; + return; + } + mvq = &ndev->vqs[idx]; mvq->num_ent = num; } @@ -2182,7 +2187,7 @@ u16 idx = cvq->vring.last_avail_idx; err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, - MLX5_CVQ_MAX_ENT, false, + cvq->vring.vring.num, false, (struct vring_desc *)(uintptr_t)cvq->desc_addr, (struct vring_avail *)(uintptr_t)cvq->driver_addr, (struct vring_used *)(uintptr_t)cvq->device_addr); diff -u linux-aws-5.15-5.15.0/drivers/video/fbdev/core/fbcon.c linux-aws-5.15-5.15.0/drivers/video/fbdev/core/fbcon.c --- linux-aws-5.15-5.15.0/drivers/video/fbdev/core/fbcon.c +++ linux-aws-5.15-5.15.0/drivers/video/fbdev/core/fbcon.c @@ -2409,11 +2409,9 @@ struct fbcon_ops *ops = info->fbcon_par; struct fbcon_display *p = &fb_display[vc->vc_num]; int resize, ret, old_userfont, old_width, old_height, old_charcount; - char *old_data = NULL; + u8 *old_data = vc->vc_font.data; resize = (w != vc->vc_font.width) || (h != vc->vc_font.height); - if (p->userfont) - old_data = vc->vc_font.data; vc->vc_font.data = (void *)(p->fontdata = data); old_userfont = p->userfont; if ((p->userfont = userfont)) @@ -2447,13 +2445,13 @@ update_screen(vc); } - if (old_data && (--REFCOUNT(old_data) == 0)) + if (old_userfont && (--REFCOUNT(old_data) == 0)) kfree(old_data - FONT_EXTRA_WORDS * sizeof(int)); return 0; err_out: p->fontdata = old_data; - vc->vc_font.data = (void *)old_data; + vc->vc_font.data = old_data; if (userfont) { p->userfont = old_userfont; diff -u linux-aws-5.15-5.15.0/fs/afs/dir.c linux-aws-5.15-5.15.0/fs/afs/dir.c --- linux-aws-5.15-5.15.0/fs/afs/dir.c +++ linux-aws-5.15-5.15.0/fs/afs/dir.c @@ -493,12 +493,4 @@ } - /* Don't expose silly rename entries to userspace. */ - if (nlen > 6 && - dire->u.name[0] == '.' 
&& - ctx->actor != afs_lookup_filldir && - ctx->actor != afs_lookup_one_filldir && - memcmp(dire->u.name, ".__afs", 6) == 0) - continue; - /* found the next entry */ if (!dir_emit(ctx, dire->u.name, nlen, diff -u linux-aws-5.15-5.15.0/fs/afs/volume.c linux-aws-5.15-5.15.0/fs/afs/volume.c --- linux-aws-5.15-5.15.0/fs/afs/volume.c +++ linux-aws-5.15-5.15.0/fs/afs/volume.c @@ -327,7 +327,7 @@ { struct afs_server_list *new, *old, *discard; struct afs_vldb_entry *vldb; - char idbuf[16]; + char idbuf[24]; int ret, idsz; _enter(""); @@ -335,7 +335,7 @@ /* We look up an ID by passing it as a decimal string in the * operation's name parameter. */ - idsz = sprintf(idbuf, "%llu", volume->vid); + idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid); vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz); if (IS_ERR(vldb)) { diff -u linux-aws-5.15-5.15.0/fs/aio.c linux-aws-5.15-5.15.0/fs/aio.c --- linux-aws-5.15-5.15.0/fs/aio.c +++ linux-aws-5.15-5.15.0/fs/aio.c @@ -568,6 +568,13 @@ struct kioctx *ctx = req->ki_ctx; unsigned long flags; + /* + * kiocb didn't come from aio or is neither a read nor a write, hence + * ignore it. + */ + if (!(iocb->ki_flags & IOCB_AIO_RW)) + return; + if (WARN_ON_ONCE(!list_empty(&req->ki_list))) return; @@ -1453,7 +1460,7 @@ req->ki_complete = aio_complete_rw; req->private = NULL; req->ki_pos = iocb->aio_offset; - req->ki_flags = iocb_flags(req->ki_filp); + req->ki_flags = iocb_flags(req->ki_filp) | IOCB_AIO_RW; if (iocb->aio_flags & IOCB_FLAG_RESFD) req->ki_flags |= IOCB_EVENTFD; req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp)); diff -u linux-aws-5.15-5.15.0/fs/btrfs/dev-replace.c linux-aws-5.15-5.15.0/fs/btrfs/dev-replace.c --- linux-aws-5.15-5.15.0/fs/btrfs/dev-replace.c +++ linux-aws-5.15-5.15.0/fs/btrfs/dev-replace.c @@ -763,6 +763,23 @@ return ret; } +static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args) +{ + if (args->start.srcdevid == 0) { + if (memchr(args->start.srcdev_name, 0, + sizeof(args->start.srcdev_name)) == NULL) + return -ENAMETOOLONG; + } else { + args->start.srcdev_name[0] = 0; + } + + if (memchr(args->start.tgtdev_name, 0, + sizeof(args->start.tgtdev_name)) == NULL) + return -ENAMETOOLONG; + + return 0; +} + int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args) { @@ -775,10 +792,9 @@ default: return -EINVAL; } - - if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') || - args->start.tgtdev_name[0] == '\0') - return -EINVAL; + ret = btrfs_check_replace_dev_names(args); + if (ret < 0) + return ret; ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name, args->start.srcdevid, diff -u linux-aws-5.15-5.15.0/fs/btrfs/disk-io.c linux-aws-5.15-5.15.0/fs/btrfs/disk-io.c --- linux-aws-5.15-5.15.0/fs/btrfs/disk-io.c +++ linux-aws-5.15-5.15.0/fs/btrfs/disk-io.c @@ -1600,12 +1600,12 @@ * * @objectid: root id * @anon_dev: preallocated anonymous block device number for new roots, - * pass 0 for new allocation. + * pass NULL for a new allocation. * @check_ref: whether to check root item references, If true, return -ENOENT * for orphan roots */ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, - u64 objectid, dev_t anon_dev, + u64 objectid, dev_t *anon_dev, bool check_ref) { struct btrfs_root *root; @@ -1625,9 +1625,9 @@ * that common but still possible. In that case, we just need * to free the anon_dev. 
*/ - if (unlikely(anon_dev)) { - free_anon_bdev(anon_dev); - anon_dev = 0; + if (unlikely(anon_dev && *anon_dev)) { + free_anon_bdev(*anon_dev); + *anon_dev = 0; } if (check_ref && btrfs_root_refs(&root->root_item) == 0) { @@ -1649,7 +1649,7 @@ goto fail; } - ret = btrfs_init_fs_root(root, anon_dev); + ret = btrfs_init_fs_root(root, anon_dev ? *anon_dev : 0); if (ret) goto fail; @@ -1685,7 +1685,7 @@ * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root() * and once again by our caller. */ - if (anon_dev) + if (anon_dev && *anon_dev) root->anon_dev = 0; btrfs_put_root(root); return ERR_PTR(ret); @@ -1701,7 +1701,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, u64 objectid, bool check_ref) { - return btrfs_get_root_ref(fs_info, objectid, 0, check_ref); + return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref); } /* @@ -1709,11 +1709,11 @@ * the anonymous block device id * * @objectid: tree objectid - * @anon_dev: if zero, allocate a new anonymous block device or use the - * parameter value + * @anon_dev: if NULL, allocate a new anonymous block device or use the + * parameter value if not NULL */ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info, - u64 objectid, dev_t anon_dev) + u64 objectid, dev_t *anon_dev) { return btrfs_get_root_ref(fs_info, objectid, anon_dev, true); } @@ -2338,6 +2338,9 @@ if (!strstr(crypto_shash_driver_name(csum_shash), "generic")) set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); break; + case BTRFS_CSUM_TYPE_XXHASH: + set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); + break; default: break; } diff -u linux-aws-5.15-5.15.0/fs/btrfs/disk-io.h linux-aws-5.15-5.15.0/fs/btrfs/disk-io.h --- linux-aws-5.15-5.15.0/fs/btrfs/disk-io.h +++ linux-aws-5.15-5.15.0/fs/btrfs/disk-io.h @@ -74,7 +74,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, u64 objectid, bool check_ref); struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info, - u64 objectid, dev_t anon_dev); + u64 objectid, dev_t *anon_dev); struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 objectid); diff -u linux-aws-5.15-5.15.0/fs/btrfs/ioctl.c linux-aws-5.15-5.15.0/fs/btrfs/ioctl.c --- linux-aws-5.15-5.15.0/fs/btrfs/ioctl.c +++ linux-aws-5.15-5.15.0/fs/btrfs/ioctl.c @@ -630,7 +630,7 @@ leaf = NULL; key.offset = (u64)-1; - new_root = btrfs_get_new_fs_root(fs_info, objectid, anon_dev); + new_root = btrfs_get_new_fs_root(fs_info, objectid, &anon_dev); if (IS_ERR(new_root)) { free_anon_bdev(anon_dev); ret = PTR_ERR(new_root); diff -u linux-aws-5.15-5.15.0/fs/btrfs/transaction.c linux-aws-5.15-5.15.0/fs/btrfs/transaction.c --- linux-aws-5.15-5.15.0/fs/btrfs/transaction.c +++ linux-aws-5.15-5.15.0/fs/btrfs/transaction.c @@ -1789,7 +1789,7 @@ } key.offset = (u64)-1; - pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev); + pending->snap = btrfs_get_new_fs_root(fs_info, objectid, &pending->anon_dev); if (IS_ERR(pending->snap)) { ret = PTR_ERR(pending->snap); pending->snap = NULL; diff -u linux-aws-5.15-5.15.0/fs/cachefiles/bind.c linux-aws-5.15-5.15.0/fs/cachefiles/bind.c --- linux-aws-5.15-5.15.0/fs/cachefiles/bind.c +++ linux-aws-5.15-5.15.0/fs/cachefiles/bind.c @@ -249,6 +249,8 @@ kmem_cache_free(cachefiles_object_jar, fsdef); error_root_object: cachefiles_end_secure(cache, saved_cred); + put_cred(cache->cache_cred); + cache->cache_cred = NULL; pr_err("Failed to register: %d\n", ret); return ret; } @@ -269,6 +271,7 @@ 
dput(cache->graveyard); mntput(cache->mnt); + put_cred(cache->cache_cred); kfree(cache->rootdirname); kfree(cache->secctx); diff -u linux-aws-5.15-5.15.0/fs/cifs/sess.c linux-aws-5.15-5.15.0/fs/cifs/sess.c --- linux-aws-5.15-5.15.0/fs/cifs/sess.c +++ linux-aws-5.15-5.15.0/fs/cifs/sess.c @@ -582,8 +582,8 @@ { unsigned int tioffset; /* challenge message target info area */ unsigned int tilen; /* challenge message target info area length */ - CHALLENGE_MESSAGE *pblob = (CHALLENGE_MESSAGE *)bcc_ptr; + __u32 server_flags; if (blob_len < sizeof(CHALLENGE_MESSAGE)) { cifs_dbg(VFS, "challenge blob len %d too small\n", blob_len); @@ -601,12 +601,37 @@ return -EINVAL; } + server_flags = le32_to_cpu(pblob->NegotiateFlags); + cifs_dbg(FYI, "%s: negotiate=0x%08x challenge=0x%08x\n", __func__, + ses->ntlmssp->client_flags, server_flags); + + if ((ses->ntlmssp->client_flags & (NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_SIGN)) && + (!(server_flags & NTLMSSP_NEGOTIATE_56) && !(server_flags & NTLMSSP_NEGOTIATE_128))) { + cifs_dbg(VFS, "%s: requested signing/encryption but server did not return either 56-bit or 128-bit session key size\n", + __func__); + return -EINVAL; + } + if (!(server_flags & NTLMSSP_NEGOTIATE_NTLM) && !(server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) { + cifs_dbg(VFS, "%s: server does not seem to support either NTLMv1 or NTLMv2\n", __func__); + return -EINVAL; + } + if (ses->server->sign && !(server_flags & NTLMSSP_NEGOTIATE_SIGN)) { + cifs_dbg(VFS, "%s: forced packet signing but server does not seem to support it\n", + __func__); + return -EOPNOTSUPP; + } + if ((ses->ntlmssp->client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && + !(server_flags & NTLMSSP_NEGOTIATE_KEY_XCH)) + pr_warn_once("%s: authentication has been weakened as server does not support key exchange\n", + __func__); + + ses->ntlmssp->server_flags = server_flags; + memcpy(ses->ntlmssp->cryptkey, pblob->Challenge, CIFS_CRYPTO_KEY_SIZE); - /* BB we could decode pblob->NegotiateFlags; some may be useful */ /* In particular we can examine sign flags */ /* BB spec says that if AvId field of MsvAvTimestamp is populated then we must set the MIC field of the AUTHENTICATE_MESSAGE */ - ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags); + tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset); tilen = le16_to_cpu(pblob->TargetInfoArray.Length); if (tioffset > blob_len || tioffset + tilen > blob_len) { @@ -646,12 +671,12 @@ flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | - NTLMSSP_NEGOTIATE_SEAL; - if (server->sign) - flags |= NTLMSSP_NEGOTIATE_SIGN; + NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL | + NTLMSSP_NEGOTIATE_SIGN; if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess) flags |= NTLMSSP_NEGOTIATE_KEY_XCH; + ses->ntlmssp->client_flags = flags; sec_blob->NegotiateFlags = cpu_to_le32(flags); sec_blob->WorkstationName.BufferOffset = 0; @@ -710,15 +735,8 @@ memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmAuthenticate; - flags = NTLMSSP_NEGOTIATE_56 | - NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | - NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | - NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC | - NTLMSSP_NEGOTIATE_SEAL; - if (ses->server->sign) - flags |= NTLMSSP_NEGOTIATE_SIGN; - if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) - flags |= NTLMSSP_NEGOTIATE_KEY_XCH; + flags = ses->ntlmssp->server_flags | 
NTLMSSP_REQUEST_TARGET | + NTLMSSP_NEGOTIATE_TARGET_INFO; tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE); sec_blob->NegotiateFlags = cpu_to_le32(flags); @@ -784,9 +802,9 @@ sec_blob->WorkstationName.MaximumLength = 0; tmp += 2; - if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) || - (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) - && !calc_seckey(ses)) { + if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && + (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess) && + !calc_seckey(ses)) { memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer); sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE); diff -u linux-aws-5.15-5.15.0/fs/cifs/smb2ops.c linux-aws-5.15-5.15.0/fs/cifs/smb2ops.c --- linux-aws-5.15-5.15.0/fs/cifs/smb2ops.c +++ linux-aws-5.15-5.15.0/fs/cifs/smb2ops.c @@ -85,6 +85,7 @@ *val = 65000; /* Don't get near 64K credits, avoid srv bugs */ pr_warn_once("server overflowed SMB3 credits\n"); } + WARN_ON_ONCE(server->in_flight == 0); server->in_flight--; if (server->in_flight == 0 && ((optype & CIFS_OP_MASK) != CIFS_NEG_OP) && @@ -647,7 +648,7 @@ struct cifs_ses *ses = tcon->ses; rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, - FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, + FSCTL_QUERY_NETWORK_INTERFACE_INFO, NULL /* no data input */, 0 /* no data input */, CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); if (rc == -EOPNOTSUPP) { @@ -908,10 +909,12 @@ */ kref_get(&tcon->crfid.refcount); tcon->crfid.has_lease = true; - smb2_parse_contexts(server, o_rsp, + rc = smb2_parse_contexts(server, rsp_iov, &oparms.fid->epoch, oparms.fid->lease_key, &oplock, NULL, NULL); + if (rc) + goto oshr_exit; } else goto oshr_exit; @@ -1580,9 +1583,8 @@ struct resume_key_req *res_key; rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, - FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, - NULL, 0 /* no input */, CIFSMaxBufSize, - (char **)&res_key, &ret_data_len); + FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */, + CIFSMaxBufSize, (char **)&res_key, &ret_data_len); if (rc == -EOPNOTSUPP) { pr_warn_once("Server share %s does not support copy range\n", tcon->treeName); @@ -1724,7 +1726,7 @@ rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, - qi.info_type, true, buffer, qi.output_buffer_length, + qi.info_type, buffer, qi.output_buffer_length, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); free_req1_func = SMB2_ioctl_free; @@ -1900,9 +1902,8 @@ retbuf = NULL; rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, - true /* is_fsctl */, (char *)pcchunk, - sizeof(struct copychunk_ioctl), CIFSMaxBufSize, - (char **)&retbuf, &ret_data_len); + (char *)pcchunk, sizeof(struct copychunk_ioctl), + CIFSMaxBufSize, (char **)&retbuf, &ret_data_len); if (rc == 0) { if (ret_data_len != sizeof(struct copychunk_ioctl_rsp)) { @@ -2062,7 +2063,6 @@ rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_SPARSE, - true /* is_fctl */, &setsparse, 1, CIFSMaxBufSize, NULL, NULL); if (rc) { tcon->broken_sparse_sup = true; @@ -2145,7 +2145,6 @@ rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, trgtfile->fid.volatile_fid, FSCTL_DUPLICATE_EXTENTS_TO_FILE, - true /* is_fsctl */, (char *)&dup_ext_buf, sizeof(struct duplicate_extents_to_file), CIFSMaxBufSize, NULL, @@ -2180,7 +2179,6 @@ return 
SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_INTEGRITY_INFORMATION, - true /* is_fsctl */, (char *)&integr_info, sizeof(struct fsctl_set_integrity_information_req), CIFSMaxBufSize, NULL, @@ -2233,7 +2231,6 @@ rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SRV_ENUMERATE_SNAPSHOTS, - true /* is_fsctl */, NULL, 0 /* no input data */, max_response_size, (char **)&retbuf, &ret_data_len); @@ -2918,7 +2915,6 @@ do { rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, FSCTL_DFS_GET_REFERRALS, - true /* is_fsctl */, (char *)dfs_req, dfs_req_size, CIFSMaxBufSize, (char **)&dfs_rsp, &dfs_rsp_size); if (!is_retryable_error(rc)) @@ -3127,8 +3123,7 @@ rc = SMB2_ioctl_init(tcon, server, &rqst[1], fid.persistent_fid, - fid.volatile_fid, FSCTL_GET_REPARSE_POINT, - true /* is_fctl */, NULL, 0, + fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); @@ -3308,8 +3303,7 @@ rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, - COMPOUND_FID, FSCTL_GET_REPARSE_POINT, - true /* is_fctl */, NULL, 0, + COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0, CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE); @@ -3585,7 +3579,7 @@ fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, - cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true, + cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, (char *)&fsctl_buf, sizeof(struct file_zero_data_information), 0, NULL, NULL); @@ -3646,7 +3640,7 @@ rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, - true /* is_fctl */, (char *)&fsctl_buf, + (char *)&fsctl_buf, sizeof(struct file_zero_data_information), CIFSMaxBufSize, NULL, NULL); filemap_invalidate_unlock(inode->i_mapping); @@ -3708,7 +3702,7 @@ in_data.length = cpu_to_le64(len); rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -4020,7 +4014,7 @@ rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); @@ -4080,7 +4074,7 @@ rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, cfile->fid.volatile_fid, - FSCTL_QUERY_ALLOCATED_RANGES, true, + FSCTL_QUERY_ALLOCATED_RANGES, (char *)&in_data, sizeof(in_data), 1024 * sizeof(struct file_allocated_range_buffer), (char **)&out_data, &out_data_len); diff -u linux-aws-5.15-5.15.0/fs/cifs/smb2pdu.c linux-aws-5.15-5.15.0/fs/cifs/smb2pdu.c --- linux-aws-5.15-5.15.0/fs/cifs/smb2pdu.c +++ linux-aws-5.15-5.15.0/fs/cifs/smb2pdu.c @@ -1152,7 +1152,7 @@ } rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, - FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, + FSCTL_VALIDATE_NEGOTIATE_INFO, (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, (char **)&pneg_rsp, &rsplen); if (rc == -EOPNOTSUPP) { @@ -2055,17 +2055,18 @@ posix->nlink, posix->mode, posix->reparse_tag); } -void -smb2_parse_contexts(struct TCP_Server_Info *server, - struct smb2_create_rsp *rsp, - unsigned int *epoch, char *lease_key, __u8 *oplock, - struct smb2_file_all_info *buf, - struct create_posix_rsp *posix) +int 
smb2_parse_contexts(struct TCP_Server_Info *server, + struct kvec *rsp_iov, + unsigned int *epoch, + char *lease_key, __u8 *oplock, + struct smb2_file_all_info *buf, + struct create_posix_rsp *posix) { - char *data_offset; + struct smb2_create_rsp *rsp = rsp_iov->iov_base; struct create_context *cc; - unsigned int next; - unsigned int remaining; + size_t rem, off, len; + size_t doff, dlen; + size_t noff, nlen; char *name; static const char smb3_create_tag_posix[] = { 0x93, 0xAD, 0x25, 0x50, 0x9C, @@ -2074,45 +2075,63 @@ }; *oplock = 0; - data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset); - remaining = le32_to_cpu(rsp->CreateContextsLength); - cc = (struct create_context *)data_offset; + + off = le32_to_cpu(rsp->CreateContextsOffset); + rem = le32_to_cpu(rsp->CreateContextsLength); + if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len) + return -EINVAL; + cc = (struct create_context *)((u8 *)rsp + off); /* Initialize inode number to 0 in case no valid data in qfid context */ if (buf) buf->IndexNumber = 0; - while (remaining >= sizeof(struct create_context)) { - name = le16_to_cpu(cc->NameOffset) + (char *)cc; - if (le16_to_cpu(cc->NameLength) == 4 && - strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0) - *oplock = server->ops->parse_lease_buf(cc, epoch, - lease_key); - else if (buf && (le16_to_cpu(cc->NameLength) == 4) && - strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0) - parse_query_id_ctxt(cc, buf); - else if ((le16_to_cpu(cc->NameLength) == 16)) { - if (posix && - memcmp(name, smb3_create_tag_posix, 16) == 0) + while (rem >= sizeof(*cc)) { + doff = le16_to_cpu(cc->DataOffset); + dlen = le32_to_cpu(cc->DataLength); + if (check_add_overflow(doff, dlen, &len) || len > rem) + return -EINVAL; + + noff = le16_to_cpu(cc->NameOffset); + nlen = le16_to_cpu(cc->NameLength); + if (noff + nlen > doff) + return -EINVAL; + + name = (char *)cc + noff; + switch (nlen) { + case 4: + if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) { + *oplock = server->ops->parse_lease_buf(cc, epoch, + lease_key); + } else if (buf && + !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) { + parse_query_id_ctxt(cc, buf); + } + break; + case 16: + if (posix && !memcmp(name, smb3_create_tag_posix, 16)) parse_posix_ctxt(cc, buf, posix); + break; + default: + cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n", + __func__, nlen, dlen); + if (IS_ENABLED(CONFIG_CIFS_DEBUG2)) + cifs_dump_mem("context data: ", cc, dlen); + break; } - /* else { - cifs_dbg(FYI, "Context not matched with len %d\n", - le16_to_cpu(cc->NameLength)); - cifs_dump_mem("Cctxt name: ", name, 4); - } */ - next = le32_to_cpu(cc->Next); - if (!next) + off = le32_to_cpu(cc->Next); + if (!off) break; - remaining -= next; - cc = (struct create_context *)((char *)cc + next); + if (check_sub_overflow(rem, off, &rem)) + return -EINVAL; + cc = (struct create_context *)((u8 *)cc + off); } if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) *oplock = rsp->OplockLevel; - return; + return 0; } static int @@ -2982,8 +3001,8 @@ } - smb2_parse_contexts(server, rsp, &oparms->fid->epoch, - oparms->fid->lease_key, oplock, buf, posix); + rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch, + oparms->fid->lease_key, oplock, buf, posix); creat_exit: SMB2_open_free(&rqst); free_rsp_buf(resp_buftype, rsp); @@ -2994,7 +3013,7 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + char *in_data, 
u32 indatalen, __u32 max_response_size) { struct smb2_ioctl_req *req; @@ -3069,10 +3088,8 @@ req->sync_hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), SMB2_MAX_BUFFER_SIZE)); - if (is_fsctl) - req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); - else - req->Flags = 0; + /* always an FSCTL (for now) */ + req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) @@ -3099,9 +3116,9 @@ */ int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, - u64 volatile_fid, u32 opcode, bool is_fsctl, - char *in_data, u32 indatalen, u32 max_out_data_len, - char **out_data, u32 *plen /* returned data len */) + u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen, + u32 max_out_data_len, char **out_data, + u32 *plen /* returned data len */) { struct smb_rqst rqst; struct smb2_ioctl_rsp *rsp = NULL; @@ -3143,7 +3160,7 @@ rc = SMB2_ioctl_init(tcon, server, &rqst, persistent_fid, volatile_fid, opcode, - is_fsctl, in_data, indatalen, max_out_data_len); + in_data, indatalen, max_out_data_len); if (rc) goto ioctl_exit; @@ -3225,7 +3242,7 @@ cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, - FSCTL_SET_COMPRESSION, true /* is_fsctl */, + FSCTL_SET_COMPRESSION, (char *)&fsctl_input /* data input */, 2 /* in data len */, CIFSMaxBufSize /* max out data */, &ret_data /* out data */, NULL); diff -u linux-aws-5.15-5.15.0/fs/erofs/data.c linux-aws-5.15-5.15.0/fs/erofs/data.c --- linux-aws-5.15-5.15.0/fs/erofs/data.c +++ linux-aws-5.15-5.15.0/fs/erofs/data.c @@ -342,2 +342,3 @@ .splice_read = generic_file_splice_read, + .get_unmapped_area = thp_get_unmapped_area, }; diff -u linux-aws-5.15-5.15.0/fs/erofs/decompressor.c linux-aws-5.15-5.15.0/fs/erofs/decompressor.c --- linux-aws-5.15-5.15.0/fs/erofs/decompressor.c +++ linux-aws-5.15-5.15.0/fs/erofs/decompressor.c @@ -124,11 +124,11 @@ } static void *z_erofs_handle_inplace_io(struct z_erofs_decompress_req *rq, - void *inpage, unsigned int *inputmargin, int *maptype, - bool support_0padding) + void *inpage, void *out, unsigned int *inputmargin, int *maptype, + bool support_0padding) { unsigned int nrpages_in, nrpages_out; - unsigned int ofull, oend, inputsize, total, i, j; + unsigned int ofull, oend, inputsize, total, i; struct page **in; void *src, *tmp; @@ -143,12 +143,13 @@ ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize)) goto docopy; - for (i = 0; i < nrpages_in; ++i) { - DBG_BUGON(rq->in[i] == NULL); - for (j = 0; j < nrpages_out - nrpages_in + i; ++j) - if (rq->out[j] == rq->in[i]) - goto docopy; - } + for (i = 0; i < nrpages_in; ++i) + if (rq->out[nrpages_out - nrpages_in + i] != + rq->in[i]) + goto docopy; + kunmap_atomic(inpage); + *maptype = 3; + return out + ((nrpages_out - nrpages_in) << PAGE_SHIFT); } if (nrpages_in <= 1) { @@ -156,7 +157,6 @@ return inpage; } kunmap_atomic(inpage); - might_sleep(); src = erofs_vm_map_ram(rq->in, nrpages_in); if (!src) return ERR_PTR(-ENOMEM); @@ -193,10 +193,10 @@ return src; } -static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) +static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *dst) { unsigned int inputmargin; - u8 *headpage, *src; + u8 *out, *headpage, *src; bool support_0padding; int ret, maptype; @@ -220,11 +220,12 @@ } rq->inputsize -= inputmargin; - src = z_erofs_handle_inplace_io(rq, headpage, &inputmargin, &maptype, - support_0padding); + src = 
z_erofs_handle_inplace_io(rq, headpage, dst, &inputmargin, + &maptype, support_0padding); if (IS_ERR(src)) return PTR_ERR(src); + out = dst + rq->pageofs_out; /* legacy format could compress extra data in a pcluster. */ if (rq->partial_decoding || !support_0padding) ret = LZ4_decompress_safe_partial(src + inputmargin, out, @@ -253,7 +254,7 @@ vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT); } else if (maptype == 2) { erofs_put_pcpubuf(src); - } else { + } else if (maptype != 3) { DBG_BUGON(1); return -EFAULT; } @@ -354,8 +355,7 @@ dst_maptype = 2; dstmap_out: - ret = alg->decompress(rq, dst + rq->pageofs_out); - + ret = alg->decompress(rq, dst); if (!dst_maptype) kunmap_atomic(dst); else if (dst_maptype == 2) diff -u linux-aws-5.15-5.15.0/fs/exfat/dir.c linux-aws-5.15-5.15.0/fs/exfat/dir.c --- linux-aws-5.15-5.15.0/fs/exfat/dir.c +++ linux-aws-5.15-5.15.0/fs/exfat/dir.c @@ -618,6 +618,10 @@ bforget(es->bh[i]); else brelse(es->bh[i]); + + if (IS_DYNAMIC_ES(es)) + kfree(es->bh); + kfree(es); return err; } @@ -853,6 +857,7 @@ /* byte offset in sector */ off = EXFAT_BLK_OFFSET(byte_offset, sb); es->start_off = off; + es->bh = es->__bh; /* sector offset in cluster */ sec = EXFAT_B_TO_BLK(byte_offset, sb); @@ -872,6 +877,16 @@ es->num_entries = num_entries; num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb); + if (num_bh > ARRAY_SIZE(es->__bh)) { + es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_KERNEL); + if (!es->bh) { + brelse(bh); + kfree(es); + return NULL; + } + es->bh[0] = bh; + } + for (i = 1; i < num_bh; i++) { /* get the next sector */ if (exfat_is_last_sector_in_cluster(sbi, sec)) { diff -u linux-aws-5.15-5.15.0/fs/exfat/exfat_fs.h linux-aws-5.15-5.15.0/fs/exfat/exfat_fs.h --- linux-aws-5.15-5.15.0/fs/exfat/exfat_fs.h +++ linux-aws-5.15-5.15.0/fs/exfat/exfat_fs.h @@ -170,10 +170,13 @@ bool modified; unsigned int start_off; int num_bh; - struct buffer_head *bh[DIR_CACHE_SIZE]; + struct buffer_head *__bh[DIR_CACHE_SIZE]; + struct buffer_head **bh; unsigned int num_entries; }; +#define IS_DYNAMIC_ES(es) ((es)->__bh != (es)->bh) + struct exfat_dir_entry { struct exfat_chain dir; int entry; diff -u linux-aws-5.15-5.15.0/fs/ext4/extents.c linux-aws-5.15-5.15.0/fs/ext4/extents.c --- linux-aws-5.15-5.15.0/fs/ext4/extents.c +++ linux-aws-5.15-5.15.0/fs/ext4/extents.c @@ -2227,7 +2227,7 @@ /* - * ext4_ext_determine_hole - determine hole around given block + * ext4_ext_find_hole - find hole around given block according to the given path * @inode: inode we lookup in * @path: path in extent tree to @lblk * @lblk: pointer to logical block around which we want to determine hole @@ -2239,9 +2239,9 @@ * The function returns the length of a hole starting at @lblk. We update @lblk * to the beginning of the hole if we managed to find it. 
*/ -static ext4_lblk_t ext4_ext_determine_hole(struct inode *inode, - struct ext4_ext_path *path, - ext4_lblk_t *lblk) +static ext4_lblk_t ext4_ext_find_hole(struct inode *inode, + struct ext4_ext_path *path, + ext4_lblk_t *lblk) { int depth = ext_depth(inode); struct ext4_extent *ex; @@ -2269,30 +2269,6 @@ } /* - * ext4_ext_put_gap_in_cache: - * calculate boundaries of the gap that the requested block fits into - * and cache this gap - */ -static void -ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start, - ext4_lblk_t hole_len) -{ - struct extent_status es; - - ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start, - hole_start + hole_len - 1, &es); - if (es.es_len) { - /* There's delayed extent containing lblock? */ - if (es.es_lblk <= hole_start) - return; - hole_len = min(es.es_lblk - hole_start, hole_len); - } - ext_debug(inode, " -> %u:%u\n", hole_start, hole_len); - ext4_es_insert_extent(inode, hole_start, hole_len, ~0, - EXTENT_STATUS_HOLE); -} - -/* * ext4_ext_rm_idx: * removes index from the index block. */ @@ -4064,6 +4040,69 @@ return 0; } +/* + * Determine the hole length around the given logical block: first try to + * locate and expand the hole from the given @path, then adjust it + * if it's partially or completely converted to delayed extents, insert + * it into the extent cache tree if it's indeed a hole, and finally return + * the length of the determined extent. + */ +static ext4_lblk_t ext4_ext_determine_insert_hole(struct inode *inode, + struct ext4_ext_path *path, + ext4_lblk_t lblk) +{ + ext4_lblk_t hole_start, len; + struct extent_status es; + + hole_start = lblk; + len = ext4_ext_find_hole(inode, path, &hole_start); +again: + ext4_es_find_extent_range(inode, &ext4_es_is_delayed, hole_start, + hole_start + len - 1, &es); + if (!es.es_len) + goto insert_hole; + + /* + * There's a delalloc extent in the hole; handle it whether the delalloc + * extent is in front of, behind, or straddles the queried range. + */ + if (lblk >= es.es_lblk + es.es_len) { + /* + * The delalloc extent is in front of the queried range; + * search again from the queried start block. + */ + len -= lblk - hole_start; + hole_start = lblk; + goto again; + } else if (in_range(lblk, es.es_lblk, es.es_len)) { + /* + * The delalloc extent contains lblk; it must have been + * added after ext4_map_blocks() checked the extent status + * tree, so adjust the length to the part of the delalloc + * extent after lblk. + */ + len = es.es_lblk + es.es_len - lblk; + return len; + } else { + /* + * The delalloc extent is partially or completely behind + * the queried range; update the hole length until the + * beginning of the delalloc extent. 
+ */ + len = min(es.es_lblk - hole_start, len); + } + +insert_hole: + /* Put just found gap into cache to speed up subsequent requests */ + ext_debug(inode, " -> %u:%u\n", hole_start, len); + ext4_es_insert_extent(inode, hole_start, len, ~0, EXTENT_STATUS_HOLE); + + /* Update hole_len to reflect hole size after lblk */ + if (hole_start != lblk) + len -= lblk - hole_start; + + return len; +} /* * Block allocation/map/preallocation routine for extents based files @@ -4181,22 +4220,12 @@ * we couldn't try to create block if create flag is zero */ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { - ext4_lblk_t hole_start, hole_len; + ext4_lblk_t len; - hole_start = map->m_lblk; - hole_len = ext4_ext_determine_hole(inode, path, &hole_start); - /* - * put just found gap into cache to speed up - * subsequent requests - */ - ext4_ext_put_gap_in_cache(inode, hole_start, hole_len); + len = ext4_ext_determine_insert_hole(inode, path, map->m_lblk); - /* Update hole_len to reflect hole size after map->m_lblk */ - if (hole_start != map->m_lblk) - hole_len -= map->m_lblk - hole_start; map->m_pblk = 0; - map->m_len = min_t(unsigned int, map->m_len, hole_len); - + map->m_len = min_t(unsigned int, map->m_len, len); goto out; } diff -u linux-aws-5.15-5.15.0/fs/ext4/mballoc.c linux-aws-5.15-5.15.0/fs/ext4/mballoc.c --- linux-aws-5.15-5.15.0/fs/ext4/mballoc.c +++ linux-aws-5.15-5.15.0/fs/ext4/mballoc.c @@ -853,7 +853,7 @@ { struct ext4_sb_info *sbi = EXT4_SB(sb); - if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0) + if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0) return; write_lock(&sbi->s_mb_rb_lock); @@ -1168,6 +1168,24 @@ mb_update_avg_fragment_size(sb, grp); } +static void mb_regenerate_buddy(struct ext4_buddy *e4b) +{ + int count; + int order = 1; + void *buddy; + + while ((buddy = mb_find_buddy(e4b, order++, &count))) + ext4_set_bits(buddy, 0, count); + + e4b->bd_info->bb_fragments = 0; + memset(e4b->bd_info->bb_counters, 0, + sizeof(*e4b->bd_info->bb_counters) * + (e4b->bd_sb->s_blocksize_bits + 2)); + + ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, + e4b->bd_bitmap, e4b->bd_group, e4b->bd_info); +} + /* The buddy information is attached the buddy cache inode * for convenience. The information regarding each group * is loaded via ext4_mb_load_buddy. The information involve @@ -1817,11 +1835,6 @@ mb_check_buddy(e4b); mb_free_blocks_double(inode, e4b, first, count); - this_cpu_inc(discard_pa_seq); - e4b->bd_info->bb_free += count; - if (first < e4b->bd_info->bb_first_free) - e4b->bd_info->bb_first_free = first; - /* access memory sequentially: check left neighbour, * clear range and then check right neighbour */ @@ -1835,21 +1848,31 @@ struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t blocknr; + /* + * Fastcommit replay can free already freed blocks which + * corrupts allocation info. Regenerate it. + */ + if (sbi->s_mount_state & EXT4_FC_REPLAY) { + mb_regenerate_buddy(e4b); + goto check; + } + blocknr = ext4_group_first_block_no(sb, e4b->bd_group); blocknr += EXT4_C2B(sbi, block); - if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { - ext4_grp_locked_error(sb, e4b->bd_group, - inode ? inode->i_ino : 0, - blocknr, - "freeing already freed block (bit %u); block bitmap corrupt.", - block); - ext4_mark_group_bitmap_corrupted( - sb, e4b->bd_group, + ext4_grp_locked_error(sb, e4b->bd_group, + inode ? 
inode->i_ino : 0, blocknr, + "freeing already freed block (bit %u); block bitmap corrupt.", + block); + ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); - } - goto done; + return; } + this_cpu_inc(discard_pa_seq); + e4b->bd_info->bb_free += count; + if (first < e4b->bd_info->bb_first_free) + e4b->bd_info->bb_first_free = first; + /* let's maintain fragments counter */ if (left_is_free && right_is_free) e4b->bd_info->bb_fragments--; @@ -1874,9 +1897,9 @@ if (first <= last) mb_buddy_mark_free(e4b, first >> 1, last >> 1); -done: mb_set_largest_free_order(sb, e4b->bd_info); mb_update_avg_fragment_size(sb, e4b->bd_info); +check: mb_check_buddy(e4b); } @@ -2197,6 +2220,9 @@ return err; ext4_lock_group(ac->ac_sb, group); + if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) + goto out; + max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); if (max > 0) { @@ -2204,6 +2230,7 @@ ext4_mb_use_best_found(ac, e4b); } +out: ext4_unlock_group(ac->ac_sb, group); ext4_mb_unload_buddy(e4b); @@ -2232,12 +2259,10 @@ if (err) return err; - if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { - ext4_mb_unload_buddy(e4b); - return 0; - } - ext4_lock_group(ac->ac_sb, group); + if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) + goto out; + max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, ac->ac_g_ex.fe_len, &ex); ex.fe_logical = 0xDEADFA11; /* debug value */ @@ -2270,6 +2295,7 @@ ac->ac_b_ex = ex; ext4_mb_use_best_found(ac, e4b); } +out: ext4_unlock_group(ac->ac_sb, group); ext4_mb_unload_buddy(e4b); diff -u linux-aws-5.15-5.15.0/fs/f2fs/checkpoint.c linux-aws-5.15-5.15.0/fs/f2fs/checkpoint.c --- linux-aws-5.15-5.15.0/fs/f2fs/checkpoint.c +++ linux-aws-5.15-5.15.0/fs/f2fs/checkpoint.c @@ -66,7 +66,7 @@ .old_blkaddr = index, .new_blkaddr = index, .encrypted_page = NULL, - .is_por = !is_meta, + .is_por = !is_meta ? 1 : 0, }; int err; @@ -229,8 +229,8 @@ .op = REQ_OP_READ, .op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD, .encrypted_page = NULL, - .in_list = false, - .is_por = (type == META_POR), + .in_list = 0, + .is_por = (type == META_POR) ? 1 : 0, }; struct blk_plug plug; int err; diff -u linux-aws-5.15-5.15.0/fs/f2fs/compress.c linux-aws-5.15-5.15.0/fs/f2fs/compress.c --- linux-aws-5.15-5.15.0/fs/f2fs/compress.c +++ linux-aws-5.15-5.15.0/fs/f2fs/compress.c @@ -1233,10 +1233,11 @@ .page = NULL, .encrypted_page = NULL, .compressed_page = NULL, - .submitted = false, + .submitted = 0, .io_type = io_type, .io_wbc = wbc, - .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode), + .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ? 
+ 1 : 0, }; struct dnode_of_data dn; struct node_info ni; @@ -1444,12 +1445,14 @@ } static int f2fs_write_raw_pages(struct compress_ctx *cc, - int *submitted, + int *submitted_p, struct writeback_control *wbc, enum iostat_type io_type) { struct address_space *mapping = cc->inode->i_mapping; - int _submitted, compr_blocks, ret, i; + struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); + int submitted, compr_blocks, i; + int ret = 0; compr_blocks = f2fs_compressed_blocks(cc); @@ -1464,6 +1467,10 @@ if (compr_blocks < 0) return compr_blocks; + /* overwrite compressed cluster w/ normal cluster */ + if (compr_blocks > 0) + f2fs_lock_op(sbi); + for (i = 0; i < cc->cluster_size; i++) { if (!cc->rpages[i]) continue; @@ -1488,7 +1495,7 @@ if (!clear_page_dirty_for_io(cc->rpages[i])) goto continue_unlock; - ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted, + ret = f2fs_write_single_data_page(cc->rpages[i], &submitted, NULL, NULL, wbc, io_type, compr_blocks, false); if (ret) { @@ -1496,28 +1503,29 @@ unlock_page(cc->rpages[i]); ret = 0; } else if (ret == -EAGAIN) { + ret = 0; /* * for quota file, just redirty left pages to * avoid deadlock caused by cluster update race * from foreground operation. */ if (IS_NOQUOTA(cc->inode)) - return 0; - ret = 0; - cond_resched(); - congestion_wait(BLK_RW_ASYNC, - DEFAULT_IO_TIMEOUT); + goto out; + f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); goto retry_write; } - return ret; + goto out; } - *submitted += _submitted; + *submitted_p += submitted; } - f2fs_balance_fs(F2FS_M_SB(mapping), true); +out: + if (compr_blocks > 0) + f2fs_unlock_op(sbi); - return 0; + f2fs_balance_fs(sbi, true); + return ret; } int f2fs_write_multi_pages(struct compress_ctx *cc, diff -u linux-aws-5.15-5.15.0/fs/f2fs/data.c linux-aws-5.15-5.15.0/fs/f2fs/data.c --- linux-aws-5.15-5.15.0/fs/f2fs/data.c +++ linux-aws-5.15-5.15.0/fs/f2fs/data.c @@ -932,7 +932,7 @@ bio_page = fio->page; /* set submitted = true as a return value */ - fio->submitted = true; + fio->submitted = 1; inc_page_count(sbi, WB_DATA_TYPE(bio_page)); @@ -948,7 +948,7 @@ (fio->type == DATA || fio->type == NODE) && fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) { dec_page_count(sbi, WB_DATA_TYPE(bio_page)); - fio->retry = true; + fio->retry = 1; goto skip; } io->bio = __bio_alloc(fio, BIO_MAX_VECS); @@ -1467,10 +1467,15 @@ struct extent_info ei = {0, }; block_t blkaddr; unsigned int start_pgofs; + int bidx = 0; if (!maxblocks) return 0; + map->m_bdev = inode->i_sb->s_bdev; + map->m_multidev_dio = + f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag); + map->m_len = 0; map->m_flags = 0; @@ -1493,6 +1498,21 @@ if (flag == F2FS_GET_BLOCK_DIO) f2fs_wait_on_block_writeback_range(inode, map->m_pblk, map->m_len); + + if (map->m_multidev_dio) { + block_t blk_addr = map->m_pblk; + + bidx = f2fs_target_device_index(sbi, map->m_pblk); + + map->m_bdev = FDEV(bidx).bdev; + map->m_pblk -= FDEV(bidx).start_blk; + map->m_len = min(map->m_len, + FDEV(bidx).end_blk + 1 - map->m_pblk); + + if (map->m_may_create) + f2fs_update_device_state(sbi, inode->i_ino, + blk_addr, map->m_len); + } goto out; } @@ -1611,6 +1631,9 @@ if (flag == F2FS_GET_BLOCK_PRE_AIO) goto skip; + if (map->m_multidev_dio) + bidx = f2fs_target_device_index(sbi, blkaddr); + if (map->m_len == 0) { /* preallocated unwritten block should be mapped for fiemap. 
*/ if (blkaddr == NEW_ADDR) @@ -1619,10 +1642,15 @@ map->m_pblk = blkaddr; map->m_len = 1; + + if (map->m_multidev_dio) + map->m_bdev = FDEV(bidx).bdev; } else if ((map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs)) || (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) || flag == F2FS_GET_BLOCK_PRE_DIO) { + if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev) + goto sync_out; ofs++; map->m_len++; } else { @@ -1675,11 +1703,31 @@ sync_out: - /* for hardware encryption, but to avoid potential issue in future */ - if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) + if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) { + /* + * for hardware encryption, but to avoid potential issue + * in future + */ f2fs_wait_on_block_writeback_range(inode, map->m_pblk, map->m_len); + if (map->m_multidev_dio) { + block_t blk_addr = map->m_pblk; + + bidx = f2fs_target_device_index(sbi, map->m_pblk); + + map->m_bdev = FDEV(bidx).bdev; + map->m_pblk -= FDEV(bidx).start_blk; + + if (map->m_may_create) + f2fs_update_device_state(sbi, inode->i_ino, + blk_addr, map->m_len); + + f2fs_bug_on(sbi, blk_addr + map->m_len > + FDEV(bidx).end_blk + 1); + } + } + if (flag == F2FS_GET_BLOCK_PRECACHE) { if (map->m_flags & F2FS_MAP_MAPPED) { unsigned int ofs = start_pgofs - map->m_lblk; @@ -1698,7 +1746,7 @@ f2fs_balance_fs(sbi, dn.node_changed); } out: - trace_f2fs_map_blocks(inode, map, err); + trace_f2fs_map_blocks(inode, map, create, flag, err); return err; } @@ -1757,6 +1805,9 @@ map_bh(bh, inode->i_sb, map.m_pblk); bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; bh->b_size = blks_to_bytes(inode, map.m_len); + + if (map.m_multidev_dio) + bh->b_bdev = map.m_bdev; } return err; } @@ -2748,9 +2799,10 @@ .old_blkaddr = NULL_ADDR, .page = page, .encrypted_page = NULL, - .submitted = false, + .submitted = 0, .compr_blocks = compr_blocks, - .need_lock = LOCK_RETRY, + .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY, + .post_read = f2fs_post_read_required(inode) ? 1 : 0, .io_type = io_type, .io_wbc = wbc, .bio = bio, @@ -2834,6 +2886,7 @@ if (err == -EAGAIN) { err = f2fs_do_write_data_page(&fio); if (err == -EAGAIN) { + f2fs_bug_on(sbi, compr_blocks); fio.need_lock = LOCK_REQ; err = f2fs_do_write_data_page(&fio); } @@ -2878,7 +2931,7 @@ } if (submitted) - *submitted = fio.submitted ? 
1 : 0; + *submitted = fio.submitted; return 0; @@ -3104,8 +3157,7 @@ } else if (ret == -EAGAIN) { ret = 0; if (wbc->sync_mode == WB_SYNC_ALL) { - cond_resched(); - congestion_wait(BLK_RW_ASYNC, + f2fs_io_schedule_timeout( DEFAULT_IO_TIMEOUT); goto retry_write; } diff -u linux-aws-5.15-5.15.0/fs/f2fs/f2fs.h linux-aws-5.15-5.15.0/fs/f2fs/f2fs.h --- linux-aws-5.15-5.15.0/fs/f2fs/f2fs.h +++ linux-aws-5.15-5.15.0/fs/f2fs/f2fs.h @@ -622,6 +622,7 @@ F2FS_MAP_UNWRITTEN) struct f2fs_map_blocks { + struct block_device *m_bdev; /* for multi-device dio */ block_t m_pblk; block_t m_lblk; unsigned int m_len; @@ -630,6 +631,7 @@ pgoff_t *m_next_extent; /* point to next possible extent */ int m_seg_type; bool m_may_create; /* indicate it is from write path */ + bool m_multidev_dio; /* indicate it allows multi-device dio */ }; /* for flag in get_data_block */ @@ -1168,18 +1170,19 @@ struct page *encrypted_page; /* encrypted page */ struct page *compressed_page; /* compressed page */ struct list_head list; /* serialize IOs */ - bool submitted; /* indicate IO submission */ - int need_lock; /* indicate we need to lock cp_rwsem */ - bool in_list; /* indicate fio is in io_list */ - bool is_por; /* indicate IO is from recovery or not */ - bool retry; /* need to reallocate block address */ - int compr_blocks; /* # of compressed block addresses */ - bool encrypted; /* indicate file is encrypted */ + unsigned int compr_blocks; /* # of compressed block addresses */ + unsigned int need_lock:8; /* indicate we need to lock cp_rwsem */ + unsigned int version:8; /* version of the node */ + unsigned int submitted:1; /* indicate IO submission */ + unsigned int in_list:1; /* indicate fio is in io_list */ + unsigned int is_por:1; /* indicate IO is from recovery or not */ + unsigned int retry:1; /* need to reallocate block address */ + unsigned int encrypted:1; /* indicate file is encrypted */ + unsigned int post_read:1; /* require post read */ enum iostat_type io_type; /* io type */ struct writeback_control *io_wbc; /* writeback control */ struct bio **bio; /* bio for ipu */ sector_t *last_block; /* last block number in bio */ - unsigned char version; /* version of the node */ }; struct bio_entry { @@ -1736,12 +1739,15 @@ /* For shrinker support */ struct list_head s_list; + struct mutex umount_mutex; + unsigned int shrinker_run_no; + + /* For multi devices */ int s_ndevs; /* number of devices */ struct f2fs_dev_info *devs; /* for device list */ unsigned int dirty_device; /* for checkpoint data flush */ spinlock_t dev_lock; /* protect dirty_device */ - struct mutex umount_mutex; - unsigned int shrinker_run_no; + bool aligned_blksize; /* all devices has the same logical blksize */ /* For write statistics */ u64 sectors_written_start; @@ -3517,6 +3523,8 @@ block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, struct f2fs_io_info *fio); +void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, + block_t blkaddr, unsigned int blkcnt); void f2fs_wait_on_page_writeback(struct page *page, enum page_type type, bool ordered, bool locked); void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); @@ -4334,6 +4342,16 @@ return align & blocksize_mask; } +static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi, + int flag) +{ + if (!f2fs_is_multi_device(sbi)) + return false; + if (flag != F2FS_GET_BLOCK_DIO) + return false; + return sbi->aligned_blksize; +} + static inline bool f2fs_force_buffered_io(struct inode *inode, struct kiocb *iocb, struct iov_iter *iter) { @@ 
-4342,7 +4360,9 @@ if (f2fs_post_read_required(inode)) return true; - if (f2fs_is_multi_device(sbi)) + + /* disallow direct IO if any of devices has unaligned blksize */ + if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize) return true; /* * for blkzoned device, fallback direct IO to buffered IO, so @@ -4393,6 +4413,12 @@ return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK; } +static inline void f2fs_io_schedule_timeout(long timeout) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + io_schedule_timeout(timeout); +} + #define EFSBADCRC EBADMSG /* Bad CRC detected */ #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ diff -u linux-aws-5.15-5.15.0/fs/f2fs/file.c linux-aws-5.15-5.15.0/fs/f2fs/file.c --- linux-aws-5.15-5.15.0/fs/f2fs/file.c +++ linux-aws-5.15-5.15.0/fs/f2fs/file.c @@ -3532,10 +3532,10 @@ return ret; } -static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count) +static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count, + unsigned int *reserved_blocks) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); - unsigned int reserved_blocks = 0; int cluster_size = F2FS_I(dn->inode)->i_cluster_size; block_t blkaddr; int i; @@ -3566,7 +3566,13 @@ goto next; } - if (__is_valid_data_blkaddr(blkaddr)) { + /* + * compressed cluster was not released due to it + * fails in release_compress_blocks(), so NEW_ADDR + * is a possible case. + */ + if (blkaddr == NEW_ADDR || + __is_valid_data_blkaddr(blkaddr)) { compr_blocks++; continue; } @@ -3576,6 +3582,11 @@ } reserved = cluster_size - compr_blocks; + + /* for the case all blocks in cluster were reserved */ + if (reserved == 1) + goto next; + ret = inc_valid_block_count(sbi, dn->inode, &reserved); if (ret) return ret; @@ -3585,12 +3596,12 @@ f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true); - reserved_blocks += reserved; + *reserved_blocks += reserved; next: count -= cluster_size; } - return reserved_blocks; + return 0; } static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) @@ -3651,7 +3662,7 @@ count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); count = round_up(count, F2FS_I(inode)->i_cluster_size); - ret = reserve_compress_blocks(&dn, count); + ret = reserve_compress_blocks(&dn, count, &reserved_blocks); f2fs_put_dnode(&dn); @@ -3659,13 +3670,12 @@ break; page_idx += count; - reserved_blocks += ret; } filemap_invalidate_unlock(inode->i_mapping); up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - if (ret >= 0) { + if (!ret) { clear_inode_flag(inode, FI_COMPRESS_RELEASED); inode->i_ctime = current_time(inode); f2fs_mark_inode_dirty_sync(inode, true); @@ -3675,7 +3685,7 @@ out: mnt_drop_write_file(filp); - if (ret >= 0) { + if (!ret) { ret = put_user(reserved_blocks, (u64 __user *)arg); } else if (reserved_blocks && atomic_read(&F2FS_I(inode)->i_compr_blocks)) { diff -u linux-aws-5.15-5.15.0/fs/f2fs/gc.c linux-aws-5.15-5.15.0/fs/f2fs/gc.c --- linux-aws-5.15-5.15.0/fs/f2fs/gc.c +++ linux-aws-5.15-5.15.0/fs/f2fs/gc.c @@ -1080,8 +1080,8 @@ .op = REQ_OP_READ, .op_flags = 0, .encrypted_page = NULL, - .in_list = false, - .retry = false, + .in_list = 0, + .retry = 0, }; int err; @@ -1167,8 +1167,8 @@ .op = REQ_OP_READ, .op_flags = 0, .encrypted_page = NULL, - .in_list = false, - .retry = false, + .in_list = 0, + .retry = 0, }; struct dnode_of_data dn; struct f2fs_summary sum; @@ -1199,7 +1199,8 @@ } if (f2fs_is_pinned_file(inode)) { - f2fs_pin_file_control(inode, true); + if (gc_type == FG_GC) + f2fs_pin_file_control(inode, true); err = -EAGAIN; goto out; } @@ 
-1781,23 +1782,31 @@ if (sync) goto stop; - if (has_not_enough_free_secs(sbi, sec_freed, 0)) { - if (skipped_round <= MAX_SKIP_GC_COUNT || - skipped_round * 2 < round) { - segno = NULL_SEGNO; - goto gc_more; - } + if (!has_not_enough_free_secs(sbi, sec_freed, 0)) + goto stop; - if (first_skipped < last_skipped && - (last_skipped - first_skipped) > - sbi->skipped_gc_rwsem) { - f2fs_drop_inmem_pages_all(sbi, true); - segno = NULL_SEGNO; - goto gc_more; - } - if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) + if (skipped_round <= MAX_SKIP_GC_COUNT || skipped_round * 2 < round) { + + /* Write checkpoint to reclaim prefree segments */ + if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE && + prefree_segments(sbi) && + !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { ret = f2fs_write_checkpoint(sbi, &cpc); + if (ret) + goto stop; + } + segno = NULL_SEGNO; + goto gc_more; + } + if (first_skipped < last_skipped && + (last_skipped - first_skipped) > + sbi->skipped_gc_rwsem) { + f2fs_drop_inmem_pages_all(sbi, true); + segno = NULL_SEGNO; + goto gc_more; } + if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) + ret = f2fs_write_checkpoint(sbi, &cpc); stop: SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0; SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno; diff -u linux-aws-5.15-5.15.0/fs/f2fs/node.c linux-aws-5.15-5.15.0/fs/f2fs/node.c --- linux-aws-5.15-5.15.0/fs/f2fs/node.c +++ linux-aws-5.15-5.15.0/fs/f2fs/node.c @@ -1583,7 +1583,7 @@ .op_flags = wbc_to_write_flags(wbc), .page = page, .encrypted_page = NULL, - .submitted = false, + .submitted = 0, .io_type = io_type, .io_wbc = wbc, }; diff -u linux-aws-5.15-5.15.0/fs/f2fs/segment.c linux-aws-5.15-5.15.0/fs/f2fs/segment.c --- linux-aws-5.15-5.15.0/fs/f2fs/segment.c +++ linux-aws-5.15-5.15.0/fs/f2fs/segment.c @@ -313,8 +313,7 @@ skip: iput(inode); } - congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); - cond_resched(); + f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); if (gc_failure) { if (++looped >= count) return; @@ -789,8 +788,7 @@ do { ret = __submit_flush_wait(sbi, FDEV(i).bdev); if (ret) - congestion_wait(BLK_RW_ASYNC, - DEFAULT_IO_TIMEOUT); + f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); } while (ret && --count); if (ret) { @@ -3106,7 +3104,7 @@ blk_finish_plug(&plug); mutex_unlock(&dcc->cmd_lock); trimmed += __wait_all_discard_cmd(sbi, NULL); - congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); + f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); goto next; } skip: @@ -3475,10 +3473,10 @@ struct f2fs_bio_info *io; if (F2FS_IO_ALIGNED(sbi)) - fio->retry = false; + fio->retry = 0; INIT_LIST_HEAD(&fio->list); - fio->in_list = true; + fio->in_list = 1; io = sbi->write_io[fio->type] + fio->temp; spin_lock(&io->io_lock); list_add_tail(&fio->list, &io->io_list); @@ -3490,24 +3488,30 @@ up_read(&SM_I(sbi)->curseg_lock); } -static void update_device_state(struct f2fs_io_info *fio) +void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, + block_t blkaddr, unsigned int blkcnt) { - struct f2fs_sb_info *sbi = fio->sbi; - unsigned int devidx; - if (!f2fs_is_multi_device(sbi)) return; - devidx = f2fs_target_device_index(sbi, fio->new_blkaddr); - - /* update device state for fsync */ - f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO); + while (1) { + unsigned int devidx = f2fs_target_device_index(sbi, blkaddr); + unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1; + + /* update device state for fsync */ + f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO); + + /* update device state for checkpoint */ + if (!f2fs_test_bit(devidx, 
(char *)&sbi->dirty_device)) { + spin_lock(&sbi->dev_lock); + f2fs_set_bit(devidx, (char *)&sbi->dirty_device); + spin_unlock(&sbi->dev_lock); + } - /* update device state for checkpoint */ - if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) { - spin_lock(&sbi->dev_lock); - f2fs_set_bit(devidx, (char *)&sbi->dirty_device); - spin_unlock(&sbi->dev_lock); + if (blkcnt <= blks) + break; + blkcnt -= blks; + blkaddr += blks; } } @@ -3534,7 +3538,7 @@ goto reallocate; } - update_device_state(fio); + f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1); if (keep_order) up_read(&fio->sbi->io_order_lock); @@ -3553,7 +3557,7 @@ .new_blkaddr = page->index, .page = page, .encrypted_page = NULL, - .in_list = false, + .in_list = 0, }; if (unlikely(page->index >= MAIN_BLKADDR(sbi))) @@ -3616,6 +3620,10 @@ goto drop_bio; } + if (fio->post_read) + invalidate_mapping_pages(META_MAPPING(sbi), + fio->new_blkaddr, fio->new_blkaddr); + stat_inc_inplace_blocks(fio->sbi); if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE))) @@ -3623,7 +3631,8 @@ else err = f2fs_submit_page_bio(fio); if (!err) { - update_device_state(fio); + f2fs_update_device_state(fio->sbi, fio->ino, + fio->new_blkaddr, 1); f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE); } @@ -3795,10 +3804,16 @@ void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, block_t len) { + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); block_t i; + if (!f2fs_post_read_required(inode)) + return; + for (i = 0; i < len; i++) f2fs_wait_on_block_writeback(inode, blkaddr + i); + + invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1); } static int read_compacted_summaries(struct f2fs_sb_info *sbi) diff -u linux-aws-5.15-5.15.0/fs/f2fs/super.c linux-aws-5.15-5.15.0/fs/f2fs/super.c --- linux-aws-5.15-5.15.0/fs/f2fs/super.c +++ linux-aws-5.15-5.15.0/fs/f2fs/super.c @@ -2168,8 +2168,7 @@ /* we should flush all the data to keep data consistency */ do { sync_inodes_sb(sbi->sb); - cond_resched(); - congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); + f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--); if (unlikely(retry < 0)) @@ -2540,8 +2539,7 @@ &page, &fsdata); if (unlikely(err)) { if (err == -ENOMEM) { - congestion_wait(BLK_RW_ASYNC, - DEFAULT_IO_TIMEOUT); + f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT); goto retry; } set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); @@ -3831,6 +3829,7 @@ { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); unsigned int max_devices = MAX_DEVICES; + unsigned int logical_blksize; int i; /* Initialize single device information */ @@ -3851,6 +3850,9 @@ if (!sbi->devs) return -ENOMEM; + logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev); + sbi->aligned_blksize = true; + for (i = 0; i < max_devices; i++) { if (i > 0 && !RDEV(i).path[0]) @@ -3887,6 +3889,9 @@ /* to release errored devices */ sbi->s_ndevs = i + 1; + if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev)) + sbi->aligned_blksize = false; + #ifdef CONFIG_BLK_DEV_ZONED if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM && !f2fs_sb_has_blkzoned(sbi)) { diff -u linux-aws-5.15-5.15.0/fs/hugetlbfs/inode.c linux-aws-5.15-5.15.0/fs/hugetlbfs/inode.c --- linux-aws-5.15-5.15.0/fs/hugetlbfs/inode.c +++ linux-aws-5.15-5.15.0/fs/hugetlbfs/inode.c @@ -1234,6 +1234,7 @@ { struct hugetlbfs_fs_context *ctx = fc->fs_private; struct fs_parse_result result; + struct hstate *h; char *rest; unsigned long ps; int opt; @@ -1278,11 +1279,12 @@ case Opt_pagesize: 
ps = memparse(param->string, &rest); - ctx->hstate = size_to_hstate(ps); - if (!ctx->hstate) { + h = size_to_hstate(ps); + if (!h) { pr_err("Unsupported page size %lu MB\n", ps >> 20); return -EINVAL; } + ctx->hstate = h; return 0; case Opt_min_size: diff -u linux-aws-5.15-5.15.0/fs/ksmbd/smb2pdu.c linux-aws-5.15-5.15.0/fs/ksmbd/smb2pdu.c --- linux-aws-5.15-5.15.0/fs/ksmbd/smb2pdu.c +++ linux-aws-5.15-5.15.0/fs/ksmbd/smb2pdu.c @@ -6175,8 +6175,10 @@ err = ksmbd_iov_pin_rsp_read(work, (void *)rsp, offsetof(struct smb2_read_rsp, Buffer), aux_payload_buf, nbytes); - if (err) + if (err) { + kvfree(aux_payload_buf); goto out; + } kvfree(rpc_resp); } else { err = ksmbd_iov_pin_rsp(work, (void *)rsp, @@ -6386,8 +6388,10 @@ err = ksmbd_iov_pin_rsp_read(work, (void *)rsp, offsetof(struct smb2_read_rsp, Buffer), aux_payload_buf, nbytes); - if (err) + if (err) { + kvfree(aux_payload_buf); goto out; + } ksmbd_fd_put(work, fp); return 0; diff -u linux-aws-5.15-5.15.0/fs/nfs/nfs4proc.c linux-aws-5.15-5.15.0/fs/nfs/nfs4proc.c --- linux-aws-5.15-5.15.0/fs/nfs/nfs4proc.c +++ linux-aws-5.15-5.15.0/fs/nfs/nfs4proc.c @@ -10521,29 +10521,33 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) { ssize_t error, error2, error3; + size_t left = size; - error = generic_listxattr(dentry, list, size); + error = generic_listxattr(dentry, list, left); if (error < 0) return error; if (list) { list += error; - size -= error; + left -= error; } - error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size); + error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left); if (error2 < 0) return error2; if (list) { list += error2; - size -= error2; + left -= error2; } - error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size); + error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left); if (error3 < 0) return error3; - return error + error2 + error3; + error += error2 + error3; + if (size && error > size) + return -ERANGE; + return error; } static void nfs4_enable_swap(struct inode *inode) diff -u linux-aws-5.15-5.15.0/fs/ntfs3/attrib.c linux-aws-5.15-5.15.0/fs/ntfs3/attrib.c --- linux-aws-5.15-5.15.0/fs/ntfs3/attrib.c +++ linux-aws-5.15-5.15.0/fs/ntfs3/attrib.c @@ -1583,8 +1583,10 @@ le_b = NULL; attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b); - if (!attr_b) - return -ENOENT; + if (!attr_b) { + err = -ENOENT; + goto out; + } attr = attr_b; le = le_b; @@ -1665,13 +1667,15 @@ ok: run_truncate_around(run, vcn); out: - if (new_valid > data_size) - new_valid = data_size; + if (attr_b) { + if (new_valid > data_size) + new_valid = data_size; - valid_size = le64_to_cpu(attr_b->nres.valid_size); - if (new_valid != valid_size) { - attr_b->nres.valid_size = cpu_to_le64(valid_size); - mi_b->dirty = true; + valid_size = le64_to_cpu(attr_b->nres.valid_size); + if (new_valid != valid_size) { + attr_b->nres.valid_size = cpu_to_le64(valid_size); + mi_b->dirty = true; + } } return err; diff -u linux-aws-5.15-5.15.0/fs/ntfs3/attrlist.c linux-aws-5.15-5.15.0/fs/ntfs3/attrlist.c --- linux-aws-5.15-5.15.0/fs/ntfs3/attrlist.c +++ linux-aws-5.15-5.15.0/fs/ntfs3/attrlist.c @@ -127,12 +127,13 @@ { size_t off; u16 sz; + const unsigned le_min_size = le_size(0); if (!le) { le = ni->attr_list.le; } else { sz = le16_to_cpu(le->size); - if (sz < sizeof(struct ATTR_LIST_ENTRY)) { + if (sz < le_min_size) { /* Impossible 'cause we should not return such le. */ return NULL; } @@ -141,7 +142,7 @@ /* Check boundary. 
*/ off = PtrOffset(ni->attr_list.le, le); - if (off + sizeof(struct ATTR_LIST_ENTRY) > ni->attr_list.size) { + if (off + le_min_size > ni->attr_list.size) { /* The regular end of list. */ return NULL; } @@ -149,8 +150,7 @@ sz = le16_to_cpu(le->size); /* Check le for errors. */ - if (sz < sizeof(struct ATTR_LIST_ENTRY) || - off + sz > ni->attr_list.size || + if (sz < le_min_size || off + sz > ni->attr_list.size || sz < le->name_off + le->name_len * sizeof(short)) { return NULL; } diff -u linux-aws-5.15-5.15.0/fs/ntfs3/dir.c linux-aws-5.15-5.15.0/fs/ntfs3/dir.c --- linux-aws-5.15-5.15.0/fs/ntfs3/dir.c +++ linux-aws-5.15-5.15.0/fs/ntfs3/dir.c @@ -309,11 +309,31 @@ return 0; } - /* NTFS: symlinks are "dir + reparse" or "file + reparse" */ - if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) - dt_type = DT_LNK; - else - dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG; + /* + * NTFS: symlinks are "dir + reparse" or "file + reparse" + * Unfortunately reparse attribute is used for many purposes (several dozens). + * It is not possible here to know is this name symlink or not. + * To get exactly the type of name we should to open inode (read mft). + * getattr for opened file (fstat) correctly returns symlink. + */ + dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG; + + /* + * It is not reliable to detect the type of name using duplicated information + * stored in parent directory. + * The only correct way to get the type of name - read MFT record and find ATTR_STD. + * The code below is not good idea. + * It does additional locks/reads just to get the type of name. + * Should we use additional mount option to enable branch below? + */ + if ((fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT) && + ino != ni->mi.rno) { + struct inode *inode = ntfs_iget5(sbi->sb, &e->ref, NULL); + if (!IS_ERR_OR_NULL(inode)) { + dt_type = fs_umode_to_dtype(inode->i_mode); + iput(inode); + } + } return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type); } @@ -495,11 +515,9 @@ struct INDEX_HDR *hdr; const struct ATTR_FILE_NAME *fname; u32 e_size, off, end; - u64 vbo = 0; size_t drs = 0, fles = 0, bit = 0; - loff_t i_size = ni->vfs_inode.i_size; struct indx_node *node = NULL; - u8 index_bits = ni->dir.index_bits; + size_t max_indx = ni->vfs_inode.i_size >> ni->dir.index_bits; if (is_empty) *is_empty = true; @@ -543,7 +561,7 @@ fles += 1; } - if (vbo >= i_size) + if (bit >= max_indx) goto out; err = indx_used_bit(&ni->dir, ni, &bit); @@ -553,8 +571,7 @@ if (bit == MINUS_ONE_T) goto out; - vbo = (u64)bit << index_bits; - if (vbo >= i_size) + if (bit >= max_indx) goto out; err = indx_read(&ni->dir, ni, bit << ni->dir.idx2vbn_bits, @@ -564,7 +581,6 @@ hdr = &node->index->ihdr; bit += 1; - vbo = (u64)bit << ni->dir.idx2vbn_bits; } out: diff -u linux-aws-5.15-5.15.0/fs/ntfs3/file.c linux-aws-5.15-5.15.0/fs/ntfs3/file.c --- linux-aws-5.15-5.15.0/fs/ntfs3/file.c +++ linux-aws-5.15-5.15.0/fs/ntfs3/file.c @@ -1090,6 +1090,8 @@ iocb->ki_pos += written; if (iocb->ki_pos > ni->i_valid) ni->i_valid = iocb->ki_pos; + if (iocb->ki_pos > i_size) + i_size_write(inode, iocb->ki_pos); return written; } diff -u linux-aws-5.15-5.15.0/fs/ntfs3/fslog.c linux-aws-5.15-5.15.0/fs/ntfs3/fslog.c --- linux-aws-5.15-5.15.0/fs/ntfs3/fslog.c +++ linux-aws-5.15-5.15.0/fs/ntfs3/fslog.c @@ -465,7 +465,7 @@ { const struct RESTART_AREA *ra; u16 cl, fl, ul; - u32 off, l_size, file_dat_bits, file_size_round; + u32 off, l_size, seq_bits; u16 ro = le16_to_cpu(rhdr->ra_off); u32 sys_page = le32_to_cpu(rhdr->sys_page_size); 
@@ -511,13 +511,15 @@ /* Make sure the sequence number bits match the log file size. */ l_size = le64_to_cpu(ra->l_size); - file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits); - file_size_round = 1u << (file_dat_bits + 3); - if (file_size_round != l_size && - (file_size_round < l_size || (file_size_round / 2) > l_size)) { - return false; + seq_bits = sizeof(u64) * 8 + 3; + while (l_size) { + l_size >>= 1; + seq_bits -= 1; } + if (seq_bits != ra->seq_num_bits) + return false; + /* The log page data offset and record header length must be quad-aligned. */ if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) || !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8)) diff -u linux-aws-5.15-5.15.0/fs/ntfs3/fsntfs.c linux-aws-5.15-5.15.0/fs/ntfs3/fsntfs.c --- linux-aws-5.15-5.15.0/fs/ntfs3/fsntfs.c +++ linux-aws-5.15-5.15.0/fs/ntfs3/fsntfs.c @@ -981,6 +981,30 @@ return cpu_to_le32(hash); } +/* + * simple wrapper for sb_bread_unmovable. + */ +struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block) +{ + struct ntfs_sb_info *sbi = sb->s_fs_info; + struct buffer_head *bh; + + if (unlikely(block >= sbi->volume.blocks)) { + /* prevent generic message "attempt to access beyond end of device" */ + ntfs_err(sb, "try to read out of volume at offset 0x%llx", + (u64)block << sb->s_blocksize_bits); + return NULL; + } + + bh = sb_bread_unmovable(sb, block); + if (bh) + return bh; + + ntfs_err(sb, "failed to read volume at offset 0x%llx", + (u64)block << sb->s_blocksize_bits); + return NULL; +} + int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer) { struct block_device *bdev = sb->s_bdev; diff -u linux-aws-5.15-5.15.0/fs/ntfs3/inode.c linux-aws-5.15-5.15.0/fs/ntfs3/inode.c --- linux-aws-5.15-5.15.0/fs/ntfs3/inode.c +++ linux-aws-5.15-5.15.0/fs/ntfs3/inode.c @@ -402,7 +402,6 @@ goto out; if (!is_match && name) { - /* Reuse rec as buffer for ascii name. */ err = -ENOENT; goto out; } @@ -417,6 +416,7 @@ if (names != le16_to_cpu(rec->hard_links)) { /* Correct minor error on the fly. Do not mark inode as dirty. */ + ntfs_inode_warn(inode, "Correct links count -> %u.", names); rec->hard_links = cpu_to_le16(names); ni->mi.dirty = true; } diff -u linux-aws-5.15-5.15.0/fs/ntfs3/ntfs.h linux-aws-5.15-5.15.0/fs/ntfs3/ntfs.h --- linux-aws-5.15-5.15.0/fs/ntfs3/ntfs.h +++ linux-aws-5.15-5.15.0/fs/ntfs3/ntfs.h @@ -517,12 +517,10 @@ __le64 vcn; // 0x08: Starting VCN of this attribute. struct MFT_REF ref; // 0x10: MFT record number with attribute. __le16 id; // 0x18: struct ATTRIB ID. - __le16 name[3]; // 0x1A: Just to align. To get real name can use bNameOffset. + __le16 name[]; // 0x1A: Just to align. To get real name can use name_off. 
}; // sizeof(0x20) -static_assert(sizeof(struct ATTR_LIST_ENTRY) == 0x20); - static inline u32 le_size(u8 name_len) { return ALIGN(offsetof(struct ATTR_LIST_ENTRY, name) + diff -u linux-aws-5.15-5.15.0/fs/ntfs3/ntfs_fs.h linux-aws-5.15-5.15.0/fs/ntfs3/ntfs_fs.h --- linux-aws-5.15-5.15.0/fs/ntfs3/ntfs_fs.h +++ linux-aws-5.15-5.15.0/fs/ntfs3/ntfs_fs.h @@ -581,6 +581,7 @@ int log_replay(struct ntfs_inode *ni, bool *initialized); /* Globals from fsntfs.c */ +struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block); bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes); int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes, bool simple); @@ -1011,19 +1012,6 @@ return (size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; } -static inline struct buffer_head *ntfs_bread(struct super_block *sb, - sector_t block) -{ - struct buffer_head *bh = sb_bread(sb, block); - - if (bh) - return bh; - - ntfs_err(sb, "failed to read volume at offset 0x%llx", - (u64)block << sb->s_blocksize_bits); - return NULL; -} - static inline struct ntfs_inode *ntfs_i(struct inode *inode) { return container_of(inode, struct ntfs_inode, vfs_inode); diff -u linux-aws-5.15-5.15.0/fs/ntfs3/record.c linux-aws-5.15-5.15.0/fs/ntfs3/record.c --- linux-aws-5.15-5.15.0/fs/ntfs3/record.c +++ linux-aws-5.15-5.15.0/fs/ntfs3/record.c @@ -509,8 +509,20 @@ return false; if (ni && is_attr_indexed(attr)) { - le16_add_cpu(&ni->mi.mrec->hard_links, -1); - ni->mi.dirty = true; + u16 links = le16_to_cpu(ni->mi.mrec->hard_links); + struct ATTR_FILE_NAME *fname = + attr->type != ATTR_NAME ? + NULL : + resident_data_ex(attr, + SIZEOF_ATTRIBUTE_FILENAME); + if (fname && fname->type == FILE_NAME_DOS) { + /* Do not decrease links count deleting DOS name. */ + } else if (!links) { + /* minor error. Not critical. 
*/ + } else { + ni->mi.mrec->hard_links = cpu_to_le16(links - 1); + ni->mi.dirty = true; + } } used -= asize; diff -u linux-aws-5.15-5.15.0/fs/ntfs3/xattr.c linux-aws-5.15-5.15.0/fs/ntfs3/xattr.c --- linux-aws-5.15-5.15.0/fs/ntfs3/xattr.c +++ linux-aws-5.15-5.15.0/fs/ntfs3/xattr.c @@ -217,6 +217,9 @@ if (!ea->name_len) break; + if (ea->name_len > ea_size) + break; + if (buffer) { /* Check if we can use field ea->name */ if (off + ea_size > size) diff -u linux-aws-5.15-5.15.0/fs/quota/dquot.c linux-aws-5.15-5.15.0/fs/quota/dquot.c --- linux-aws-5.15-5.15.0/fs/quota/dquot.c +++ linux-aws-5.15-5.15.0/fs/quota/dquot.c @@ -399,15 +399,17 @@ EXPORT_SYMBOL(dquot_mark_dquot_dirty); /* Dirtify all the dquots - this can block when journalling */ -static inline int mark_all_dquot_dirty(struct dquot * const *dquot) +static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots) { int ret, err, cnt; + struct dquot *dquot; ret = err = 0; for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (dquot[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (dquot) /* Even in case of error we have to continue */ - ret = mark_dquot_dirty(dquot[cnt]); + ret = mark_dquot_dirty(dquot); if (!err) err = ret; } @@ -1004,14 +1006,15 @@ } EXPORT_SYMBOL(dqget); -static inline struct dquot **i_dquot(struct inode *inode) +static inline struct dquot __rcu **i_dquot(struct inode *inode) { - return inode->i_sb->s_op->get_dquots(inode); + /* Force __rcu for now until filesystems are fixed */ + return (struct dquot __rcu **)inode->i_sb->s_op->get_dquots(inode); } static int dqinit_needed(struct inode *inode, int type) { - struct dquot * const *dquots; + struct dquot __rcu * const *dquots; int cnt; if (IS_NOQUOTA(inode)) @@ -1084,59 +1087,7 @@ return err; } -/* - * Remove references to dquots from inode and add dquot to list for freeing - * if we have the last reference to dquot - */ -static void remove_inode_dquot_ref(struct inode *inode, int type, - struct list_head *tofree_head) -{ - struct dquot **dquots = i_dquot(inode); - struct dquot *dquot = dquots[type]; - - if (!dquot) - return; - - dquots[type] = NULL; - if (list_empty(&dquot->dq_free)) { - /* - * The inode still has reference to dquot so it can't be in the - * free list - */ - spin_lock(&dq_list_lock); - list_add(&dquot->dq_free, tofree_head); - spin_unlock(&dq_list_lock); - } else { - /* - * Dquot is already in a list to put so we won't drop the last - * reference here. - */ - dqput(dquot); - } -} - -/* - * Free list of dquots - * Dquots are removed from inodes and no new references can be got so we are - * the only ones holding reference - */ -static void put_dquot_list(struct list_head *tofree_head) -{ - struct list_head *act_head; - struct dquot *dquot; - - act_head = tofree_head->next; - while (act_head != tofree_head) { - dquot = list_entry(act_head, struct dquot, dq_free); - act_head = act_head->next; - /* Remove dquot from the list so we won't have problems... 
*/ - list_del_init(&dquot->dq_free); - dqput(dquot); - } -} - -static void remove_dquot_ref(struct super_block *sb, int type, - struct list_head *tofree_head) +static void remove_dquot_ref(struct super_block *sb, int type) { struct inode *inode; #ifdef CONFIG_QUOTA_DEBUG @@ -1153,11 +1104,18 @@ */ spin_lock(&dq_data_lock); if (!IS_NOQUOTA(inode)) { + struct dquot __rcu **dquots = i_dquot(inode); + struct dquot *dquot = srcu_dereference_check( + dquots[type], &dquot_srcu, + lockdep_is_held(&dq_data_lock)); + #ifdef CONFIG_QUOTA_DEBUG if (unlikely(inode_get_rsv_space(inode) > 0)) reserved = 1; #endif - remove_inode_dquot_ref(inode, type, tofree_head); + rcu_assign_pointer(dquots[type], NULL); + if (dquot) + dqput(dquot); } spin_unlock(&dq_data_lock); } @@ -1174,13 +1132,8 @@ /* Gather all references from inodes and drop them */ static void drop_dquot_ref(struct super_block *sb, int type) { - LIST_HEAD(tofree_head); - - if (sb->dq_op) { - remove_dquot_ref(sb, type, &tofree_head); - synchronize_srcu(&dquot_srcu); - put_dquot_list(&tofree_head); - } + if (sb->dq_op) + remove_dquot_ref(sb, type); } static inline @@ -1513,7 +1466,8 @@ static int __dquot_initialize(struct inode *inode, int type) { int cnt, init_needed = 0; - struct dquot **dquots, *got[MAXQUOTAS] = {}; + struct dquot __rcu **dquots; + struct dquot *got[MAXQUOTAS] = {}; struct super_block *sb = inode->i_sb; qsize_t rsv; int ret = 0; @@ -1588,7 +1542,7 @@ if (!got[cnt]) continue; if (!dquots[cnt]) { - dquots[cnt] = got[cnt]; + rcu_assign_pointer(dquots[cnt], got[cnt]); got[cnt] = NULL; /* * Make quota reservation system happy if someone @@ -1596,12 +1550,16 @@ */ rsv = inode_get_rsv_space(inode); if (unlikely(rsv)) { + struct dquot *dquot = srcu_dereference_check( + dquots[cnt], &dquot_srcu, + lockdep_is_held(&dq_data_lock)); + spin_lock(&inode->i_lock); /* Get reservation again under proper lock */ rsv = __inode_get_rsv_space(inode); - spin_lock(&dquots[cnt]->dq_dqb_lock); - dquots[cnt]->dq_dqb.dqb_rsvspace += rsv; - spin_unlock(&dquots[cnt]->dq_dqb_lock); + spin_lock(&dquot->dq_dqb_lock); + dquot->dq_dqb.dqb_rsvspace += rsv; + spin_unlock(&dquot->dq_dqb_lock); spin_unlock(&inode->i_lock); } } @@ -1623,7 +1581,7 @@ bool dquot_initialize_needed(struct inode *inode) { - struct dquot **dquots; + struct dquot __rcu **dquots; int i; if (!inode_quota_active(inode)) @@ -1648,13 +1606,14 @@ static void __dquot_drop(struct inode *inode) { int cnt; - struct dquot **dquots = i_dquot(inode); + struct dquot __rcu **dquots = i_dquot(inode); struct dquot *put[MAXQUOTAS]; spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - put[cnt] = dquots[cnt]; - dquots[cnt] = NULL; + put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu, + lockdep_is_held(&dq_data_lock)); + rcu_assign_pointer(dquots[cnt], NULL); } spin_unlock(&dq_data_lock); dqput_all(put); @@ -1662,7 +1621,7 @@ void dquot_drop(struct inode *inode) { - struct dquot * const *dquots; + struct dquot __rcu * const *dquots; int cnt; if (IS_NOQUOTA(inode)) @@ -1735,7 +1694,8 @@ int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; int reserve = flags & DQUOT_SPACE_RESERVE; - struct dquot **dquots; + struct dquot __rcu **dquots; + struct dquot *dquot; if (!inode_quota_active(inode)) { if (reserve) { @@ -1755,27 +1715,26 @@ index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; if (reserve) { - ret = dquot_add_space(dquots[cnt], 0, 
number, flags, - &warn[cnt]); + ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]); } else { - ret = dquot_add_space(dquots[cnt], number, 0, flags, - &warn[cnt]); + ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]); } if (ret) { /* Back out changes we already did */ for (cnt--; cnt >= 0; cnt--) { - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; - spin_lock(&dquots[cnt]->dq_dqb_lock); + spin_lock(&dquot->dq_dqb_lock); if (reserve) - dquot_free_reserved_space(dquots[cnt], - number); + dquot_free_reserved_space(dquot, number); else - dquot_decr_space(dquots[cnt], number); - spin_unlock(&dquots[cnt]->dq_dqb_lock); + dquot_decr_space(dquot, number); + spin_unlock(&dquot->dq_dqb_lock); } spin_unlock(&inode->i_lock); goto out_flush_warn; @@ -1805,7 +1764,8 @@ { int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; - struct dquot * const *dquots; + struct dquot __rcu * const *dquots; + struct dquot *dquot; if (!inode_quota_active(inode)) return 0; @@ -1816,17 +1776,19 @@ index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; - ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]); + ret = dquot_add_inodes(dquot, 1, &warn[cnt]); if (ret) { for (cnt--; cnt >= 0; cnt--) { - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; /* Back out changes we already did */ - spin_lock(&dquots[cnt]->dq_dqb_lock); - dquot_decr_inodes(dquots[cnt], 1); - spin_unlock(&dquots[cnt]->dq_dqb_lock); + spin_lock(&dquot->dq_dqb_lock); + dquot_decr_inodes(dquot, 1); + spin_unlock(&dquot->dq_dqb_lock); } goto warn_put_all; } @@ -1847,7 +1809,8 @@ */ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) { - struct dquot **dquots; + struct dquot __rcu **dquots; + struct dquot *dquot; int cnt, index; if (!inode_quota_active(inode)) { @@ -1863,9 +1826,8 @@ spin_lock(&inode->i_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (dquots[cnt]) { - struct dquot *dquot = dquots[cnt]; - + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (dquot) { spin_lock(&dquot->dq_dqb_lock); if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number)) number = dquot->dq_dqb.dqb_rsvspace; @@ -1889,7 +1851,8 @@ */ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) { - struct dquot **dquots; + struct dquot __rcu **dquots; + struct dquot *dquot; int cnt, index; if (!inode_quota_active(inode)) { @@ -1905,9 +1868,8 @@ spin_lock(&inode->i_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { - if (dquots[cnt]) { - struct dquot *dquot = dquots[cnt]; - + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (dquot) { spin_lock(&dquot->dq_dqb_lock); if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number)) number = dquot->dq_dqb.dqb_curspace; @@ -1933,7 +1895,8 @@ { unsigned int cnt; struct dquot_warn warn[MAXQUOTAS]; - struct dquot **dquots; + struct dquot __rcu **dquots; + struct dquot *dquot; int reserve = flags & DQUOT_SPACE_RESERVE, index; if (!inode_quota_active(inode)) { @@ -1954,17 +1917,18 @@ int wtype; warn[cnt].w_type = QUOTA_NL_NOWARN; - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; - spin_lock(&dquots[cnt]->dq_dqb_lock); - wtype = info_bdq_free(dquots[cnt], number); + spin_lock(&dquot->dq_dqb_lock); + wtype = 
info_bdq_free(dquot, number); if (wtype != QUOTA_NL_NOWARN) - prepare_warning(&warn[cnt], dquots[cnt], wtype); + prepare_warning(&warn[cnt], dquot, wtype); if (reserve) - dquot_free_reserved_space(dquots[cnt], number); + dquot_free_reserved_space(dquot, number); else - dquot_decr_space(dquots[cnt], number); - spin_unlock(&dquots[cnt]->dq_dqb_lock); + dquot_decr_space(dquot, number); + spin_unlock(&dquot->dq_dqb_lock); } if (reserve) *inode_reserved_space(inode) -= number; @@ -1988,7 +1952,8 @@ { unsigned int cnt; struct dquot_warn warn[MAXQUOTAS]; - struct dquot * const *dquots; + struct dquot __rcu * const *dquots; + struct dquot *dquot; int index; if (!inode_quota_active(inode)) @@ -1999,16 +1964,16 @@ spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { int wtype; - warn[cnt].w_type = QUOTA_NL_NOWARN; - if (!dquots[cnt]) + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); + if (!dquot) continue; - spin_lock(&dquots[cnt]->dq_dqb_lock); - wtype = info_idq_free(dquots[cnt], 1); + spin_lock(&dquot->dq_dqb_lock); + wtype = info_idq_free(dquot, 1); if (wtype != QUOTA_NL_NOWARN) - prepare_warning(&warn[cnt], dquots[cnt], wtype); - dquot_decr_inodes(dquots[cnt], 1); - spin_unlock(&dquots[cnt]->dq_dqb_lock); + prepare_warning(&warn[cnt], dquot, wtype); + dquot_decr_inodes(dquot, 1); + spin_unlock(&dquot->dq_dqb_lock); } spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); @@ -2034,8 +1999,9 @@ qsize_t cur_space; qsize_t rsv_space = 0; qsize_t inode_usage = 1; + struct dquot __rcu **dquots; struct dquot *transfer_from[MAXQUOTAS] = {}; - int cnt, ret = 0; + int cnt, index, ret = 0; char is_valid[MAXQUOTAS] = {}; struct dquot_warn warn_to[MAXQUOTAS]; struct dquot_warn warn_from_inodes[MAXQUOTAS]; @@ -2066,6 +2032,7 @@ } cur_space = __inode_get_bytes(inode); rsv_space = __inode_get_rsv_space(inode); + dquots = i_dquot(inode); /* * Build the transfer_from list, check limits, and update usage in * the target structures. @@ -2080,7 +2047,8 @@ if (!sb_has_quota_active(inode->i_sb, cnt)) continue; is_valid[cnt] = 1; - transfer_from[cnt] = i_dquot(inode)[cnt]; + transfer_from[cnt] = srcu_dereference_check(dquots[cnt], + &dquot_srcu, lockdep_is_held(&dq_data_lock)); ret = dquot_add_inodes(transfer_to[cnt], inode_usage, &warn_to[cnt]); if (ret) @@ -2119,13 +2087,21 @@ rsv_space); spin_unlock(&transfer_from[cnt]->dq_dqb_lock); } - i_dquot(inode)[cnt] = transfer_to[cnt]; + rcu_assign_pointer(dquots[cnt], transfer_to[cnt]); } spin_unlock(&inode->i_lock); spin_unlock(&dq_data_lock); - mark_all_dquot_dirty(transfer_from); - mark_all_dquot_dirty(transfer_to); + /* + * These arrays are local and we hold dquot references so we don't need + * the srcu protection but still take dquot_srcu to avoid warning in + * mark_all_dquot_dirty(). 
+ */ + index = srcu_read_lock(&dquot_srcu); + mark_all_dquot_dirty((struct dquot __rcu **)transfer_from); + mark_all_dquot_dirty((struct dquot __rcu **)transfer_to); + srcu_read_unlock(&dquot_srcu, index); + flush_warnings(warn_to); flush_warnings(warn_from_inodes); flush_warnings(warn_from_space); diff -u linux-aws-5.15-5.15.0/fs/select.c linux-aws-5.15-5.15.0/fs/select.c --- linux-aws-5.15-5.15.0/fs/select.c +++ linux-aws-5.15-5.15.0/fs/select.c @@ -475,7 +475,7 @@ wait->_key |= POLLOUT_SET; } -static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) +static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) { ktime_t expire, *to = NULL; struct poll_wqueues table; diff -u linux-aws-5.15-5.15.0/fs/zonefs/super.c linux-aws-5.15-5.15.0/fs/zonefs/super.c --- linux-aws-5.15-5.15.0/fs/zonefs/super.c +++ linux-aws-5.15-5.15.0/fs/zonefs/super.c @@ -327,16 +327,18 @@ } } -struct zonefs_ioerr_data { - struct inode *inode; - bool write; -}; - static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx, void *data) { - struct zonefs_ioerr_data *err = data; - struct inode *inode = err->inode; + struct blk_zone *z = data; + + *z = *zone; + return 0; +} + +static void zonefs_handle_io_error(struct inode *inode, struct blk_zone *zone, + bool write) +{ struct zonefs_inode_info *zi = ZONEFS_I(inode); struct super_block *sb = inode->i_sb; struct zonefs_sb_info *sbi = ZONEFS_SB(sb); @@ -352,8 +354,8 @@ isize = i_size_read(inode); if (zone->cond != BLK_ZONE_COND_OFFLINE && zone->cond != BLK_ZONE_COND_READONLY && - !err->write && isize == data_size) - return 0; + !write && isize == data_size) + return; /* * At this point, we detected either a bad zone or an inconsistency @@ -374,8 +376,9 @@ * In all cases, warn about inode size inconsistency and handle the * IO error according to the zone condition and to the mount options. */ - if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size) - zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n", + if (isize != data_size) + zonefs_warn(sb, + "inode %lu: invalid size %lld (should be %lld)\n", inode->i_ino, isize, data_size); /* @@ -435,8 +438,6 @@ zonefs_update_stats(inode, data_size); zonefs_i_size_write(inode, data_size); zi->i_wpoffset = data_size; - - return 0; } /* @@ -450,23 +451,25 @@ { struct zonefs_inode_info *zi = ZONEFS_I(inode); struct super_block *sb = inode->i_sb; - struct zonefs_sb_info *sbi = ZONEFS_SB(sb); unsigned int noio_flag; - unsigned int nr_zones = 1; - struct zonefs_ioerr_data err = { - .inode = inode, - .write = write, - }; + struct blk_zone zone; int ret; /* - * The only files that have more than one zone are conventional zone - * files with aggregated conventional zones, for which the inode zone - * size is always larger than the device zone size. + * Conventional zone have no write pointer and cannot become read-only + * or offline. So simply fake a report for a single or aggregated zone + * and let zonefs_handle_io_error() correct the zone inode information + * according to the mount options. 
*/ - if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev)) - nr_zones = zi->i_zone_size >> - (sbi->s_zone_sectors_shift + SECTOR_SHIFT); + if (zi->i_ztype != ZONEFS_ZTYPE_SEQ) { + zone.start = zi->i_zsector; + zone.len = zi->i_max_size >> SECTOR_SHIFT; + zone.wp = zone.start + zone.len; + zone.type = BLK_ZONE_TYPE_CONVENTIONAL; + zone.cond = BLK_ZONE_COND_NOT_WP; + zone.capacity = zone.len; + goto handle_io_error; + } /* * Memory allocations in blkdev_report_zones() can trigger a memory @@ -477,12 +480,19 @@ * the GFP_NOIO context avoids both problems. */ noio_flag = memalloc_noio_save(); - ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones, - zonefs_io_error_cb, &err); - if (ret != nr_zones) + ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, 1, + zonefs_io_error_cb, &zone); + memalloc_noio_restore(noio_flag); + if (ret != 1) { zonefs_err(sb, "Get inode %lu zone information failed %d\n", inode->i_ino, ret); - memalloc_noio_restore(noio_flag); + zonefs_warn(sb, "remounting filesystem read-only\n"); + sb->s_flags |= SB_RDONLY; + return; + } + +handle_io_error: + zonefs_handle_io_error(inode, &zone, write); } static void zonefs_io_error(struct inode *inode, bool write) diff -u linux-aws-5.15-5.15.0/include/linux/bpf.h linux-aws-5.15-5.15.0/include/linux/bpf.h --- linux-aws-5.15-5.15.0/include/linux/bpf.h +++ linux-aws-5.15-5.15.0/include/linux/bpf.h @@ -192,9 +192,14 @@ */ atomic64_t refcnt ____cacheline_aligned; atomic64_t usercnt; - struct work_struct work; + /* rcu is used before freeing and work is only used during freeing */ + union { + struct work_struct work; + struct rcu_head rcu; + }; struct mutex freeze_mutex; atomic64_t writecnt; + bool free_after_mult_rcu_gp; }; static inline bool map_value_has_spin_lock(const struct bpf_map *map) @@ -2291,7 +2296,17 @@ bool btf_id_set_contains(const struct btf_id_set *set, u32 id); +#define MAX_BPRINTF_VARARGS 12 +#define MAX_BPRINTF_BUF 1024 + +struct bpf_bprintf_data { + u32 *bin_args; + char *buf; + bool get_bin_args; + bool get_buf; +}; + int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, - u32 **bin_buf, u32 num_args); -void bpf_bprintf_cleanup(void); + u32 num_args, struct bpf_bprintf_data *data); +void bpf_bprintf_cleanup(struct bpf_bprintf_data *data); #endif /* _LINUX_BPF_H */ diff -u linux-aws-5.15-5.15.0/include/linux/clk-provider.h linux-aws-5.15-5.15.0/include/linux/clk-provider.h --- linux-aws-5.15-5.15.0/include/linux/clk-provider.h +++ linux-aws-5.15-5.15.0/include/linux/clk-provider.h @@ -61,7 +61,7 @@ }; /** - * struct clk_duty - Struture encoding the duty cycle ratio of a clock + * struct clk_duty - Structure encoding the duty cycle ratio of a clock * * @num: Numerator of the duty cycle ratio * @den: Denominator of the duty cycle ratio @@ -116,7 +116,7 @@ * @restore_context: Restore the context of the clock after a restoration * of power. * - * @recalc_rate Recalculate the rate of this clock, by querying hardware. The + * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The * parent rate is an input parameter. It is up to the caller to * ensure that the prepare_mutex is held across this call. * Returns the calculated rate. 
Optional, but recommended - if @@ -442,7 +442,7 @@ * clock with the clock framework * @dev: device that is registering this clock * @name: name of this clock - * @parent_name: name of clock's parent + * @parent_data: name of clock's parent * @flags: framework-specific flags * @fixed_rate: non-adjustable clock rate * @fixed_accuracy: non-adjustable clock accuracy @@ -457,7 +457,7 @@ * the clock framework * @dev: device that is registering this clock * @name: name of this clock - * @parent_name: name of clock's parent + * @parent_data: name of clock's parent * @flags: framework-specific flags * @fixed_rate: non-adjustable clock rate */ @@ -593,7 +593,7 @@ * Clock with an adjustable divider affecting its output frequency. Implements * .recalc_rate, .set_rate and .round_rate * - * Flags: + * @flags: * CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the * register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is * the raw value read from the register, with the value of zero considered @@ -1023,11 +1023,12 @@ * @mwidth: width of the numerator bit field * @nshift: shift to the denominator bit field * @nwidth: width of the denominator bit field + * @approximation: clk driver's callback for calculating the divider clock * @lock: register lock * * Clock with adjustable fractional divider affecting its output frequency. * - * Flags: + * @flags: * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED * is set then the numerator and denominator are both the value read @@ -1086,7 +1087,7 @@ * Clock with an adjustable multiplier affecting its output frequency. * Implements .recalc_rate, .set_rate and .round_rate * - * Flags: + * @flags: * CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read * from the register, with 0 being a valid value effectively * zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is diff -u linux-aws-5.15-5.15.0/include/linux/filter.h linux-aws-5.15-5.15.0/include/linux/filter.h --- linux-aws-5.15-5.15.0/include/linux/filter.h +++ linux-aws-5.15-5.15.0/include/linux/filter.h @@ -496,24 +496,27 @@ __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \ u64, __ur_3, u64, __ur_4, u64, __ur_5) -#define BPF_CALL_x(x, name, ...) \ +#define BPF_CALL_x(x, attr, name, ...) \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ - u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ - u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ + attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ + attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ { \ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\ } \ static __always_inline \ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) -#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__) -#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__) -#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__) -#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__) -#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__) -#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__) +#define __NOATTR +#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_1(name, ...) 
BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__) +#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__) + +#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__) #define bpf_ctx_range(TYPE, MEMBER) \ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 @@ -940,8 +943,7 @@ int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); void sk_reuseport_prog_free(struct bpf_prog *prog); int sk_detach_filter(struct sock *sk); -int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, - unsigned int len); +int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len); bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); diff -u linux-aws-5.15-5.15.0/include/linux/fs.h linux-aws-5.15-5.15.0/include/linux/fs.h --- linux-aws-5.15-5.15.0/include/linux/fs.h +++ linux-aws-5.15-5.15.0/include/linux/fs.h @@ -322,6 +322,8 @@ #define IOCB_NOIO (1 << 20) /* can use bio alloc cache */ #define IOCB_ALLOC_CACHE (1 << 21) +/* kiocb is a read or write operation submitted by fs/aio.c. */ +#define IOCB_AIO_RW (1 << 23) struct kiocb { struct file *ki_filp; diff -u linux-aws-5.15-5.15.0/include/linux/hyperv.h linux-aws-5.15-5.15.0/include/linux/hyperv.h --- linux-aws-5.15-5.15.0/include/linux/hyperv.h +++ linux-aws-5.15-5.15.0/include/linux/hyperv.h @@ -344,7 +344,7 @@ u8 sender_owns_set; u8 reserved; u32 range_cnt; - struct vmtransfer_page_range ranges[1]; + struct vmtransfer_page_range ranges[]; } __packed; struct vmgpadl_packet_header { diff -u linux-aws-5.15-5.15.0/include/linux/moduleloader.h linux-aws-5.15-5.15.0/include/linux/moduleloader.h --- linux-aws-5.15-5.15.0/include/linux/moduleloader.h +++ linux-aws-5.15-5.15.0/include/linux/moduleloader.h @@ -95,6 +95,14 @@ const Elf_Shdr *sechdrs, struct module *mod); +#ifdef CONFIG_MODULES +void flush_module_init_free_work(void); +#else +static inline void flush_module_init_free_work(void) +{ +} +#endif + /* Any cleanup needed when module leaves. 
*/ void module_arch_cleanup(struct module *mod); diff -u linux-aws-5.15-5.15.0/include/linux/netfilter.h linux-aws-5.15-5.15.0/include/linux/netfilter.h --- linux-aws-5.15-5.15.0/include/linux/netfilter.h +++ linux-aws-5.15-5.15.0/include/linux/netfilter.h @@ -376,13 +376,13 @@ enum ip_conntrack_dir dir); }; -extern struct nf_nat_hook __rcu *nf_nat_hook; +extern const struct nf_nat_hook __rcu *nf_nat_hook; static inline void nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family) { #if IS_ENABLED(CONFIG_NF_NAT) - struct nf_nat_hook *nat_hook; + const struct nf_nat_hook *nat_hook; rcu_read_lock(); nat_hook = rcu_dereference(nf_nat_hook); @@ -435,13 +435,14 @@ #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include -extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu; void nf_ct_attach(struct sk_buff *, const struct sk_buff *); +void nf_ct_set_closing(struct nf_conntrack *nfct); struct nf_conntrack_tuple; bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb); #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} +static inline void nf_ct_set_closing(struct nf_conntrack *nfct) {} struct nf_conntrack_tuple; static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb) @@ -458,8 +459,11 @@ void (*destroy)(struct nf_conntrack *); bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *); + void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb); + void (*set_closing)(struct nf_conntrack *nfct); + int (*confirm)(struct sk_buff *skb); }; -extern struct nf_ct_hook __rcu *nf_ct_hook; +extern const struct nf_ct_hook __rcu *nf_ct_hook; struct nlattr; @@ -474,7 +478,7 @@ void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, s32 off); }; -extern struct nfnl_ct_hook __rcu *nfnl_ct_hook; +extern const struct nfnl_ct_hook __rcu *nfnl_ct_hook; /** * nf_skb_duplicated - TEE target has sent a packet diff -u linux-aws-5.15-5.15.0/include/linux/pci.h linux-aws-5.15-5.15.0/include/linux/pci.h --- linux-aws-5.15-5.15.0/include/linux/pci.h +++ linux-aws-5.15-5.15.0/include/linux/pci.h @@ -2292,6 +2292,11 @@ return NULL; } +static inline bool pci_dev_is_disconnected(const struct pci_dev *dev) +{ + return dev->error_state == pci_channel_io_perm_failure; +} + void pci_request_acs(void); bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); bool pci_acs_path_enabled(struct pci_dev *start, diff -u linux-aws-5.15-5.15.0/include/linux/rcupdate.h linux-aws-5.15-5.15.0/include/linux/rcupdate.h --- linux-aws-5.15-5.15.0/include/linux/rcupdate.h +++ linux-aws-5.15-5.15.0/include/linux/rcupdate.h @@ -206,6 +206,18 @@ #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ /** + * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period? + * + * As an accident of implementation, an RCU Tasks Trace grace period also + * acts as an RCU grace period. However, this could change at any time. + * Code relying on this accident must call this function to verify that + * this accident is still happening. + * + * You have been warned! 
+ */ +static inline bool rcu_trace_implies_rcu_gp(void) { return true; } + +/** * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU * * This macro resembles cond_resched(), except that it is defined to @@ -218,6 +230,37 @@ cond_resched(); \ } while (0) +/** + * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states + * @old_ts: jiffies at start of processing. + * + * This helper is for long-running softirq handlers, such as NAPI threads in + * networking. The caller should initialize the variable passed in as @old_ts + * at the beginning of the softirq handler. When invoked frequently, this macro + * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will + * provide both RCU and RCU-Tasks quiescent states. Note that this macro + * modifies its old_ts argument. + * + * Because regions of code that have disabled softirq act as RCU read-side + * critical sections, this macro should be invoked with softirq (and + * preemption) enabled. + * + * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would + * have more chance to invoke schedule() calls and provide necessary quiescent + * states. As a contrast, calling cond_resched() only won't achieve the same + * effect because cond_resched() does not provide RCU-Tasks quiescent states. + */ +#define rcu_softirq_qs_periodic(old_ts) \ +do { \ + if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \ + time_after(jiffies, (old_ts) + HZ / 10)) { \ + preempt_disable(); \ + rcu_softirq_qs(); \ + preempt_enable(); \ + (old_ts) = jiffies; \ + } \ +} while (0) + /* * Infrastructure to implement the synchronize_() primitives in * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. diff -u linux-aws-5.15-5.15.0/include/linux/sched.h linux-aws-5.15-5.15.0/include/linux/sched.h --- linux-aws-5.15-5.15.0/include/linux/sched.h +++ linux-aws-5.15-5.15.0/include/linux/sched.h @@ -608,10 +608,6 @@ * task has to wait for a replenishment to be performed at the * next firing of dl_timer. * - * @dl_boosted tells if we are boosted due to DI. If so we are - * outside bandwidth enforcement mechanism (but only until we - * exit the critical section); - * * @dl_yielded tells if task gave up the CPU before consuming * all its available runtime during the last job. 
* diff -u linux-aws-5.15-5.15.0/include/linux/sched/signal.h linux-aws-5.15-5.15.0/include/linux/sched/signal.h --- linux-aws-5.15-5.15.0/include/linux/sched/signal.h +++ linux-aws-5.15-5.15.0/include/linux/sched/signal.h @@ -125,7 +125,7 @@ #ifdef CONFIG_POSIX_TIMERS /* POSIX.1b Interval Timers */ - int posix_timer_id; + unsigned int next_posix_timer_id; struct list_head posix_timers; /* ITIMER_REAL timer for the process */ diff -u linux-aws-5.15-5.15.0/include/net/ipv6_stubs.h linux-aws-5.15-5.15.0/include/net/ipv6_stubs.h --- linux-aws-5.15-5.15.0/include/net/ipv6_stubs.h +++ linux-aws-5.15-5.15.0/include/net/ipv6_stubs.h @@ -81,6 +81,11 @@ const struct in6_addr *daddr, __be16 dport, int dif, int sdif, struct udp_table *tbl, struct sk_buff *skb); + int (*ipv6_dev_get_saddr)(struct net *net, + const struct net_device *dst_dev, + const struct in6_addr *daddr, + unsigned int prefs, + struct in6_addr *saddr); }; extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; diff -u linux-aws-5.15-5.15.0/include/net/netfilter/nf_conntrack.h linux-aws-5.15-5.15.0/include/net/netfilter/nf_conntrack.h --- linux-aws-5.15-5.15.0/include/net/netfilter/nf_conntrack.h +++ linux-aws-5.15-5.15.0/include/net/netfilter/nf_conntrack.h @@ -124,6 +124,12 @@ }; static inline struct nf_conn * +nf_ct_to_nf_conn(const struct nf_conntrack *nfct) +{ + return container_of(nfct, struct nf_conn, ct_general); +} + +static inline struct nf_conn * nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash) { return container_of(hash, struct nf_conn, @@ -173,6 +179,8 @@ void nf_ct_destroy(struct nf_conntrack *nfct); +void nf_conntrack_tcp_set_closing(struct nf_conn *ct); + /* decrement reference count on a conntrack */ static inline void nf_ct_put(struct nf_conn *ct) { diff -u linux-aws-5.15-5.15.0/include/net/netfilter/nf_flow_table.h linux-aws-5.15-5.15.0/include/net/netfilter/nf_flow_table.h --- linux-aws-5.15-5.15.0/include/net/netfilter/nf_flow_table.h +++ linux-aws-5.15-5.15.0/include/net/netfilter/nf_flow_table.h @@ -257,8 +257,8 @@ up_write(&flow_table->flow_block_lock); } -int flow_offload_route_init(struct flow_offload *flow, - const struct nf_flow_route *route); +void flow_offload_route_init(struct flow_offload *flow, + struct nf_flow_route *route); int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow); void flow_offload_refresh(struct nf_flowtable *flow_table, diff -u linux-aws-5.15-5.15.0/include/net/strparser.h linux-aws-5.15-5.15.0/include/net/strparser.h --- linux-aws-5.15-5.15.0/include/net/strparser.h +++ linux-aws-5.15-5.15.0/include/net/strparser.h @@ -70,6 +70,10 @@ * when dst_reg == src_reg. 
*/ u64 temp_reg; + struct tls_msg { + u8 control; + u8 decrypted; + } tls; }; static inline struct strp_msg *strp_msg(struct sk_buff *skb) diff -u linux-aws-5.15-5.15.0/include/net/tcp.h linux-aws-5.15-5.15.0/include/net/tcp.h --- linux-aws-5.15-5.15.0/include/net/tcp.h +++ linux-aws-5.15-5.15.0/include/net/tcp.h @@ -2233,7 +2233,7 @@ /* cleanup ulp */ void (*release)(struct sock *sk); /* diagnostic */ - int (*get_info)(const struct sock *sk, struct sk_buff *skb); + int (*get_info)(struct sock *sk, struct sk_buff *skb); size_t (*get_info_size)(const struct sock *sk); /* clone ulp */ void (*clone)(const struct request_sock *req, struct sock *newsk, diff -u linux-aws-5.15-5.15.0/include/net/tls.h linux-aws-5.15-5.15.0/include/net/tls.h --- linux-aws-5.15-5.15.0/include/net/tls.h +++ linux-aws-5.15-5.15.0/include/net/tls.h @@ -116,11 +116,6 @@ u8 aead_req_ctx[]; }; -struct tls_msg { - struct strp_msg rxm; - u8 control; -}; - struct tx_work { struct delayed_work work; struct sock *sk; @@ -133,9 +128,6 @@ struct tls_rec *open_rec; struct list_head tx_list; atomic_t encrypt_pending; - /* protect crypto_wait with encrypt_pending */ - spinlock_t encrypt_compl_lock; - int async_notify; u8 async_capable:1; #define BIT_TX_SCHEDULED 0 @@ -151,13 +143,11 @@ void (*saved_data_ready)(struct sock *sk); struct sk_buff *recv_pkt; - u8 control; + u8 reader_present; u8 async_capable:1; - u8 decrypted:1; + u8 reader_contended:1; atomic_t decrypt_pending; - /* protect crypto_wait with decrypt_pending*/ - spinlock_t decrypt_compl_lock; - bool async_notify; + struct wait_queue_head wq; }; struct tls_record_info { @@ -410,7 +400,9 @@ static inline struct tls_msg *tls_msg(struct sk_buff *skb) { - return (struct tls_msg *)strp_msg(skb); + struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb; + + return &scb->tls; } static inline bool tls_is_partially_sent_record(struct tls_context *ctx) diff -u linux-aws-5.15-5.15.0/include/trace/events/f2fs.h linux-aws-5.15-5.15.0/include/trace/events/f2fs.h --- linux-aws-5.15-5.15.0/include/trace/events/f2fs.h +++ linux-aws-5.15-5.15.0/include/trace/events/f2fs.h @@ -570,9 +570,10 @@ ); TRACE_EVENT(f2fs_map_blocks, - TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret), + TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, + int create, int flag, int ret), - TP_ARGS(inode, map, ret), + TP_ARGS(inode, map, create, flag, ret), TP_STRUCT__entry( __field(dev_t, dev) @@ -583,11 +584,14 @@ __field(unsigned int, m_flags) __field(int, m_seg_type) __field(bool, m_may_create) + __field(bool, m_multidev_dio) + __field(int, create) + __field(int, flag) __field(int, ret) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; + __entry->dev = map->m_bdev->bd_dev; __entry->ino = inode->i_ino; __entry->m_lblk = map->m_lblk; __entry->m_pblk = map->m_pblk; @@ -595,12 +599,16 @@ __entry->m_flags = map->m_flags; __entry->m_seg_type = map->m_seg_type; __entry->m_may_create = map->m_may_create; + __entry->m_multidev_dio = map->m_multidev_dio; + __entry->create = create; + __entry->flag = flag; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, " - "start blkaddr = 0x%llx, len = 0x%llx, flags = %u," - "seg_type = %d, may_create = %d, err = %d", + "start blkaddr = 0x%llx, len = 0x%llx, flags = %u, " + "seg_type = %d, may_create = %d, multidevice = %d, " + "create = %d, flag = %d, err = %d", show_dev_ino(__entry), (unsigned long long)__entry->m_lblk, (unsigned long long)__entry->m_pblk, @@ -608,6 +616,9 @@ __entry->m_flags, __entry->m_seg_type, 
__entry->m_may_create, + __entry->m_multidev_dio, + __entry->create, + __entry->flag, __entry->ret) ); diff -u linux-aws-5.15-5.15.0/include/uapi/linux/bpf.h linux-aws-5.15-5.15.0/include/uapi/linux/bpf.h --- linux-aws-5.15-5.15.0/include/uapi/linux/bpf.h +++ linux-aws-5.15-5.15.0/include/uapi/linux/bpf.h @@ -3011,9 +3011,23 @@ * **BPF_FIB_LOOKUP_DIRECT** * Do a direct table lookup vs full lookup using FIB * rules. + * **BPF_FIB_LOOKUP_TBID** + * Used with BPF_FIB_LOOKUP_DIRECT. + * Use the routing table ID present in *params*->tbid + * for the fib lookup. * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). + * **BPF_FIB_LOOKUP_SKIP_NEIGH** + * Skip the neighbour table lookup. *params*->dmac + * and *params*->smac will not be set as output. A common + * use case is to call **bpf_redirect_neigh**\ () after + * doing **bpf_fib_lookup**\ (). + * **BPF_FIB_LOOKUP_SRC** + * Derive and set source IP addr in *params*->ipv{4,6}_src + * for the nexthop. If the src addr cannot be derived, + * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this + * case, *params*->dmac and *params*->smac are not set either. * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. @@ -6040,6 +6054,9 @@ enum { BPF_FIB_LOOKUP_DIRECT = (1U << 0), BPF_FIB_LOOKUP_OUTPUT = (1U << 1), + BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), + BPF_FIB_LOOKUP_TBID = (1U << 3), + BPF_FIB_LOOKUP_SRC = (1U << 4), }; enum { @@ -6052,6 +6069,7 @@ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ + BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */ }; struct bpf_fib_lookup { @@ -6086,6 +6104,9 @@ __u32 rt_metric; }; + /* input: source address to consider for lookup + * output: source address result from lookup + */ union { __be32 ipv4_src; __u32 ipv6_src[4]; /* in6_addr; network order */ @@ -6100,9 +6121,19 @@ __u32 ipv6_dst[4]; /* in6_addr; network order */ }; - /* output */ - __be16 h_vlan_proto; - __be16 h_vlan_TCI; + union { + struct { + /* output */ + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + }; + /* input: when accompanied with the + * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a + * specific routing table to use for the fib lookup. + */ + __u32 tbid; + }; + __u8 smac[6]; /* ETH_ALEN */ __u8 dmac[6]; /* ETH_ALEN */ }; diff -u linux-aws-5.15-5.15.0/init/main.c linux-aws-5.15-5.15.0/init/main.c --- linux-aws-5.15-5.15.0/init/main.c +++ linux-aws-5.15-5.15.0/init/main.c @@ -89,6 +89,7 @@ #include #include #include +#include #include #include #include @@ -1464,11 +1465,11 @@ if (rodata_enabled) { /* * load_module() results in W+X mappings, which are cleaned - * up with call_rcu(). Let's make sure that queued work is + * up with init_free_wq. Let's make sure that queued work is * flushed so that we don't hit false positives looking for * insecure pages which are W+X. 
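The ordering change that follows works because the module loader queues its deferred init-section frees on an ordinary work item rather than via call_rcu(), so a flush_work() is enough to guarantee the W+X init mappings are gone before mark_rodata_ro() scans. A minimal sketch of that pattern, reusing the init_free_wq/do_free_init/init_free_list names from the kernel/module.c hunk later in this patch (the body of do_free_init() is elided; this is an illustration, not the patched code itself):

static LLIST_HEAD(init_free_list);

static void do_free_init(struct work_struct *w)
{
	/* drains init_free_list and vfree()s each queued init region */
}
static DECLARE_WORK(init_free_wq, do_free_init);

void flush_module_init_free_work(void)
{
	/* returns only after every previously queued do_free_init() has run */
	flush_work(&init_free_wq);
}

The producer side is visible further down in the module.c hunk: do_init_module() adds the region with llist_add() and calls schedule_work(&init_free_wq).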
*/ - rcu_barrier(); + flush_module_init_free_work(); mark_rodata_ro(); rodata_test(); } else diff -u linux-aws-5.15-5.15.0/io_uring/io_uring.c linux-aws-5.15-5.15.0/io_uring/io_uring.c --- linux-aws-5.15-5.15.0/io_uring/io_uring.c +++ linux-aws-5.15-5.15.0/io_uring/io_uring.c @@ -62,7 +62,6 @@ #include #include #include -#include #include #include #include @@ -440,9 +439,6 @@ /* Keep this last, we don't need it for the fast path */ struct { - #if defined(CONFIG_UNIX) - struct socket *ring_sock; - #endif /* hashed buffered write serialization */ struct io_wq_hash *hash_map; @@ -1141,19 +1137,6 @@ }; #endif -struct sock *io_uring_get_socket(struct file *file) -{ -#if defined(CONFIG_UNIX) - if (file->f_op == &io_uring_fops) { - struct io_ring_ctx *ctx = file->private_data; - - return ctx->ring_sock->sk; - } -#endif - return NULL; -} -EXPORT_SYMBOL(io_uring_get_socket); - static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked) { if (!*locked) { @@ -7857,7 +7840,7 @@ struct io_wait_queue *iowq, ktime_t *timeout) { - int io_wait, ret; + int ret; /* make sure we run task_work before checking for signals */ ret = io_run_task_work_sig(); @@ -7872,13 +7855,12 @@ * can take into account that the task is waiting for IO - turns out * to be important for low QD IO. */ - io_wait = current->in_iowait; if (current_pending_io()) current->in_iowait = 1; ret = 1; if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS)) ret = -ETIME; - current->in_iowait = io_wait; + current->in_iowait = 0; return ret; } @@ -8189,15 +8171,6 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) { -#if defined(CONFIG_UNIX) - if (ctx->ring_sock) { - struct sock *sock = ctx->ring_sock->sk; - struct sk_buff *skb; - - while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL) - kfree_skb(skb); - } -#else int i; for (i = 0; i < ctx->nr_user_files; i++) { @@ -8207,7 +8180,6 @@ if (file) fput(file); } -#endif io_free_file_tables(&ctx->file_table); io_rsrc_data_free(ctx->file_data); ctx->file_data = NULL; @@ -8359,170 +8331,11 @@ return sqd; } -#if defined(CONFIG_UNIX) -/* - * Ensure the UNIX gc is aware of our file set, so we are certain that - * the io_uring can be safely unregistered on process exit, even if we have - * loops in the file referencing. - */ -static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) -{ - struct sock *sk = ctx->ring_sock->sk; - struct scm_fp_list *fpl; - struct sk_buff *skb; - int i, nr_files; - - fpl = kzalloc(sizeof(*fpl), GFP_KERNEL); - if (!fpl) - return -ENOMEM; - - skb = alloc_skb(0, GFP_KERNEL); - if (!skb) { - kfree(fpl); - return -ENOMEM; - } - - skb->sk = sk; - - nr_files = 0; - fpl->user = get_uid(current_user()); - for (i = 0; i < nr; i++) { - struct file *file = io_file_from_index(ctx, i + offset); - - if (!file) - continue; - fpl->fp[nr_files] = get_file(file); - unix_inflight(fpl->user, fpl->fp[nr_files]); - nr_files++; - } - - if (nr_files) { - fpl->max = SCM_MAX_FD; - fpl->count = nr_files; - UNIXCB(skb).fp = fpl; - skb->scm_io_uring = 1; - skb->destructor = unix_destruct_scm; - refcount_add(skb->truesize, &sk->sk_wmem_alloc); - skb_queue_head(&sk->sk_receive_queue, skb); - - for (i = 0; i < nr; i++) { - struct file *file = io_file_from_index(ctx, i + offset); - - if (file) - fput(file); - } - } else { - kfree_skb(skb); - free_uid(fpl->user); - kfree(fpl); - } - - return 0; -} - -/* - * If UNIX sockets are enabled, fd passing can cause a reference cycle which - * causes regular reference counting to break down. 
We rely on the UNIX - * garbage collection to take care of this problem for us. - */ -static int io_sqe_files_scm(struct io_ring_ctx *ctx) -{ - unsigned left, total; - int ret = 0; - - total = 0; - left = ctx->nr_user_files; - while (left) { - unsigned this_files = min_t(unsigned, left, SCM_MAX_FD); - - ret = __io_sqe_files_scm(ctx, this_files, total); - if (ret) - break; - left -= this_files; - total += this_files; - } - - if (!ret) - return 0; - - while (total < ctx->nr_user_files) { - struct file *file = io_file_from_index(ctx, total); - - if (file) - fput(file); - total++; - } - - return ret; -} -#else -static int io_sqe_files_scm(struct io_ring_ctx *ctx) -{ - return 0; -} -#endif - static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc) { struct file *file = prsrc->file; -#if defined(CONFIG_UNIX) - struct sock *sock = ctx->ring_sock->sk; - struct sk_buff_head list, *head = &sock->sk_receive_queue; - struct sk_buff *skb; - int i; - - __skb_queue_head_init(&list); - - /* - * Find the skb that holds this file in its SCM_RIGHTS. When found, - * remove this entry and rearrange the file array. - */ - skb = skb_dequeue(head); - while (skb) { - struct scm_fp_list *fp; - fp = UNIXCB(skb).fp; - for (i = 0; i < fp->count; i++) { - int left; - - if (fp->fp[i] != file) - continue; - - unix_notinflight(fp->user, fp->fp[i]); - left = fp->count - 1 - i; - if (left) { - memmove(&fp->fp[i], &fp->fp[i + 1], - left * sizeof(struct file *)); - } - fp->count--; - if (!fp->count) { - kfree_skb(skb); - skb = NULL; - } else { - __skb_queue_tail(&list, skb); - } - fput(file); - file = NULL; - break; - } - - if (!file) - break; - - __skb_queue_tail(&list, skb); - - skb = skb_dequeue(head); - } - - if (skb_peek(&list)) { - spin_lock_irq(&head->lock); - while ((skb = __skb_dequeue(&list)) != NULL) - __skb_queue_tail(head, skb); - spin_unlock_irq(&head->lock); - } -#else fput(file); -#endif } static void __io_rsrc_put_work(struct io_rsrc_node *ref_node) @@ -8633,12 +8446,6 @@ io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file); } - ret = io_sqe_files_scm(ctx); - if (ret) { - __io_sqe_files_unregister(ctx); - return ret; - } - io_rsrc_node_switch(ctx, NULL); return ret; out_fput: @@ -9595,12 +9402,6 @@ WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist)); -#if defined(CONFIG_UNIX) - if (ctx->ring_sock) { - ctx->ring_sock->file = NULL; /* so that iput() is called */ - sock_release(ctx->ring_sock); - } -#endif WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list)); if (ctx->mm_account) { @@ -10470,6 +10271,11 @@ #endif }; +bool io_is_uring_fops(struct file *file) +{ + return file->f_op == &io_uring_fops; +} + static int io_allocate_scq_urings(struct io_ring_ctx *ctx, struct io_uring_params *p) { @@ -10532,32 +10338,12 @@ /* * Allocate an anonymous fd, this is what constitutes the application * visible backing of an io_uring instance. The application mmaps this - * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled, - * we have to tie this fd to a socket for file garbage collection purposes. + * fd to gain access to the SQ/CQ ring details. 
*/ static struct file *io_uring_get_file(struct io_ring_ctx *ctx) { - struct file *file; -#if defined(CONFIG_UNIX) - int ret; - - ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP, - &ctx->ring_sock); - if (ret) - return ERR_PTR(ret); -#endif - - file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx, - O_RDWR | O_CLOEXEC); -#if defined(CONFIG_UNIX) - if (IS_ERR(file)) { - sock_release(ctx->ring_sock); - ctx->ring_sock = NULL; - } else { - ctx->ring_sock->file = file; - } -#endif - return file; + return anon_inode_getfile("[io_uring]", &io_uring_fops, ctx, + O_RDWR | O_CLOEXEC); } static int io_uring_create(unsigned entries, struct io_uring_params *p, diff -u linux-aws-5.15-5.15.0/kernel/bpf/cpumap.c linux-aws-5.15-5.15.0/kernel/bpf/cpumap.c --- linux-aws-5.15-5.15.0/kernel/bpf/cpumap.c +++ linux-aws-5.15-5.15.0/kernel/bpf/cpumap.c @@ -221,7 +221,7 @@ void **frames, int n, struct xdp_cpumap_stats *stats) { - struct xdp_rxq_info rxq; + struct xdp_rxq_info rxq = {}; struct xdp_buff xdp; int i, nframes = 0; @@ -305,6 +305,7 @@ static int cpu_map_kthread_run(void *data) { struct bpf_cpu_map_entry *rcpu = data; + unsigned long last_qs = jiffies; complete(&rcpu->kthread_running); set_current_state(TASK_INTERRUPTIBLE); @@ -330,10 +331,12 @@ if (__ptr_ring_empty(rcpu->queue)) { schedule(); sched = 1; + last_qs = jiffies; } else { __set_current_state(TASK_RUNNING); } } else { + rcu_softirq_qs_periodic(last_qs); sched = cond_resched(); } diff -u linux-aws-5.15-5.15.0/kernel/bpf/hashtab.c linux-aws-5.15-5.15.0/kernel/bpf/hashtab.c --- linux-aws-5.15-5.15.0/kernel/bpf/hashtab.c +++ linux-aws-5.15-5.15.0/kernel/bpf/hashtab.c @@ -504,7 +504,13 @@ num_possible_cpus()); } - /* hash table size must be power of 2 */ + /* hash table size must be power of 2; roundup_pow_of_two() can overflow + * into UB on 32-bit arches, so check that first + */ + err = -E2BIG; + if (htab->map.max_entries > 1UL << 31) + goto free_htab; + htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); htab->elem_size = sizeof(struct htab_elem) + @@ -514,10 +520,8 @@ else htab->elem_size += round_up(htab->map.value_size, 8); - err = -E2BIG; - /* prevent zero size kmalloc and check for u32 overflow */ - if (htab->n_buckets == 0 || - htab->n_buckets > U32_MAX / sizeof(struct bucket)) + /* check for u32 overflow */ + if (htab->n_buckets > U32_MAX / sizeof(struct bucket)) goto free_htab; err = -ENOMEM; diff -u linux-aws-5.15-5.15.0/kernel/bpf/helpers.c linux-aws-5.15-5.15.0/kernel/bpf/helpers.c --- linux-aws-5.15-5.15.0/kernel/bpf/helpers.c +++ linux-aws-5.15-5.15.0/kernel/bpf/helpers.c @@ -298,7 +298,7 @@ __this_cpu_write(irqsave_flags, flags); } -notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) +NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) { __bpf_spin_lock_irqsave(lock); return 0; @@ -320,7 +320,7 @@ local_irq_restore(flags); } -notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) +NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) { __bpf_spin_unlock_irqrestore(lock); return 0; @@ -710,19 +710,20 @@ /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary * arguments representation. 
*/ -#define MAX_BPRINTF_BUF_LEN 512 +#define MAX_BPRINTF_BIN_ARGS 512 /* Support executing three nested bprintf helper calls on a given CPU */ #define MAX_BPRINTF_NEST_LEVEL 3 struct bpf_bprintf_buffers { - char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN]; + char bin_args[MAX_BPRINTF_BIN_ARGS]; + char buf[MAX_BPRINTF_BUF]; }; -static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs); + +static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs); static DEFINE_PER_CPU(int, bpf_bprintf_nest_level); -static int try_get_fmt_tmp_buf(char **tmp_buf) +static int try_get_buffers(struct bpf_bprintf_buffers **bufs) { - struct bpf_bprintf_buffers *bufs; int nest_level; preempt_disable(); @@ -732,18 +733,19 @@ preempt_enable(); return -EBUSY; } - bufs = this_cpu_ptr(&bpf_bprintf_bufs); - *tmp_buf = bufs->tmp_bufs[nest_level - 1]; + *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); return 0; } -void bpf_bprintf_cleanup(void) +void bpf_bprintf_cleanup(struct bpf_bprintf_data *data) { - if (this_cpu_read(bpf_bprintf_nest_level)) { - this_cpu_dec(bpf_bprintf_nest_level); - preempt_enable(); - } + if (!data->bin_args && !data->buf) + return; + if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0)) + return; + this_cpu_dec(bpf_bprintf_nest_level); + preempt_enable(); } /* @@ -752,18 +754,20 @@ * Returns a negative value if fmt is an invalid format string or 0 otherwise. * * This can be used in two ways: - * - Format string verification only: when bin_args is NULL + * - Format string verification only: when data->get_bin_args is false * - Arguments preparation: in addition to the above verification, it writes in - * bin_args a binary representation of arguments usable by bstr_printf where - * pointers from BPF have been sanitized. + * data->bin_args a binary representation of arguments usable by bstr_printf + * where pointers from BPF have been sanitized. * * In argument preparation mode, if 0 is returned, safe temporary buffers are * allocated and bpf_bprintf_cleanup should be called to free them after use. 
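With the reworked interface below, a caller states what it needs in struct bpf_bprintf_data and must pair every successful prepare with a cleanup. A condensed caller sketch, modelled on the bpf_snprintf()/bpf_trace_printk() conversions later in this patch (error paths and surrounding function trimmed):

	struct bpf_bprintf_data data = {
		.get_bin_args = true,	/* binary args for bstr_printf() */
		.get_buf      = true,	/* per-CPU output buffer */
	};
	int err;

	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
	if (err < 0)
		return err;

	err = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
	bpf_bprintf_cleanup(&data);	/* releases the per-CPU buffers */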
*/ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, - u32 **bin_args, u32 num_args) + u32 num_args, struct bpf_bprintf_data *data) { + bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end; + struct bpf_bprintf_buffers *buffers = NULL; size_t sizeof_cur_arg, sizeof_cur_ip; int err, i, num_spec = 0; u64 cur_arg; @@ -774,14 +778,19 @@ return -EINVAL; fmt_size = fmt_end - fmt; - if (bin_args) { - if (num_args && try_get_fmt_tmp_buf(&tmp_buf)) - return -EBUSY; + if (get_buffers && try_get_buffers(&buffers)) + return -EBUSY; - tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN; - *bin_args = (u32 *)tmp_buf; + if (data->get_bin_args) { + if (num_args) + tmp_buf = buffers->bin_args; + tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS; + data->bin_args = (u32 *)tmp_buf; } + if (data->get_buf) + data->buf = buffers->buf; + for (i = 0; i < fmt_size; i++) { if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { err = -EINVAL; @@ -975,33 +984,33 @@ err = 0; out: if (err) - bpf_bprintf_cleanup(); + bpf_bprintf_cleanup(data); return err; } -#define MAX_SNPRINTF_VARARGS 12 - BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, - const void *, data, u32, data_len) + const void *, args, u32, data_len) { + struct bpf_bprintf_data data = { + .get_bin_args = true, + }; int err, num_args; - u32 *bin_args; - if (data_len % 8 || data_len > MAX_SNPRINTF_VARARGS * 8 || - (data_len && !data)) + if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || + (data_len && !args)) return -EINVAL; num_args = data_len / 8; /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we * can safely give an unbounded size. */ - err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args); + err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data); if (err < 0) return err; - err = bstr_printf(str, str_size, fmt, bin_args); + err = bstr_printf(str, str_size, fmt, data.bin_args); - bpf_bprintf_cleanup(); + bpf_bprintf_cleanup(&data); return err + 1; } @@ -1039,6 +1048,7 @@ struct bpf_prog *prog; void __rcu *callback_fn; void *value; + struct rcu_head rcu; }; /* the actual struct hidden inside uapi struct bpf_timer */ @@ -1260,6 +1270,7 @@ if (in_nmi()) return -EOPNOTSUPP; + rcu_read_lock(); __bpf_spin_lock_irqsave(&timer->lock); t = timer->timer; if (!t) { @@ -1281,6 +1292,7 @@ * if it was running. */ ret = ret ?: hrtimer_cancel(&t->timer); + rcu_read_unlock(); return ret; } @@ -1335,7 +1347,7 @@ */ if (this_cpu_read(hrtimer_running) != t) hrtimer_cancel(&t->timer); - kfree(t); + kfree_rcu(t, rcu); } const struct bpf_func_proto bpf_get_current_task_proto __weak; diff -u linux-aws-5.15-5.15.0/kernel/bpf/map_in_map.c linux-aws-5.15-5.15.0/kernel/bpf/map_in_map.c --- linux-aws-5.15-5.15.0/kernel/bpf/map_in_map.c +++ linux-aws-5.15-5.15.0/kernel/bpf/map_in_map.c @@ -110,10 +110,15 @@ void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) { - /* ptr->ops->map_free() has to go through one - * rcu grace period by itself. + struct bpf_map *inner_map = ptr; + + /* The inner map may still be used by both non-sleepable and sleepable + * bpf program, so free it after one RCU grace period and one tasks + * trace RCU grace period. 
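The two grace periods correspond to the two reader contexts an inner map can be reached from: non-sleepable programs run inside a plain RCU read-side critical section, sleepable programs inside a tasks-trace RCU one. A simplified illustration of those reader sides, which the deferred free has to outlast (the real entry points live in the BPF trampoline code; this is only a sketch):

	/* non-sleepable program: plain RCU read-side critical section */
	rcu_read_lock();
	bpf_prog_run(prog, ctx);	/* may look up the inner map */
	rcu_read_unlock();

	/* sleepable program: tasks-trace RCU read-side critical section */
	rcu_read_lock_trace();
	bpf_prog_run(prog, ctx);
	rcu_read_unlock_trace();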
*/ - bpf_map_put(ptr); + if (need_defer) + WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true); + bpf_map_put(inner_map); } u32 bpf_map_fd_sys_lookup_elem(void *ptr) diff -u linux-aws-5.15-5.15.0/kernel/bpf/stackmap.c linux-aws-5.15-5.15.0/kernel/bpf/stackmap.c --- linux-aws-5.15-5.15.0/kernel/bpf/stackmap.c +++ linux-aws-5.15-5.15.0/kernel/bpf/stackmap.c @@ -113,11 +113,14 @@ } else if (value_size / 8 > sysctl_perf_event_max_stack) return ERR_PTR(-EINVAL); - /* hash table size must be power of 2 */ - n_buckets = roundup_pow_of_two(attr->max_entries); - if (!n_buckets) + /* hash table size must be power of 2; roundup_pow_of_two() can overflow + * into UB on 32-bit arches, so check that first + */ + if (attr->max_entries > 1UL << 31) return ERR_PTR(-E2BIG); + n_buckets = roundup_pow_of_two(attr->max_entries); + cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap); smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr)); if (!smap) diff -u linux-aws-5.15-5.15.0/kernel/bpf/syscall.c linux-aws-5.15-5.15.0/kernel/bpf/syscall.c --- linux-aws-5.15-5.15.0/kernel/bpf/syscall.c +++ linux-aws-5.15-5.15.0/kernel/bpf/syscall.c @@ -487,6 +487,25 @@ } } +static void bpf_map_free_in_work(struct bpf_map *map) +{ + INIT_WORK(&map->work, bpf_map_free_deferred); + schedule_work(&map->work); +} + +static void bpf_map_free_rcu_gp(struct rcu_head *rcu) +{ + bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu)); +} + +static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu) +{ + if (rcu_trace_implies_rcu_gp()) + bpf_map_free_rcu_gp(rcu); + else + call_rcu(rcu, bpf_map_free_rcu_gp); +} + /* decrement map refcnt and schedule it for freeing via workqueue * (unrelying map implementation ops->map_free() might sleep) */ @@ -496,8 +515,11 @@ /* bpf_map_free_id() must be called first */ bpf_map_free_id(map, do_idr_lock); btf_put(map->btf); - INIT_WORK(&map->work, bpf_map_free_deferred); - schedule_work(&map->work); + + if (READ_ONCE(map->free_after_mult_rcu_gp)) + call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp); + else + bpf_map_free_in_work(map); } } diff -u linux-aws-5.15-5.15.0/kernel/bpf/verifier.c linux-aws-5.15-5.15.0/kernel/bpf/verifier.c --- linux-aws-5.15-5.15.0/kernel/bpf/verifier.c +++ linux-aws-5.15-5.15.0/kernel/bpf/verifier.c @@ -6415,6 +6415,7 @@ struct bpf_reg_state *fmt_reg = ®s[BPF_REG_3]; struct bpf_reg_state *data_len_reg = ®s[BPF_REG_5]; struct bpf_map *fmt_map = fmt_reg->map_ptr; + struct bpf_bprintf_data data = {}; int err, fmt_map_off, num_args; u64 fmt_addr; char *fmt; @@ -6439,7 +6440,7 @@ /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we * can focus on validating the format specifiers. */ - err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args); + err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data); if (err < 0) verbose(env, "Invalid format string\n"); diff -u linux-aws-5.15-5.15.0/kernel/module.c linux-aws-5.15-5.15.0/kernel/module.c --- linux-aws-5.15-5.15.0/kernel/module.c +++ linux-aws-5.15-5.15.0/kernel/module.c @@ -3705,6 +3705,17 @@ } } +void flush_module_init_free_work(void) +{ + flush_work(&init_free_wq); +} + +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "module." +/* Default value for module->async_probe_requested */ +static bool async_probe; +module_param(async_probe, bool, 0644); + /* * This is where the real work happens. * @@ -3789,8 +3800,8 @@ * Note that module_alloc() on most architectures creates W+X page * mappings which won't be cleaned up until do_free_init() runs. 
Any * code such as mark_rodata_ro() which depends on those mappings to - * be cleaned up needs to sync with the queued work - ie - * rcu_barrier() + * be cleaned up needs to sync with the queued work by invoking + * flush_module_init_free_work(). */ if (llist_add(&freeinit->node, &init_free_list)) schedule_work(&init_free_wq); @@ -3935,7 +3946,8 @@ int ret; if (strcmp(param, "async_probe") == 0) { - mod->async_probe_requested = true; + if (strtobool(val, &mod->async_probe_requested)) + mod->async_probe_requested = true; return 0; } @@ -4102,6 +4114,8 @@ if (err) goto bug_cleanup; + mod->async_probe_requested = async_probe; + /* Module is ready to execute: parsing args may do that. */ after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, -32768, 32767, mod, diff -u linux-aws-5.15-5.15.0/kernel/power/hibernate.c linux-aws-5.15-5.15.0/kernel/power/hibernate.c --- linux-aws-5.15-5.15.0/kernel/power/hibernate.c +++ linux-aws-5.15-5.15.0/kernel/power/hibernate.c @@ -664,8 +664,11 @@ hibernation_platform_enter(); fallthrough; case HIBERNATION_SHUTDOWN: - if (pm_power_off) + if (pm_power_off) { + entering_platform_hibernation = true; kernel_power_off(); + entering_platform_hibernation = false; + } break; } kernel_halt(); diff -u linux-aws-5.15-5.15.0/kernel/rcu/tasks.h linux-aws-5.15-5.15.0/kernel/rcu/tasks.h --- linux-aws-5.15-5.15.0/kernel/rcu/tasks.h +++ linux-aws-5.15-5.15.0/kernel/rcu/tasks.h @@ -1098,6 +1098,8 @@ // Wait for late-stage exiting tasks to finish exiting. // These might have passed the call to exit_tasks_rcu_finish(). + + // If you remove the following line, update rcu_trace_implies_rcu_gp()!!! synchronize_rcu(); // Any tasks that exit after this point will set ->trc_reader_checked. } diff -u linux-aws-5.15-5.15.0/kernel/sched/fair.c linux-aws-5.15-5.15.0/kernel/sched/fair.c --- linux-aws-5.15-5.15.0/kernel/sched/fair.c +++ linux-aws-5.15-5.15.0/kernel/sched/fair.c @@ -6405,7 +6405,7 @@ if (!available_idle_cpu(cpu)) { idle = false; if (*idle_cpu == -1) { - if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) { + if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, cpus)) { *idle_cpu = cpu; break; } @@ -6413,7 +6413,7 @@ } break; } - if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr)) + if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus)) *idle_cpu = cpu; } @@ -10287,7 +10287,7 @@ .sd = sd, .dst_cpu = this_cpu, .dst_rq = this_rq, - .dst_grpmask = sched_group_span(sd->groups), + .dst_grpmask = group_balance_mask(sd->groups), .idle = idle, .loop_break = sched_nr_migrate_break, .cpus = cpus, diff -u linux-aws-5.15-5.15.0/kernel/sched/rt.c linux-aws-5.15-5.15.0/kernel/sched/rt.c --- linux-aws-5.15-5.15.0/kernel/sched/rt.c +++ linux-aws-5.15-5.15.0/kernel/sched/rt.c @@ -8,7 +8,7 @@ #include "pelt.h" int sched_rr_timeslice = RR_TIMESLICE; -int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE; +int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ; /* More than 4 hours if BW_SHIFT equals 20. 
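The reordered initializer for sysctl_sched_rr_timeslice just above avoids losing precision to integer division: RR_TIMESLICE is 100 ms expressed in jiffies (100 * HZ / 1000), so the two expressions agree when HZ divides 1000 evenly but can differ otherwise. A worked example assuming HZ = 300:

	/* HZ = 300  ->  RR_TIMESLICE = 100 * 300 / 1000 = 30 jiffies */
	(MSEC_PER_SEC / HZ) * RR_TIMESLICE;	/* old: (1000/300)*30 = 3*30 = 90 ms */
	(MSEC_PER_SEC * RR_TIMESLICE) / HZ;	/* new: (1000*30)/300  = 100 ms      */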
*/ static const u64 max_rt_runtime = MAX_BW; @@ -2806,9 +2806,6 @@ static int sched_rt_global_validate(void) { - if (sysctl_sched_rt_period <= 0) - return -EINVAL; - if ((sysctl_sched_rt_runtime != RUNTIME_INF) && ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) || ((u64)sysctl_sched_rt_runtime * @@ -2839,7 +2836,7 @@ old_period = sysctl_sched_rt_period; old_runtime = sysctl_sched_rt_runtime; - ret = proc_dointvec(table, write, buffer, lenp, ppos); + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (!ret && write) { ret = sched_rt_global_validate(); @@ -2883,6 +2880,9 @@ sched_rr_timeslice = sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE : msecs_to_jiffies(sysctl_sched_rr_timeslice); + + if (sysctl_sched_rr_timeslice <= 0) + sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE); } mutex_unlock(&mutex); diff -u linux-aws-5.15-5.15.0/kernel/sys.c linux-aws-5.15-5.15.0/kernel/sys.c --- linux-aws-5.15-5.15.0/kernel/sys.c +++ linux-aws-5.15-5.15.0/kernel/sys.c @@ -1778,74 +1778,87 @@ struct task_struct *t; unsigned long flags; u64 tgutime, tgstime, utime, stime; - unsigned long maxrss = 0; + unsigned long maxrss; + struct mm_struct *mm; + struct signal_struct *sig = p->signal; + unsigned int seq = 0; - memset((char *)r, 0, sizeof (*r)); +retry: + memset(r, 0, sizeof(*r)); utime = stime = 0; + maxrss = 0; if (who == RUSAGE_THREAD) { task_cputime_adjusted(current, &utime, &stime); accumulate_thread_rusage(p, r); - maxrss = p->signal->maxrss; - goto out; + maxrss = sig->maxrss; + goto out_thread; } - if (!lock_task_sighand(p, &flags)) - return; + flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq); switch (who) { case RUSAGE_BOTH: case RUSAGE_CHILDREN: - utime = p->signal->cutime; - stime = p->signal->cstime; - r->ru_nvcsw = p->signal->cnvcsw; - r->ru_nivcsw = p->signal->cnivcsw; - r->ru_minflt = p->signal->cmin_flt; - r->ru_majflt = p->signal->cmaj_flt; - r->ru_inblock = p->signal->cinblock; - r->ru_oublock = p->signal->coublock; - maxrss = p->signal->cmaxrss; + utime = sig->cutime; + stime = sig->cstime; + r->ru_nvcsw = sig->cnvcsw; + r->ru_nivcsw = sig->cnivcsw; + r->ru_minflt = sig->cmin_flt; + r->ru_majflt = sig->cmaj_flt; + r->ru_inblock = sig->cinblock; + r->ru_oublock = sig->coublock; + maxrss = sig->cmaxrss; if (who == RUSAGE_CHILDREN) break; fallthrough; case RUSAGE_SELF: - thread_group_cputime_adjusted(p, &tgutime, &tgstime); - utime += tgutime; - stime += tgstime; - r->ru_nvcsw += p->signal->nvcsw; - r->ru_nivcsw += p->signal->nivcsw; - r->ru_minflt += p->signal->min_flt; - r->ru_majflt += p->signal->maj_flt; - r->ru_inblock += p->signal->inblock; - r->ru_oublock += p->signal->oublock; - if (maxrss < p->signal->maxrss) - maxrss = p->signal->maxrss; - t = p; - do { + r->ru_nvcsw += sig->nvcsw; + r->ru_nivcsw += sig->nivcsw; + r->ru_minflt += sig->min_flt; + r->ru_majflt += sig->maj_flt; + r->ru_inblock += sig->inblock; + r->ru_oublock += sig->oublock; + if (maxrss < sig->maxrss) + maxrss = sig->maxrss; + + rcu_read_lock(); + __for_each_thread(sig, t) accumulate_thread_rusage(t, r); - } while_each_thread(p, t); + rcu_read_unlock(); + break; default: BUG(); } - unlock_task_sighand(p, &flags); -out: - r->ru_utime = ns_to_kernel_old_timeval(utime); - r->ru_stime = ns_to_kernel_old_timeval(stime); + if (need_seqretry(&sig->stats_lock, seq)) { + seq = 1; + goto retry; + } + done_seqretry_irqrestore(&sig->stats_lock, seq, flags); - if (who != RUSAGE_CHILDREN) { - struct mm_struct *mm = get_task_mm(p); + if (who == RUSAGE_CHILDREN) + goto out_children; - if (mm) 
{ - setmax_mm_hiwater_rss(&maxrss, mm); - mmput(mm); - } + thread_group_cputime_adjusted(p, &tgutime, &tgstime); + utime += tgutime; + stime += tgstime; + +out_thread: + mm = get_task_mm(p); + if (mm) { + setmax_mm_hiwater_rss(&maxrss, mm); + mmput(mm); } + +out_children: r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */ + r->ru_utime = ns_to_kernel_old_timeval(utime); + r->ru_stime = ns_to_kernel_old_timeval(stime); } SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru) diff -u linux-aws-5.15-5.15.0/kernel/sysctl.c linux-aws-5.15-5.15.0/kernel/sysctl.c --- linux-aws-5.15-5.15.0/kernel/sysctl.c +++ linux-aws-5.15-5.15.0/kernel/sysctl.c @@ -1822,6 +1822,8 @@ .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_rt_handler, + .extra1 = SYSCTL_ONE, + .extra2 = SYSCTL_INT_MAX, }, { .procname = "sched_rt_runtime_us", @@ -1829,6 +1831,8 @@ .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rt_handler, + .extra1 = SYSCTL_NEG_ONE, + .extra2 = SYSCTL_INT_MAX, }, { .procname = "sched_deadline_period_max_us", diff -u linux-aws-5.15-5.15.0/kernel/time/posix-timers.c linux-aws-5.15-5.15.0/kernel/time/posix-timers.c --- linux-aws-5.15-5.15.0/kernel/time/posix-timers.c +++ linux-aws-5.15-5.15.0/kernel/time/posix-timers.c @@ -140,25 +140,30 @@ static int posix_timer_add(struct k_itimer *timer) { struct signal_struct *sig = current->signal; - int first_free_id = sig->posix_timer_id; struct hlist_head *head; - int ret = -ENOENT; + unsigned int cnt, id; - do { + /* + * FIXME: Replace this by a per signal struct xarray once there is + * a plan to handle the resulting CRIU regression gracefully. + */ + for (cnt = 0; cnt <= INT_MAX; cnt++) { spin_lock(&hash_lock); - head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)]; - if (!__posix_timers_find(head, sig, sig->posix_timer_id)) { + id = sig->next_posix_timer_id; + + /* Write the next ID back. Clamp it to the positive space */ + sig->next_posix_timer_id = (id + 1) & INT_MAX; + + head = &posix_timers_hashtable[hash(sig, id)]; + if (!__posix_timers_find(head, sig, id)) { hlist_add_head_rcu(&timer->t_hash, head); - ret = sig->posix_timer_id; + spin_unlock(&hash_lock); + return id; } - if (++sig->posix_timer_id < 0) - sig->posix_timer_id = 0; - if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT)) - /* Loop over all possible ids completed */ - ret = -EAGAIN; spin_unlock(&hash_lock); - } while (ret == -ENOENT); - return ret; + } + /* POSIX return code when no timer ID could be allocated */ + return -EAGAIN; } static inline void unlock_timer(struct k_itimer *timr, unsigned long flags) diff -u linux-aws-5.15-5.15.0/kernel/time/timekeeping.c linux-aws-5.15-5.15.0/kernel/time/timekeeping.c --- linux-aws-5.15-5.15.0/kernel/time/timekeeping.c +++ linux-aws-5.15-5.15.0/kernel/time/timekeeping.c @@ -1163,13 +1163,15 @@ } /* - * cycle_between - true if test occurs chronologically between before and after + * timestamp_in_interval - true if ts is chronologically in [start, end] + * + * True if ts occurs chronologically at or after start, and before or at end. 
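Besides making both endpoints inclusive, the renamed helper keeps handling the case where the clocksource counter wraps inside the interval (start numerically greater than end). A short illustration with hypothetical counter values:

	/* interval wraps through zero: start = 0xFFF0, end = 0x0010 */
	timestamp_in_interval(0xFFF0, 0x0010, 0xFFF8);	/* true:  ts >= start      */
	timestamp_in_interval(0xFFF0, 0x0010, 0x0008);	/* true:  ts <= end        */
	timestamp_in_interval(0xFFF0, 0x0010, 0x8000);	/* false: outside interval */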
*/ -static bool cycle_between(u64 before, u64 test, u64 after) +static bool timestamp_in_interval(u64 start, u64 end, u64 ts) { - if (test > before && test < after) + if (ts >= start && ts <= end) return true; - if (test < before && before > after) + if (start > end && (ts >= start || ts <= end)) return true; return false; } @@ -1229,7 +1231,7 @@ */ now = tk_clock_read(&tk->tkr_mono); interval_start = tk->tkr_mono.cycle_last; - if (!cycle_between(interval_start, cycles, now)) { + if (!timestamp_in_interval(interval_start, now, cycles)) { clock_was_set_seq = tk->clock_was_set_seq; cs_was_changed_seq = tk->cs_was_changed_seq; cycles = interval_start; @@ -1242,10 +1244,8 @@ tk_core.timekeeper.offs_real); base_raw = tk->tkr_raw.base; - nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, - system_counterval.cycles); - nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, - system_counterval.cycles); + nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles); + nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles); } while (read_seqcount_retry(&tk_core.seq, seq)); xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real); @@ -1260,13 +1260,13 @@ bool discontinuity; /* - * Check that the counter value occurs after the provided + * Check that the counter value is not before the provided * history reference and that the history doesn't cross a * clocksource change */ if (!history_begin || - !cycle_between(history_begin->cycles, - system_counterval.cycles, cycles) || + !timestamp_in_interval(history_begin->cycles, + cycles, system_counterval.cycles) || history_begin->cs_was_changed_seq != cs_was_changed_seq) return -EINVAL; partial_history_cycles = cycles - system_counterval.cycles; diff -u linux-aws-5.15-5.15.0/kernel/trace/bpf_trace.c linux-aws-5.15-5.15.0/kernel/trace/bpf_trace.c --- linux-aws-5.15-5.15.0/kernel/trace/bpf_trace.c +++ linux-aws-5.15-5.15.0/kernel/trace/bpf_trace.c @@ -360,8 +360,6 @@ return &bpf_probe_write_user_proto; } -static DEFINE_RAW_SPINLOCK(trace_printk_lock); - #define MAX_TRACE_PRINTK_VARARGS 3 #define BPF_TRACE_PRINTK_SIZE 1024 @@ -369,23 +367,22 @@ u64, arg2, u64, arg3) { u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; - u32 *bin_args; - static char buf[BPF_TRACE_PRINTK_SIZE]; - unsigned long flags; + struct bpf_bprintf_data data = { + .get_bin_args = true, + .get_buf = true, + }; int ret; - ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args, - MAX_TRACE_PRINTK_VARARGS); + ret = bpf_bprintf_prepare(fmt, fmt_size, args, + MAX_TRACE_PRINTK_VARARGS, &data); if (ret < 0) return ret; - raw_spin_lock_irqsave(&trace_printk_lock, flags); - ret = bstr_printf(buf, sizeof(buf), fmt, bin_args); + ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); - trace_bpf_trace_printk(buf); - raw_spin_unlock_irqrestore(&trace_printk_lock, flags); + trace_bpf_trace_printk(data.buf); - bpf_bprintf_cleanup(); + bpf_bprintf_cleanup(&data); return ret; } @@ -414,26 +411,26 @@ return &bpf_trace_printk_proto; } -#define MAX_SEQ_PRINTF_VARARGS 12 - BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, - const void *, data, u32, data_len) + const void *, args, u32, data_len) { + struct bpf_bprintf_data data = { + .get_bin_args = true, + }; int err, num_args; - u32 *bin_args; - if (data_len & 7 || data_len > MAX_SEQ_PRINTF_VARARGS * 8 || - (data_len && !data)) + if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || + (data_len && !args)) return -EINVAL; num_args = data_len / 8; - err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, 
num_args); + err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); if (err < 0) return err; - seq_bprintf(m, fmt, bin_args); + seq_bprintf(m, fmt, data.bin_args); - bpf_bprintf_cleanup(); + bpf_bprintf_cleanup(&data); return seq_has_overflowed(m) ? -EOVERFLOW : 0; } diff -u linux-aws-5.15-5.15.0/lib/debugobjects.c linux-aws-5.15-5.15.0/lib/debugobjects.c --- linux-aws-5.15-5.15.0/lib/debugobjects.c +++ linux-aws-5.15-5.15.0/lib/debugobjects.c @@ -501,6 +501,15 @@ const struct debug_obj_descr *descr = obj->descr; static int limit; + /* + * Don't report if lookup_object_or_alloc() by the current thread + * failed because lookup_object_or_alloc()/debug_objects_oom() by a + * concurrent thread turned off debug_objects_enabled and cleared + * the hash buckets. + */ + if (!debug_objects_enabled) + return; + if (limit < 5 && descr != descr_test) { void *hint = descr->debug_hint ? descr->debug_hint(obj->object) : NULL; diff -u linux-aws-5.15-5.15.0/mm/userfaultfd.c linux-aws-5.15-5.15.0/mm/userfaultfd.c --- linux-aws-5.15-5.15.0/mm/userfaultfd.c +++ linux-aws-5.15-5.15.0/mm/userfaultfd.c @@ -289,6 +289,7 @@ unsigned long dst_start, unsigned long src_start, unsigned long len, + atomic_t *mmap_changing, enum mcopy_atomic_mode mode) { int vm_shared = dst_vma->vm_flags & VM_SHARED; @@ -405,6 +406,15 @@ goto out; } mmap_read_lock(dst_mm); + /* + * If memory mappings are changing because of non-cooperative + * operation (e.g. mremap) running in parallel, bail out and + * request the user to retry later + */ + if (mmap_changing && atomic_read(mmap_changing)) { + err = -EAGAIN; + break; + } dst_vma = NULL; goto retry; @@ -440,6 +450,7 @@ unsigned long dst_start, unsigned long src_start, unsigned long len, + atomic_t *mmap_changing, enum mcopy_atomic_mode mode); #endif /* CONFIG_HUGETLB_PAGE */ @@ -561,7 +572,8 @@ */ if (is_vm_hugetlb_page(dst_vma)) return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, - src_start, len, mcopy_mode); + src_start, len, mmap_changing, + mcopy_mode); if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) goto out_unlock; diff -u linux-aws-5.15-5.15.0/net/bluetooth/hci_core.c linux-aws-5.15-5.15.0/net/bluetooth/hci_core.c --- linux-aws-5.15-5.15.0/net/bluetooth/hci_core.c +++ linux-aws-5.15-5.15.0/net/bluetooth/hci_core.c @@ -2187,7 +2187,7 @@ else flags = hdev->flags; - strcpy(di.name, hdev->name); + strscpy(di.name, hdev->name, sizeof(di.name)); di.bdaddr = hdev->bdaddr; di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); di.flags = flags; @@ -2330,6 +2330,7 @@ { struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); + hci_dev_hold(hdev); BT_DBG("%s", hdev->name); if (hdev->hw_error) @@ -2337,10 +2338,10 @@ else bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); - if (hci_dev_do_close(hdev)) - return; + if (!hci_dev_do_close(hdev)) + hci_dev_do_open(hdev); - hci_dev_do_open(hdev); + hci_dev_put(hdev); } void hci_uuids_clear(struct hci_dev *hdev) diff -u linux-aws-5.15-5.15.0/net/bluetooth/hci_event.c linux-aws-5.15-5.15.0/net/bluetooth/hci_event.c --- linux-aws-5.15-5.15.0/net/bluetooth/hci_event.c +++ linux-aws-5.15-5.15.0/net/bluetooth/hci_event.c @@ -3072,8 +3072,6 @@ BT_DBG("%s", hdev->name); - hci_conn_check_pending(hdev); - hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); @@ -4737,9 +4735,12 @@ hci_dev_lock(hdev); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); - if (!conn || !hci_conn_ssp_enabled(conn)) + if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) goto 
unlock; + /* Assume remote supports SSP since it has triggered this event */ + set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); + hci_conn_hold(conn); if (!hci_dev_test_flag(hdev, HCI_MGMT)) @@ -6293,10 +6294,10 @@ * keep track of the bdaddr of the connection event that woke us up. */ if (event == HCI_EV_CONN_REQUEST) { - bacpy(&hdev->wake_addr, &conn_complete->bdaddr); + bacpy(&hdev->wake_addr, &conn_request->bdaddr); hdev->wake_addr_type = BDADDR_BREDR; } else if (event == HCI_EV_CONN_COMPLETE) { - bacpy(&hdev->wake_addr, &conn_request->bdaddr); + bacpy(&hdev->wake_addr, &conn_complete->bdaddr); hdev->wake_addr_type = BDADDR_BREDR; } else if (event == HCI_EV_LE_META) { struct hci_ev_le_meta *le_ev = (void *)skb->data; diff -u linux-aws-5.15-5.15.0/net/bluetooth/rfcomm/core.c linux-aws-5.15-5.15.0/net/bluetooth/rfcomm/core.c --- linux-aws-5.15-5.15.0/net/bluetooth/rfcomm/core.c +++ linux-aws-5.15-5.15.0/net/bluetooth/rfcomm/core.c @@ -1937,7 +1937,7 @@ /* Get data directly from socket receive queue without copying it. */ while ((skb = skb_dequeue(&sk->sk_receive_queue))) { skb_orphan(skb); - if (!skb_linearize(skb)) { + if (!skb_linearize(skb) && sk->sk_state != BT_CLOSED) { s = rfcomm_recv_frame(s, skb); if (!s) break; diff -u linux-aws-5.15-5.15.0/net/bridge/br_netfilter_hooks.c linux-aws-5.15-5.15.0/net/bridge/br_netfilter_hooks.c --- linux-aws-5.15-5.15.0/net/bridge/br_netfilter_hooks.c +++ linux-aws-5.15-5.15.0/net/bridge/br_netfilter_hooks.c @@ -43,6 +43,10 @@ #include #endif +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include +#endif + static unsigned int brnf_net_id __read_mostly; struct brnf_net { @@ -537,6 +541,90 @@ return NF_STOLEN; } +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +/* conntracks' nf_confirm logic cannot handle cloned skbs referencing + * the same nf_conn entry, which will happen for multicast (broadcast) + * Frames on bridges. + * + * Example: + * macvlan0 + * br0 + * ethX ethY + * + * ethX (or Y) receives multicast or broadcast packet containing + * an IP packet, not yet in conntrack table. + * + * 1. skb passes through bridge and fake-ip (br_netfilter)Prerouting. + * -> skb->_nfct now references a unconfirmed entry + * 2. skb is broad/mcast packet. bridge now passes clones out on each bridge + * interface. + * 3. skb gets passed up the stack. + * 4. In macvlan case, macvlan driver retains clone(s) of the mcast skb + * and schedules a work queue to send them out on the lower devices. + * + * The clone skb->_nfct is not a copy, it is the same entry as the + * original skb. The macvlan rx handler then returns RX_HANDLER_PASS. + * 5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb. + * + * The Macvlan broadcast worker and normal confirm path will race. + * + * This race will not happen if step 2 already confirmed a clone. In that + * case later steps perform skb_clone() with skb->_nfct already confirmed (in + * hash table). This works fine. + * + * But such confirmation won't happen when eb/ip/nftables rules dropped the + * packets before they reached the nf_confirm step in postrouting. + * + * Work around this problem by explicit confirmation of the entry at + * LOCAL_IN time, before upper layer has a chance to clone the unconfirmed + * entry. 
+ * + */ +static unsigned int br_nf_local_in(void *priv, + struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct nf_conntrack *nfct = skb_nfct(skb); + const struct nf_ct_hook *ct_hook; + struct nf_conn *ct; + int ret; + + if (!nfct || skb->pkt_type == PACKET_HOST) + return NF_ACCEPT; + + ct = container_of(nfct, struct nf_conn, ct_general); + if (likely(nf_ct_is_confirmed(ct))) + return NF_ACCEPT; + + WARN_ON_ONCE(skb_shared(skb)); + WARN_ON_ONCE(refcount_read(&nfct->use) != 1); + + /* We can't call nf_confirm here, it would create a dependency + * on nf_conntrack module. + */ + ct_hook = rcu_dereference(nf_ct_hook); + if (!ct_hook) { + skb->_nfct = 0ul; + nf_conntrack_put(nfct); + return NF_ACCEPT; + } + + nf_bridge_pull_encap_header(skb); + ret = ct_hook->confirm(skb); + switch (ret & NF_VERDICT_MASK) { + case NF_STOLEN: + return NF_STOLEN; + default: + nf_bridge_push_encap_header(skb); + break; + } + + ct = container_of(nfct, struct nf_conn, ct_general); + WARN_ON_ONCE(!nf_ct_is_confirmed(ct)); + + return ret; +} +#endif /* PF_BRIDGE/FORWARD *************************************************/ static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb) @@ -935,6 +1023,14 @@ .hooknum = NF_BR_PRE_ROUTING, .priority = NF_BR_PRI_BRNF, }, +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + { + .hook = br_nf_local_in, + .pf = NFPROTO_BRIDGE, + .hooknum = NF_BR_LOCAL_IN, + .priority = NF_BR_PRI_LAST, + }, +#endif { .hook = br_nf_forward_ip, .pf = NFPROTO_BRIDGE, diff -u linux-aws-5.15-5.15.0/net/bridge/netfilter/nf_conntrack_bridge.c linux-aws-5.15-5.15.0/net/bridge/netfilter/nf_conntrack_bridge.c --- linux-aws-5.15-5.15.0/net/bridge/netfilter/nf_conntrack_bridge.c +++ linux-aws-5.15-5.15.0/net/bridge/netfilter/nf_conntrack_bridge.c @@ -290,6 +290,30 @@ return nf_conntrack_in(skb, &bridge_state); } +static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + enum ip_conntrack_info ctinfo; + struct nf_conn *ct; + + if (skb->pkt_type == PACKET_HOST) + return NF_ACCEPT; + + /* nf_conntrack_confirm() cannot handle concurrent clones, + * this happens for broad/multicast frames with e.g. macvlan on top + * of the bridge device. 
+ */ + ct = nf_ct_get(skb, &ctinfo); + if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) + return NF_ACCEPT; + + /* let inet prerouting call conntrack again */ + skb->_nfct = 0; + nf_ct_put(ct); + + return NF_ACCEPT; +} + static void nf_ct_bridge_frag_save(struct sk_buff *skb, struct nf_bridge_frag_data *data) { @@ -415,6 +439,12 @@ .priority = NF_IP_PRI_CONNTRACK, }, { + .hook = nf_ct_bridge_in, + .pf = NFPROTO_BRIDGE, + .hooknum = NF_BR_LOCAL_IN, + .priority = NF_IP_PRI_CONNTRACK_CONFIRM, + }, + { .hook = nf_ct_bridge_post, .pf = NFPROTO_BRIDGE, .hooknum = NF_BR_POST_ROUTING, diff -u linux-aws-5.15-5.15.0/net/core/dev.c linux-aws-5.15-5.15.0/net/core/dev.c --- linux-aws-5.15-5.15.0/net/core/dev.c +++ linux-aws-5.15-5.15.0/net/core/dev.c @@ -2291,7 +2291,7 @@ rcu_read_lock(); again: list_for_each_entry_rcu(ptype, ptype_list, list) { - if (ptype->ignore_outgoing) + if (READ_ONCE(ptype->ignore_outgoing)) continue; /* Never send packets back to the socket @@ -7143,6 +7143,8 @@ void *have; while (!napi_thread_wait(napi)) { + unsigned long last_qs = jiffies; + for (;;) { bool repoll = false; @@ -7157,6 +7159,7 @@ if (!repoll) break; + rcu_softirq_qs_periodic(last_qs); cond_resched(); } } @@ -9090,7 +9093,7 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) { - size_t size = sizeof(sa->sa_data); + size_t size = sizeof(sa->sa_data_min); struct net_device *dev; int ret = 0; diff -u linux-aws-5.15-5.15.0/net/core/devlink.c linux-aws-5.15-5.15.0/net/core/devlink.c --- linux-aws-5.15-5.15.0/net/core/devlink.c +++ linux-aws-5.15-5.15.0/net/core/devlink.c @@ -9055,7 +9055,10 @@ static void devlink_port_type_warn(struct work_struct *work) { - WARN(true, "Type was not set for devlink port."); + struct devlink_port *port = container_of(to_delayed_work(work), + struct devlink_port, + type_warn_dw); + dev_warn(port->devlink->dev, "Type was not set for devlink port."); } static bool devlink_port_type_should_warn(struct devlink_port *devlink_port) diff -u linux-aws-5.15-5.15.0/net/core/filter.c linux-aws-5.15-5.15.0/net/core/filter.c --- linux-aws-5.15-5.15.0/net/core/filter.c +++ linux-aws-5.15-5.15.0/net/core/filter.c @@ -5392,12 +5392,8 @@ #endif #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) -static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, - const struct neighbour *neigh, - const struct net_device *dev, u32 mtu) +static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, u32 mtu) { - memcpy(params->dmac, neigh->ha, ETH_ALEN); - memcpy(params->smac, dev->dev_addr, ETH_ALEN); params->h_vlan_TCI = 0; params->h_vlan_proto = 0; if (mtu) @@ -5451,6 +5447,12 @@ u32 tbid = l3mdev_fib_table_rcu(dev) ? 
: RT_TABLE_MAIN; struct fib_table *tb; + if (flags & BPF_FIB_LOOKUP_TBID) { + tbid = params->tbid; + /* zero out for vlan output */ + params->tbid = 0; + } + tb = fib_get_table(net, tbid); if (unlikely(!tb)) return BPF_FIB_LKUP_RET_NOT_FWDED; @@ -5502,27 +5504,38 @@ params->rt_metric = res.fi->fib_priority; params->ifindex = dev->ifindex; + if (flags & BPF_FIB_LOOKUP_SRC) + params->ipv4_src = fib_result_prefsrc(net, &res); + /* xdp and cls_bpf programs are run in RCU-bh so * rcu_read_lock_bh is not needed here */ if (likely(nhc->nhc_gw_family != AF_INET6)) { if (nhc->nhc_gw_family) params->ipv4_dst = nhc->nhc_gw.ipv4; - - neigh = __ipv4_neigh_lookup_noref(dev, - (__force u32)params->ipv4_dst); } else { struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst; params->family = AF_INET6; *dst = nhc->nhc_gw.ipv6; - neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); } + if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH) + goto set_fwd_params; + + if (likely(nhc->nhc_gw_family != AF_INET6)) + neigh = __ipv4_neigh_lookup_noref(dev, + (__force u32)params->ipv4_dst); + else + neigh = __ipv6_neigh_lookup_noref_stub(dev, params->ipv6_dst); + if (!neigh || !(neigh->nud_state & NUD_VALID)) return BPF_FIB_LKUP_RET_NO_NEIGH; + memcpy(params->dmac, neigh->ha, ETH_ALEN); + memcpy(params->smac, dev->dev_addr, ETH_ALEN); - return bpf_fib_set_fwd_params(params, neigh, dev, mtu); +set_fwd_params: + return bpf_fib_set_fwd_params(params, mtu); } #endif @@ -5576,6 +5589,12 @@ u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; struct fib6_table *tb; + if (flags & BPF_FIB_LOOKUP_TBID) { + tbid = params->tbid; + /* zero out for vlan output */ + params->tbid = 0; + } + tb = ipv6_stub->fib6_get_table(net, tbid); if (unlikely(!tb)) return BPF_FIB_LKUP_RET_NOT_FWDED; @@ -5630,24 +5649,46 @@ params->rt_metric = res.f6i->fib6_metric; params->ifindex = dev->ifindex; + if (flags & BPF_FIB_LOOKUP_SRC) { + if (res.f6i->fib6_prefsrc.plen) { + *src = res.f6i->fib6_prefsrc.addr; + } else { + err = ipv6_bpf_stub->ipv6_dev_get_saddr(net, dev, + &fl6.daddr, 0, + src); + if (err) + return BPF_FIB_LKUP_RET_NO_SRC_ADDR; + } + } + + if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH) + goto set_fwd_params; + /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is * not needed here. 
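The BPF_FIB_LOOKUP_SKIP_NEIGH handling added in this filter.c hunk is what makes the "lookup, then bpf_redirect_neigh()" pattern from the uapi comment earlier in this patch cheap, since the neighbour table is never touched in the helper. A minimal tc classifier sketch of that pattern (section name, field setup and error handling are illustrative only, not part of the patch):

SEC("tc")
int fwd_prog(struct __sk_buff *skb)
{
	struct bpf_fib_lookup p = {};
	int rc;

	p.family  = AF_INET;
	p.ifindex = skb->ingress_ifindex;
	/* fill p.ipv4_src/p.ipv4_dst/p.l4_protocol/p.tot_len from the packet */

	rc = bpf_fib_lookup(skb, &p, sizeof(p), BPF_FIB_LOOKUP_SKIP_NEIGH);
	if (rc == BPF_FIB_LKUP_RET_SUCCESS)
		return bpf_redirect_neigh(p.ifindex, NULL, 0, 0); /* stack resolves dmac */

	return TC_ACT_OK;
}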
*/ neigh = __ipv6_neigh_lookup_noref_stub(dev, dst); if (!neigh || !(neigh->nud_state & NUD_VALID)) return BPF_FIB_LKUP_RET_NO_NEIGH; + memcpy(params->dmac, neigh->ha, ETH_ALEN); + memcpy(params->smac, dev->dev_addr, ETH_ALEN); - return bpf_fib_set_fwd_params(params, neigh, dev, mtu); +set_fwd_params: + return bpf_fib_set_fwd_params(params, mtu); } #endif +#define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \ + BPF_FIB_LOOKUP_SKIP_NEIGH | BPF_FIB_LOOKUP_TBID | \ + BPF_FIB_LOOKUP_SRC) + BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, struct bpf_fib_lookup *, params, int, plen, u32, flags) { if (plen < sizeof(*params)) return -EINVAL; - if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) + if (flags & ~BPF_FIB_LOOKUP_MASK) return -EINVAL; switch (params->family) { @@ -5685,7 +5726,7 @@ if (plen < sizeof(*params)) return -EINVAL; - if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) + if (flags & ~BPF_FIB_LOOKUP_MASK) return -EINVAL; if (params->tot_len) @@ -10275,8 +10316,7 @@ } EXPORT_SYMBOL_GPL(sk_detach_filter); -int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, - unsigned int len) +int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len) { struct sock_fprog_kern *fprog; struct sk_filter *filter; @@ -10307,7 +10347,7 @@ goto out; ret = -EFAULT; - if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog))) + if (copy_to_sockptr(optval, fprog->filter, bpf_classic_proglen(fprog))) goto out; /* Instead of bytes, the API requests to return the number diff -u linux-aws-5.15-5.15.0/net/core/rtnetlink.c linux-aws-5.15-5.15.0/net/core/rtnetlink.c --- linux-aws-5.15-5.15.0/net/core/rtnetlink.c +++ linux-aws-5.15-5.15.0/net/core/rtnetlink.c @@ -4925,10 +4925,9 @@ struct net *net = sock_net(skb->sk); struct ifinfomsg *ifm; struct net_device *dev; - struct nlattr *br_spec, *attr = NULL; + struct nlattr *br_spec, *attr, *br_flags_attr = NULL; int rem, err = -EOPNOTSUPP; u16 flags = 0; - bool have_flags = false; if (nlmsg_len(nlh) < sizeof(*ifm)) return -EINVAL; @@ -4946,11 +4945,11 @@ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { - if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) { + if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) { if (nla_len(attr) < sizeof(flags)) return -EINVAL; - have_flags = true; + br_flags_attr = attr; flags = nla_get_u16(attr); } @@ -4994,8 +4993,8 @@ } } - if (have_flags) - memcpy(nla_data(attr), &flags, sizeof(flags)); + if (br_flags_attr) + memcpy(nla_data(br_flags_attr), &flags, sizeof(flags)); out: return err; } diff -u linux-aws-5.15-5.15.0/net/core/scm.c linux-aws-5.15-5.15.0/net/core/scm.c --- linux-aws-5.15-5.15.0/net/core/scm.c +++ linux-aws-5.15-5.15.0/net/core/scm.c @@ -105,7 +105,7 @@ if (fd < 0 || !(file = fget_raw(fd))) return -EBADF; /* don't allow io_uring files */ - if (io_uring_get_socket(file)) { + if (io_is_uring_fops(file)) { fput(file); return -EINVAL; } diff -u linux-aws-5.15-5.15.0/net/core/sock.c linux-aws-5.15-5.15.0/net/core/sock.c --- linux-aws-5.15-5.15.0/net/core/sock.c +++ linux-aws-5.15-5.15.0/net/core/sock.c @@ -671,8 +671,8 @@ return ret; } -static int sock_getbindtodevice(struct sock *sk, char __user *optval, - int __user *optlen, int len) +static int sock_getbindtodevice(struct sock *sk, sockptr_t optval, + sockptr_t optlen, int len) { int ret = -ENOPROTOOPT; #ifdef CONFIG_NETDEVICES @@ -695,12 +695,12 @@ len = strlen(devname) + 1; ret = -EFAULT; - if 
(copy_to_user(optval, devname, len)) + if (copy_to_sockptr(optval, devname, len)) goto out; zero: ret = -EFAULT; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) goto out; ret = 0; @@ -1404,22 +1404,25 @@ } } -static int groups_to_user(gid_t __user *dst, const struct group_info *src) +static int groups_to_user(sockptr_t dst, const struct group_info *src) { struct user_namespace *user_ns = current_user_ns(); int i; - for (i = 0; i < src->ngroups; i++) - if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i)) + for (i = 0; i < src->ngroups; i++) { + gid_t gid = from_kgid_munged(user_ns, src->gid[i]); + + if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid))) return -EFAULT; + } return 0; } -int sock_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) +static int sk_getsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, sockptr_t optlen) { - struct sock *sk = sock->sk; + struct socket *sock = sk->sk_socket; union { int val; @@ -1436,7 +1439,7 @@ int lv = sizeof(int); int len; - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len < 0) return -EINVAL; @@ -1578,7 +1581,7 @@ cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); spin_unlock(&sk->sk_peer_lock); - if (copy_to_user(optval, &peercred, len)) + if (copy_to_sockptr(optval, &peercred, len)) return -EFAULT; goto lenout; } @@ -1596,11 +1599,11 @@ if (len < n * sizeof(gid_t)) { len = n * sizeof(gid_t); put_cred(cred); - return put_user(len, optlen) ? -EFAULT : -ERANGE; + return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE; } len = n * sizeof(gid_t); - ret = groups_to_user((gid_t __user *)optval, cred->group_info); + ret = groups_to_user(optval, cred->group_info); put_cred(cred); if (ret) return ret; @@ -1616,7 +1619,7 @@ return -ENOTCONN; if (lv < len) return -EINVAL; - if (copy_to_user(optval, address, len)) + if (copy_to_sockptr(optval, address, len)) return -EFAULT; goto lenout; } @@ -1633,7 +1636,7 @@ break; case SO_PEERSEC: - return security_socket_getpeersec_stream(sock, optval, optlen, len); + return security_socket_getpeersec_stream(sock, optval.user, optlen.user, len); case SO_MARK: v.val = sk->sk_mark; @@ -1661,7 +1664,7 @@ return sock_getbindtodevice(sk, optval, optlen, len); case SO_GET_FILTER: - len = sk_get_filter(sk, (struct sock_filter __user *)optval, len); + len = sk_get_filter(sk, optval, len); if (len < 0) return len; @@ -1711,7 +1714,7 @@ sk_get_meminfo(sk, meminfo); len = min_t(unsigned int, len, sizeof(meminfo)); - if (copy_to_user(optval, &meminfo, len)) + if (copy_to_sockptr(optval, &meminfo, len)) return -EFAULT; goto lenout; @@ -1772,14 +1775,22 @@ if (len > lv) len = lv; - if (copy_to_user(optval, &v, len)) + if (copy_to_sockptr(optval, &v, len)) return -EFAULT; lenout: - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; return 0; } +int sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + return sk_getsockopt(sock->sk, level, optname, + USER_SOCKPTR(optval), + USER_SOCKPTR(optlen)); +} + /* * Initialize an sk_lock. 
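The getsockopt conversions above in sock.c (and in ip_sockglue.c/igmp.c further down) all follow the same idea: sockptr_t wraps either a user or a kernel pointer and the copy helpers dispatch on which one it is, so sk_getsockopt() no longer needs to know who called it. A small illustration of the wrapper types (optlen_user is a hypothetical int __user * argument, shown only to contrast the two cases):

	int klen;
	sockptr_t uptr = USER_SOCKPTR(optlen_user);	/* tagged as user memory   */
	sockptr_t kptr = KERNEL_SOCKPTR(&klen);		/* tagged as kernel memory */

	/* same call, different copy: copy_from_user() vs. memcpy() */
	copy_from_sockptr(&klen, uptr, sizeof(int));
	copy_from_sockptr(&klen, kptr, sizeof(int));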
* diff -u linux-aws-5.15-5.15.0/net/hsr/hsr_framereg.c linux-aws-5.15-5.15.0/net/hsr/hsr_framereg.c --- linux-aws-5.15-5.15.0/net/hsr/hsr_framereg.c +++ linux-aws-5.15-5.15.0/net/hsr/hsr_framereg.c @@ -237,6 +237,10 @@ */ if (ethhdr->h_proto == htons(ETH_P_PRP) || ethhdr->h_proto == htons(ETH_P_HSR)) { + /* Check if skb contains hsr_ethhdr */ + if (skb->mac_len < sizeof(struct hsr_ethhdr)) + return NULL; + /* Use the existing sequence_nr from the tag as starting point * for filtering duplicate frames. */ diff -u linux-aws-5.15-5.15.0/net/hsr/hsr_main.c linux-aws-5.15-5.15.0/net/hsr/hsr_main.c --- linux-aws-5.15-5.15.0/net/hsr/hsr_main.c +++ linux-aws-5.15-5.15.0/net/hsr/hsr_main.c @@ -148,14 +148,21 @@ static int __init hsr_init(void) { - int res; + int err; BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN); - register_netdevice_notifier(&hsr_nb); - res = hsr_netlink_init(); + err = register_netdevice_notifier(&hsr_nb); + if (err) + return err; - return res; + err = hsr_netlink_init(); + if (err) { + unregister_netdevice_notifier(&hsr_nb); + return err; + } + + return 0; } static void __exit hsr_exit(void) diff -u linux-aws-5.15-5.15.0/net/ipv4/arp.c linux-aws-5.15-5.15.0/net/ipv4/arp.c --- linux-aws-5.15-5.15.0/net/ipv4/arp.c +++ linux-aws-5.15-5.15.0/net/ipv4/arp.c @@ -1104,7 +1104,8 @@ if (neigh) { if (!(neigh->nud_state & NUD_NOARP)) { read_lock_bh(&neigh->lock); - memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len); + memcpy(r->arp_ha.sa_data, neigh->ha, + min(dev->addr_len, (unsigned char)sizeof(r->arp_ha.sa_data_min))); r->arp_flags = arp_state_to_flags(neigh); read_unlock_bh(&neigh->lock); r->arp_ha.sa_family = dev->type; diff -u linux-aws-5.15-5.15.0/net/ipv4/devinet.c linux-aws-5.15-5.15.0/net/ipv4/devinet.c --- linux-aws-5.15-5.15.0/net/ipv4/devinet.c +++ linux-aws-5.15-5.15.0/net/ipv4/devinet.c @@ -1797,6 +1797,21 @@ return err; } +/* Combine dev_addr_genid and dev_base_seq to detect changes. + */ +static u32 inet_base_seq(const struct net *net) +{ + u32 res = atomic_read(&net->ipv4.dev_addr_genid) + + net->dev_base_seq; + + /* Must not return 0 (see nl_dump_check_consistent()). + * Chose a value far away from 0. + */ + if (!res) + res = 0x80000000; + return res; +} + static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) { const struct nlmsghdr *nlh = cb->nlh; @@ -1848,8 +1863,7 @@ idx = 0; head = &tgt_net->dev_index_head[h]; rcu_read_lock(); - cb->seq = atomic_read(&tgt_net->ipv4.dev_addr_genid) ^ - tgt_net->dev_base_seq; + cb->seq = inet_base_seq(tgt_net); hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; @@ -2250,8 +2264,7 @@ idx = 0; head = &net->dev_index_head[h]; rcu_read_lock(); - cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^ - net->dev_base_seq; + cb->seq = inet_base_seq(net); hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; diff -u linux-aws-5.15-5.15.0/net/ipv4/igmp.c linux-aws-5.15-5.15.0/net/ipv4/igmp.c --- linux-aws-5.15-5.15.0/net/ipv4/igmp.c +++ linux-aws-5.15-5.15.0/net/ipv4/igmp.c @@ -2532,11 +2532,10 @@ err = ip_mc_leave_group(sk, &imr); return err; } - int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, - struct ip_msfilter __user *optval, int __user *optlen) + sockptr_t optval, sockptr_t optlen) { - int err, len, count, copycount; + int err, len, count, copycount, msf_size; struct ip_mreqn imr; __be32 addr = msf->imsf_multiaddr; struct ip_mc_socklist *pmc; @@ -2579,12 +2578,15 @@ copycount = count < msf->imsf_numsrc ? 
count : msf->imsf_numsrc; len = flex_array_size(psl, sl_addr, copycount); msf->imsf_numsrc = count; - if (put_user(IP_MSFILTER_SIZE(copycount), optlen) || - copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) { + msf_size = IP_MSFILTER_SIZE(copycount); + if (copy_to_sockptr(optlen, &msf_size, sizeof(int)) || + copy_to_sockptr(optval, msf, IP_MSFILTER_SIZE(0))) { return -EFAULT; } if (len && - copy_to_user(&optval->imsf_slist_flex[0], psl->sl_addr, len)) + copy_to_sockptr_offset(optval, + offsetof(struct ip_msfilter, imsf_slist_flex), + psl->sl_addr, len)) return -EFAULT; return 0; done: @@ -2592,7 +2594,7 @@ } int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, - struct sockaddr_storage __user *p) + sockptr_t optval, size_t ss_offset) { int i, count, copycount; struct sockaddr_in *psin; @@ -2622,15 +2624,17 @@ count = psl ? psl->sl_count : 0; copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; gsf->gf_numsrc = count; - for (i = 0; i < copycount; i++, p++) { + for (i = 0; i < copycount; i++) { struct sockaddr_storage ss; psin = (struct sockaddr_in *)&ss; memset(&ss, 0, sizeof(ss)); psin->sin_family = AF_INET; psin->sin_addr.s_addr = psl->sl_addr[i]; - if (copy_to_user(p, &ss, sizeof(ss))) + if (copy_to_sockptr_offset(optval, ss_offset, + &ss, sizeof(ss))) return -EFAULT; + ss_offset += sizeof(ss); } return 0; } diff -u linux-aws-5.15-5.15.0/net/ipv4/inet_diag.c linux-aws-5.15-5.15.0/net/ipv4/inet_diag.c --- linux-aws-5.15-5.15.0/net/ipv4/inet_diag.c +++ linux-aws-5.15-5.15.0/net/ipv4/inet_diag.c @@ -57,7 +57,7 @@ return ERR_PTR(-ENOENT); } - if (!inet_diag_table[proto]) + if (!READ_ONCE(inet_diag_table[proto])) sock_load_diag_module(AF_INET, proto); mutex_lock(&inet_diag_table_mutex); @@ -1419,7 +1419,7 @@ mutex_lock(&inet_diag_table_mutex); err = -EEXIST; if (!inet_diag_table[type]) { - inet_diag_table[type] = h; + WRITE_ONCE(inet_diag_table[type], h); err = 0; } mutex_unlock(&inet_diag_table_mutex); @@ -1436,7 +1436,7 @@ return; mutex_lock(&inet_diag_table_mutex); - inet_diag_table[type] = NULL; + WRITE_ONCE(inet_diag_table[type], NULL); mutex_unlock(&inet_diag_table_mutex); } EXPORT_SYMBOL_GPL(inet_diag_unregister); diff -u linux-aws-5.15-5.15.0/net/ipv4/ip_sockglue.c linux-aws-5.15-5.15.0/net/ipv4/ip_sockglue.c --- linux-aws-5.15-5.15.0/net/ipv4/ip_sockglue.c +++ linux-aws-5.15-5.15.0/net/ipv4/ip_sockglue.c @@ -1460,37 +1460,37 @@ return false; } -static int ip_get_mcast_msfilter(struct sock *sk, void __user *optval, - int __user *optlen, int len) +static int ip_get_mcast_msfilter(struct sock *sk, sockptr_t optval, + sockptr_t optlen, int len) { const int size0 = offsetof(struct group_filter, gf_slist_flex); - struct group_filter __user *p = optval; struct group_filter gsf; - int num; + int num, gsf_size; int err; if (len < size0) return -EINVAL; - if (copy_from_user(&gsf, p, size0)) + if (copy_from_sockptr(&gsf, optval, size0)) return -EFAULT; num = gsf.gf_numsrc; - err = ip_mc_gsfget(sk, &gsf, p->gf_slist_flex); + err = ip_mc_gsfget(sk, &gsf, optval, + offsetof(struct group_filter, gf_slist_flex)); if (err) return err; if (gsf.gf_numsrc < num) num = gsf.gf_numsrc; - if (put_user(GROUP_FILTER_SIZE(num), optlen) || - copy_to_user(p, &gsf, size0)) + gsf_size = GROUP_FILTER_SIZE(num); + if (copy_to_sockptr(optlen, &gsf_size, sizeof(int)) || + copy_to_sockptr(optval, &gsf, size0)) return -EFAULT; return 0; } -static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, - int __user *optlen, int len) +static int compat_ip_get_mcast_msfilter(struct sock *sk, 
sockptr_t optval, + sockptr_t optlen, int len) { const int size0 = offsetof(struct compat_group_filter, gf_slist_flex); - struct compat_group_filter __user *p = optval; struct compat_group_filter gf32; struct group_filter gf; int num; @@ -1498,7 +1498,7 @@ if (len < size0) return -EINVAL; - if (copy_from_user(&gf32, p, size0)) + if (copy_from_sockptr(&gf32, optval, size0)) return -EFAULT; gf.gf_interface = gf32.gf_interface; @@ -1506,21 +1506,24 @@ num = gf.gf_numsrc = gf32.gf_numsrc; gf.gf_group = gf32.gf_group; - err = ip_mc_gsfget(sk, &gf, p->gf_slist_flex); + err = ip_mc_gsfget(sk, &gf, optval, + offsetof(struct compat_group_filter, gf_slist_flex)); if (err) return err; if (gf.gf_numsrc < num) num = gf.gf_numsrc; len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32)); - if (put_user(len, optlen) || - put_user(gf.gf_fmode, &p->gf_fmode) || - put_user(gf.gf_numsrc, &p->gf_numsrc)) + if (copy_to_sockptr(optlen, &len, sizeof(int)) || + copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_fmode), + &gf.gf_fmode, sizeof(gf.gf_fmode)) || + copy_to_sockptr_offset(optval, offsetof(struct compat_group_filter, gf_numsrc), + &gf.gf_numsrc, sizeof(gf.gf_numsrc))) return -EFAULT; return 0; } static int do_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) + sockptr_t optval, sockptr_t optlen) { struct inet_sock *inet = inet_sk(sk); bool needs_rtnl = getsockopt_needs_rtnl(optname); @@ -1533,7 +1536,7 @@ if (ip_mroute_opt(optname)) return ip_mroute_getsockopt(sk, optname, optval, optlen); - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len < 0) return -EINVAL; @@ -1558,15 +1561,17 @@ inet_opt->opt.optlen); release_sock(sk); - if (opt->optlen == 0) - return put_user(0, optlen); + if (opt->optlen == 0) { + len = 0; + return copy_to_sockptr(optlen, &len, sizeof(int)); + } ip_options_undo(opt); len = min_t(unsigned int, len, opt->optlen); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, opt->__data, len)) + if (copy_to_sockptr(optval, opt->__data, len)) return -EFAULT; return 0; } @@ -1657,9 +1662,9 @@ addr.s_addr = inet->mc_addr; release_sock(sk); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &addr, len)) + if (copy_to_sockptr(optval, &addr, len)) return -EFAULT; return 0; } @@ -1671,12 +1676,11 @@ err = -EINVAL; goto out; } - if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) { + if (copy_from_sockptr(&msf, optval, IP_MSFILTER_SIZE(0))) { err = -EFAULT; goto out; } - err = ip_mc_msfget(sk, &msf, - (struct ip_msfilter __user *)optval, optlen); + err = ip_mc_msfget(sk, &msf, optval, optlen); goto out; } case MCAST_MSFILTER: @@ -1698,8 +1702,13 @@ if (sk->sk_type != SOCK_STREAM) return -ENOPROTOOPT; - msg.msg_control_is_user = true; - msg.msg_control_user = optval; + if (optval.is_kernel) { + msg.msg_control_is_user = false; + msg.msg_control = optval.kernel; + } else { + msg.msg_control_is_user = true; + msg.msg_control_user = optval.user; + } msg.msg_controllen = len; msg.msg_flags = in_compat_syscall() ? 
MSG_CMSG_COMPAT : 0; @@ -1720,7 +1729,7 @@ put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos); } len -= msg.msg_controllen; - return put_user(len, optlen); + return copy_to_sockptr(optlen, &len, sizeof(int)); } case IP_FREEBIND: val = inet->freebind; @@ -1743,15 +1752,15 @@ if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { unsigned char ucval = (unsigned char)val; len = 1; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &ucval, 1)) + if (copy_to_sockptr(optval, &ucval, 1)) return -EFAULT; } else { len = min_t(unsigned int, sizeof(int), len); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, len)) + if (copy_to_sockptr(optval, &val, len)) return -EFAULT; } return 0; @@ -1768,7 +1777,8 @@ { int err; - err = do_ip_getsockopt(sk, level, optname, optval, optlen); + err = do_ip_getsockopt(sk, level, optname, + USER_SOCKPTR(optval), USER_SOCKPTR(optlen)); #if IS_ENABLED(CONFIG_BPFILTER_UMH) if (optname >= BPFILTER_IPT_SO_GET_INFO && diff -u linux-aws-5.15-5.15.0/net/ipv4/ip_tunnel.c linux-aws-5.15-5.15.0/net/ipv4/ip_tunnel.c --- linux-aws-5.15-5.15.0/net/ipv4/ip_tunnel.c +++ linux-aws-5.15-5.15.0/net/ipv4/ip_tunnel.c @@ -364,7 +364,7 @@ bool log_ecn_error) { const struct iphdr *iph = ip_hdr(skb); - int err; + int nh, err; #ifdef CONFIG_NET_IPGRE_BROADCAST if (ipv4_is_multicast(iph->daddr)) { @@ -390,8 +390,21 @@ tunnel->i_seqno = ntohl(tpi->seq) + 1; } + /* Save offset of outer header relative to skb->head, + * because we are going to reset the network header to the inner header + * and might change skb->head. + */ + nh = skb_network_header(skb) - skb->head; + skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0); + if (!pskb_inet_may_pull(skb)) { + DEV_STATS_INC(tunnel->dev, rx_length_errors); + DEV_STATS_INC(tunnel->dev, rx_errors); + goto drop; + } + iph = (struct iphdr *)(skb->head + nh); + err = IP_ECN_decapsulate(iph, skb); if (unlikely(err)) { if (log_ecn_error) @@ -540,6 +553,20 @@ return 0; } +static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom) +{ + /* we must cap headroom to some upperlimit, else pskb_expand_head + * will overflow header offsets in skb_headers_offset_update(). 
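The ip_tunnel_rcv() change above is an instance of a recurring sk_buff rule that the added comment spells out: a pskb_may_pull()-style call can reallocate skb->head, so pointers computed before the call must be re-derived from a saved offset rather than reused. A stripped-down sketch of that offset-then-recompute pattern; demo_inner_saddr() is invented for illustration.

#include <linux/ip.h>
#include <linux/skbuff.h>

/* Illustrative only: cache an offset, not a pointer, across a call that may
 * reallocate skb->head, then recompute the pointer from the new head.
 */
static __be32 demo_inner_saddr(struct sk_buff *skb)
{
	int nh = skb_network_header(skb) - skb->head;	/* offset survives realloc */
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iph)))
		return 0;

	iph = (const struct iphdr *)(skb->head + nh);	/* never reuse the old pointer */
	return iph->saddr;
}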
+ */ + static const unsigned int max_allowed = 512; + + if (headroom > max_allowed) + headroom = max_allowed; + + if (headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, headroom); +} + void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto, int tunnel_hlen) { @@ -613,13 +640,13 @@ } headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len; - if (headroom > READ_ONCE(dev->needed_headroom)) - WRITE_ONCE(dev->needed_headroom, headroom); - - if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) { + if (skb_cow_head(skb, headroom)) { ip_rt_put(rt); goto tx_dropped; } + + ip_tunnel_adj_headroom(dev, headroom); + iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); return; @@ -797,16 +824,16 @@ max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); - if (max_headroom > READ_ONCE(dev->needed_headroom)) - WRITE_ONCE(dev->needed_headroom, max_headroom); - if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) { + if (skb_cow_head(skb, max_headroom)) { ip_rt_put(rt); dev->stats.tx_dropped++; kfree_skb(skb); return; } + ip_tunnel_adj_headroom(dev, max_headroom); + iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl, df, !net_eq(tunnel->net, dev_net(dev))); return; diff -u linux-aws-5.15-5.15.0/net/ipv4/ipmr.c linux-aws-5.15-5.15.0/net/ipv4/ipmr.c --- linux-aws-5.15-5.15.0/net/ipv4/ipmr.c +++ linux-aws-5.15-5.15.0/net/ipv4/ipmr.c @@ -1540,7 +1540,8 @@ } /* Getsock opt support for the multicast routing system. */ -int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) +int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, + sockptr_t optlen) { int olr; int val; @@ -1571,14 +1572,16 @@ return -ENOPROTOOPT; } - if (get_user(olr, optlen)) + if (copy_from_sockptr(&olr, optlen, sizeof(int))) return -EFAULT; - olr = min_t(unsigned int, olr, sizeof(int)); if (olr < 0) return -EINVAL; - if (put_user(olr, optlen)) + + olr = min_t(unsigned int, olr, sizeof(int)); + + if (copy_to_sockptr(optlen, &olr, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, olr)) + if (copy_to_sockptr(optval, &val, olr)) return -EFAULT; return 0; } diff -u linux-aws-5.15-5.15.0/net/ipv4/netfilter/nf_reject_ipv4.c linux-aws-5.15-5.15.0/net/ipv4/netfilter/nf_reject_ipv4.c --- linux-aws-5.15-5.15.0/net/ipv4/netfilter/nf_reject_ipv4.c +++ linux-aws-5.15-5.15.0/net/ipv4/netfilter/nf_reject_ipv4.c @@ -278,6 +278,7 @@ goto free_nskb; nf_ct_attach(nskb, oldskb); + nf_ct_set_closing(skb_nfct(oldskb)); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) /* If we use ip_local_out for bridged traffic, the MAC source on diff -u linux-aws-5.15-5.15.0/net/ipv4/tcp.c linux-aws-5.15-5.15.0/net/ipv4/tcp.c --- linux-aws-5.15-5.15.0/net/ipv4/tcp.c +++ linux-aws-5.15-5.15.0/net/ipv4/tcp.c @@ -3966,11 +3966,11 @@ if (get_user(len, optlen)) return -EFAULT; - len = min_t(unsigned int, len, sizeof(int)); - if (len < 0) return -EINVAL; + len = min_t(unsigned int, len, sizeof(int)); + switch (optname) { case TCP_MAXSEG: val = tp->mss_cache; diff -u linux-aws-5.15-5.15.0/net/ipv4/udp.c linux-aws-5.15-5.15.0/net/ipv4/udp.c --- linux-aws-5.15-5.15.0/net/ipv4/udp.c +++ linux-aws-5.15-5.15.0/net/ipv4/udp.c @@ -2814,11 +2814,11 @@ if (get_user(len, optlen)) return -EFAULT; - len = min_t(unsigned int, len, sizeof(int)); - if (len < 0) return -EINVAL; + len = min_t(unsigned int, len, sizeof(int)); + switch 
(optname) { case UDP_CORK: val = READ_ONCE(up->corkflag); diff -u linux-aws-5.15-5.15.0/net/ipv6/addrconf.c linux-aws-5.15-5.15.0/net/ipv6/addrconf.c --- linux-aws-5.15-5.15.0/net/ipv6/addrconf.c +++ linux-aws-5.15-5.15.0/net/ipv6/addrconf.c @@ -707,6 +707,22 @@ return err; } +/* Combine dev_addr_genid and dev_base_seq to detect changes. + */ +static u32 inet6_base_seq(const struct net *net) +{ + u32 res = atomic_read(&net->ipv6.dev_addr_genid) + + net->dev_base_seq; + + /* Must not return 0 (see nl_dump_check_consistent()). + * Chose a value far away from 0. + */ + if (!res) + res = 0x80000000; + return res; +} + + static int inet6_netconf_dump_devconf(struct sk_buff *skb, struct netlink_callback *cb) { @@ -740,8 +756,7 @@ idx = 0; head = &net->dev_index_head[h]; rcu_read_lock(); - cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ - net->dev_base_seq; + cb->seq = inet6_base_seq(net); hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; @@ -5316,7 +5331,7 @@ } rcu_read_lock(); - cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq; + cb->seq = inet6_base_seq(tgt_net); for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; head = &tgt_net->dev_index_head[h]; @@ -5448,9 +5463,10 @@ } addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer); - if (!addr) - return -EINVAL; - + if (!addr) { + err = -EINVAL; + goto errout; + } ifm = nlmsg_data(nlh); if (ifm->ifa_index) dev = dev_get_by_index(tgt_net, ifm->ifa_index); diff -u linux-aws-5.15-5.15.0/net/ipv6/af_inet6.c linux-aws-5.15-5.15.0/net/ipv6/af_inet6.c --- linux-aws-5.15-5.15.0/net/ipv6/af_inet6.c +++ linux-aws-5.15-5.15.0/net/ipv6/af_inet6.c @@ -1073,6 +1073,7 @@ static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = { .inet6_bind = __inet6_bind, .udp6_lib_lookup = __udp6_lib_lookup, + .ipv6_dev_get_saddr = ipv6_dev_get_saddr, }; static int __init inet6_init(void) diff -u linux-aws-5.15-5.15.0/net/ipv6/fib6_rules.c linux-aws-5.15-5.15.0/net/ipv6/fib6_rules.c --- linux-aws-5.15-5.15.0/net/ipv6/fib6_rules.c +++ linux-aws-5.15-5.15.0/net/ipv6/fib6_rules.c @@ -446,6 +446,11 @@ + nla_total_size(16); /* src */ } +static void fib6_rule_flush_cache(struct fib_rules_ops *ops) +{ + rt_genid_bump_ipv6(ops->fro_net); +} + static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = { .family = AF_INET6, .rule_size = sizeof(struct fib6_rule), @@ -458,6 +463,7 @@ .compare = fib6_rule_compare, .fill = fib6_rule_fill, .nlmsg_payload = fib6_rule_nlmsg_payload, + .flush_cache = fib6_rule_flush_cache, .nlgroup = RTNLGRP_IPV6_RULE, .policy = fib6_rule_policy, .owner = THIS_MODULE, diff -u linux-aws-5.15-5.15.0/net/ipv6/mcast.c linux-aws-5.15-5.15.0/net/ipv6/mcast.c --- linux-aws-5.15-5.15.0/net/ipv6/mcast.c +++ linux-aws-5.15-5.15.0/net/ipv6/mcast.c @@ -2722,7 +2722,6 @@ /* Should stop work after group drop. 
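The inet6_base_seq() helper above (and its IPv4 counterpart inet_base_seq() in the earlier net/ipv4/devinet.c hunk) must never return 0 because nl_dump_check_consistent() uses a previous sequence value of 0 to mean "nothing recorded yet"; a cookie that could legitimately be 0 would therefore defeat the NLM_F_DUMP_INTR change detection. A sketch of how a dump callback consumes the cookie; demo_fill_one() is an invented example, not part of the patch.

#include <net/netlink.h>
#include <linux/rtnetlink.h>

/* Illustrative only: cb->seq holds the generation cookie taken before the
 * walk; nl_dump_check_consistent() flags the message with NLM_F_DUMP_INTR
 * if the cookie changed between dump passes.
 */
static int demo_fill_one(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWADDR, 0, NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	nl_dump_check_consistent(cb, nlh);
	/* ... fill address attributes here ... */
	nlmsg_end(skb, nlh);
	return 0;
}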
or we will * start work again in mld_ifc_event() */ - synchronize_net(); mld_query_stop_work(idev); mld_report_stop_work(idev); diff -u linux-aws-5.15-5.15.0/net/ipv6/route.c linux-aws-5.15-5.15.0/net/ipv6/route.c --- linux-aws-5.15-5.15.0/net/ipv6/route.c +++ linux-aws-5.15-5.15.0/net/ipv6/route.c @@ -5346,19 +5346,7 @@ err_nh = NULL; list_for_each_entry(nh, &rt6_nh_list, next) { err = __ip6_ins_rt(nh->fib6_info, info, extack); - fib6_info_release(nh->fib6_info); - - if (!err) { - /* save reference to last route successfully inserted */ - rt_last = nh->fib6_info; - - /* save reference to first route for notification */ - if (!rt_notif) - rt_notif = nh->fib6_info; - } - /* nh->fib6_info is used or freed at this point, reset to NULL*/ - nh->fib6_info = NULL; if (err) { if (replace && nhn) NL_SET_ERR_MSG_MOD(extack, @@ -5366,6 +5354,12 @@ err_nh = nh; goto add_errout; } + /* save reference to last route successfully inserted */ + rt_last = nh->fib6_info; + + /* save reference to first route for notification */ + if (!rt_notif) + rt_notif = nh->fib6_info; /* Because each route is added like a single route we remove * these flags after the first nexthop: if there is a collision, @@ -5426,8 +5420,7 @@ cleanup: list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { - if (nh->fib6_info) - fib6_info_release(nh->fib6_info); + fib6_info_release(nh->fib6_info); list_del(&nh->next); kfree(nh); } diff -u linux-aws-5.15-5.15.0/net/ipv6/seg6.c linux-aws-5.15-5.15.0/net/ipv6/seg6.c --- linux-aws-5.15-5.15.0/net/ipv6/seg6.c +++ linux-aws-5.15-5.15.0/net/ipv6/seg6.c @@ -507,22 +507,24 @@ { int err; - err = genl_register_family(&seg6_genl_family); + err = register_pernet_subsys(&ip6_segments_ops); if (err) goto out; - err = register_pernet_subsys(&ip6_segments_ops); + err = genl_register_family(&seg6_genl_family); if (err) - goto out_unregister_genl; + goto out_unregister_pernet; #ifdef CONFIG_IPV6_SEG6_LWTUNNEL err = seg6_iptunnel_init(); if (err) - goto out_unregister_pernet; + goto out_unregister_genl; err = seg6_local_init(); - if (err) - goto out_unregister_pernet; + if (err) { + seg6_iptunnel_exit(); + goto out_unregister_genl; + } #endif #ifdef CONFIG_IPV6_SEG6_HMAC @@ -543,11 +545,11 @@ #endif #endif #ifdef CONFIG_IPV6_SEG6_LWTUNNEL -out_unregister_pernet: - unregister_pernet_subsys(&ip6_segments_ops); -#endif out_unregister_genl: genl_unregister_family(&seg6_genl_family); +#endif +out_unregister_pernet: + unregister_pernet_subsys(&ip6_segments_ops); goto out; } diff -u linux-aws-5.15-5.15.0/net/iucv/iucv.c linux-aws-5.15-5.15.0/net/iucv/iucv.c --- linux-aws-5.15-5.15.0/net/iucv/iucv.c +++ linux-aws-5.15-5.15.0/net/iucv/iucv.c @@ -156,7 +156,7 @@ static LIST_HEAD(iucv_handler_list); /* - * iucv_path_table: an array of iucv_path structures. + * iucv_path_table: array of pointers to iucv_path structures. 
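The seg6_init() reordering above follows the usual init-time discipline: register facilities in order and, on failure, unwind only what was already registered, in reverse order. A generic sketch of that shape; demo_pernet_ops, demo_genl_family and demo_register_tunnels() are placeholders, not the seg6 symbols.

#include <linux/init.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>

/* Placeholders for the sketch only. */
extern struct pernet_operations demo_pernet_ops;
extern struct genl_family demo_genl_family;
int demo_register_tunnels(void);

static int __init demo_init(void)
{
	int err;

	err = register_pernet_subsys(&demo_pernet_ops);
	if (err)
		return err;

	err = genl_register_family(&demo_genl_family);
	if (err)
		goto err_pernet;

	err = demo_register_tunnels();
	if (err)
		goto err_genl;

	return 0;

err_genl:
	genl_unregister_family(&demo_genl_family);
err_pernet:
	unregister_pernet_subsys(&demo_pernet_ops);
	return err;
}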
*/ static struct iucv_path **iucv_path_table; static unsigned long iucv_max_pathid; @@ -544,7 +544,7 @@ cpus_read_lock(); rc = -ENOMEM; - alloc_size = iucv_max_pathid * sizeof(struct iucv_path); + alloc_size = iucv_max_pathid * sizeof(*iucv_path_table); iucv_path_table = kzalloc(alloc_size, GFP_KERNEL); if (!iucv_path_table) goto out; diff -u linux-aws-5.15-5.15.0/net/kcm/kcmsock.c linux-aws-5.15-5.15.0/net/kcm/kcmsock.c --- linux-aws-5.15-5.15.0/net/kcm/kcmsock.c +++ linux-aws-5.15-5.15.0/net/kcm/kcmsock.c @@ -1275,10 +1275,11 @@ if (get_user(len, optlen)) return -EFAULT; - len = min_t(unsigned int, len, sizeof(int)); if (len < 0) return -EINVAL; + len = min_t(unsigned int, len, sizeof(int)); + switch (optname) { case KCM_RECV_DISABLE: val = kcm->rx_disabled; diff -u linux-aws-5.15-5.15.0/net/l2tp/l2tp_ip6.c linux-aws-5.15-5.15.0/net/l2tp/l2tp_ip6.c --- linux-aws-5.15-5.15.0/net/l2tp/l2tp_ip6.c +++ linux-aws-5.15-5.15.0/net/l2tp/l2tp_ip6.c @@ -628,7 +628,7 @@ back_from_confirm: lock_sock(sk); - ulen = len + skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0; + ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0); err = ip6_append_data(sk, ip_generic_getfrag, msg, ulen, transhdrlen, &ipc6, &fl6, (struct rt6_info *)dst, diff -u linux-aws-5.15-5.15.0/net/l2tp/l2tp_ppp.c linux-aws-5.15-5.15.0/net/l2tp/l2tp_ppp.c --- linux-aws-5.15-5.15.0/net/l2tp/l2tp_ppp.c +++ linux-aws-5.15-5.15.0/net/l2tp/l2tp_ppp.c @@ -1357,11 +1357,11 @@ if (get_user(len, optlen)) return -EFAULT; - len = min_t(unsigned int, len, sizeof(int)); - if (len < 0) return -EINVAL; + len = min_t(unsigned int, len, sizeof(int)); + err = -ENOTCONN; if (!sk->sk_user_data) goto end; diff -u linux-aws-5.15-5.15.0/net/mac80211/cfg.c linux-aws-5.15-5.15.0/net/mac80211/cfg.c --- linux-aws-5.15-5.15.0/net/mac80211/cfg.c +++ linux-aws-5.15-5.15.0/net/mac80211/cfg.c @@ -511,6 +511,9 @@ sta->cipher_scheme = cs; err = ieee80211_key_link(key, sdata, sta); + /* KRACK protection, shouldn't happen but just silently accept key */ + if (err == -EALREADY) + err = 0; out_unlock: mutex_unlock(&local->sta_mtx); diff -u linux-aws-5.15-5.15.0/net/mac80211/mlme.c linux-aws-5.15-5.15.0/net/mac80211/mlme.c --- linux-aws-5.15-5.15.0/net/mac80211/mlme.c +++ linux-aws-5.15-5.15.0/net/mac80211/mlme.c @@ -5923,6 +5923,7 @@ ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code, false); + drv_mgd_complete_tx(sdata->local, sdata, &info); return 0; } diff -u linux-aws-5.15-5.15.0/net/mac80211/sta_info.c linux-aws-5.15-5.15.0/net/mac80211/sta_info.c --- linux-aws-5.15-5.15.0/net/mac80211/sta_info.c +++ linux-aws-5.15-5.15.0/net/mac80211/sta_info.c @@ -696,6 +696,8 @@ if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_accept_plinks_update(sdata); + ieee80211_check_fast_xmit(sta); + return 0; out_remove: sta_info_hash_del(local, sta); diff -u linux-aws-5.15-5.15.0/net/mac80211/tx.c linux-aws-5.15-5.15.0/net/mac80211/tx.c --- linux-aws-5.15-5.15.0/net/mac80211/tx.c +++ linux-aws-5.15-5.15.0/net/mac80211/tx.c @@ -2965,7 +2965,7 @@ sdata->vif.type == NL80211_IFTYPE_STATION) goto out; - if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) + if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded) goto out; if (test_sta_flag(sta, WLAN_STA_PS_STA) || diff -u linux-aws-5.15-5.15.0/net/mptcp/pm_netlink.c linux-aws-5.15-5.15.0/net/mptcp/pm_netlink.c --- linux-aws-5.15-5.15.0/net/mptcp/pm_netlink.c +++ linux-aws-5.15-5.15.0/net/mptcp/pm_netlink.c @@ -38,7 +38,8 @@ u8 retrans_times; }; -#define MAX_ADDR_ID 255 +/* max value of 
mptcp_addr_info.id */ +#define MAX_ADDR_ID U8_MAX #define BITMAP_SZ DIV_ROUND_UP(MAX_ADDR_ID + 1, BITS_PER_LONG) struct pm_nl_pernet { @@ -822,7 +823,8 @@ } static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, - struct mptcp_pm_addr_entry *entry) + struct mptcp_pm_addr_entry *entry, + bool needs_id) { struct mptcp_pm_addr_entry *cur; unsigned int addr_max; @@ -849,19 +851,18 @@ goto out; } - if (!entry->addr.id) { + if (!entry->addr.id && needs_id) { find_next: entry->addr.id = find_next_zero_bit(pernet->id_bitmap, MAX_ADDR_ID + 1, pernet->next_id); - if ((!entry->addr.id || entry->addr.id > MAX_ADDR_ID) && - pernet->next_id != 1) { + if (!entry->addr.id && pernet->next_id != 1) { pernet->next_id = 1; goto find_next; } } - if (!entry->addr.id || entry->addr.id > MAX_ADDR_ID) + if (!entry->addr.id && needs_id) goto out; __set_bit(entry->addr.id, pernet->id_bitmap); @@ -1001,7 +1002,7 @@ entry->ifindex = 0; entry->flags = 0; entry->lsk = NULL; - ret = mptcp_pm_nl_append_new_local_addr(pernet, entry); + ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true); if (ret < 0) kfree(entry); @@ -1202,6 +1203,18 @@ return 0; } +static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr, + struct genl_info *info) +{ + struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; + + if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, + mptcp_pm_addr_policy, info->extack) && + tb[MPTCP_PM_ADDR_ATTR_ID]) + return true; + return false; +} + static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info) { struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; @@ -1228,7 +1241,8 @@ return ret; } } - ret = mptcp_pm_nl_append_new_local_addr(pernet, entry); + ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, + !mptcp_pm_has_addr_attr_id(attr, info)); if (ret < 0) { GENL_SET_ERR_MSG(info, "too many addresses or duplicate one"); if (entry->lsk) diff -u linux-aws-5.15-5.15.0/net/mptcp/protocol.c linux-aws-5.15-5.15.0/net/mptcp/protocol.c --- linux-aws-5.15-5.15.0/net/mptcp/protocol.c +++ linux-aws-5.15-5.15.0/net/mptcp/protocol.c @@ -330,7 +330,7 @@ return false; } -static void mptcp_stop_timer(struct sock *sk) +static void mptcp_stop_rtx_timer(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -688,6 +688,46 @@ return moved; } +static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) +{ + int err = sock_error(ssk); + int ssk_state; + + if (!err) + return false; + + /* only propagate errors on fallen-back sockets or + * on MPC connect + */ + if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk))) + return false; + + /* We need to propagate only transition to CLOSE state. + * Orphaned socket will see such state change via + * subflow_sched_work_if_closed() and that path will properly + * destroy the msk as needed. + */ + ssk_state = inet_sk_state_load(ssk); + if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD)) + inet_sk_state_store(sk, ssk_state); + WRITE_ONCE(sk->sk_err, -err); + + /* This barrier is coupled with smp_rmb() in mptcp_poll() */ + smp_wmb(); + sk_error_report(sk); + return true; +} + +void __mptcp_error_report(struct sock *sk) +{ + struct mptcp_subflow_context *subflow; + struct mptcp_sock *msk = mptcp_sk(sk); + + mptcp_for_each_subflow(msk, subflow) + if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow))) + break; +} + /* In most cases we will be able to lock the mptcp socket. If its already * owned, we need to defer to the work queue to avoid ABBA deadlock. 
*/ @@ -790,12 +830,12 @@ mptcp_sockopt_sync_all(msk); } -static bool mptcp_timer_pending(struct sock *sk) +static bool mptcp_rtx_timer_pending(struct sock *sk) { return timer_pending(&inet_csk(sk)->icsk_retransmit_timer); } -static void mptcp_reset_timer(struct sock *sk) +static void mptcp_reset_rtx_timer(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); unsigned long tout; @@ -1105,10 +1145,10 @@ __mptcp_mem_reclaim_partial(sk); if (snd_una == READ_ONCE(msk->snd_nxt) && !msk->recovery) { - if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) - mptcp_stop_timer(sk); + if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) + mptcp_stop_rtx_timer(sk); } else { - mptcp_reset_timer(sk); + mptcp_reset_rtx_timer(sk); } } @@ -1310,6 +1350,7 @@ mpext = skb_ext_find(skb, SKB_EXT_MPTCP); if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) { TCP_SKB_CB(skb)->eor = 1; + tcp_mark_push(tcp_sk(ssk), skb); goto alloc_skb; } @@ -1600,8 +1641,8 @@ out: /* ensure the rtx timer is running */ - if (!mptcp_timer_pending(sk)) - mptcp_reset_timer(sk); + if (!mptcp_rtx_timer_pending(sk)) + mptcp_reset_rtx_timer(sk); if (copied) mptcp_check_send_data_fin(sk); } @@ -1660,8 +1701,8 @@ if (copied) { tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, info.size_goal); - if (!mptcp_timer_pending(sk)) - mptcp_reset_timer(sk); + if (!mptcp_rtx_timer_pending(sk)) + mptcp_reset_rtx_timer(sk); if (msk->snd_data_fin_enable && msk->snd_nxt + 1 == msk->write_seq) @@ -2133,7 +2174,7 @@ sock_put(sk); } -static void mptcp_timeout_timer(struct timer_list *t) +static void mptcp_tout_timer(struct timer_list *t) { struct sock *sk = from_timer(sk, t, sk_timer); @@ -2273,6 +2314,7 @@ /* close acquired an extra ref */ __sock_put(ssk); } + __mptcp_subflow_error_report(sk, ssk); release_sock(ssk); sock_put(ssk); @@ -2424,8 +2466,8 @@ release_sock(ssk); reset_timer: - if (!mptcp_timer_pending(sk)) - mptcp_reset_timer(sk); + if (!mptcp_rtx_timer_pending(sk)) + mptcp_reset_rtx_timer(sk); } static void mptcp_worker(struct work_struct *work) @@ -2502,7 +2544,7 @@ /* re-use the csk retrans timer for MPTCP-level retrans */ timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); - timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0); + timer_setup(&sk->sk_timer, mptcp_tout_timer, 0); return 0; } @@ -2588,8 +2630,8 @@ } else { pr_debug("Sending DATA_FIN on subflow %p", ssk); tcp_send_ack(ssk); - if (!mptcp_timer_pending(sk)) - mptcp_reset_timer(sk); + if (!mptcp_rtx_timer_pending(sk)) + mptcp_reset_rtx_timer(sk); } break; } @@ -2814,8 +2856,50 @@ return (struct ipv6_pinfo *)(((u8 *)sk) + offset); } + +static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk) +{ + const struct ipv6_pinfo *np = inet6_sk(sk); + struct ipv6_txoptions *opt; + struct ipv6_pinfo *newnp; + + newnp = inet6_sk(newsk); + + rcu_read_lock(); + opt = rcu_dereference(np->opt); + if (opt) { + opt = ipv6_dup_options(newsk, opt); + if (!opt) + net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__); + } + RCU_INIT_POINTER(newnp->opt, opt); + rcu_read_unlock(); +} #endif +static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk) +{ + struct ip_options_rcu *inet_opt, *newopt = NULL; + const struct inet_sock *inet = inet_sk(sk); + struct inet_sock *newinet; + + newinet = inet_sk(newsk); + + rcu_read_lock(); + inet_opt = rcu_dereference(inet->inet_opt); + if (inet_opt) { + newopt = sock_kmalloc(newsk, sizeof(*inet_opt) + + inet_opt->opt.optlen, GFP_ATOMIC); + if (newopt) + 
memcpy(newopt, inet_opt, sizeof(*inet_opt) + + inet_opt->opt.optlen); + else + net_warn_ratelimited("%s: Failed to copy ip options\n", __func__); + } + RCU_INIT_POINTER(newinet->inet_opt, newopt); + rcu_read_unlock(); +} + struct sock *mptcp_sk_clone(const struct sock *sk, const struct mptcp_options_received *mp_opt, struct request_sock *req) @@ -2836,6 +2920,13 @@ nsk->sk_wait_pending = 0; __mptcp_init_sock(nsk); +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + if (nsk->sk_family == AF_INET6) + mptcp_copy_ip6_options(nsk, sk); + else +#endif + mptcp_copy_ip_options(nsk, sk); + msk = mptcp_sk(nsk); msk->local_key = subflow_req->local_key; msk->token = subflow_req->token; diff -u linux-aws-5.15-5.15.0/net/mptcp/subflow.c linux-aws-5.15-5.15.0/net/mptcp/subflow.c --- linux-aws-5.15-5.15.0/net/mptcp/subflow.c +++ linux-aws-5.15-5.15.0/net/mptcp/subflow.c @@ -1269,42 +1269,6 @@ *full_space = tcp_full_space(sk); } -void __mptcp_error_report(struct sock *sk) -{ - struct mptcp_subflow_context *subflow; - struct mptcp_sock *msk = mptcp_sk(sk); - - mptcp_for_each_subflow(msk, subflow) { - struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - int err = sock_error(ssk); - int ssk_state; - - if (!err) - continue; - - /* only propagate errors on fallen-back sockets or - * on MPC connect - */ - if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk)) - continue; - - /* We need to propagate only transition to CLOSE state. - * Orphaned socket will see such state change via - * subflow_sched_work_if_closed() and that path will properly - * destroy the msk as needed. - */ - ssk_state = inet_sk_state_load(ssk); - if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD)) - inet_sk_state_store(sk, ssk_state); - sk->sk_err = -err; - - /* This barrier is coupled with smp_rmb() in mptcp_poll() */ - smp_wmb(); - sk_error_report(sk); - break; - } -} - static void subflow_error_report(struct sock *ssk) { struct sock *sk = mptcp_subflow_ctx(ssk)->conn; diff -u linux-aws-5.15-5.15.0/net/netfilter/core.c linux-aws-5.15-5.15.0/net/netfilter/core.c --- linux-aws-5.15-5.15.0/net/netfilter/core.c +++ linux-aws-5.15-5.15.0/net/netfilter/core.c @@ -632,32 +632,29 @@ /* This needs to be compiled in any case to avoid dependencies between the * nfnetlink_queue code and nf_conntrack. */ -struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly; +const struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly; EXPORT_SYMBOL_GPL(nfnl_ct_hook); -struct nf_ct_hook __rcu *nf_ct_hook __read_mostly; +const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly; EXPORT_SYMBOL_GPL(nf_ct_hook); #if IS_ENABLED(CONFIG_NF_CONNTRACK) -/* This does not belong here, but locally generated errors need it if connection - tracking in use: without this, connection may not be in hash table, and hence - manufactured ICMP or RST packets will not be associated with it. */ -void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) - __rcu __read_mostly; -EXPORT_SYMBOL(ip_ct_attach); - -struct nf_nat_hook __rcu *nf_nat_hook __read_mostly; +const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly; EXPORT_SYMBOL_GPL(nf_nat_hook); +/* This does not belong here, but locally generated errors need it if connection + * tracking in use: without this, connection may not be in hash table, and hence + * manufactured ICMP or RST packets will not be associated with it. 
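The netfilter/core.c changes above drop the bare ip_ct_attach function pointer in favour of methods on a const, RCU-published nf_ct_hook structure. The general shape of that pattern, with invented demo_* names (the real registration is the RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook) seen further down):

#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

/* demo_hook/demo_hook_ptr are invented for this sketch. */
struct demo_hook {
	void (*attach)(struct sk_buff *nskb, const struct sk_buff *skb);
};

static const struct demo_hook __rcu *demo_hook_ptr __read_mostly;

static void demo_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
	/* copy per-packet state from skb to nskb */
}

static const struct demo_hook demo_ops = {
	.attach	= demo_attach,
};

void demo_hook_register(void)
{
	/* fully initialised const table: RCU_INIT_POINTER() is enough here */
	RCU_INIT_POINTER(demo_hook_ptr, &demo_ops);
}

void demo_hook_call(struct sk_buff *nskb, const struct sk_buff *skb)
{
	const struct demo_hook *hook;

	rcu_read_lock();
	hook = rcu_dereference(demo_hook_ptr);
	if (hook)
		hook->attach(nskb, skb);
	rcu_read_unlock();
}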
+ */ void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb) { - void (*attach)(struct sk_buff *, const struct sk_buff *); + const struct nf_ct_hook *ct_hook; if (skb->_nfct) { rcu_read_lock(); - attach = rcu_dereference(ip_ct_attach); - if (attach) - attach(new, skb); + ct_hook = rcu_dereference(nf_ct_hook); + if (ct_hook) + ct_hook->attach(new, skb); rcu_read_unlock(); } } @@ -665,7 +662,7 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct) { - struct nf_ct_hook *ct_hook; + const struct nf_ct_hook *ct_hook; rcu_read_lock(); ct_hook = rcu_dereference(nf_ct_hook); @@ -677,10 +674,26 @@ } EXPORT_SYMBOL(nf_conntrack_destroy); +void nf_ct_set_closing(struct nf_conntrack *nfct) +{ + const struct nf_ct_hook *ct_hook; + + if (!nfct) + return; + + rcu_read_lock(); + ct_hook = rcu_dereference(nf_ct_hook); + if (ct_hook) + ct_hook->set_closing(nfct); + + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(nf_ct_set_closing); + bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple, const struct sk_buff *skb) { - struct nf_ct_hook *ct_hook; + const struct nf_ct_hook *ct_hook; bool ret = false; rcu_read_lock(); diff -u linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_core.c linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_core.c --- linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_core.c +++ linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_core.c @@ -2145,9 +2145,9 @@ struct nf_conn *ct, enum ip_conntrack_info ctinfo) { + const struct nf_nat_hook *nat_hook; struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; - struct nf_nat_hook *nat_hook; unsigned int status; int dataoff; u16 l3num; @@ -2518,7 +2518,6 @@ void nf_conntrack_cleanup_start(void) { conntrack_gc_work.exiting = true; - RCU_INIT_POINTER(ip_ct_attach, NULL); } void nf_conntrack_cleanup_end(void) @@ -2834,16 +2833,28 @@ return ret; } -static struct nf_ct_hook nf_conntrack_hook = { +static void nf_conntrack_set_closing(struct nf_conntrack *nfct) +{ + struct nf_conn *ct = nf_ct_to_nf_conn(nfct); + + switch (nf_ct_protonum(ct)) { + case IPPROTO_TCP: + nf_conntrack_tcp_set_closing(ct); + break; + } +} + +static const struct nf_ct_hook nf_conntrack_hook = { .update = nf_conntrack_update, .destroy = nf_ct_destroy, .get_tuple_skb = nf_conntrack_get_tuple_skb, + .attach = nf_conntrack_attach, + .set_closing = nf_conntrack_set_closing, + .confirm = __nf_conntrack_confirm, }; void nf_conntrack_init_end(void) { - /* For use by REJECT target */ - RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach); RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook); } diff -u linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_netlink.c linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_netlink.c --- linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_netlink.c +++ linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_netlink.c @@ -1826,7 +1826,7 @@ const struct nlattr *attr) __must_hold(RCU) { - struct nf_nat_hook *nat_hook; + const struct nf_nat_hook *nat_hook; int err; nat_hook = rcu_dereference(nf_nat_hook); @@ -2932,7 +2932,7 @@ nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff); } -static struct nfnl_ct_hook ctnetlink_glue_hook = { +static const struct nfnl_ct_hook ctnetlink_glue_hook = { .build_size = ctnetlink_glue_build_size, .build = ctnetlink_glue_build, .parse = ctnetlink_glue_parse, diff -u linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_proto_sctp.c linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_proto_sctp.c --- linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_proto_sctp.c +++ linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_proto_sctp.c @@ -299,7 
+299,7 @@ pr_debug("Setting vtag %x for secondary conntrack\n", sh->vtag); ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag; - } else { + } else if (sch->type == SCTP_CID_SHUTDOWN_ACK) { /* If it is a shutdown ack OOTB packet, we expect a return shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */ pr_debug("Setting vtag %x for new conn OOTB\n", diff -u linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_proto_tcp.c linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_proto_tcp.c --- linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_proto_tcp.c +++ linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_proto_tcp.c @@ -870,6 +870,41 @@ return false; } +void nf_conntrack_tcp_set_closing(struct nf_conn *ct) +{ + enum tcp_conntrack old_state; + const unsigned int *timeouts; + u32 timeout; + + if (!nf_ct_is_confirmed(ct)) + return; + + spin_lock_bh(&ct->lock); + old_state = ct->proto.tcp.state; + ct->proto.tcp.state = TCP_CONNTRACK_CLOSE; + + if (old_state == TCP_CONNTRACK_CLOSE || + test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) { + spin_unlock_bh(&ct->lock); + return; + } + + timeouts = nf_ct_timeout_lookup(ct); + if (!timeouts) { + const struct nf_tcp_net *tn; + + tn = nf_tcp_pernet(nf_ct_net(ct)); + timeouts = tn->timeouts; + } + + timeout = timeouts[TCP_CONNTRACK_CLOSE]; + WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp); + + spin_unlock_bh(&ct->lock); + + nf_conntrack_event_cache(IPCT_PROTOINFO, ct); +} + static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state) { state->td_end = 0; diff -u linux-aws-5.15-5.15.0/net/netfilter/nf_flow_table_core.c linux-aws-5.15-5.15.0/net/netfilter/nf_flow_table_core.c --- linux-aws-5.15-5.15.0/net/netfilter/nf_flow_table_core.c +++ linux-aws-5.15-5.15.0/net/netfilter/nf_flow_table_core.c @@ -86,12 +86,22 @@ return 0; } +static struct dst_entry *nft_route_dst_fetch(struct nf_flow_route *route, + enum flow_offload_tuple_dir dir) +{ + struct dst_entry *dst = route->tuple[dir].dst; + + route->tuple[dir].dst = NULL; + + return dst; +} + static int flow_offload_fill_route(struct flow_offload *flow, - const struct nf_flow_route *route, + struct nf_flow_route *route, enum flow_offload_tuple_dir dir) { struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple; - struct dst_entry *dst = route->tuple[dir].dst; + struct dst_entry *dst = nft_route_dst_fetch(route, dir); int i, j = 0; switch (flow_tuple->l3proto) { @@ -121,12 +131,10 @@ ETH_ALEN); flow_tuple->out.ifidx = route->tuple[dir].out.ifindex; flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex; + dst_release(dst); break; case FLOW_OFFLOAD_XMIT_XFRM: case FLOW_OFFLOAD_XMIT_NEIGH: - if (!dst_hold_safe(route->tuple[dir].dst)) - return -1; - flow_tuple->dst_cache = dst; flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple); break; @@ -147,27 +155,12 @@ dst_release(flow->tuplehash[dir].tuple.dst_cache); } -int flow_offload_route_init(struct flow_offload *flow, - const struct nf_flow_route *route) +void flow_offload_route_init(struct flow_offload *flow, + struct nf_flow_route *route) { - int err; - - err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL); - if (err < 0) - return err; - - err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY); - if (err < 0) - goto err_route_reply; - + flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL); + flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY); flow->type = NF_FLOW_OFFLOAD_ROUTE; - - return 0; - -err_route_reply: - nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL); - - return err; } 
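The nf_flow_table_core.c rework above stops taking a second dst reference inside flow_offload_fill_route(); the reference acquired when the route was resolved is handed over instead, and the source pointer is cleared so a later error path cannot release it twice. The hand-off idiom in isolation, with an invented demo_route type:

#include <net/dst.h>

/* Stand-in for the route tuple; the point is the ownership transfer. */
struct demo_route {
	struct dst_entry *dst;	/* holds one reference taken at lookup time */
};

static struct dst_entry *demo_route_dst_fetch(struct demo_route *route)
{
	struct dst_entry *dst = route->dst;

	route->dst = NULL;	/* the reference now belongs to the caller */
	return dst;
}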
EXPORT_SYMBOL_GPL(flow_offload_route_init); diff -u linux-aws-5.15-5.15.0/net/netfilter/nf_tables_api.c linux-aws-5.15-5.15.0/net/netfilter/nf_tables_api.c --- linux-aws-5.15-5.15.0/net/netfilter/nf_tables_api.c +++ linux-aws-5.15-5.15.0/net/netfilter/nf_tables_api.c @@ -1152,7 +1152,7 @@ if (flags & ~NFT_TABLE_F_MASK) return -EOPNOTSUPP; - if (flags == ctx->table->flags) + if (flags == (ctx->table->flags & NFT_TABLE_F_MASK)) return 0; if ((nft_table_has_owner(ctx->table) && @@ -1193,6 +1193,7 @@ return 0; err_register_hooks: + ctx->table->flags |= NFT_TABLE_F_DORMANT; nft_trans_destroy(trans); return ret; } @@ -3465,6 +3466,8 @@ err = nft_chain_validate(&ctx, chain); if (err < 0) return err; + + cond_resched(); } return 0; @@ -4679,6 +4682,9 @@ if (!(flags & NFT_SET_TIMEOUT)) return -EINVAL; + if (flags & NFT_SET_ANONYMOUS) + return -EOPNOTSUPP; + err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &desc.timeout); if (err) return err; @@ -4687,6 +4693,10 @@ if (nla[NFTA_SET_GC_INTERVAL] != NULL) { if (!(flags & NFT_SET_TIMEOUT)) return -EINVAL; + + if (flags & NFT_SET_ANONYMOUS) + return -EOPNOTSUPP; + desc.gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL])); } diff -u linux-aws-5.15-5.15.0/net/netfilter/nfnetlink_queue.c linux-aws-5.15-5.15.0/net/netfilter/nfnetlink_queue.c --- linux-aws-5.15-5.15.0/net/netfilter/nfnetlink_queue.c +++ linux-aws-5.15-5.15.0/net/netfilter/nfnetlink_queue.c @@ -225,7 +225,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict) { - struct nf_ct_hook *ct_hook; + const struct nf_ct_hook *ct_hook; int err; if (verdict == NF_ACCEPT || @@ -395,8 +395,8 @@ struct net_device *indev; struct net_device *outdev; struct nf_conn *ct = NULL; - enum ip_conntrack_info ctinfo; - struct nfnl_ct_hook *nfnl_ct; + enum ip_conntrack_info ctinfo = 0; + const struct nfnl_ct_hook *nfnl_ct; bool csum_verify; struct lsmcontext context = { }; u32 seclen = 0; @@ -1123,7 +1123,7 @@ return 0; } -static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct, +static struct nf_conn *nfqnl_ct_parse(const struct nfnl_ct_hook *nfnl_ct, const struct nlmsghdr *nlh, const struct nlattr * const nfqa[], struct nf_queue_entry *entry, @@ -1190,11 +1190,11 @@ { struct nfnl_queue_net *q = nfnl_queue_pernet(info->net); u_int16_t queue_num = ntohs(info->nfmsg->res_id); + const struct nfnl_ct_hook *nfnl_ct; struct nfqnl_msg_verdict_hdr *vhdr; enum ip_conntrack_info ctinfo; struct nfqnl_instance *queue; struct nf_queue_entry *entry; - struct nfnl_ct_hook *nfnl_ct; struct nf_conn *ct = NULL; unsigned int verdict; int err; diff -u linux-aws-5.15-5.15.0/net/netfilter/nft_compat.c linux-aws-5.15-5.15.0/net/netfilter/nft_compat.c --- linux-aws-5.15-5.15.0/net/netfilter/nft_compat.c +++ linux-aws-5.15-5.15.0/net/netfilter/nft_compat.c @@ -358,10 +358,20 @@ if (ctx->family != NFPROTO_IPV4 && ctx->family != NFPROTO_IPV6 && + ctx->family != NFPROTO_INET && ctx->family != NFPROTO_BRIDGE && ctx->family != NFPROTO_ARP) return -EOPNOTSUPP; + ret = nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_IN) | + (1 << NF_INET_FORWARD) | + (1 << NF_INET_LOCAL_OUT) | + (1 << NF_INET_POST_ROUTING)); + if (ret) + return ret; + if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); @@ -607,10 +617,20 @@ if (ctx->family != NFPROTO_IPV4 && ctx->family != NFPROTO_IPV6 && + ctx->family != NFPROTO_INET && ctx->family != NFPROTO_BRIDGE && ctx->family != NFPROTO_ARP) return -EOPNOTSUPP; + ret = 
nft_chain_validate_hooks(ctx->chain, + (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_IN) | + (1 << NF_INET_FORWARD) | + (1 << NF_INET_LOCAL_OUT) | + (1 << NF_INET_POST_ROUTING)); + if (ret) + return ret; + if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); diff -u linux-aws-5.15-5.15.0/net/netfilter/nft_ct.c linux-aws-5.15-5.15.0/net/netfilter/nft_ct.c --- linux-aws-5.15-5.15.0/net/netfilter/nft_ct.c +++ linux-aws-5.15-5.15.0/net/netfilter/nft_ct.c @@ -1192,14 +1192,13 @@ switch (priv->l3num) { case NFPROTO_IPV4: case NFPROTO_IPV6: - if (priv->l3num != ctx->family) - return -EINVAL; + if (priv->l3num == ctx->family || ctx->family == NFPROTO_INET) + break; - fallthrough; - case NFPROTO_INET: - break; + return -EINVAL; + case NFPROTO_INET: /* tuple.src.l3num supports NFPROTO_IPV4/6 only */ default: - return -EOPNOTSUPP; + return -EAFNOSUPPORT; } priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]); diff -u linux-aws-5.15-5.15.0/net/netfilter/nft_flow_offload.c linux-aws-5.15-5.15.0/net/netfilter/nft_flow_offload.c --- linux-aws-5.15-5.15.0/net/netfilter/nft_flow_offload.c +++ linux-aws-5.15-5.15.0/net/netfilter/nft_flow_offload.c @@ -240,9 +240,14 @@ break; } + if (!dst_hold_safe(this_dst)) + return -ENOENT; + nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt)); - if (!other_dst) + if (!other_dst) { + dst_release(this_dst); return -ENOENT; + } nft_default_forward_path(route, this_dst, dir); nft_default_forward_path(route, other_dst, !dir); @@ -326,8 +331,7 @@ if (!flow) goto err_flow_alloc; - if (flow_offload_route_init(flow, &route) < 0) - goto err_flow_add; + flow_offload_route_init(flow, &route); if (tcph) { ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; @@ -338,12 +342,12 @@ if (ret < 0) goto err_flow_add; - dst_release(route.tuple[!dir].dst); return; err_flow_add: flow_offload_free(flow); err_flow_alloc: + dst_release(route.tuple[dir].dst); dst_release(route.tuple[!dir].dst); err_flow_route: clear_bit(IPS_OFFLOAD_BIT, &ct->status); diff -u linux-aws-5.15-5.15.0/net/netfilter/nft_set_pipapo.c linux-aws-5.15-5.15.0/net/netfilter/nft_set_pipapo.c --- linux-aws-5.15-5.15.0/net/netfilter/nft_set_pipapo.c +++ linux-aws-5.15-5.15.0/net/netfilter/nft_set_pipapo.c @@ -2240,8 +2240,6 @@ if (m) { rcu_barrier(); - nft_set_pipapo_match_destroy(ctx, set, m); - for_each_possible_cpu(cpu) pipapo_free_scratch(m, cpu); free_percpu(m->scratch); @@ -2253,8 +2251,7 @@ if (priv->clone) { m = priv->clone; - if (priv->dirty) - nft_set_pipapo_match_destroy(ctx, set, m); + nft_set_pipapo_match_destroy(ctx, set, m); for_each_possible_cpu(cpu) pipapo_free_scratch(priv->clone, cpu); diff -u linux-aws-5.15-5.15.0/net/netlink/af_netlink.c linux-aws-5.15-5.15.0/net/netlink/af_netlink.c --- linux-aws-5.15-5.15.0/net/netlink/af_netlink.c +++ linux-aws-5.15-5.15.0/net/netlink/af_netlink.c @@ -165,7 +165,7 @@ static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb, gfp_t gfp_mask) { - unsigned int len = skb_end_offset(skb); + unsigned int len = skb->len; struct sk_buff *new; new = alloc_skb(len, gfp_mask); diff -u linux-aws-5.15-5.15.0/net/netrom/af_netrom.c linux-aws-5.15-5.15.0/net/netrom/af_netrom.c --- linux-aws-5.15-5.15.0/net/netrom/af_netrom.c +++ linux-aws-5.15-5.15.0/net/netrom/af_netrom.c @@ -453,16 +453,16 @@ nr_init_timers(sk); nr->t1 = - msecs_to_jiffies(sysctl_netrom_transport_timeout); + msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout)); nr->t2 = - 
msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay); + msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay)); nr->n2 = - msecs_to_jiffies(sysctl_netrom_transport_maximum_tries); + msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries)); nr->t4 = - msecs_to_jiffies(sysctl_netrom_transport_busy_delay); + msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay)); nr->idle = - msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout); - nr->window = sysctl_netrom_transport_requested_window_size; + msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout)); + nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size); nr->bpqext = 1; nr->state = NR_STATE_0; @@ -954,7 +954,7 @@ * G8PZT's Xrouter which is sending packets with command type 7 * as an extension of the protocol. */ - if (sysctl_netrom_reset_circuit && + if (READ_ONCE(sysctl_netrom_reset_circuit) && (frametype != NR_RESET || flags != 0)) nr_transmit_reset(skb, 1); diff -u linux-aws-5.15-5.15.0/net/netrom/nr_subr.c linux-aws-5.15-5.15.0/net/netrom/nr_subr.c --- linux-aws-5.15-5.15.0/net/netrom/nr_subr.c +++ linux-aws-5.15-5.15.0/net/netrom/nr_subr.c @@ -182,7 +182,8 @@ *dptr++ = nr->my_id; *dptr++ = frametype; *dptr++ = nr->window; - if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser; + if (nr->bpqext) + *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser); break; case NR_DISCREQ: @@ -236,7 +237,7 @@ dptr[6] |= AX25_SSSID_SPARE; dptr += AX25_ADDR_LEN; - *dptr++ = sysctl_netrom_network_ttl_initialiser; + *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser); if (mine) { *dptr++ = 0; diff -u linux-aws-5.15-5.15.0/net/packet/af_packet.c linux-aws-5.15-5.15.0/net/packet/af_packet.c --- linux-aws-5.15-5.15.0/net/packet/af_packet.c +++ linux-aws-5.15-5.15.0/net/packet/af_packet.c @@ -1874,7 +1874,7 @@ */ spkt->spkt_family = dev->type; - strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); + strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device)); spkt->spkt_protocol = skb->protocol; /* @@ -3252,7 +3252,7 @@ int addr_len) { struct sock *sk = sock->sk; - char name[sizeof(uaddr->sa_data) + 1]; + char name[sizeof(uaddr->sa_data_min) + 1]; /* * Check legality @@ -3263,8 +3263,8 @@ /* uaddr->sa_data comes from the userspace, it's not guaranteed to be * zero-terminated. 
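The net/netrom hunks above wrap every read of the netrom sysctls in READ_ONCE() because those integers can be rewritten through /proc/sys while a connection is being set up, with no lock shared between writer and readers. The matching annotation pattern on an invented tunable (the store side, wherever it lives, would use WRITE_ONCE() on the same variable):

#include <linux/compiler.h>
#include <linux/jiffies.h>

/* Illustrative only: a tunable updated without a lock shared with its
 * readers. READ_ONCE()/WRITE_ONCE() keep each access single-copy atomic
 * and mark the intentional data race for KCSAN.
 */
static int demo_timeout_msecs = 120000;

static unsigned long demo_timeout_jiffies(void)
{
	return msecs_to_jiffies(READ_ONCE(demo_timeout_msecs));
}

static void demo_set_timeout(int msecs)
{
	WRITE_ONCE(demo_timeout_msecs, msecs);
}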
*/ - memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); - name[sizeof(uaddr->sa_data)] = 0; + memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min)); + name[sizeof(uaddr->sa_data_min)] = 0; return packet_do_bind(sk, name, 0, 0); } @@ -3536,11 +3536,11 @@ return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; - memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); + memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex)); if (dev) - strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); + strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min)); rcu_read_unlock(); return sizeof(*uaddr); @@ -3951,7 +3951,7 @@ if (val < 0 || val > 1) return -EINVAL; - po->prot_hook.ignore_outgoing = !!val; + WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val); return 0; } case PACKET_TX_HAS_OFF: @@ -4080,7 +4080,7 @@ 0); break; case PACKET_IGNORE_OUTGOING: - val = po->prot_hook.ignore_outgoing; + val = READ_ONCE(po->prot_hook.ignore_outgoing); break; case PACKET_ROLLOVER_STATS: if (!po->rollover) diff -u linux-aws-5.15-5.15.0/net/sched/Kconfig linux-aws-5.15-5.15.0/net/sched/Kconfig --- linux-aws-5.15-5.15.0/net/sched/Kconfig +++ linux-aws-5.15-5.15.0/net/sched/Kconfig @@ -45,23 +45,6 @@ comment "Queueing/Scheduling" -config NET_SCH_CBQ - tristate "Class Based Queueing (CBQ)" - help - Say Y here if you want to use the Class-Based Queueing (CBQ) packet - scheduling algorithm. This algorithm classifies the waiting packets - into a tree-like hierarchy of classes; the leaves of this tree are - in turn scheduled by separate algorithms. - - See the top of for more details. - - CBQ is a commonly used scheduler, so if you're unsure, you should - say Y here. Then say Y to all the queueing algorithms below that you - want to use as leaf disciplines. - - To compile this code as a module, choose M here: the - module will be called sch_cbq. - config NET_SCH_HTB tristate "Hierarchical Token Bucket (HTB)" help @@ -85,20 +68,6 @@ To compile this code as a module, choose M here: the module will be called sch_hfsc. -config NET_SCH_ATM - tristate "ATM Virtual Circuits (ATM)" - depends on ATM - help - Say Y here if you want to use the ATM pseudo-scheduler. This - provides a framework for invoking classifiers, which in turn - select classes of this queuing discipline. Each class maps - the flow(s) it is handling to a given virtual circuit. - - See the top of for more details. - - To compile this code as a module, choose M here: the - module will be called sch_atm. - config NET_SCH_PRIO tristate "Multi Band Priority Queueing (PRIO)" help @@ -217,17 +186,6 @@ To compile this code as a module, choose M here: the module will be called sch_gred. -config NET_SCH_DSMARK - tristate "Differentiated Services marker (DSMARK)" - help - Say Y if you want to schedule packets according to the - Differentiated Services architecture proposed in RFC 2475. - Technical information on this method, with pointers to associated - RFCs, is available at . - - To compile this code as a module, choose M here: the - module will be called sch_dsmark. 
- config NET_SCH_NETEM tristate "Network emulator (NETEM)" help diff -u linux-aws-5.15-5.15.0/net/sched/Makefile linux-aws-5.15-5.15.0/net/sched/Makefile --- linux-aws-5.15-5.15.0/net/sched/Makefile +++ linux-aws-5.15-5.15.0/net/sched/Makefile @@ -33,20 +33,17 @@ obj-$(CONFIG_NET_ACT_CT) += act_ct.o obj-$(CONFIG_NET_ACT_GATE) += act_gate.o obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o -obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o obj-$(CONFIG_NET_SCH_RED) += sch_red.o obj-$(CONFIG_NET_SCH_GRED) += sch_gred.o obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o -obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o obj-$(CONFIG_NET_SCH_SFB) += sch_sfb.o obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o obj-$(CONFIG_NET_SCH_MULTIQ) += sch_multiq.o -obj-$(CONFIG_NET_SCH_ATM) += sch_atm.o obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o obj-$(CONFIG_NET_SCH_PLUG) += sch_plug.o diff -u linux-aws-5.15-5.15.0/net/sched/sch_api.c linux-aws-5.15-5.15.0/net/sched/sch_api.c --- linux-aws-5.15-5.15.0/net/sched/sch_api.c +++ linux-aws-5.15-5.15.0/net/sched/sch_api.c @@ -1044,12 +1044,12 @@ if (parent == NULL) { unsigned int i, num_q, ingress; + struct netdev_queue *dev_queue; ingress = 0; num_q = dev->num_tx_queues; if ((q && q->flags & TCQ_F_INGRESS) || (new && new->flags & TCQ_F_INGRESS)) { - num_q = 1; ingress = 1; if (!dev_ingress_queue(dev)) { NL_SET_ERR_MSG(extack, "Device does not have an ingress queue"); @@ -1065,18 +1065,18 @@ if (new && new->ops->attach && !ingress) goto skip; - for (i = 0; i < num_q; i++) { - struct netdev_queue *dev_queue = dev_ingress_queue(dev); - - if (!ingress) + if (!ingress) { + for (i = 0; i < num_q; i++) { dev_queue = netdev_get_tx_queue(dev, i); + old = dev_graft_qdisc(dev_queue, new); - old = dev_graft_qdisc(dev_queue, new); - if (new && i > 0) - qdisc_refcount_inc(new); - - if (!ingress) + if (new && i > 0) + qdisc_refcount_inc(new); qdisc_put(old); + } + } else { + dev_queue = dev_ingress_queue(dev); + old = dev_graft_qdisc(dev_queue, new); } skip: reverted: --- linux-aws-5.15-5.15.0/net/sched/sch_atm.c +++ linux-aws-5.15-5.15.0.orig/net/sched/sch_atm.c @@ -397,13 +397,10 @@ result = tcf_classify(skb, NULL, fl, &res, true); if (result < 0) continue; - if (result == TC_ACT_SHOT) - goto done; - flow = (struct atm_flow_data *)res.class; if (!flow) flow = lookup_flow(sch, res.classid); + goto done; - goto drop; } } flow = NULL; @@ -579,6 +576,7 @@ pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p); list_for_each_entry(flow, &p->flows, list) qdisc_reset(flow->q); + sch->q.qlen = 0; } static void atm_tc_destroy(struct Qdisc *sch) reverted: --- linux-aws-5.15-5.15.0/net/sched/sch_cbq.c +++ linux-aws-5.15-5.15.0.orig/net/sched/sch_cbq.c @@ -231,8 +231,6 @@ result = tcf_classify(skb, NULL, fl, &res, true); if (!fl || result < 0) goto fallback; - if (result == TC_ACT_SHOT) - return NULL; cl = (void *)res.class; if (!cl) { @@ -253,6 +251,8 @@ case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; fallthrough; + case TC_ACT_SHOT: + return NULL; case TC_ACT_RECLASSIFY: return cbq_reclassify(skb, cl); } @@ -1053,6 +1053,7 @@ cl->cpriority = cl->priority; } } + sch->q.qlen = 0; } reverted: --- linux-aws-5.15-5.15.0/net/sched/sch_dsmark.c +++ linux-aws-5.15-5.15.0.orig/net/sched/sch_dsmark.c @@ -409,6 +409,8 @@ pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p); if 
(p->q) qdisc_reset(p->q); + sch->qstats.backlog = 0; + sch->q.qlen = 0; } static void dsmark_destroy(struct Qdisc *sch) diff -u linux-aws-5.15-5.15.0/net/sunrpc/addr.c linux-aws-5.15-5.15.0/net/sunrpc/addr.c --- linux-aws-5.15-5.15.0/net/sunrpc/addr.c +++ linux-aws-5.15-5.15.0/net/sunrpc/addr.c @@ -284,10 +284,10 @@ } if (snprintf(portbuf, sizeof(portbuf), - ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf)) + ".%u.%u", port >> 8, port & 0xff) >= (int)sizeof(portbuf)) return NULL; - if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf)) + if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) >= sizeof(addrbuf)) return NULL; return kstrdup(addrbuf, gfp_flags); diff -u linux-aws-5.15-5.15.0/net/tls/tls_device.c linux-aws-5.15-5.15.0/net/tls/tls_device.c --- linux-aws-5.15-5.15.0/net/tls/tls_device.c +++ linux-aws-5.15-5.15.0/net/tls/tls_device.c @@ -950,11 +950,9 @@ tls_ctx->rx.rec_seq, rxm->full_len, is_encrypted, is_decrypted); - ctx->sw.decrypted |= is_decrypted; - if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) { if (likely(is_encrypted || is_decrypted)) - return 0; + return is_decrypted; /* After tls_device_down disables the offload, the next SKB will * likely have initial fragments decrypted, and final ones not @@ -969,7 +967,7 @@ */ if (is_decrypted) { ctx->resync_nh_reset = 1; - return 0; + return is_decrypted; } if (is_encrypted) { tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb); diff -u linux-aws-5.15-5.15.0/net/tls/tls_main.c linux-aws-5.15-5.15.0/net/tls/tls_main.c --- linux-aws-5.15-5.15.0/net/tls/tls_main.c +++ linux-aws-5.15-5.15.0/net/tls/tls_main.c @@ -805,7 +805,7 @@ } } -static int tls_get_info(const struct sock *sk, struct sk_buff *skb) +static int tls_get_info(struct sock *sk, struct sk_buff *skb) { u16 version, cipher_type; struct tls_context *ctx; diff -u linux-aws-5.15-5.15.0/net/tls/tls_sw.c linux-aws-5.15-5.15.0/net/tls/tls_sw.c --- linux-aws-5.15-5.15.0/net/tls/tls_sw.c +++ linux-aws-5.15-5.15.0/net/tls/tls_sw.c @@ -44,6 +44,11 @@ #include #include +struct tls_decrypt_arg { + bool zc; + bool async; +}; + noinline void tls_err_abort(struct sock *sk, int err) { WARN_ON_ONCE(err >= 0); @@ -128,10 +133,10 @@ return __skb_nsg(skb, offset, len, 0); } -static int padding_length(struct tls_sw_context_rx *ctx, - struct tls_prot_info *prot, struct sk_buff *skb) +static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb) { struct strp_msg *rxm = strp_msg(skb); + struct tls_msg *tlm = tls_msg(skb); int sub = 0; /* Determine zero-padding length */ @@ -153,7 +158,7 @@ sub++; back++; } - ctx->control = content_type; + tlm->control = content_type; } return sub; } @@ -169,7 +174,17 @@ struct scatterlist *sg; struct sk_buff *skb; unsigned int pages; - int pending; + + /* If requests get too backlogged crypto API returns -EBUSY and calls + * ->complete(-EINPROGRESS) immediately followed by ->complete(0) + * to make waiting for backlog to flush with crypto_wait_req() easier. + * First wait converts -EBUSY -> -EINPROGRESS, and the second one + * -EINPROGRESS -> 0. + * We have a single struct crypto_async_request per direction, this + * scheme doesn't help us, so just ignore the first ->complete(). 
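The tls_sw.c comment above describes the crypto API convention the rework relies on: a backlogged request returns -EBUSY and later signals ->complete(-EINPROGRESS) followed by ->complete(0). For a purely synchronous caller, the stock way to absorb both cases is crypto_wait_req() with crypto_req_done(), sketched here on an AEAD request that is assumed to be already set up; demo_encrypt_sync() is not part of the patch.

#include <crypto/aead.h>
#include <linux/crypto.h>

/* Illustrative only: synchronous submission of an AEAD request. The wait
 * helper converts -EINPROGRESS and -EBUSY into the final completion status.
 */
static int demo_encrypt_sync(struct aead_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	return crypto_wait_req(crypto_aead_encrypt(req), &wait);
}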
+ */ + if (err == -EINPROGRESS) + return; skb = (struct sk_buff *)req->data; tls_ctx = tls_get_ctx(skb->sk); @@ -187,7 +202,7 @@ struct strp_msg *rxm = strp_msg(skb); int pad; - pad = padding_length(ctx, prot, skb); + pad = padding_length(prot, skb); if (pad < 0) { ctx->async_wait.err = pad; tls_err_abort(skb->sk, pad); @@ -216,12 +231,17 @@ kfree(aead_req); - spin_lock_bh(&ctx->decrypt_compl_lock); - pending = atomic_dec_return(&ctx->decrypt_pending); - - if (!pending && ctx->async_notify) + if (atomic_dec_and_test(&ctx->decrypt_pending)) complete(&ctx->async_wait.completion); - spin_unlock_bh(&ctx->decrypt_compl_lock); +} + +static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx) +{ + if (!atomic_dec_and_test(&ctx->decrypt_pending)) + crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + atomic_inc(&ctx->decrypt_pending); + + return ctx->async_wait.err; } static int tls_do_decryption(struct sock *sk, @@ -231,7 +251,7 @@ char *iv_recv, size_t data_len, struct aead_request *aead_req, - bool async) + struct tls_decrypt_arg *darg) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; @@ -244,7 +264,7 @@ data_len + prot->tag_size, (u8 *)iv_recv); - if (async) { + if (darg->async) { /* Using skb->sk to push sk through to crypto async callback * handler. This allows propagating errors up to the socket * if needed. It _must_ be cleared in the async handler @@ -263,15 +283,19 @@ } ret = crypto_aead_decrypt(aead_req); + if (ret == -EBUSY) { + ret = tls_decrypt_async_wait(ctx); + ret = ret ?: -EINPROGRESS; + } if (ret == -EINPROGRESS) { - if (async) - return ret; + if (darg->async) + return 0; ret = crypto_wait_req(ret, &ctx->async_wait); - } - - if (async) + } else if (darg->async) { atomic_dec(&ctx->decrypt_pending); + } + darg->async = false; return ret; } @@ -443,8 +467,9 @@ struct scatterlist *sge; struct sk_msg *msg_en; struct tls_rec *rec; - bool ready = false; - int pending; + + if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */ + return; rec = container_of(aead_req, struct tls_rec, aead_req); msg_en = &rec->msg_encrypted; @@ -475,23 +500,25 @@ /* If received record is at head of tx_list, schedule tx */ first_rec = list_first_entry(&ctx->tx_list, struct tls_rec, list); - if (rec == first_rec) - ready = true; + if (rec == first_rec) { + /* Schedule the transmission */ + if (!test_and_set_bit(BIT_TX_SCHEDULED, + &ctx->tx_bitmask)) + schedule_delayed_work(&ctx->tx_work.work, 1); + } } - spin_lock_bh(&ctx->encrypt_compl_lock); - pending = atomic_dec_return(&ctx->encrypt_pending); - - if (!pending && ctx->async_notify) + if (atomic_dec_and_test(&ctx->encrypt_pending)) complete(&ctx->async_wait.completion); - spin_unlock_bh(&ctx->encrypt_compl_lock); +} - if (!ready) - return; +static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx) +{ + if (!atomic_dec_and_test(&ctx->encrypt_pending)) + crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + atomic_inc(&ctx->encrypt_pending); - /* Schedule the transmission */ - if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) - schedule_delayed_work(&ctx->tx_work.work, 1); + return ctx->async_wait.err; } static int tls_do_encryption(struct sock *sk, @@ -536,6 +563,10 @@ atomic_inc(&ctx->encrypt_pending); rc = crypto_aead_encrypt(aead_req); + if (rc == -EBUSY) { + rc = tls_encrypt_async_wait(ctx); + rc = rc ?: -EINPROGRESS; + } if (!rc || rc != -EINPROGRESS) { atomic_dec(&ctx->encrypt_pending); sge->offset -= prot->prepend_size; @@ -944,7 +975,6 @@ int num_zc = 0; int orig_size; 
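
The tls_sw.c changes above drop the decrypt_compl_lock/async_notify bookkeeping in favour of a pending counter that starts at 1: each submitted request increments it, each completion decrements it and signals only when the count reaches zero, and the waiter drops the bias, sleeps if work is still outstanding, then restores the bias (the callback also ignores the extra -EINPROGRESS completion generated for backlogged requests). A standalone pthread sketch of the same "counter biased by one" idea follows; all names are invented, and a condition variable stands in for crypto_wait_req():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* pending starts at 1 (the bias); jobs add 1 on submit and subtract 1
 * on completion, signalling only when the count hits 0, i.e. after the
 * waiter has already dropped the bias. */
struct pending_wait {
        atomic_int pending;
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int complete;
};

static struct pending_wait pw = {
        .pending = 1,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
};

static void job_done(void)
{
        if (atomic_fetch_sub(&pw.pending, 1) == 1) {    /* reached zero */
                pthread_mutex_lock(&pw.lock);
                pw.complete = 1;
                pthread_cond_signal(&pw.cond);
                pthread_mutex_unlock(&pw.lock);
        }
}

static void *worker(void *arg)
{
        usleep(10000);                  /* pretend to decrypt a record */
        job_done();
        return NULL;
}

int main(void)
{
        pthread_t t[4];
        int i;

        for (i = 0; i < 4; i++) {
                atomic_fetch_add(&pw.pending, 1);       /* submit */
                pthread_create(&t[i], NULL, worker, NULL);
        }

        /* Waiter: drop the bias; if jobs remain, sleep until the last
         * completion signals, then restore the bias so the structure
         * can be reused, as tls_decrypt_async_wait() does. */
        if (atomic_fetch_sub(&pw.pending, 1) != 1) {
                pthread_mutex_lock(&pw.lock);
                while (!pw.complete)
                        pthread_cond_wait(&pw.cond, &pw.lock);
                pw.complete = 0;
                pthread_mutex_unlock(&pw.lock);
        }
        atomic_fetch_add(&pw.pending, 1);               /* restore bias */

        for (i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        printf("all pending work flushed\n");
        return 0;
}
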
int ret = 0; - int pending; if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_CMSG_COMPAT)) @@ -1113,24 +1143,12 @@ if (!num_async) { goto send_end; } else if (num_zc) { - /* Wait for pending encryptions to get completed */ - spin_lock_bh(&ctx->encrypt_compl_lock); - ctx->async_notify = true; - - pending = atomic_read(&ctx->encrypt_pending); - spin_unlock_bh(&ctx->encrypt_compl_lock); - if (pending) - crypto_wait_req(-EINPROGRESS, &ctx->async_wait); - else - reinit_completion(&ctx->async_wait.completion); + int err; - /* There can be no concurrent accesses, since we have no - * pending encrypt operations - */ - WRITE_ONCE(ctx->async_notify, false); - - if (ctx->async_wait.err) { - ret = ctx->async_wait.err; + /* Wait for pending encryptions to get completed */ + err = tls_encrypt_async_wait(ctx); + if (err) { + ret = err; copied = 0; } } @@ -1348,15 +1366,14 @@ return skb; } -static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from, +static int tls_setup_from_iter(struct iov_iter *from, int length, int *pages_used, - unsigned int *size_used, struct scatterlist *to, int to_max_pages) { int rc = 0, i = 0, num_elem = *pages_used, maxpages; struct page *pages[MAX_SKB_FRAGS]; - unsigned int size = *size_used; + unsigned int size = 0; ssize_t copied, use; size_t offset; @@ -1399,8 +1416,7 @@ sg_mark_end(&to[num_elem - 1]); out: if (rc) - iov_iter_revert(from, size - *size_used); - *size_used = size; + iov_iter_revert(from, size); *pages_used = num_elem; return rc; @@ -1417,12 +1433,13 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb, struct iov_iter *out_iov, struct scatterlist *out_sg, - int *chunk, bool *zc, bool async) + struct tls_decrypt_arg *darg) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); struct tls_prot_info *prot = &tls_ctx->prot_info; struct strp_msg *rxm = strp_msg(skb); + struct tls_msg *tlm = tls_msg(skb); int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0; struct aead_request *aead_req; struct sk_buff *unused; @@ -1433,7 +1450,7 @@ prot->tail_size; int iv_offset = 0; - if (*zc && (out_iov || out_sg)) { + if (darg->zc && (out_iov || out_sg)) { if (out_iov) n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1; else @@ -1442,7 +1459,7 @@ rxm->full_len - prot->prepend_size); } else { n_sgout = 0; - *zc = false; + darg->zc = false; n_sgin = skb_cow_data(skb, 0, &unused); } @@ -1500,7 +1517,7 @@ /* Prepare AAD */ tls_make_aad(aad, rxm->full_len - prot->overhead_size + prot->tail_size, - tls_ctx->rx.rec_seq, ctx->control, prot); + tls_ctx->rx.rec_seq, tlm->control, prot); /* Prepare sgin */ sg_init_table(sgin, n_sgin); @@ -1518,9 +1535,8 @@ sg_init_table(sgout, n_sgout); sg_set_buf(&sgout[0], aad, prot->aad_size); - *chunk = 0; - err = tls_setup_from_iter(sk, out_iov, data_len, - &pages, chunk, &sgout[1], + err = tls_setup_from_iter(out_iov, data_len, + &pages, &sgout[1], (n_sgout - 1)); if (err < 0) goto fallback_to_reg_recv; @@ -1533,15 +1549,14 @@ fallback_to_reg_recv: sgout = sgin; pages = 0; - *chunk = data_len; - *zc = false; + darg->zc = false; } /* Prepare and submit AEAD request */ err = tls_do_decryption(sk, skb, sgin, sgout, iv, - data_len, aead_req, async); - if (err == -EINPROGRESS) - return err; + data_len, aead_req, darg); + if (darg->async) + return 0; /* Release the pages in case iov was mapped to pages */ for (; pages > 0; pages--) @@ -1552,63 +1567,63 @@ } static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb, - struct iov_iter *dest, int 
*chunk, bool *zc, - bool async) + struct iov_iter *dest, + struct tls_decrypt_arg *darg) { struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); struct tls_prot_info *prot = &tls_ctx->prot_info; struct strp_msg *rxm = strp_msg(skb); - int pad, err = 0; + struct tls_msg *tlm = tls_msg(skb); + int pad, err; - if (!ctx->decrypted) { - if (tls_ctx->rx_conf == TLS_HW) { - err = tls_device_decrypted(sk, tls_ctx, skb, rxm); - if (err < 0) - return err; - } + if (tlm->decrypted) { + darg->zc = false; + darg->async = false; + return 0; + } - /* Still not decrypted after tls_device */ - if (!ctx->decrypted) { - err = decrypt_internal(sk, skb, dest, NULL, chunk, zc, - async); - if (err < 0) { - if (err == -EINPROGRESS) - tls_advance_record_sn(sk, prot, - &tls_ctx->rx); - else if (err == -EBADMSG) - TLS_INC_STATS(sock_net(sk), - LINUX_MIB_TLSDECRYPTERROR); - return err; - } - } else { - *zc = false; + if (tls_ctx->rx_conf == TLS_HW) { + err = tls_device_decrypted(sk, tls_ctx, skb, rxm); + if (err < 0) + return err; + if (err > 0) { + tlm->decrypted = 1; + darg->zc = false; + darg->async = false; + goto decrypt_done; } + } - pad = padding_length(ctx, prot, skb); - if (pad < 0) - return pad; - - rxm->full_len -= pad; - rxm->offset += prot->prepend_size; - rxm->full_len -= prot->overhead_size; - tls_advance_record_sn(sk, prot, &tls_ctx->rx); - ctx->decrypted = 1; - ctx->saved_data_ready(sk); - } else { - *zc = false; + err = decrypt_internal(sk, skb, dest, NULL, darg); + if (err < 0) { + if (err == -EBADMSG) + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR); + return err; } + if (darg->async) + goto decrypt_next; - return err; +decrypt_done: + pad = padding_length(prot, skb); + if (pad < 0) + return pad; + + rxm->full_len -= pad; + rxm->offset += prot->prepend_size; + rxm->full_len -= prot->overhead_size; + tlm->decrypted = 1; +decrypt_next: + tls_advance_record_sn(sk, prot, &tls_ctx->rx); + + return 0; } int decrypt_skb(struct sock *sk, struct sk_buff *skb, struct scatterlist *sgout) { - bool zc = true; - int chunk; + struct tls_decrypt_arg darg = { .zc = true, }; - return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false); + return decrypt_internal(sk, skb, NULL, sgout, &darg); } static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb, @@ -1635,6 +1650,29 @@ return true; } +static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm, + u8 *control) +{ + int err; + + if (!*control) { + *control = tlm->control; + if (!*control) + return -EBADMSG; + + err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE, + sizeof(*control), control); + if (*control != TLS_RECORD_TYPE_DATA) { + if (err || msg->msg_flags & MSG_CTRUNC) + return -EIO; + } + } else if (*control != tlm->control) { + return 0; + } + + return 1; +} + /* This function traverses the rx_list in tls receive context to copies the * decrypted records into the buffer provided by caller zero copy is not * true. 
Further, the records are removed from the rx_list if it is not a peek @@ -1643,31 +1681,23 @@ static int process_rx_list(struct tls_sw_context_rx *ctx, struct msghdr *msg, u8 *control, - bool *cmsg, size_t skip, size_t len, bool zc, bool is_peek) { struct sk_buff *skb = skb_peek(&ctx->rx_list); - u8 ctrl = *control; - u8 msgc = *cmsg; struct tls_msg *tlm; ssize_t copied = 0; - - /* Set the record type in 'control' if caller didn't pass it */ - if (!ctrl && skb) { - tlm = tls_msg(skb); - ctrl = tlm->control; - } + int err; while (skip && skb) { struct strp_msg *rxm = strp_msg(skb); tlm = tls_msg(skb); - /* Cannot process a record of different type */ - if (ctrl != tlm->control) - return 0; + err = tls_record_content_type(msg, tlm, control); + if (err <= 0) + return err; if (skip < rxm->full_len) break; @@ -1683,27 +1713,12 @@ tlm = tls_msg(skb); - /* Cannot process a record of different type */ - if (ctrl != tlm->control) - return 0; - - /* Set record type if not already done. For a non-data record, - * do not proceed if record type could not be copied. - */ - if (!msgc) { - int cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE, - sizeof(ctrl), &ctrl); - msgc = true; - if (ctrl != TLS_RECORD_TYPE_DATA) { - if (cerr || msg->msg_flags & MSG_CTRUNC) - return -EIO; - - *cmsg = msgc; - } - } + err = tls_record_content_type(msg, tlm, control); + if (err <= 0) + return err; if (!zc || (rxm->full_len - skip) > len) { - int err = skb_copy_datagram_msg(skb, rxm->offset + skip, + err = skb_copy_datagram_msg(skb, rxm->offset + skip, msg, chunk); if (err < 0) return err; @@ -1740,10 +1755,63 @@ skb = next_skb; } - *control = ctrl; return copied; } +static long tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx, + bool nonblock) +{ + long timeo; + int err; + + lock_sock(sk); + + timeo = sock_rcvtimeo(sk, nonblock); + + while (unlikely(ctx->reader_present)) { + DEFINE_WAIT_FUNC(wait, woken_wake_function); + + ctx->reader_contended = 1; + + add_wait_queue(&ctx->wq, &wait); + sk_wait_event(sk, &timeo, + !READ_ONCE(ctx->reader_present), &wait); + remove_wait_queue(&ctx->wq, &wait); + + if (timeo <= 0) { + err = -EAGAIN; + goto err_unlock; + } + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + goto err_unlock; + } + } + + WRITE_ONCE(ctx->reader_present, 1); + + return timeo; + +err_unlock: + release_sock(sk); + return err; +} + +static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx) +{ + if (unlikely(ctx->reader_contended)) { + if (wq_has_sleeper(&ctx->wq)) + wake_up(&ctx->wq); + else + ctx->reader_contended = 0; + + WARN_ON_ONCE(!ctx->reader_present); + } + + WRITE_ONCE(ctx->reader_present, 0); + release_sock(sk); +} + int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, @@ -1761,14 +1829,12 @@ struct tls_msg *tlm; struct sk_buff *skb; ssize_t copied = 0; - bool cmsg = false; + bool async = false; int target, err = 0; long timeo; bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); bool is_peek = flags & MSG_PEEK; bool bpf_strp_enabled; - int num_async = 0; - int pending; flags |= nonblock; @@ -1776,33 +1842,35 @@ return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR); psock = sk_psock_get(sk); - lock_sock(sk); + timeo = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT); + if (timeo < 0) + return timeo; bpf_strp_enabled = sk_psock_strp_enabled(psock); + /* If crypto failed the connection is broken */ + err = ctx->async_wait.err; + if (err) + goto end; + /* Process pending decrypted records. 
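
The tls_rx_reader_lock()/tls_rx_reader_unlock() pair added above serializes concurrent receive paths with a reader_present flag, a wait queue, and a reader_contended hint so the uncontended fast path never touches the wait queue. A rough userspace analogue using a mutex and condition variable is sketched below; the names are invented, and the socket lock plus rcvtimeo/signal handling folded into the kernel version is omitted:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* One reader at a time; the "contended" hint means the unlock path only
 * signals when someone actually slept, loosely mirroring the
 * reader_present/reader_contended pair in the hunk above. */
struct rx_reader {
        pthread_mutex_t lock;
        pthread_cond_t wq;
        bool present;
        bool contended;
};

static void rx_reader_lock(struct rx_reader *r)
{
        pthread_mutex_lock(&r->lock);
        while (r->present) {
                r->contended = true;
                pthread_cond_wait(&r->wq, &r->lock);
        }
        r->present = true;
        pthread_mutex_unlock(&r->lock);
}

static void rx_reader_unlock(struct rx_reader *r)
{
        pthread_mutex_lock(&r->lock);
        r->present = false;
        if (r->contended) {
                r->contended = false;
                pthread_cond_signal(&r->wq);
        }
        pthread_mutex_unlock(&r->lock);
}

int main(void)
{
        struct rx_reader r = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .wq = PTHREAD_COND_INITIALIZER,
        };

        rx_reader_lock(&r);
        /* ... read and copy records ... */
        rx_reader_unlock(&r);
        printf("reader section done\n");
        return 0;
}
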
It must be non-zero-copy */ - err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false, - is_peek); + err = process_rx_list(ctx, msg, &control, 0, len, false, is_peek); if (err < 0) { tls_err_abort(sk, err); goto end; - } else { - copied = err; } - if (len <= copied) - goto recv_end; + copied = err; + if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA)) + goto end; target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); len = len - copied; - timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); + decrypted = 0; while (len && (decrypted + copied < target || ctx->recv_pkt)) { + struct tls_decrypt_arg darg = {}; bool retain_skb = false; - bool zc = false; - int to_decrypt; - int chunk = 0; - bool async_capable; - bool async = false; + int to_decrypt, chunk; skb = tls_wait_data(sk, psock, flags & MSG_DONTWAIT, timeo, &err); if (!skb) { @@ -1817,43 +1885,32 @@ } } goto recv_end; - } else { - tlm = tls_msg(skb); - if (prot->version == TLS_1_3_VERSION) - tlm->control = 0; - else - tlm->control = ctx->control; } rxm = strp_msg(skb); + tlm = tls_msg(skb); to_decrypt = rxm->full_len - prot->overhead_size; if (to_decrypt <= len && !is_kvec && !is_peek && - ctx->control == TLS_RECORD_TYPE_DATA && + tlm->control == TLS_RECORD_TYPE_DATA && prot->version != TLS_1_3_VERSION && !bpf_strp_enabled) - zc = true; + darg.zc = true; /* Do not use async mode if record is non-data */ - if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled) - async_capable = ctx->async_capable; + if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled) + darg.async = ctx->async_capable; else - async_capable = false; + darg.async = false; - err = decrypt_skb_update(sk, skb, &msg->msg_iter, - &chunk, &zc, async_capable); - if (err < 0 && err != -EINPROGRESS) { + err = decrypt_skb_update(sk, skb, &msg->msg_iter, &darg); + if (err < 0) { tls_err_abort(sk, -EBADMSG); goto recv_end; } - if (err == -EINPROGRESS) { - async = true; - num_async++; - } else if (prot->version == TLS_1_3_VERSION) { - tlm->control = ctx->control; - } + async |= darg.async; /* If the type of records being processed is not known yet, * set it to record type just dequeued. If it is already known, @@ -1862,30 +1919,19 @@ * is known just after record is dequeued from stream parser. * For tls1.3, we disable async. 
*/ - - if (!control) - control = tlm->control; - else if (control != tlm->control) + err = tls_record_content_type(msg, tlm, &control); + if (err <= 0) goto recv_end; - if (!cmsg) { - int cerr; - - cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE, - sizeof(control), &control); - cmsg = true; - if (control != TLS_RECORD_TYPE_DATA) { - if (cerr || msg->msg_flags & MSG_CTRUNC) { - err = -EIO; - goto recv_end; - } - } - } - - if (async) + if (async) { + /* TLS 1.2-only, to_decrypt must be text length */ + chunk = min_t(int, to_decrypt, len); goto pick_next_record; + } + /* TLS 1.3 may have updated the length by more than overhead */ + chunk = rxm->full_len; - if (!zc) { + if (!darg.zc) { if (bpf_strp_enabled) { err = sk_psock_tls_strp_read(psock, skb); if (err != __SK_PASS) { @@ -1899,11 +1945,9 @@ } } - if (rxm->full_len > len) { + if (chunk > len) { retain_skb = true; chunk = len; - } else { - chunk = rxm->full_len; } err = skb_copy_datagram_msg(skb, rxm->offset, @@ -1918,9 +1962,6 @@ } pick_next_record: - if (chunk > len) - chunk = len; - decrypted += chunk; len -= chunk; @@ -1944,36 +1985,25 @@ } recv_end: - if (num_async) { + if (async) { + int ret; + /* Wait for all previously submitted records to be decrypted */ - spin_lock_bh(&ctx->decrypt_compl_lock); - ctx->async_notify = true; - pending = atomic_read(&ctx->decrypt_pending); - spin_unlock_bh(&ctx->decrypt_compl_lock); - if (pending) { - err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); - if (err) { - /* one of async decrypt failed */ - tls_err_abort(sk, err); - copied = 0; - decrypted = 0; - goto end; - } - } else { - reinit_completion(&ctx->async_wait.completion); - } + ret = tls_decrypt_async_wait(ctx); - /* There can be no concurrent accesses, since we have no - * pending decrypt operations - */ - WRITE_ONCE(ctx->async_notify, false); + if (ret) { + if (err >= 0 || err == -EINPROGRESS) + err = ret; + decrypted = 0; + goto end; + } /* Drain records from the rx_list & copy if required */ if (is_peek || is_kvec) - err = process_rx_list(ctx, msg, &control, &cmsg, copied, + err = process_rx_list(ctx, msg, &control, copied, decrypted, false, is_peek); else - err = process_rx_list(ctx, msg, &control, &cmsg, 0, + err = process_rx_list(ctx, msg, &control, 0, decrypted, true, is_peek); if (err < 0) { tls_err_abort(sk, err); @@ -1985,7 +2015,7 @@ copied += decrypted; end: - release_sock(sk); + tls_rx_reader_unlock(sk, ctx); if (psock) sk_psock_put(sk, psock); return copied ? 
: err; @@ -1999,42 +2029,45 @@ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); struct strp_msg *rxm = NULL; struct sock *sk = sock->sk; + struct tls_msg *tlm; struct sk_buff *skb; ssize_t copied = 0; bool from_queue; int err = 0; long timeo; int chunk; - bool zc = false; - lock_sock(sk); - - timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK); + timeo = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK); + if (timeo < 0) + return timeo; from_queue = !skb_queue_empty(&ctx->rx_list); if (from_queue) { skb = __skb_dequeue(&ctx->rx_list); } else { + struct tls_decrypt_arg darg = {}; + skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err); if (!skb) goto splice_read_end; - err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false); + err = decrypt_skb_update(sk, skb, NULL, &darg); if (err < 0) { tls_err_abort(sk, -EBADMSG); goto splice_read_end; } } + rxm = strp_msg(skb); + tlm = tls_msg(skb); + /* splice does not support reading control messages */ - if (ctx->control != TLS_RECORD_TYPE_DATA) { + if (tlm->control != TLS_RECORD_TYPE_DATA) { err = -EINVAL; goto splice_read_end; } - rxm = strp_msg(skb); - chunk = min_t(unsigned int, rxm->full_len, len); copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags); if (copied < 0) @@ -2053,7 +2086,7 @@ } splice_read_end: - release_sock(sk); + tls_rx_reader_unlock(sk, ctx); return copied ? : err; } @@ -2077,10 +2110,10 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb) { struct tls_context *tls_ctx = tls_get_ctx(strp->sk); - struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); struct tls_prot_info *prot = &tls_ctx->prot_info; char header[TLS_HEADER_SIZE + MAX_IV_SIZE]; struct strp_msg *rxm = strp_msg(skb); + struct tls_msg *tlm = tls_msg(skb); size_t cipher_overhead; size_t data_len = 0; int ret; @@ -2101,7 +2134,7 @@ if (ret < 0) goto read_failure; - ctx->control = header[0]; + tlm->control = header[0]; data_len = ((header[4] & 0xFF) | (header[3] << 8)); @@ -2141,8 +2174,9 @@ { struct tls_context *tls_ctx = tls_get_ctx(strp->sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); + struct tls_msg *tlm = tls_msg(skb); - ctx->decrypted = 0; + tlm->decrypted = 0; ctx->recv_pkt = skb; strp_pause(strp); @@ -2180,16 +2214,9 @@ struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); struct tls_rec *rec, *tmp; - int pending; /* Wait for any pending async encryptions to complete */ - spin_lock_bh(&ctx->encrypt_compl_lock); - ctx->async_notify = true; - pending = atomic_read(&ctx->encrypt_pending); - spin_unlock_bh(&ctx->encrypt_compl_lock); - - if (pending) - crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + tls_encrypt_async_wait(ctx); tls_tx_records(sk, -1); @@ -2327,6 +2354,47 @@ strp_check_rcv(&rx_ctx->strp); } +static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk) +{ + struct tls_sw_context_tx *sw_ctx_tx; + + if (!ctx->priv_ctx_tx) { + sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); + if (!sw_ctx_tx) + return NULL; + } else { + sw_ctx_tx = ctx->priv_ctx_tx; + } + + crypto_init_wait(&sw_ctx_tx->async_wait); + atomic_set(&sw_ctx_tx->encrypt_pending, 1); + INIT_LIST_HEAD(&sw_ctx_tx->tx_list); + INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); + sw_ctx_tx->tx_work.sk = sk; + + return sw_ctx_tx; +} + +static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx) +{ + struct tls_sw_context_rx *sw_ctx_rx; + + if (!ctx->priv_ctx_rx) { + sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); + 
if (!sw_ctx_rx) + return NULL; + } else { + sw_ctx_rx = ctx->priv_ctx_rx; + } + + crypto_init_wait(&sw_ctx_rx->async_wait); + atomic_set(&sw_ctx_rx->decrypt_pending, 1); + init_waitqueue_head(&sw_ctx_rx->wq); + skb_queue_head_init(&sw_ctx_rx->rx_list); + + return sw_ctx_rx; +} + int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) { struct tls_context *tls_ctx = tls_get_ctx(sk); @@ -2353,46 +2421,22 @@ } if (tx) { - if (!ctx->priv_ctx_tx) { - sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); - if (!sw_ctx_tx) { - rc = -ENOMEM; - goto out; - } - ctx->priv_ctx_tx = sw_ctx_tx; - } else { - sw_ctx_tx = - (struct tls_sw_context_tx *)ctx->priv_ctx_tx; - } - } else { - if (!ctx->priv_ctx_rx) { - sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); - if (!sw_ctx_rx) { - rc = -ENOMEM; - goto out; - } - ctx->priv_ctx_rx = sw_ctx_rx; - } else { - sw_ctx_rx = - (struct tls_sw_context_rx *)ctx->priv_ctx_rx; - } - } + ctx->priv_ctx_tx = init_ctx_tx(ctx, sk); + if (!ctx->priv_ctx_tx) + return -ENOMEM; - if (tx) { - crypto_init_wait(&sw_ctx_tx->async_wait); - spin_lock_init(&sw_ctx_tx->encrypt_compl_lock); + sw_ctx_tx = ctx->priv_ctx_tx; crypto_info = &ctx->crypto_send.info; cctx = &ctx->tx; aead = &sw_ctx_tx->aead_send; - INIT_LIST_HEAD(&sw_ctx_tx->tx_list); - INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); - sw_ctx_tx->tx_work.sk = sk; } else { - crypto_init_wait(&sw_ctx_rx->async_wait); - spin_lock_init(&sw_ctx_rx->decrypt_compl_lock); + ctx->priv_ctx_rx = init_ctx_rx(ctx); + if (!ctx->priv_ctx_rx) + return -ENOMEM; + + sw_ctx_rx = ctx->priv_ctx_rx; crypto_info = &ctx->crypto_recv.info; cctx = &ctx->rx; - skb_queue_head_init(&sw_ctx_rx->rx_list); aead = &sw_ctx_rx->aead_recv; } diff -u linux-aws-5.15-5.15.0/net/unix/garbage.c linux-aws-5.15-5.15.0/net/unix/garbage.c --- linux-aws-5.15-5.15.0/net/unix/garbage.c +++ linux-aws-5.15-5.15.0/net/unix/garbage.c @@ -198,7 +198,7 @@ if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && !READ_ONCE(gc_in_progress)) unix_gc(); - wait_event(unix_gc_wait, gc_in_progress == false); + wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress)); } /* The external entry point: unix_gc() */ @@ -284,9 +284,17 @@ * which are creating the cycle(s). */ skb_queue_head_init(&hitlist); - list_for_each_entry(u, &gc_candidates, link) + list_for_each_entry(u, &gc_candidates, link) { scan_children(&u->sk, inc_inflight, &hitlist); +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (u->oob_skb) { + kfree_skb(u->oob_skb); + u->oob_skb = NULL; + } +#endif + } + /* not_cycle_list contains those sockets which do not make up a * cycle. Restore these to the inflight list. */ @@ -314,18 +322,6 @@ /* Here we are. Hitlist is filled. Die. */ __skb_queue_purge(&hitlist); -#if IS_ENABLED(CONFIG_AF_UNIX_OOB) - while (!list_empty(&gc_candidates)) { - u = list_entry(gc_candidates.next, struct unix_sock, link); - if (u->oob_skb) { - struct sk_buff *skb = u->oob_skb; - - u->oob_skb = NULL; - kfree_skb(skb); - } - } -#endif - spin_lock(&unix_gc_lock); /* There could be io_uring registered files, just push them back to diff -u linux-aws-5.15-5.15.0/net/unix/scm.c linux-aws-5.15-5.15.0/net/unix/scm.c --- linux-aws-5.15-5.15.0/net/unix/scm.c +++ linux-aws-5.15-5.15.0/net/unix/scm.c @@ -34,10 +34,8 @@ /* PF_UNIX ? 
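
The net/unix/garbage.c hunk above wraps the gc_in_progress read in READ_ONCE(): the wait_event() condition is re-evaluated repeatedly without a lock, and a plain load lets the compiler cache or fuse the read so the waiter might never observe the writer clearing the flag. A standalone C11 sketch of the same idea, with invented names and an atomic flag standing in for the READ_ONCE()/WRITE_ONCE() pair:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool gc_in_progress = true;

static void *gc_thread(void *arg)
{
        usleep(20000);                          /* pretend to collect */
        atomic_store(&gc_in_progress, false);   /* like WRITE_ONCE() */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, gc_thread, NULL);

        /* Each iteration performs a fresh load, the moral equivalent of
         * wait_event(..., !READ_ONCE(gc_in_progress)). With a plain
         * non-atomic bool the compiler may legally hoist the load out
         * of the loop and spin forever. */
        while (atomic_load(&gc_in_progress))
                usleep(1000);

        pthread_join(t, NULL);
        printf("GC finished\n");
        return 0;
}
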
*/ if (s && sock->ops && sock->ops->family == PF_UNIX) u_sock = s; - } else { - /* Could be an io_uring instance */ - u_sock = io_uring_get_socket(filp); } + return u_sock; } EXPORT_SYMBOL(unix_get_socket); diff -u linux-aws-5.15-5.15.0/net/wireless/nl80211.c linux-aws-5.15-5.15.0/net/wireless/nl80211.c --- linux-aws-5.15-5.15.0/net/wireless/nl80211.c +++ linux-aws-5.15-5.15.0/net/wireless/nl80211.c @@ -3737,6 +3737,7 @@ if_idx++; } + if_start = 0; wp_idx++; } out: @@ -3913,6 +3914,8 @@ if (ntype != NL80211_IFTYPE_MESH_POINT) return -EINVAL; + if (otype != NL80211_IFTYPE_MESH_POINT) + return -EINVAL; if (netif_running(dev)) return -EBUSY; diff -u linux-aws-5.15-5.15.0/net/wireless/wext-core.c linux-aws-5.15-5.15.0/net/wireless/wext-core.c --- linux-aws-5.15-5.15.0/net/wireless/wext-core.c +++ linux-aws-5.15-5.15.0/net/wireless/wext-core.c @@ -799,6 +799,12 @@ } } + /* Sanity-check to ensure we never end up _allocating_ zero + * bytes of data for extra. + */ + if (extra_size <= 0) + return -EFAULT; + /* kzalloc() ensures NULL-termination for essid_compat. */ extra = kzalloc(extra_size, GFP_KERNEL); if (!extra) diff -u linux-aws-5.15-5.15.0/net/x25/af_x25.c linux-aws-5.15-5.15.0/net/x25/af_x25.c --- linux-aws-5.15-5.15.0/net/x25/af_x25.c +++ linux-aws-5.15-5.15.0/net/x25/af_x25.c @@ -460,12 +460,12 @@ if (get_user(len, optlen)) goto out; - len = min_t(unsigned int, len, sizeof(int)); - rc = -EINVAL; if (len < 0) goto out; + len = min_t(unsigned int, len, sizeof(int)); + rc = -EFAULT; if (put_user(len, optlen)) goto out; diff -u linux-aws-5.15-5.15.0/sound/core/Makefile linux-aws-5.15-5.15.0/sound/core/Makefile --- linux-aws-5.15-5.15.0/sound/core/Makefile +++ linux-aws-5.15-5.15.0/sound/core/Makefile @@ -33,7 +33,6 @@ snd-rawmidi-objs := rawmidi.o snd-timer-objs := timer.o snd-hrtimer-objs := hrtimer.o -snd-rtctimer-objs := rtctimer.o snd-hwdep-objs := hwdep.o snd-seq-device-objs := seq_device.o diff -u linux-aws-5.15-5.15.0/sound/pci/hda/patch_realtek.c linux-aws-5.15-5.15.0/sound/pci/hda/patch_realtek.c --- linux-aws-5.15-5.15.0/sound/pci/hda/patch_realtek.c +++ linux-aws-5.15-5.15.0/sound/pci/hda/patch_realtek.c @@ -3680,6 +3680,7 @@ int i, val; int coef38, coef0d, coef36; + alc_write_coefex_idx(codec, 0x58, 0x00, 0x1888); /* write default value */ alc_update_coef_idx(codec, 0x4a, 1<<15, 1<<15); /* Reset HP JD */ coef38 = alc_read_coef_idx(codec, 0x38); /* Amp control */ coef0d = alc_read_coef_idx(codec, 0x0d); /* Digital Misc control */ @@ -6674,6 +6675,60 @@ } } +static void alc285_fixup_hp_envy_x360(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + static const struct coef_fw coefs[] = { + WRITE_COEF(0x08, 0x6a0c), WRITE_COEF(0x0d, 0xa023), + WRITE_COEF(0x10, 0x0320), WRITE_COEF(0x1a, 0x8c03), + WRITE_COEF(0x25, 0x1800), WRITE_COEF(0x26, 0x003a), + WRITE_COEF(0x28, 0x1dfe), WRITE_COEF(0x29, 0xb014), + WRITE_COEF(0x2b, 0x1dfe), WRITE_COEF(0x37, 0xfe15), + WRITE_COEF(0x38, 0x7909), WRITE_COEF(0x45, 0xd489), + WRITE_COEF(0x46, 0x00f4), WRITE_COEF(0x4a, 0x21e0), + WRITE_COEF(0x66, 0x03f0), WRITE_COEF(0x67, 0x1000), + WRITE_COEF(0x6e, 0x1005), { } + }; + + static const struct hda_pintbl pincfgs[] = { + { 0x12, 0xb7a60130 }, /* Internal microphone*/ + { 0x14, 0x90170150 }, /* B&O soundbar speakers */ + { 0x17, 0x90170153 }, /* Side speakers */ + { 0x19, 0x03a11040 }, /* Headset microphone */ + { } + }; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + snd_hda_apply_pincfgs(codec, pincfgs); + + /* Fixes volume control problem for side speakers */ + 
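
In the af_x25.c getsockopt hunk above, the negative-length check is moved ahead of the min_t(unsigned int, ...) clamp: casting a negative int to unsigned makes it huge, the clamp then reduces it to sizeof(int), and the old "len < 0" test could never fire. A small standalone demonstration of the ordering problem; the helper names are made up:

#include <stdio.h>

#define min_unsigned(a, b) ((unsigned int)(a) < (unsigned int)(b) ? \
                            (unsigned int)(a) : (unsigned int)(b))

/* Old order: clamp first, then test for negative; the test is dead. */
static int validate_old(int len)
{
        len = min_unsigned(len, sizeof(int));
        if (len < 0)
                return -1;      /* unreachable: len is now at most 4 */
        return len;
}

/* New order: reject negative lengths before the unsigned clamp. */
static int validate_new(int len)
{
        if (len < 0)
                return -1;
        len = min_unsigned(len, sizeof(int));
        return len;
}

int main(void)
{
        printf("old(-1) = %d  (negative length slipped through the clamp)\n",
               validate_old(-1));
        printf("new(-1) = %d  (rejected)\n", validate_new(-1));
        printf("old(100) = %d, new(100) = %d\n",
               validate_old(100), validate_new(100));
        return 0;
}
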
alc295_fixup_disable_dac3(codec, fix, action); + + /* Fixes no sound from headset speaker */ + snd_hda_codec_amp_stereo(codec, 0x21, HDA_OUTPUT, 0, -1, 0); + + /* Auto-enable headset mic when plugged */ + snd_hda_jack_set_gating_jack(codec, 0x19, 0x21); + + /* Headset mic volume enhancement */ + snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREF50); + break; + case HDA_FIXUP_ACT_INIT: + alc_process_coef_fw(codec, coefs); + break; + case HDA_FIXUP_ACT_BUILD: + rename_ctl(codec, "Bass Speaker Playback Volume", + "B&O-Tuned Playback Volume"); + rename_ctl(codec, "Front Playback Switch", + "B&O Soundbar Playback Switch"); + rename_ctl(codec, "Bass Speaker Playback Switch", + "Side Speaker Playback Switch"); + break; + } +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -7058,6 +7113,7 @@ ALC280_FIXUP_HP_9480M, ALC245_FIXUP_HP_X360_AMP, ALC285_FIXUP_HP_SPECTRE_X360_EB1, + ALC285_FIXUP_HP_ENVY_X360, ALC288_FIXUP_DELL_HEADSET_MODE, ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, ALC288_FIXUP_DELL_XPS_13, @@ -8869,6 +8925,12 @@ .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_spectre_x360_eb1 }, + [ALC285_FIXUP_HP_ENVY_X360] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_envy_x360, + .chained = true, + .chain_id = ALC285_FIXUP_HP_GPIO_AMP_INIT, + }, [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_ideapad_s740_coef, @@ -9300,6 +9362,7 @@ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360), SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360), SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), @@ -9863,6 +9926,7 @@ {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"}, + {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"}, {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"}, {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"}, {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"}, diff -u linux-aws-5.15-5.15.0/sound/soc/codecs/rt5645.c linux-aws-5.15-5.15.0/sound/soc/codecs/rt5645.c --- linux-aws-5.15-5.15.0/sound/soc/codecs/rt5645.c +++ linux-aws-5.15-5.15.0/sound/soc/codecs/rt5645.c @@ -3803,6 +3803,16 @@ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), DMI_EXACT_MATCH(DMI_BOARD_VERSION, "Default string"), + /* + * Above strings are too generic, LattePanda BIOS versions for + * all 4 hw revisions are: + * DF-BI-7-S70CR100-* + * DF-BI-7-S70CR110-* + * DF-BI-7-S70CR200-* + * LP-BS-7-S70CR700-* + * Do a partial match for S70CR to avoid false positive matches. 
+ */ + DMI_MATCH(DMI_BIOS_VERSION, "S70CR"), }, .driver_data = (void *)&lattepanda_board_platform_data, }, diff -u linux-aws-5.15-5.15.0/sound/soc/codecs/wm8962.c linux-aws-5.15-5.15.0/sound/soc/codecs/wm8962.c --- linux-aws-5.15-5.15.0/sound/soc/codecs/wm8962.c +++ linux-aws-5.15-5.15.0/sound/soc/codecs/wm8962.c @@ -2219,6 +2219,9 @@ SND_SOC_DAPM_OUTPUT("HPOUTL"), SND_SOC_DAPM_OUTPUT("HPOUTR"), + +SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0), +SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0), }; static const struct snd_soc_dapm_widget wm8962_dapm_spk_mono_widgets[] = { @@ -2226,7 +2229,6 @@ spkmixl, ARRAY_SIZE(spkmixl)), SND_SOC_DAPM_MUX_E("Speaker PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux, out_pga_event, SND_SOC_DAPM_POST_PMU), -SND_SOC_DAPM_PGA("Speaker Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("SPKOUT"), }; @@ -2241,9 +2243,6 @@ SND_SOC_DAPM_MUX_E("SPKOUTR PGA", WM8962_PWR_MGMT_2, 3, 0, &spkoutr_mux, out_pga_event, SND_SOC_DAPM_POST_PMU), -SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0), -SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0), - SND_SOC_DAPM_OUTPUT("SPKOUTL"), SND_SOC_DAPM_OUTPUT("SPKOUTR"), }; @@ -2353,12 +2352,18 @@ { "Speaker PGA", "Mixer", "Speaker Mixer" }, { "Speaker PGA", "DAC", "DACL" }, - { "Speaker Output", NULL, "Speaker PGA" }, - { "Speaker Output", NULL, "SYSCLK" }, - { "Speaker Output", NULL, "TOCLK" }, - { "Speaker Output", NULL, "TEMP_SPK" }, + { "SPKOUTL Output", NULL, "Speaker PGA" }, + { "SPKOUTL Output", NULL, "SYSCLK" }, + { "SPKOUTL Output", NULL, "TOCLK" }, + { "SPKOUTL Output", NULL, "TEMP_SPK" }, + + { "SPKOUTR Output", NULL, "Speaker PGA" }, + { "SPKOUTR Output", NULL, "SYSCLK" }, + { "SPKOUTR Output", NULL, "TOCLK" }, + { "SPKOUTR Output", NULL, "TEMP_SPK" }, - { "SPKOUT", NULL, "Speaker Output" }, + { "SPKOUT", NULL, "SPKOUTL Output" }, + { "SPKOUT", NULL, "SPKOUTR Output" }, }; static const struct snd_soc_dapm_route wm8962_spk_stereo_intercon[] = { @@ -2901,8 +2906,12 @@ switch (fll_id) { case WM8962_FLL_MCLK: case WM8962_FLL_BCLK: + fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT; + break; case WM8962_FLL_OSC: fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT; + snd_soc_component_update_bits(component, WM8962_PLL2, + WM8962_OSC_ENA, WM8962_OSC_ENA); break; case WM8962_FLL_INT: snd_soc_component_update_bits(component, WM8962_FLL_CONTROL_1, @@ -2911,7 +2920,7 @@ WM8962_FLL_FRC_NCO, WM8962_FLL_FRC_NCO); break; default: - dev_err(component->dev, "Unknown FLL source %d\n", ret); + dev_err(component->dev, "Unknown FLL source %d\n", source); return -EINVAL; } diff -u linux-aws-5.15-5.15.0/sound/soc/intel/boards/bytcr_rt5640.c linux-aws-5.15-5.15.0/sound/soc/intel/boards/bytcr_rt5640.c --- linux-aws-5.15-5.15.0/sound/soc/intel/boards/bytcr_rt5640.c +++ linux-aws-5.15-5.15.0/sound/soc/intel/boards/bytcr_rt5640.c @@ -668,6 +668,18 @@ BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* Chuwi Vi8 dual-boot (CWI506) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "i86"), + /* The above are too generic, also match BIOS info */ + DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* Chuwi Vi10 (CWI505) */ .matches = { diff -u linux-aws-5.15-5.15.0/sound/soc/meson/axg-spdifin.c linux-aws-5.15-5.15.0/sound/soc/meson/axg-spdifin.c 
--- linux-aws-5.15-5.15.0/sound/soc/meson/axg-spdifin.c +++ linux-aws-5.15-5.15.0/sound/soc/meson/axg-spdifin.c @@ -439,7 +439,6 @@ struct axg_spdifin *priv; struct snd_soc_dai_driver *dai_drv; void __iomem *regs; - int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -464,20 +463,12 @@ } priv->pclk = devm_clk_get(dev, "pclk"); - if (IS_ERR(priv->pclk)) { - ret = PTR_ERR(priv->pclk); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get pclk: %d\n", ret); - return ret; - } + if (IS_ERR(priv->pclk)) + return dev_err_probe(dev, PTR_ERR(priv->pclk), "failed to get pclk\n"); priv->refclk = devm_clk_get(dev, "refclk"); - if (IS_ERR(priv->refclk)) { - ret = PTR_ERR(priv->refclk); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get mclk: %d\n", ret); - return ret; - } + if (IS_ERR(priv->refclk)) + return dev_err_probe(dev, PTR_ERR(priv->refclk), "failed to get mclk\n"); dai_drv = axg_spdifin_get_dai_drv(dev, priv); if (IS_ERR(dai_drv)) { diff -u linux-aws-5.15-5.15.0/sound/soc/meson/axg-tdm-formatter.c linux-aws-5.15-5.15.0/sound/soc/meson/axg-tdm-formatter.c --- linux-aws-5.15-5.15.0/sound/soc/meson/axg-tdm-formatter.c +++ linux-aws-5.15-5.15.0/sound/soc/meson/axg-tdm-formatter.c @@ -265,7 +265,6 @@ const struct axg_tdm_formatter_driver *drv; struct axg_tdm_formatter *formatter; void __iomem *regs; - int ret; drv = of_device_get_match_data(dev); if (!drv) { @@ -292,57 +291,34 @@ /* Peripharal clock */ formatter->pclk = devm_clk_get(dev, "pclk"); - if (IS_ERR(formatter->pclk)) { - ret = PTR_ERR(formatter->pclk); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get pclk: %d\n", ret); - return ret; - } + if (IS_ERR(formatter->pclk)) + return dev_err_probe(dev, PTR_ERR(formatter->pclk), "failed to get pclk\n"); /* Formatter bit clock */ formatter->sclk = devm_clk_get(dev, "sclk"); - if (IS_ERR(formatter->sclk)) { - ret = PTR_ERR(formatter->sclk); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get sclk: %d\n", ret); - return ret; - } + if (IS_ERR(formatter->sclk)) + return dev_err_probe(dev, PTR_ERR(formatter->sclk), "failed to get sclk\n"); /* Formatter sample clock */ formatter->lrclk = devm_clk_get(dev, "lrclk"); - if (IS_ERR(formatter->lrclk)) { - ret = PTR_ERR(formatter->lrclk); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get lrclk: %d\n", ret); - return ret; - } + if (IS_ERR(formatter->lrclk)) + return dev_err_probe(dev, PTR_ERR(formatter->lrclk), "failed to get lrclk\n"); /* Formatter bit clock input multiplexer */ formatter->sclk_sel = devm_clk_get(dev, "sclk_sel"); - if (IS_ERR(formatter->sclk_sel)) { - ret = PTR_ERR(formatter->sclk_sel); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get sclk_sel: %d\n", ret); - return ret; - } + if (IS_ERR(formatter->sclk_sel)) + return dev_err_probe(dev, PTR_ERR(formatter->sclk_sel), "failed to get sclk_sel\n"); /* Formatter sample clock input multiplexer */ formatter->lrclk_sel = devm_clk_get(dev, "lrclk_sel"); - if (IS_ERR(formatter->lrclk_sel)) { - ret = PTR_ERR(formatter->lrclk_sel); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get lrclk_sel: %d\n", ret); - return ret; - } + if (IS_ERR(formatter->lrclk_sel)) + return dev_err_probe(dev, PTR_ERR(formatter->lrclk_sel), + "failed to get lrclk_sel\n"); /* Formatter dedicated reset line */ formatter->reset = devm_reset_control_get_optional_exclusive(dev, NULL); - if (IS_ERR(formatter->reset)) { - ret = PTR_ERR(formatter->reset); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get reset: %d\n", ret); - return ret; - } + if 
(IS_ERR(formatter->reset)) + return dev_err_probe(dev, PTR_ERR(formatter->reset), "failed to get reset\n"); return devm_snd_soc_register_component(dev, drv->component_drv, NULL, 0); diff -u linux-aws-5.15-5.15.0/sound/usb/card.c linux-aws-5.15-5.15.0/sound/usb/card.c --- linux-aws-5.15-5.15.0/sound/usb/card.c +++ linux-aws-5.15-5.15.0/sound/usb/card.c @@ -643,6 +643,7 @@ INIT_LIST_HEAD(&chip->pcm_list); INIT_LIST_HEAD(&chip->ep_list); INIT_LIST_HEAD(&chip->iface_ref_list); + INIT_LIST_HEAD(&chip->clock_ref_list); INIT_LIST_HEAD(&chip->midi_list); INIT_LIST_HEAD(&chip->mixer_list); diff -u linux-aws-5.15-5.15.0/sound/usb/card.h linux-aws-5.15-5.15.0/sound/usb/card.h --- linux-aws-5.15-5.15.0/sound/usb/card.h +++ linux-aws-5.15-5.15.0/sound/usb/card.h @@ -44,6 +44,7 @@ struct snd_usb_substream; struct snd_usb_iface_ref; +struct snd_usb_clock_ref; struct snd_usb_endpoint; struct snd_usb_power_domain; @@ -62,6 +63,7 @@ struct snd_usb_endpoint { struct snd_usb_audio *chip; struct snd_usb_iface_ref *iface_ref; + struct snd_usb_clock_ref *clock_ref; int opened; /* open refcount; protect with chip->mutex */ atomic_t running; /* running status */ @@ -127,7 +129,9 @@ in a stream */ bool implicit_fb_sync; /* syncs with implicit feedback */ bool lowlatency_playback; /* low-latency playback mode */ - bool need_setup; /* (re-)need for configure? */ + bool need_setup; /* (re-)need for hw_params? */ + bool need_prepare; /* (re-)need for prepare? */ + bool fixed_rate; /* skip rate setup */ /* for hw constraints */ const struct audioformat *cur_audiofmt; @@ -138,7 +142,6 @@ unsigned int cur_period_frames; unsigned int cur_period_bytes; unsigned int cur_buffer_periods; - unsigned char cur_clock; spinlock_t lock; struct list_head list; diff -u linux-aws-5.15-5.15.0/sound/usb/clock.c linux-aws-5.15-5.15.0/sound/usb/clock.c --- linux-aws-5.15-5.15.0/sound/usb/clock.c +++ linux-aws-5.15-5.15.0/sound/usb/clock.c @@ -328,8 +328,16 @@ if (chip->quirk_flags & QUIRK_FLAG_SKIP_CLOCK_SELECTOR) return ret; err = uac_clock_selector_set_val(chip, entity_id, cur); - if (err < 0) + if (err < 0) { + if (pins == 1) { + usb_audio_dbg(chip, + "%s(): selector returned an error, " + "assuming a firmware bug, id %d, ret %d\n", + __func__, clock_id, err); + return ret; + } return err; + } } if (!validate || ret > 0 || !chip->autoclock) diff -u linux-aws-5.15-5.15.0/sound/usb/endpoint.c linux-aws-5.15-5.15.0/sound/usb/endpoint.c --- linux-aws-5.15-5.15.0/sound/usb/endpoint.c +++ linux-aws-5.15-5.15.0/sound/usb/endpoint.c @@ -35,6 +35,16 @@ struct list_head list; }; +/* clock refcounting */ +struct snd_usb_clock_ref { + unsigned char clock; + atomic_t locked; + int opened; + int rate; + bool need_setup; + struct list_head list; +}; + /* * snd_usb_endpoint is a model that abstracts everything related to an * USB endpoint and its streaming. @@ -598,6 +608,25 @@ return ip; } +/* Similarly, a refcount object for clock */ +static struct snd_usb_clock_ref * +clock_ref_find(struct snd_usb_audio *chip, int clock) +{ + struct snd_usb_clock_ref *ref; + + list_for_each_entry(ref, &chip->clock_ref_list, list) + if (ref->clock == clock) + return ref; + + ref = kzalloc(sizeof(*ref), GFP_KERNEL); + if (!ref) + return NULL; + ref->clock = clock; + atomic_set(&ref->locked, 0); + list_add_tail(&ref->list, &chip->clock_ref_list); + return ref; +} + /* * Get the existing endpoint object corresponding EP * Returns NULL if not present. @@ -738,13 +767,15 @@ * The endpoint needs to be closed via snd_usb_endpoint_close() later. 
* * Note that this function doesn't configure the endpoint. The substream - * needs to set it up later via snd_usb_endpoint_configure(). + * needs to set it up later via snd_usb_endpoint_set_params() and + * snd_usb_endpoint_prepare(). */ struct snd_usb_endpoint * snd_usb_endpoint_open(struct snd_usb_audio *chip, const struct audioformat *fp, const struct snd_pcm_hw_params *params, - bool is_sync_ep) + bool is_sync_ep, + bool fixed_rate) { struct snd_usb_endpoint *ep; int ep_num = is_sync_ep ? fp->sync_ep : fp->endpoint; @@ -775,6 +806,15 @@ goto unlock; } + if (fp->protocol != UAC_VERSION_1) { + ep->clock_ref = clock_ref_find(chip, fp->clock); + if (!ep->clock_ref) { + ep = NULL; + goto unlock; + } + ep->clock_ref->opened++; + } + ep->cur_audiofmt = fp; ep->cur_channels = fp->channels; ep->cur_rate = params_rate(params); @@ -784,13 +824,14 @@ ep->cur_period_frames = params_period_size(params); ep->cur_period_bytes = ep->cur_period_frames * ep->cur_frame_bytes; ep->cur_buffer_periods = params_periods(params); - ep->cur_clock = fp->clock; if (ep->type == SND_USB_ENDPOINT_TYPE_SYNC) endpoint_set_syncinterval(chip, ep); ep->implicit_fb_sync = fp->implicit_fb; ep->need_setup = true; + ep->need_prepare = true; + ep->fixed_rate = fixed_rate; usb_audio_dbg(chip, " channels=%d, rate=%d, format=%s, period_bytes=%d, periods=%d, implicit_fb=%d\n", ep->cur_channels, ep->cur_rate, @@ -898,12 +939,16 @@ endpoint_set_interface(chip, ep, false); if (!--ep->opened) { + if (ep->clock_ref) { + if (!--ep->clock_ref->opened) + ep->clock_ref->rate = 0; + } ep->iface = 0; ep->altsetting = 0; ep->cur_audiofmt = NULL; ep->cur_rate = 0; - ep->cur_clock = 0; ep->iface_ref = NULL; + ep->clock_ref = NULL; usb_audio_dbg(chip, "EP 0x%x closed\n", ep->ep_num); } mutex_unlock(&chip->mutex); @@ -912,9 +957,11 @@ /* Prepare for suspening EP, called from the main suspend handler */ void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep) { - ep->need_setup = true; + ep->need_prepare = true; if (ep->iface_ref) ep->iface_ref->need_setup = true; + if (ep->clock_ref) + ep->clock_ref->rate = 0; } /* @@ -1258,23 +1305,51 @@ return -ENOMEM; } +/* update the rate of the referred clock; return the actual rate */ +static int update_clock_ref_rate(struct snd_usb_audio *chip, + struct snd_usb_endpoint *ep) +{ + struct snd_usb_clock_ref *clock = ep->clock_ref; + int rate = ep->cur_rate; + + if (!clock || clock->rate == rate) + return rate; + if (clock->rate) { + if (atomic_read(&clock->locked)) + return clock->rate; + if (clock->rate != rate) { + usb_audio_err(chip, "Mismatched sample rate %d vs %d for EP 0x%x\n", + clock->rate, rate, ep->ep_num); + return clock->rate; + } + } + clock->rate = rate; + clock->need_setup = true; + return rate; +} + /* * snd_usb_endpoint_set_params: configure an snd_usb_endpoint * + * It's called either from hw_params callback. * Determine the number of URBs to be used on this endpoint. * An endpoint must be configured before it can be started. * An endpoint that is already running can not be reconfigured. 
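
The endpoint.c changes above move the sample-rate bookkeeping from each endpoint into a shared snd_usb_clock_ref: endpoints referring to the same clock ID find or create one ref, the first endpoint to configure programs the rate, and later endpoints are held to it (the in-kernel version additionally tracks, via an atomic "locked" count, whether running streams pin the rate). A condensed, self-contained sketch of the find-or-create plus arbitration logic, with made-up types and without the chip mutex the kernel holds:

#include <stdio.h>
#include <stdlib.h>

struct clock_ref {
        int clock;              /* clock entity id */
        int rate;               /* 0 = not programmed yet */
        struct clock_ref *next;
};

static struct clock_ref *clock_list;

/* Find the ref for a clock id, creating it on first use. */
static struct clock_ref *clock_ref_find(int clock)
{
        struct clock_ref *ref;

        for (ref = clock_list; ref; ref = ref->next)
                if (ref->clock == clock)
                        return ref;

        ref = calloc(1, sizeof(*ref));
        if (!ref)
                return NULL;
        ref->clock = clock;
        ref->next = clock_list;
        clock_list = ref;
        return ref;
}

/* Return the rate the endpoint must actually use: the requested rate if
 * the clock is still unprogrammed, otherwise whatever is already set. */
static int update_clock_ref_rate(struct clock_ref *ref, int requested)
{
        if (!ref->rate) {
                ref->rate = requested;  /* first user programs the clock */
                return requested;
        }
        if (ref->rate != requested)
                fprintf(stderr, "clock %d already at %d Hz, wanted %d\n",
                        ref->clock, ref->rate, requested);
        return ref->rate;               /* later users follow it */
}

int main(void)
{
        struct clock_ref *ref = clock_ref_find(0x29);

        printf("ep1 uses %d Hz\n", update_clock_ref_rate(ref, 48000));
        printf("ep2 uses %d Hz\n", update_clock_ref_rate(ref, 44100));
        return 0;
}
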
*/ -static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, - struct snd_usb_endpoint *ep) +int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, + struct snd_usb_endpoint *ep) { const struct audioformat *fmt = ep->cur_audiofmt; - int err; + int err = 0; + + mutex_lock(&chip->mutex); + if (!ep->need_setup) + goto unlock; /* release old buffers, if any */ err = release_urbs(ep, false); if (err < 0) - return err; + goto unlock; ep->datainterval = fmt->datainterval; ep->maxpacksize = fmt->maxpacksize; @@ -1312,28 +1387,62 @@ usb_audio_dbg(chip, "Set up %d URBS, ret=%d\n", ep->nurbs, err); if (err < 0) - return err; + goto unlock; /* some unit conversions in runtime */ ep->maxframesize = ep->maxpacksize / ep->cur_frame_bytes; ep->curframesize = ep->curpacksize / ep->cur_frame_bytes; + err = update_clock_ref_rate(chip, ep); + if (err >= 0) { + ep->need_setup = false; + err = 0; + } + + unlock: + mutex_unlock(&chip->mutex); + return err; +} + +static int init_sample_rate(struct snd_usb_audio *chip, + struct snd_usb_endpoint *ep) +{ + struct snd_usb_clock_ref *clock = ep->clock_ref; + int rate, err; + + rate = update_clock_ref_rate(chip, ep); + if (rate < 0) + return rate; + if (clock && !clock->need_setup) + return 0; + + if (!ep->fixed_rate) { + err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, rate); + if (err < 0) { + if (clock) + clock->rate = 0; /* reset rate */ + return err; + } + } + + if (clock) + clock->need_setup = false; return 0; } /* - * snd_usb_endpoint_configure: Configure the endpoint + * snd_usb_endpoint_prepare: Prepare the endpoint * * This function sets up the EP to be fully usable state. - * It's called either from hw_params or prepare callback. + * It's called either from prepare callback. * The function checks need_setup flag, and performs nothing unless needed, * so it's safe to call this multiple times. * * This returns zero if unchanged, 1 if the configuration has changed, * or a negative error code. 
*/ -int snd_usb_endpoint_configure(struct snd_usb_audio *chip, - struct snd_usb_endpoint *ep) +int snd_usb_endpoint_prepare(struct snd_usb_audio *chip, + struct snd_usb_endpoint *ep) { bool iface_first; int err = 0; @@ -1341,7 +1450,7 @@ mutex_lock(&chip->mutex); if (WARN_ON(!ep->iface_ref)) goto unlock; - if (!ep->need_setup) + if (!ep->need_prepare) goto unlock; /* If the interface has been already set up, just set EP parameters */ @@ -1350,14 +1459,10 @@ * to update at each EP configuration */ if (ep->cur_audiofmt->protocol == UAC_VERSION_1) { - err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, - ep->cur_rate); + err = init_sample_rate(chip, ep); if (err < 0) goto unlock; } - err = snd_usb_endpoint_set_params(chip, ep); - if (err < 0) - goto unlock; goto done; } @@ -1381,11 +1486,7 @@ if (err < 0) goto unlock; - err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, ep->cur_rate); - if (err < 0) - goto unlock; - - err = snd_usb_endpoint_set_params(chip, ep); + err = init_sample_rate(chip, ep); if (err < 0) goto unlock; @@ -1403,7 +1504,7 @@ ep->iface_ref->need_setup = false; done: - ep->need_setup = false; + ep->need_prepare = false; err = 1; unlock: @@ -1414,15 +1515,15 @@ /* get the current rate set to the given clock by any endpoint */ int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock) { - struct snd_usb_endpoint *ep; + struct snd_usb_clock_ref *ref; int rate = 0; if (!clock) return 0; mutex_lock(&chip->mutex); - list_for_each_entry(ep, &chip->ep_list, list) { - if (ep->cur_clock == clock && ep->cur_rate) { - rate = ep->cur_rate; + list_for_each_entry(ref, &chip->clock_ref_list, list) { + if (ref->clock == clock) { + rate = ref->rate; break; } } @@ -1463,6 +1564,9 @@ if (atomic_inc_return(&ep->running) != 1) return 0; + if (ep->clock_ref) + atomic_inc(&ep->clock_ref->locked); + ep->active_mask = 0; ep->unlink_mask = 0; ep->phase = 0; @@ -1572,6 +1676,15 @@ if (ep->sync_source) WRITE_ONCE(ep->sync_source->sync_sink, NULL); stop_urbs(ep, false, keep_pending); + if (ep->clock_ref) + atomic_dec(&ep->clock_ref->locked); + + if (ep->chip->quirk_flags & QUIRK_FLAG_FORCE_IFACE_RESET && + usb_pipeout(ep->pipe)) { + ep->need_prepare = true; + if (ep->iface_ref) + ep->iface_ref->need_setup = true; + } } } @@ -1598,12 +1711,16 @@ { struct snd_usb_endpoint *ep, *en; struct snd_usb_iface_ref *ip, *in; + struct snd_usb_clock_ref *cp, *cn; list_for_each_entry_safe(ep, en, &chip->ep_list, list) kfree(ep); list_for_each_entry_safe(ip, in, &chip->iface_ref_list, list) kfree(ip); + + list_for_each_entry_safe(cp, cn, &chip->clock_ref_list, list) + kfree(cp); } /* diff -u linux-aws-5.15-5.15.0/sound/usb/endpoint.h linux-aws-5.15-5.15.0/sound/usb/endpoint.h --- linux-aws-5.15-5.15.0/sound/usb/endpoint.h +++ linux-aws-5.15-5.15.0/sound/usb/endpoint.h @@ -14,11 +14,14 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip, const struct audioformat *fp, const struct snd_pcm_hw_params *params, - bool is_sync_ep); + bool is_sync_ep, + bool fixed_rate); void snd_usb_endpoint_close(struct snd_usb_audio *chip, struct snd_usb_endpoint *ep); -int snd_usb_endpoint_configure(struct snd_usb_audio *chip, - struct snd_usb_endpoint *ep); +int snd_usb_endpoint_set_params(struct snd_usb_audio *chip, + struct snd_usb_endpoint *ep); +int snd_usb_endpoint_prepare(struct snd_usb_audio *chip, + struct snd_usb_endpoint *ep); int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock); bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip, diff -u 
linux-aws-5.15-5.15.0/sound/usb/format.c linux-aws-5.15-5.15.0/sound/usb/format.c --- linux-aws-5.15-5.15.0/sound/usb/format.c +++ linux-aws-5.15-5.15.0/sound/usb/format.c @@ -470,9 +470,11 @@ int clock) { struct usb_device *dev = chip->dev; + struct usb_host_interface *alts; unsigned int *table; unsigned int nr_rates; int i, err; + u32 bmControls; /* performing the rate verification may lead to unexpected USB bus * behavior afterwards by some unknown reason. Do this only for the @@ -481,6 +483,24 @@ if (!(chip->quirk_flags & QUIRK_FLAG_VALIDATE_RATES)) return 0; /* don't perform the validation as default */ + alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting); + if (!alts) + return 0; + + if (fp->protocol == UAC_VERSION_3) { + struct uac3_as_header_descriptor *as = snd_usb_find_csint_desc( + alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); + bmControls = le32_to_cpu(as->bmControls); + } else { + struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc( + alts->extra, alts->extralen, NULL, UAC_AS_GENERAL); + bmControls = as->bmControls; + } + + if (!uac_v2v3_control_is_readable(bmControls, + UAC2_AS_VAL_ALT_SETTINGS)) + return 0; + table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; diff -u linux-aws-5.15-5.15.0/sound/usb/implicit.c linux-aws-5.15-5.15.0/sound/usb/implicit.c --- linux-aws-5.15-5.15.0/sound/usb/implicit.c +++ linux-aws-5.15-5.15.0/sound/usb/implicit.c @@ -15,6 +15,7 @@ #include "usbaudio.h" #include "card.h" #include "helper.h" +#include "pcm.h" #include "implicit.h" enum { @@ -455,7 +456,8 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip, const struct audioformat *target, const struct snd_pcm_hw_params *params, - int stream) + int stream, + bool *fixed_rate) { struct snd_usb_substream *subs; const struct audioformat *fp, *sync_fmt = NULL; @@ -469,7 +471,7 @@ subs = find_matching_substream(chip, stream, target->sync_ep, target->fmt_type); if (!subs) - return sync_fmt; + goto end; high_score = 0; list_for_each_entry(fp, &subs->fmt_list, list) { @@ -483,6 +485,9 @@ } } + end: + if (fixed_rate) + *fixed_rate = snd_usb_pcm_has_fixed_rate(subs); return sync_fmt; } diff -u linux-aws-5.15-5.15.0/sound/usb/mixer.c linux-aws-5.15-5.15.0/sound/usb/mixer.c --- linux-aws-5.15-5.15.0/sound/usb/mixer.c +++ linux-aws-5.15-5.15.0/sound/usb/mixer.c @@ -1205,6 +1205,13 @@ cval->res = 16; } break; + case USB_ID(0x1bcf, 0x2283): /* NexiGo N930AF FHD Webcam */ + if (!strcmp(kctl->id.name, "Mic Capture Volume")) { + usb_audio_info(chip, + "set resolution quirk: cval->res = 16\n"); + cval->res = 16; + } + break; } } diff -u linux-aws-5.15-5.15.0/sound/usb/pcm.c linux-aws-5.15-5.15.0/sound/usb/pcm.c --- linux-aws-5.15-5.15.0/sound/usb/pcm.c +++ linux-aws-5.15-5.15.0/sound/usb/pcm.c @@ -157,6 +157,33 @@ true, subs); } +bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *subs) +{ + const struct audioformat *fp; + struct snd_usb_audio *chip = subs->stream->chip; + int rate = -1; + + if (!subs) + return false; + if (!(chip->quirk_flags & QUIRK_FLAG_FIXED_RATE)) + return false; + list_for_each_entry(fp, &subs->fmt_list, list) { + if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) + return false; + if (fp->nr_rates < 1) + continue; + if (fp->nr_rates > 1) + return false; + if (rate < 0) { + rate = fp->rate_table[0]; + continue; + } + if (rate != fp->rate_table[0]) + return false; + } + return true; +} + static int init_pitch_v1(struct snd_usb_audio *chip, int ep) { struct usb_device *dev = chip->dev; @@ -433,35 +460,6 @@ } } 
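
snd_usb_pcm_has_fixed_rate() above reports true only when the QUIRK_FLAG_FIXED_RATE quirk is set and every format on the substream offers exactly one discrete rate, with all formats agreeing on it. The loop conditions are easy to get wrong, so here is an equivalent standalone version over a plain array; the struct layout and names are invented, and the kernel walks a list_head of audioformat entries and checks the quirk flag first:

#include <stdbool.h>
#include <stdio.h>

struct fmt {
        bool continuous;        /* SNDRV_PCM_RATE_CONTINUOUS-style range */
        int nr_rates;
        const int *rate_table;
};

/* True only if every format that lists rates lists exactly one, and all
 * of them agree on that single rate. */
static bool has_fixed_rate(const struct fmt *fmts, int n)
{
        int rate = -1;
        int i;

        for (i = 0; i < n; i++) {
                if (fmts[i].continuous)
                        return false;   /* arbitrary-rate range */
                if (fmts[i].nr_rates < 1)
                        continue;       /* no rate info, skip */
                if (fmts[i].nr_rates > 1)
                        return false;   /* several discrete rates */
                if (rate < 0)
                        rate = fmts[i].rate_table[0];
                else if (rate != fmts[i].rate_table[0])
                        return false;   /* formats disagree */
        }
        return true;
}

int main(void)
{
        static const int r48k[] = { 48000 };
        static const int r44k[] = { 44100 };
        struct fmt fixed[] = {
                { .nr_rates = 1, .rate_table = r48k },
                { .nr_rates = 1, .rate_table = r48k },
        };
        struct fmt mixed[] = {
                { .nr_rates = 1, .rate_table = r48k },
                { .nr_rates = 1, .rate_table = r44k },
        };

        printf("fixed: %d\n", has_fixed_rate(fixed, 2));        /* 1 */
        printf("mixed: %d\n", has_fixed_rate(mixed, 2));        /* 0 */
        return 0;
}
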
-static int configure_endpoints(struct snd_usb_audio *chip, - struct snd_usb_substream *subs) -{ - int err; - - if (subs->data_endpoint->need_setup) { - /* stop any running stream beforehand */ - if (stop_endpoints(subs, false)) - sync_pending_stops(subs); - if (subs->sync_endpoint) { - err = snd_usb_endpoint_configure(chip, subs->sync_endpoint); - if (err < 0) - return err; - } - err = snd_usb_endpoint_configure(chip, subs->data_endpoint); - if (err < 0) - return err; - snd_usb_set_format_quirk(subs, subs->cur_audiofmt); - } else { - if (subs->sync_endpoint) { - err = snd_usb_endpoint_configure(chip, subs->sync_endpoint); - if (err < 0) - return err; - } - } - - return 0; -} - /* * hw_params callback * @@ -479,12 +477,14 @@ struct snd_usb_audio *chip = subs->stream->chip; const struct audioformat *fmt; const struct audioformat *sync_fmt; + bool fixed_rate, sync_fixed_rate; int ret; ret = snd_media_start_pipeline(subs); if (ret) return ret; + fixed_rate = snd_usb_pcm_has_fixed_rate(subs); fmt = find_substream_format(subs, hw_params); if (!fmt) { usb_audio_dbg(chip, @@ -498,7 +498,8 @@ if (fmt->implicit_fb) { sync_fmt = snd_usb_find_implicit_fb_sync_format(chip, fmt, hw_params, - !substream->stream); + !substream->stream, + &sync_fixed_rate); if (!sync_fmt) { usb_audio_dbg(chip, "cannot find sync format: ep=0x%x, iface=%d:%d, format=%s, rate=%d, channels=%d\n", @@ -511,6 +512,7 @@ } } else { sync_fmt = fmt; + sync_fixed_rate = fixed_rate; } ret = snd_usb_lock_shutdown(chip); @@ -530,7 +532,7 @@ close_endpoints(chip, subs); } - subs->data_endpoint = snd_usb_endpoint_open(chip, fmt, hw_params, false); + subs->data_endpoint = snd_usb_endpoint_open(chip, fmt, hw_params, false, fixed_rate); if (!subs->data_endpoint) { ret = -EINVAL; goto unlock; @@ -539,7 +541,8 @@ if (fmt->sync_ep) { subs->sync_endpoint = snd_usb_endpoint_open(chip, sync_fmt, hw_params, - fmt == sync_fmt); + fmt == sync_fmt, + sync_fixed_rate); if (!subs->sync_endpoint) { ret = -EINVAL; goto unlock; @@ -553,7 +556,16 @@ subs->cur_audiofmt = fmt; mutex_unlock(&chip->mutex); - ret = configure_endpoints(chip, subs); + if (!subs->data_endpoint->need_setup) + goto unlock; + + if (subs->sync_endpoint) { + ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint); + if (ret < 0) + goto unlock; + } + + ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint); unlock: if (ret < 0) @@ -636,9 +648,18 @@ goto unlock; } - ret = configure_endpoints(chip, subs); + if (subs->sync_endpoint) { + ret = snd_usb_endpoint_prepare(chip, subs->sync_endpoint); + if (ret < 0) + goto unlock; + } + + ret = snd_usb_endpoint_prepare(chip, subs->data_endpoint); if (ret < 0) goto unlock; + else if (ret > 0) + snd_usb_set_format_quirk(subs, subs->cur_audiofmt); + ret = 0; /* reset the pointer */ subs->buffer_bytes = frames_to_bytes(runtime, runtime->buffer_size); diff -u linux-aws-5.15-5.15.0/sound/usb/quirks.c linux-aws-5.15-5.15.0/sound/usb/quirks.c --- linux-aws-5.15-5.15.0/sound/usb/quirks.c +++ linux-aws-5.15-5.15.0/sound/usb/quirks.c @@ -1765,6 +1765,8 @@ QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x04e8, 0xa051, /* Samsung USBC Headset (AKG) */ QUIRK_FLAG_SKIP_CLOCK_SELECTOR | QUIRK_FLAG_CTL_MSG_DELAY_5M), + DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */ + QUIRK_FLAG_IFACE_SKIP_CLOSE), DEVICE_FLG(0x054c, 0x0b8c, /* Sony WALKMAN NW-A45 DAC */ QUIRK_FLAG_SET_IFACE_FIRST), DEVICE_FLG(0x0556, 0x0014, /* Phoenix Audio TMX320VC */ @@ -1786,6 +1788,8 @@ DEVICE_FLG(0x0644, 0x804a, /* TEAC UD-301 */ QUIRK_FLAG_ITF_USB_DSD_DAC | 
QUIRK_FLAG_CTL_MSG_DELAY | QUIRK_FLAG_IFACE_DELAY), + DEVICE_FLG(0x0644, 0x805f, /* TEAC Model 12 */ + QUIRK_FLAG_FORCE_IFACE_RESET), DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */ QUIRK_FLAG_IGNORE_CTL_ERROR), DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */ @@ -1806,8 +1810,14 @@ QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x0b0e, 0x0349, /* Jabra 550a */ QUIRK_FLAG_CTL_MSG_DELAY_1M), + DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */ + QUIRK_FLAG_FIXED_RATE), + DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */ + QUIRK_FLAG_FIXED_RATE), DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER), + DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */ + QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */ QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x1397, 0x0507, /* Behringer UMC202HD */ @@ -1838,6 +1848,10 @@ QUIRK_FLAG_ITF_USB_DSD_DAC | QUIRK_FLAG_CTL_MSG_DELAY), DEVICE_FLG(0x1901, 0x0191, /* GE B850V3 CP2114 audio interface */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */ + QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */ + QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER), DEVICE_FLG(0x2040, 0x7201, /* Hauppauge HVR-950Q-MXL */ @@ -1876,6 +1890,12 @@ QUIRK_FLAG_IGNORE_CTL_ERROR), DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */ + QUIRK_FLAG_GENERIC_IMPLICIT_FB), + DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */ + QUIRK_FLAG_GENERIC_IMPLICIT_FB), + DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */ + QUIRK_FLAG_GENERIC_IMPLICIT_FB), DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */ QUIRK_FLAG_IGNORE_CTL_ERROR), DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */ @@ -1884,16 +1904,6 @@ QUIRK_FLAG_ALIGN_TRANSFER), DEVICE_FLG(0x534d, 0x2109, /* MacroSilicon MS2109 */ QUIRK_FLAG_ALIGN_TRANSFER), - DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */ - QUIRK_FLAG_GET_SAMPLE_RATE), - DEVICE_FLG(0x2b53, 0x0023, /* Fiero SC-01 (firmware v1.0.0 @ 48 kHz) */ - QUIRK_FLAG_GENERIC_IMPLICIT_FB), - DEVICE_FLG(0x2b53, 0x0024, /* Fiero SC-01 (firmware v1.0.0 @ 96 kHz) */ - QUIRK_FLAG_GENERIC_IMPLICIT_FB), - DEVICE_FLG(0x2b53, 0x0031, /* Fiero SC-01 (firmware v1.1.0) */ - QUIRK_FLAG_GENERIC_IMPLICIT_FB), - DEVICE_FLG(0x0525, 0xa4ad, /* Hamedal C20 usb camero */ - QUIRK_FLAG_IFACE_SKIP_CLOSE), /* Vendor matches */ VENDOR_FLG(0x045e, /* MS Lifecam */ diff -u linux-aws-5.15-5.15.0/sound/usb/stream.c linux-aws-5.15-5.15.0/sound/usb/stream.c --- linux-aws-5.15-5.15.0/sound/usb/stream.c +++ linux-aws-5.15-5.15.0/sound/usb/stream.c @@ -300,9 +300,12 @@ c = 0; if (bits) { - for (; bits && *maps; maps++, bits >>= 1) + for (; bits && *maps; maps++, bits >>= 1) { if (bits & 1) chmap->map[c++] = *maps; + if (c == chmap->channels) + break; + } } else { /* If we're missing wChannelConfig, then guess something to make sure the channel map is not skipped entirely */ diff -u linux-aws-5.15-5.15.0/sound/usb/usbaudio.h linux-aws-5.15-5.15.0/sound/usb/usbaudio.h --- linux-aws-5.15-5.15.0/sound/usb/usbaudio.h +++ linux-aws-5.15-5.15.0/sound/usb/usbaudio.h @@ -46,6 +46,7 @@ struct list_head pcm_list; /* list of pcm streams */ struct list_head ep_list; /* list of audio-related endpoints 
*/ struct list_head iface_ref_list; /* list of interface refcounts */ + struct list_head clock_ref_list; /* list of clock refcounts */ int pcm_devs; struct list_head midi_list; /* list of midi interfaces */ @@ -171,6 +172,12 @@ * Don't apply implicit feedback sync mode * QUIRK_FLAG_IFACE_SKIP_CLOSE * Don't closed interface during setting sample rate + * QUIRK_FLAG_FORCE_IFACE_RESET + * Force an interface reset whenever stopping & restarting a stream + * (e.g. after xrun) + * QUIRK_FLAG_FIXED_RATE + * Do not set PCM rate (frequency) when only one rate is available + * for the given endpoint. */ #define QUIRK_FLAG_GET_SAMPLE_RATE (1U << 0) @@ -194,4 +201,6 @@ #define QUIRK_FLAG_SKIP_IMPLICIT_FB (1U << 18) #define QUIRK_FLAG_IFACE_SKIP_CLOSE (1U << 19) +#define QUIRK_FLAG_FORCE_IFACE_RESET (1U << 20) +#define QUIRK_FLAG_FIXED_RATE (1U << 21) #endif /* __USBAUDIO_H */ diff -u linux-aws-5.15-5.15.0/tools/bpf/bpftool/prog.c linux-aws-5.15-5.15.0/tools/bpf/bpftool/prog.c --- linux-aws-5.15-5.15.0/tools/bpf/bpftool/prog.c +++ linux-aws-5.15-5.15.0/tools/bpf/bpftool/prog.c @@ -2098,7 +2098,7 @@ int map_fd; profile_perf_events = calloc( - sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric); + obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int)); if (!profile_perf_events) { p_err("failed to allocate memory for perf_event array: %s", strerror(errno)); diff -u linux-aws-5.15-5.15.0/tools/include/uapi/linux/bpf.h linux-aws-5.15-5.15.0/tools/include/uapi/linux/bpf.h --- linux-aws-5.15-5.15.0/tools/include/uapi/linux/bpf.h +++ linux-aws-5.15-5.15.0/tools/include/uapi/linux/bpf.h @@ -3011,9 +3011,23 @@ * **BPF_FIB_LOOKUP_DIRECT** * Do a direct table lookup vs full lookup using FIB * rules. + * **BPF_FIB_LOOKUP_TBID** + * Used with BPF_FIB_LOOKUP_DIRECT. + * Use the routing table ID present in *params*->tbid + * for the fib lookup. * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). + * **BPF_FIB_LOOKUP_SKIP_NEIGH** + * Skip the neighbour table lookup. *params*->dmac + * and *params*->smac will not be set as output. A common + * use case is to call **bpf_redirect_neigh**\ () after + * doing **bpf_fib_lookup**\ (). + * **BPF_FIB_LOOKUP_SRC** + * Derive and set source IP addr in *params*->ipv{4,6}_src + * for the nexthop. If the src addr cannot be derived, + * **BPF_FIB_LKUP_RET_NO_SRC_ADDR** is returned. In this + * case, *params*->dmac and *params*->smac are not set either. * * *ctx* is either **struct xdp_md** for XDP programs or * **struct sk_buff** tc cls_act programs. 
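
The helper documentation above is easier to follow next to a concrete caller. Below is a minimal tc-BPF sketch (illustrative only, not part of the patch) that combines bpf_fib_lookup() with bpf_redirect_neigh() as the BPF_FIB_LOOKUP_SKIP_NEIGH text suggests; the program and section names and the hard-coded destination address are assumptions, and a real program would parse and validate the packet headers before the lookup.

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_endian.h>

	SEC("tc")
	int fib_fwd_example(struct __sk_buff *skb)
	{
		struct bpf_fib_lookup fib = {};
		long rc;

		fib.family   = 2;                     /* AF_INET */
		fib.ifindex  = skb->ingress_ifindex;
		fib.ipv4_dst = bpf_htonl(0xc0a80001); /* placeholder: 192.168.0.1 */

		/*
		 * Skip the neighbour lookup: smac/dmac stay unset and the kernel
		 * resolves the neighbour when the packet is redirected below.  To
		 * pin the lookup to one routing table, set fib.tbid and pass
		 * BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID as well.
		 */
		rc = bpf_fib_lookup(skb, &fib, sizeof(fib),
				    BPF_FIB_LOOKUP_SKIP_NEIGH);
		if (rc != BPF_FIB_LKUP_RET_SUCCESS)
			return TC_ACT_OK;

		return bpf_redirect_neigh(fib.ifindex, NULL, 0, 0);
	}

	char _license[] SEC("license") = "GPL";
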
@@ -6040,6 +6054,9 @@ enum { BPF_FIB_LOOKUP_DIRECT = (1U << 0), BPF_FIB_LOOKUP_OUTPUT = (1U << 1), + BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), + BPF_FIB_LOOKUP_TBID = (1U << 3), + BPF_FIB_LOOKUP_SRC = (1U << 4), }; enum { @@ -6052,6 +6069,7 @@ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ + BPF_FIB_LKUP_RET_NO_SRC_ADDR, /* failed to derive IP src addr */ }; struct bpf_fib_lookup { @@ -6086,6 +6104,9 @@ __u32 rt_metric; }; + /* input: source address to consider for lookup + * output: source address result from lookup + */ union { __be32 ipv4_src; __u32 ipv6_src[4]; /* in6_addr; network order */ @@ -6100,9 +6121,19 @@ __u32 ipv6_dst[4]; /* in6_addr; network order */ }; - /* output */ - __be16 h_vlan_proto; - __be16 h_vlan_TCI; + union { + struct { + /* output */ + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + }; + /* input: when accompanied with the + * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a + * specific routing table to use for the fib lookup. + */ + __u32 tbid; + }; + __u8 smac[6]; /* ETH_ALEN */ __u8 dmac[6]; /* ETH_ALEN */ }; diff -u linux-aws-5.15-5.15.0/tools/perf/util/evsel.c linux-aws-5.15-5.15.0/tools/perf/util/evsel.c --- linux-aws-5.15-5.15.0/tools/perf/util/evsel.c +++ linux-aws-5.15-5.15.0/tools/perf/util/evsel.c @@ -2252,7 +2252,6 @@ data->period = evsel->core.attr.sample_period; data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; data->misc = event->header.misc; - data->id = -1ULL; data->data_src = PERF_MEM_DATA_SRC_NONE; if (event->header.type != PERF_RECORD_SAMPLE) { diff -u linux-aws-5.15-5.15.0/tools/perf/util/stat-display.c linux-aws-5.15-5.15.0/tools/perf/util/stat-display.c --- linux-aws-5.15-5.15.0/tools/perf/util/stat-display.c +++ linux-aws-5.15-5.15.0/tools/perf/util/stat-display.c @@ -275,7 +275,7 @@ if (color) mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1; - color_snprintf(str, sizeof(str), color ?: "", fmt, val); + color_snprintf(str, sizeof(str), color ?: "", fmt ?: "", val); fprintf(out, "%*s ", mlen, str); } diff -u linux-aws-5.15-5.15.0/tools/testing/selftests/net/mptcp/config linux-aws-5.15-5.15.0/tools/testing/selftests/net/mptcp/config --- linux-aws-5.15-5.15.0/tools/testing/selftests/net/mptcp/config +++ linux-aws-5.15-5.15.0/tools/testing/selftests/net/mptcp/config @@ -19,0 +20,2 @@ +CONFIG_IP_NF_FILTER=m +CONFIG_IP6_NF_FILTER=m diff -u linux-aws-5.15-5.15.0/tools/testing/selftests/net/mptcp/simult_flows.sh linux-aws-5.15-5.15.0/tools/testing/selftests/net/mptcp/simult_flows.sh --- linux-aws-5.15-5.15.0/tools/testing/selftests/net/mptcp/simult_flows.sh +++ linux-aws-5.15-5.15.0/tools/testing/selftests/net/mptcp/simult_flows.sh @@ -291,8 +291,8 @@ run_test 10 10 0 0 "balanced bwidth" -run_test 10 10 1 50 "balanced bwidth with unbalanced delay" +run_test 10 10 1 25 "balanced bwidth with unbalanced delay" # we still need some additional infrastructure to pass the following test-cases -run_test 30 10 0 0 "unbalanced bwidth" -run_test 30 10 1 50 "unbalanced bwidth with unbalanced delay" -run_test 30 10 50 1 "unbalanced bwidth with opposed, unbalanced delay" +run_test 10 3 0 0 "unbalanced bwidth" +run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay" +run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay" exit $ret diff -u linux-aws-5.15-5.15.0/tools/testing/selftests/net/tls.c linux-aws-5.15-5.15.0/tools/testing/selftests/net/tls.c --- 
linux-aws-5.15-5.15.0/tools/testing/selftests/net/tls.c +++ linux-aws-5.15-5.15.0/tools/testing/selftests/net/tls.c @@ -666,12 +666,12 @@ memset(recv_mem, 0, sizeof(recv_mem)); EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len); - EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first), - MSG_WAITALL), -1); + EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_first), + MSG_WAITALL), strlen(test_str_first)); EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0); memset(recv_mem, 0, sizeof(recv_mem)); - EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second), - MSG_WAITALL), -1); + EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_second), + MSG_WAITALL), strlen(test_str_second)); EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)), 0); } diff -u linux-aws-5.15-5.15.0/tools/testing/selftests/vm/charge_reserved_hugetlb.sh linux-aws-5.15-5.15.0/tools/testing/selftests/vm/charge_reserved_hugetlb.sh --- linux-aws-5.15-5.15.0/tools/testing/selftests/vm/charge_reserved_hugetlb.sh +++ linux-aws-5.15-5.15.0/tools/testing/selftests/vm/charge_reserved_hugetlb.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # SPDX-License-Identifier: GPL-2.0 # Kselftest framework requirement - SKIP code is 4. diff -u linux-aws-5.15-5.15.0/tools/testing/selftests/vm/write_hugetlb_memory.sh linux-aws-5.15-5.15.0/tools/testing/selftests/vm/write_hugetlb_memory.sh --- linux-aws-5.15-5.15.0/tools/testing/selftests/vm/write_hugetlb_memory.sh +++ linux-aws-5.15-5.15.0/tools/testing/selftests/vm/write_hugetlb_memory.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # SPDX-License-Identifier: GPL-2.0 set -e diff -u linux-aws-5.15-5.15.0/tools/virtio/linux/kernel.h linux-aws-5.15-5.15.0/tools/virtio/linux/kernel.h --- linux-aws-5.15-5.15.0/tools/virtio/linux/kernel.h +++ linux-aws-5.15-5.15.0/tools/virtio/linux/kernel.h @@ -30,7 +30,6 @@ #define READ 0 #define WRITE 1 -typedef unsigned long long phys_addr_t; typedef unsigned long long dma_addr_t; typedef size_t __kernel_size_t; typedef unsigned int __wsum; @@ -137,6 +136,7 @@ #endif #define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) #define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) +#define dev_warn_once(dev, format, ...) 
fprintf (stderr, format, ## __VA_ARGS__) #define min(x, y) ({ \ typeof(x) _min1 = (x); \ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/Documentation/ABI/testing/sysfs-platform-asus-wmi +++ linux-aws-5.15-5.15.0/Documentation/ABI/testing/sysfs-platform-asus-wmi @@ -57,3 +57,12 @@ * 0 - default, * 1 - overboost, * 2 - silent + +What: /sys/devices/platform//dgpu_disable +Date: Aug 2022 +KernelVersion: 5.17 +Contact: "Luke Jones" +Description: + Disable discrete GPU: + * 0 - Enable dGPU, + * 1 - Disable dGPU only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm/boot/dts/arm-realview-pb1176.dts +++ linux-aws-5.15-5.15.0/arch/arm/boot/dts/arm-realview-pb1176.dts @@ -435,7 +435,7 @@ /* Direct-mapped development chip ROM */ pb1176_rom@10200000 { - compatible = "direct-mapped"; + compatible = "mtd-rom"; reg = <0x10200000 0x4000>; bank-width = <1>; }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi +++ linux-aws-5.15-5.15.0/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi @@ -105,8 +105,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; phy-mode = "rgmii-id"; - phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>; - phy-reset-duration = <20>; phy-supply = <&sw2_reg>; status = "okay"; @@ -119,17 +117,10 @@ #address-cells = <1>; #size-cells = <0>; - phy_port2: phy@1 { - reg = <1>; - }; - - phy_port3: phy@2 { - reg = <2>; - }; - switch@10 { compatible = "qca,qca8334"; - reg = <10>; + reg = <0x10>; + reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>; switch_ports: ports { #address-cells = <1>; @@ -150,15 +141,30 @@ eth2: port@2 { reg = <2>; label = "eth2"; + phy-mode = "internal"; phy-handle = <&phy_port2>; }; eth1: port@3 { reg = <3>; label = "eth1"; + phy-mode = "internal"; phy-handle = <&phy_port3>; }; }; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + phy_port2: ethernet-phy@1 { + reg = <1>; + }; + + phy_port3: ethernet-phy@2 { + reg = <2>; + }; + }; }; }; }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm/crypto/sha256_glue.c +++ linux-aws-5.15-5.15.0/arch/arm/crypto/sha256_glue.c @@ -24,8 +24,8 @@ #include "sha256_glue.h" -asmlinkage void sha256_block_data_order(u32 *digest, const void *data, - unsigned int num_blks); +asmlinkage void sha256_block_data_order(struct sha256_state *state, + const u8 *data, int num_blks); int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -33,23 +33,20 @@ /* make sure casting to sha256_block_fn() is safe */ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); - return sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order); + return sha256_base_do_update(desc, data, len, sha256_block_data_order); } EXPORT_SYMBOL(crypto_sha256_arm_update); static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out) { - sha256_base_do_finalize(desc, - (sha256_block_fn *)sha256_block_data_order); + sha256_base_do_finalize(desc, sha256_block_data_order); return sha256_base_finish(desc, out); } int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order); + sha256_base_do_update(desc, data, len, sha256_block_data_order); return crypto_sha256_arm_final(desc, out); } EXPORT_SYMBOL(crypto_sha256_arm_finup); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm/crypto/sha512-glue.c +++ linux-aws-5.15-5.15.0/arch/arm/crypto/sha512-glue.c @@ -25,27 +25,25 @@ 
MODULE_ALIAS_CRYPTO("sha384-arm"); MODULE_ALIAS_CRYPTO("sha512-arm"); -asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks); +asmlinkage void sha512_block_data_order(struct sha512_state *state, + u8 const *src, int blocks); int sha512_arm_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order); + return sha512_base_do_update(desc, data, len, sha512_block_data_order); } static int sha512_arm_final(struct shash_desc *desc, u8 *out) { - sha512_base_do_finalize(desc, - (sha512_block_fn *)sha512_block_data_order); + sha512_base_do_finalize(desc, sha512_block_data_order); return sha512_base_finish(desc, out); } int sha512_arm_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order); + sha512_base_do_update(desc, data, len, sha512_block_data_order); return sha512_arm_final(desc, out); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm/mach-ep93xx/core.c +++ linux-aws-5.15-5.15.0/arch/arm/mach-ep93xx/core.c @@ -337,6 +337,7 @@ GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), GPIO_LOOKUP_IDX("G", 0, NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), + { } }, }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/freescale/imx8mm-kontron-n801x-som.dtsi @@ -220,8 +220,8 @@ pinctrl_i2c1: i2c1grp { fsl,pins = < - MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3 - MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3 + MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083 + MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083 >; }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi @@ -506,14 +506,14 @@ CP11X_LABEL(crypto): crypto@800000 { compatible = "inside-secure,safexcel-eip197b"; reg = <0x800000 0x200000>; - interrupts = <87 IRQ_TYPE_LEVEL_HIGH>, - <88 IRQ_TYPE_LEVEL_HIGH>, + interrupts = <88 IRQ_TYPE_LEVEL_HIGH>, <89 IRQ_TYPE_LEVEL_HIGH>, <90 IRQ_TYPE_LEVEL_HIGH>, <91 IRQ_TYPE_LEVEL_HIGH>, - <92 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "mem", "ring0", "ring1", - "ring2", "ring3", "eip"; + <92 IRQ_TYPE_LEVEL_HIGH>, + <87 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "ring0", "ring1", "ring2", "ring3", + "eip", "mem"; clock-names = "core", "reg"; clocks = <&CP11X_LABEL(clk) 1 26>, <&CP11X_LABEL(clk) 1 17>; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi @@ -372,6 +372,16 @@ }; }; +&cros_ec { + cbas { + compatible = "google,cros-cbas"; + }; + + keyboard-controller { + compatible = "google,cros-ec-keyb-switches"; + }; +}; + &qca_wifi { qcom,ath10k-calibration-variant = "GO_KAKADU"; }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi +++ linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi @@ -338,6 +338,16 @@ }; }; +&cros_ec { + cbas { + compatible = "google,cros-cbas"; + }; + + keyboard-controller { + compatible = "google,cros-ec-keyb-switches"; + }; +}; + &qca_wifi { qcom,ath10k-calibration-variant = "GO_KODAMA"; }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi +++ 
linux-aws-5.15-5.15.0/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi @@ -342,6 +342,16 @@ }; }; +&cros_ec { + cbas { + compatible = "google,cros-cbas"; + }; + + keyboard-controller { + compatible = "google,cros-ec-keyb-switches"; + }; +}; + &qca_wifi { qcom,ath10k-calibration-variant = "LE_Krane"; }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/arm64/kernel/acpi.c +++ linux-aws-5.15-5.15.0/arch/arm64/kernel/acpi.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -233,6 +234,15 @@ if (earlycon_acpi_spcr_enable) early_init_dt_scan_chosen_stdout(); } else { +#ifdef CONFIG_HIBERNATION + struct acpi_table_header *facs = NULL; + acpi_get_table(ACPI_SIG_FACS, 1, &facs); + if (facs) { + swsusp_hardware_signature = + ((struct acpi_table_facs *)facs)->hardware_signature; + acpi_put_table(facs); + } +#endif acpi_parse_spcr(earlycon_acpi_spcr_enable, true); if (IS_ENABLED(CONFIG_ACPI_BGRT)) acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/mips/include/asm/ptrace.h +++ linux-aws-5.15-5.15.0/arch/mips/include/asm/ptrace.h @@ -60,6 +60,7 @@ unsigned long val) { regs->cp0_epc = val; + regs->cp0_cause &= ~CAUSEF_BD; } /* Query offset/name of register from its name/offset */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/mips/include/asm/vpe.h +++ linux-aws-5.15-5.15.0/arch/mips/include/asm/vpe.h @@ -102,7 +102,6 @@ struct list_head tc_list; /* Thread contexts */ }; -extern unsigned long physical_memsize; extern struct vpe_control vpecontrol; extern const struct file_operations vpe_fops; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/mips/kernel/smp-cps.c +++ linux-aws-5.15-5.15.0/arch/mips/kernel/smp-cps.c @@ -424,9 +424,11 @@ wmb(); } } else { - pr_debug("Gating power to core %d\n", core); - /* Power down the core */ - cps_pm_enter_state(CPS_PM_POWER_GATED); + if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) { + pr_debug("Gating power to core %d\n", core); + /* Power down the core */ + cps_pm_enter_state(CPS_PM_POWER_GATED); + } } } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/mips/lantiq/prom.c +++ linux-aws-5.15-5.15.0/arch/mips/lantiq/prom.c @@ -23,12 +23,6 @@ EXPORT_SYMBOL_GPL(ebu_lock); /* - * This is needed by the VPE loader code, just set it to 0 and assume - * that the firmware hardcodes this value to something useful. 
- */ -unsigned long physical_memsize = 0L; - -/* * this struct is filled by the soc specific detection code and holds * information about the specific soc type, revision and name */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/parisc/kernel/ftrace.c +++ linux-aws-5.15-5.15.0/arch/parisc/kernel/ftrace.c @@ -81,7 +81,7 @@ #endif } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER) int ftrace_enable_ftrace_graph_caller(void) { return 0; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/powerpc/include/asm/vmalloc.h +++ linux-aws-5.15-5.15.0/arch/powerpc/include/asm/vmalloc.h @@ -7,14 +7,14 @@ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP #define arch_vmap_pud_supported arch_vmap_pud_supported -static inline bool arch_vmap_pud_supported(pgprot_t prot) +static __always_inline bool arch_vmap_pud_supported(pgprot_t prot) { /* HPT does not cope with large pages in the vmalloc area */ return radix_enabled(); } #define arch_vmap_pmd_supported arch_vmap_pmd_supported -static inline bool arch_vmap_pmd_supported(pgprot_t prot) +static __always_inline bool arch_vmap_pmd_supported(pgprot_t prot) { return radix_enabled(); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/powerpc/kernel/eeh_driver.c +++ linux-aws-5.15-5.15.0/arch/powerpc/kernel/eeh_driver.c @@ -1054,45 +1054,46 @@ } pr_info("EEH: Recovery successful.\n"); - } else { - /* - * About 90% of all real-life EEH failures in the field - * are due to poorly seated PCI cards. Only 10% or so are - * due to actual, failed cards. - */ - pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n" - "Please try reseating or replacing it\n", - pe->phb->global_number, pe->addr); - - eeh_slot_error_detail(pe, EEH_LOG_PERM); - - /* Notify all devices that they're about to go down. */ - eeh_set_channel_state(pe, pci_channel_io_perm_failure); - eeh_set_irq_state(pe, false); - eeh_pe_report("error_detected(permanent failure)", pe, - eeh_report_failure, NULL); + goto out; + } - /* Mark the PE to be removed permanently */ - eeh_pe_state_mark(pe, EEH_PE_REMOVED); + /* + * About 90% of all real-life EEH failures in the field + * are due to poorly seated PCI cards. Only 10% or so are + * due to actual, failed cards. + */ + pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n" + "Please try reseating or replacing it\n", + pe->phb->global_number, pe->addr); + + eeh_slot_error_detail(pe, EEH_LOG_PERM); + + /* Notify all devices that they're about to go down. */ + eeh_set_irq_state(pe, false); + eeh_pe_report("error_detected(permanent failure)", pe, + eeh_report_failure, NULL); + eeh_set_channel_state(pe, pci_channel_io_perm_failure); - /* - * Shut down the device drivers for good. We mark - * all removed devices correctly to avoid access - * the their PCI config any more. - */ - if (pe->type & EEH_PE_VF) { - eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); - eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); - } else { - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); - eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + /* Mark the PE to be removed permanently */ + eeh_pe_state_mark(pe, EEH_PE_REMOVED); - pci_lock_rescan_remove(); - pci_hp_remove_devices(bus); - pci_unlock_rescan_remove(); - /* The passed PE should no longer be used */ - return; - } + /* + * Shut down the device drivers for good. We mark + * all removed devices correctly to avoid access + * the their PCI config any more. 
+ */ + if (pe->type & EEH_PE_VF) { + eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); + eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + } else { + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); + eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); + + pci_lock_rescan_remove(); + pci_hp_remove_devices(bus); + pci_unlock_rescan_remove(); + /* The passed PE should no longer be used */ + return; } out: @@ -1188,10 +1189,10 @@ /* Notify all devices to be down */ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); - eeh_set_channel_state(pe, pci_channel_io_perm_failure); eeh_pe_report( "error_detected(permanent failure)", pe, eeh_report_failure, NULL); + eeh_set_channel_state(pe, pci_channel_io_perm_failure); pci_lock_rescan_remove(); list_for_each_entry(hose, &hose_list, list_node) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/powerpc/platforms/embedded6xx/linkstation.c +++ linux-aws-5.15-5.15.0/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -100,9 +100,6 @@ mpic_init(mpic); } -extern void avr_uart_configure(void); -extern void avr_uart_send(const char); - static void __noreturn linkstation_restart(char *cmd) { local_irq_disable(); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/powerpc/platforms/embedded6xx/mpc10x.h +++ linux-aws-5.15-5.15.0/arch/powerpc/platforms/embedded6xx/mpc10x.h @@ -156,4 +156,7 @@ /* For MPC107 boards that use the built-in openpic */ void mpc10x_set_openpic(void); +void avr_uart_configure(void); +void avr_uart_send(const char c); + #endif /* __PPC_KERNEL_MPC10X_H */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/s390/include/uapi/asm/dasd.h +++ linux-aws-5.15-5.15.0/arch/s390/include/uapi/asm/dasd.h @@ -78,6 +78,7 @@ * 0x040: give access to raw eckd data * 0x080: enable discard support * 0x100: enable autodisable for IFCC errors (default) + * 0x200: enable requeue of all requests on autoquiesce */ #define DASD_FEATURE_READONLY 0x001 #define DASD_FEATURE_USEDIAG 0x002 @@ -88,6 +89,7 @@ #define DASD_FEATURE_USERAW 0x040 #define DASD_FEATURE_DISCARD 0x080 #define DASD_FEATURE_PATH_AUTODISABLE 0x100 +#define DASD_FEATURE_REQUEUEQUIESCE 0x200 #define DASD_FEATURE_DEFAULT DASD_FEATURE_PATH_AUTODISABLE #define DASD_PARTN_BITS 2 only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/s390/kernel/vtime.c +++ linux-aws-5.15-5.15.0/arch/s390/kernel/vtime.c @@ -211,13 +211,13 @@ virt_timer_expire(); steal = S390_lowcore.steal_timer; - avg_steal = S390_lowcore.avg_steal_timer / 2; + avg_steal = S390_lowcore.avg_steal_timer; if ((s64) steal > 0) { S390_lowcore.steal_timer = 0; account_steal_time(cputime_to_nsecs(steal)); avg_steal += steal; } - S390_lowcore.avg_steal_timer = avg_steal; + S390_lowcore.avg_steal_timer = avg_steal / 2; } static u64 vtime_delta(void) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/sparc/kernel/leon_pci_grpci1.c +++ linux-aws-5.15-5.15.0/arch/sparc/kernel/leon_pci_grpci1.c @@ -696,7 +696,7 @@ return err; } -static const struct of_device_id grpci1_of_match[] __initconst = { +static const struct of_device_id grpci1_of_match[] = { { .name = "GAISLER_PCIFBRG", }, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/sparc/kernel/leon_pci_grpci2.c +++ linux-aws-5.15-5.15.0/arch/sparc/kernel/leon_pci_grpci2.c @@ -887,7 +887,7 @@ return err; } -static const struct of_device_id grpci2_of_match[] __initconst = { +static const struct of_device_id grpci2_of_match[] = { { .name = "GAISLER_GRPCI2", }, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/x86/include/asm/text-patching.h +++ 
linux-aws-5.15-5.15.0/arch/x86/include/asm/text-patching.h @@ -96,24 +96,40 @@ }; static __always_inline -void *text_gen_insn(u8 opcode, const void *addr, const void *dest) +void __text_gen_insn(void *buf, u8 opcode, const void *addr, const void *dest, int size) { - static union text_poke_insn insn; /* per instance */ - int size = text_opcode_size(opcode); + union text_poke_insn *insn = buf; + + BUG_ON(size < text_opcode_size(opcode)); - insn.opcode = opcode; + /* + * Hide the addresses to avoid the compiler folding in constants when + * referencing code, these can mess up annotations like + * ANNOTATE_NOENDBR. + */ + OPTIMIZER_HIDE_VAR(insn); + OPTIMIZER_HIDE_VAR(addr); + OPTIMIZER_HIDE_VAR(dest); + + insn->opcode = opcode; if (size > 1) { - insn.disp = (long)dest - (long)(addr + size); + insn->disp = (long)dest - (long)(addr + size); if (size == 2) { /* - * Ensure that for JMP9 the displacement + * Ensure that for JMP8 the displacement * actually fits the signed byte. */ - BUG_ON((insn.disp >> 31) != (insn.disp >> 7)); + BUG_ON((insn->disp >> 31) != (insn->disp >> 7)); } } +} +static __always_inline +void *text_gen_insn(u8 opcode, const void *addr, const void *dest) +{ + static union text_poke_insn insn; /* per instance */ + __text_gen_insn(&insn, opcode, addr, dest, text_opcode_size(opcode)); return &insn.text; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/x86/include/asm/vsyscall.h +++ linux-aws-5.15-5.15.0/arch/x86/include/asm/vsyscall.h @@ -4,6 +4,7 @@ #include #include +#include #ifdef CONFIG_X86_VSYSCALL_EMULATION extern void map_vsyscall(void); @@ -24,4 +25,13 @@ } #endif +/* + * The (legacy) vsyscall page is the long page in the kernel portion + * of the address space that has user-accessible permissions. + */ +static inline bool is_vsyscall_vaddr(unsigned long vaddr) +{ + return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR); +} + #endif /* _ASM_X86_VSYSCALL_H */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/arch/x86/tools/relocs.c +++ linux-aws-5.15-5.15.0/arch/x86/tools/relocs.c @@ -599,6 +599,14 @@ if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) { continue; } + /* + * Do not perform relocations in .notes section; any + * values there are meant for pre-boot consumption (e.g. + * startup_xen). 
+ */ + if (sec_applies->shdr.sh_type == SHT_NOTE) { + continue; + } sh_symtab = sec_symtab->symtab; sym_strtab = sec_symtab->link->strtab; for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/block/opal_proto.h +++ linux-aws-5.15-5.15.0/block/opal_proto.h @@ -66,6 +66,7 @@ #define SHORT_ATOM_BYTE 0xBF #define MEDIUM_ATOM_BYTE 0xDF #define LONG_ATOM_BYTE 0xE3 +#define EMPTY_ATOM_BYTE 0xFF #define OPAL_INVAL_PARAM 12 #define OPAL_MANUFACTURED_INACTIVE 0x08 only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/acpi/acpica/tbfadt.c +++ linux-aws-5.15-5.15.0/drivers/acpi/acpica/tbfadt.c @@ -315,23 +315,19 @@ ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, FALSE, TRUE, &acpi_gbl_dsdt_index); - /* If Hardware Reduced flag is set, there is no FACS */ - - if (!acpi_gbl_reduced_hardware) { - if (acpi_gbl_FADT.facs) { - acpi_tb_install_standard_table((acpi_physical_address) - acpi_gbl_FADT.facs, - ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, - FALSE, TRUE, - &acpi_gbl_facs_index); - } - if (acpi_gbl_FADT.Xfacs) { - acpi_tb_install_standard_table((acpi_physical_address) - acpi_gbl_FADT.Xfacs, - ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, - FALSE, TRUE, - &acpi_gbl_xfacs_index); - } + if (acpi_gbl_FADT.facs) { + acpi_tb_install_standard_table((acpi_physical_address) + acpi_gbl_FADT.facs, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + FALSE, TRUE, + &acpi_gbl_facs_index); + } + if (acpi_gbl_FADT.Xfacs) { + acpi_tb_install_standard_table((acpi_physical_address) + acpi_gbl_FADT.Xfacs, + ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL, + FALSE, TRUE, + &acpi_gbl_xfacs_index); } } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/acpi/acpica/tbutils.c +++ linux-aws-5.15-5.15.0/drivers/acpi/acpica/tbutils.c @@ -36,12 +36,7 @@ { struct acpi_table_facs *facs; - /* If Hardware Reduced flag is set, there is no FACS */ - - if (acpi_gbl_reduced_hardware) { - acpi_gbl_FACS = NULL; - return (AE_OK); - } else if (acpi_gbl_FADT.Xfacs && + if (acpi_gbl_FADT.Xfacs && (!acpi_gbl_FADT.facs || !acpi_gbl_use32_bit_facs_addresses)) { (void)acpi_get_table_by_index(acpi_gbl_xfacs_index, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/acpi/button.c +++ linux-aws-5.15-5.15.0/drivers/acpi/button.c @@ -78,6 +78,15 @@ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED, }, { + /* Nextbook Ares 8A tablet, _LID device always reports lid closed */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"), + DMI_MATCH(DMI_BIOS_VERSION, "M882"), + }, + .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED, + }, + { /* * Lenovo Yoga 9 14ITL5, initial notification of the LID device * never happens. 
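
For readers who have not met the lid-quirk machinery: entries like the Nextbook Ares 8A one above are plain DMI matches whose driver_data selects a lid_init_state override. A generic sketch of the shape follows (illustrative only, not part of the patch); the table name and match strings are invented, while DMI_MATCH(), dmi_first_match() and ACPI_BUTTON_LID_INIT_DISABLED are the symbols the patch itself uses.

	#include <linux/dmi.h>

	/* ACPI_BUTTON_LID_INIT_DISABLED is local to drivers/acpi/button.c */
	static const struct dmi_system_id example_lid_quirks[] = {
		{
			/* hypothetical machine whose _LID always reports "closed" */
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example Tablet"),
			},
			.driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_DISABLED,
		},
		{ }	/* empty entry terminates the table */
	};

	/*
	 * Consumed once at init time, roughly:
	 *
	 *	const struct dmi_system_id *id = dmi_first_match(example_lid_quirks);
	 *	if (id)
	 *		lid_init_state = (long)id->driver_data;
	 */
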
only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/ata/ahci_ceva.c +++ linux-aws-5.15-5.15.0/drivers/ata/ahci_ceva.c @@ -88,7 +88,6 @@ u32 axicc; bool is_cci_enabled; int flags; - struct reset_control *rst; }; static unsigned int ceva_ahci_read_id(struct ata_device *dev, @@ -189,6 +188,60 @@ AHCI_SHT(DRV_NAME), }; +static int ceva_ahci_platform_enable_resources(struct ahci_host_priv *hpriv) +{ + int rc, i; + + rc = ahci_platform_enable_regulators(hpriv); + if (rc) + return rc; + + rc = ahci_platform_enable_clks(hpriv); + if (rc) + goto disable_regulator; + + /* Assert the controller reset */ + rc = ahci_platform_assert_rsts(hpriv); + if (rc) + goto disable_clks; + + for (i = 0; i < hpriv->nports; i++) { + rc = phy_init(hpriv->phys[i]); + if (rc) + goto disable_rsts; + } + + /* De-assert the controller reset */ + ahci_platform_deassert_rsts(hpriv); + + for (i = 0; i < hpriv->nports; i++) { + rc = phy_power_on(hpriv->phys[i]); + if (rc) { + phy_exit(hpriv->phys[i]); + goto disable_phys; + } + } + + return 0; + +disable_rsts: + ahci_platform_deassert_rsts(hpriv); + +disable_phys: + while (--i >= 0) { + phy_power_off(hpriv->phys[i]); + phy_exit(hpriv->phys[i]); + } + +disable_clks: + ahci_platform_disable_clks(hpriv); + +disable_regulator: + ahci_platform_disable_regulators(hpriv); + + return rc; +} + static int ceva_ahci_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -203,47 +256,19 @@ return -ENOMEM; cevapriv->ahci_pdev = pdev; - - cevapriv->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, - NULL); - if (IS_ERR(cevapriv->rst)) - dev_err_probe(&pdev->dev, PTR_ERR(cevapriv->rst), - "failed to get reset\n"); - hpriv = ahci_platform_get_resources(pdev, 0); if (IS_ERR(hpriv)) return PTR_ERR(hpriv); - if (!cevapriv->rst) { - rc = ahci_platform_enable_resources(hpriv); - if (rc) - return rc; - } else { - int i; + hpriv->rsts = devm_reset_control_get_optional_exclusive(&pdev->dev, + NULL); + if (IS_ERR(hpriv->rsts)) + return dev_err_probe(&pdev->dev, PTR_ERR(hpriv->rsts), + "failed to get reset\n"); - rc = ahci_platform_enable_clks(hpriv); - if (rc) - return rc; - /* Assert the controller reset */ - reset_control_assert(cevapriv->rst); - - for (i = 0; i < hpriv->nports; i++) { - rc = phy_init(hpriv->phys[i]); - if (rc) - return rc; - } - - /* De-assert the controller reset */ - reset_control_deassert(cevapriv->rst); - - for (i = 0; i < hpriv->nports; i++) { - rc = phy_power_on(hpriv->phys[i]); - if (rc) { - phy_exit(hpriv->phys[i]); - return rc; - } - } - } + rc = ceva_ahci_platform_enable_resources(hpriv); + if (rc) + return rc; if (of_property_read_bool(np, "ceva,broken-gen2")) cevapriv->flags = CEVA_FLAG_BROKEN_GEN2; @@ -252,52 +277,60 @@ if (of_property_read_u8_array(np, "ceva,p0-cominit-params", (u8 *)&cevapriv->pp2c[0], 4) < 0) { dev_warn(dev, "ceva,p0-cominit-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } if (of_property_read_u8_array(np, "ceva,p1-cominit-params", (u8 *)&cevapriv->pp2c[1], 4) < 0) { dev_warn(dev, "ceva,p1-cominit-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } /* Read OOB timing value for COMWAKE from device-tree*/ if (of_property_read_u8_array(np, "ceva,p0-comwake-params", (u8 *)&cevapriv->pp3c[0], 4) < 0) { dev_warn(dev, "ceva,p0-comwake-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } if (of_property_read_u8_array(np, "ceva,p1-comwake-params", (u8 *)&cevapriv->pp3c[1], 4) 
< 0) { dev_warn(dev, "ceva,p1-comwake-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } /* Read phy BURST timing value from device-tree */ if (of_property_read_u8_array(np, "ceva,p0-burst-params", (u8 *)&cevapriv->pp4c[0], 4) < 0) { dev_warn(dev, "ceva,p0-burst-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } if (of_property_read_u8_array(np, "ceva,p1-burst-params", (u8 *)&cevapriv->pp4c[1], 4) < 0) { dev_warn(dev, "ceva,p1-burst-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } /* Read phy RETRY interval timing value from device-tree */ if (of_property_read_u16_array(np, "ceva,p0-retry-params", (u16 *)&cevapriv->pp5c[0], 2) < 0) { dev_warn(dev, "ceva,p0-retry-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } if (of_property_read_u16_array(np, "ceva,p1-retry-params", (u16 *)&cevapriv->pp5c[1], 2) < 0) { dev_warn(dev, "ceva,p1-retry-params property not defined\n"); - return -EINVAL; + rc = -EINVAL; + goto disable_resources; } /* @@ -335,7 +368,7 @@ struct ahci_host_priv *hpriv = host->private_data; int rc; - rc = ahci_platform_enable_resources(hpriv); + rc = ceva_ahci_platform_enable_resources(hpriv); if (rc) return rc; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/ata/ahci_da850.c +++ linux-aws-5.15-5.15.0/drivers/ata/ahci_da850.c @@ -163,7 +163,6 @@ struct ahci_host_priv *hpriv; void __iomem *pwrdn_reg; struct resource *res; - struct clk *clk; u32 mpy; int rc; @@ -172,36 +171,28 @@ return PTR_ERR(hpriv); /* - * Internally ahci_platform_get_resources() calls clk_get(dev, NULL) - * when trying to obtain the functional clock. This SATA controller - * uses two clocks for which we specify two connection ids. If we don't - * have the functional clock at this point - call clk_get() again with - * con_id = "fck". + * Internally ahci_platform_get_resources() calls the bulk clocks + * get method or falls back to using a single clk_get_optional(). + * This AHCI SATA controller uses two clocks: functional clock + * with "fck" connection id and external reference clock with + * "refclk" id. If we haven't got all of them re-try the clocks + * getting procedure with the explicitly specified ids. */ - if (!hpriv->clks[0]) { - clk = clk_get(dev, "fck"); - if (IS_ERR(clk)) - return PTR_ERR(clk); - - hpriv->clks[0] = clk; - } - - /* - * The second clock used by ahci-da850 is the external REFCLK. If we - * didn't get it from ahci_platform_get_resources(), let's try to - * specify the con_id in clk_get(). - */ - if (!hpriv->clks[1]) { - clk = clk_get(dev, "refclk"); - if (IS_ERR(clk)) { - dev_err(dev, "unable to obtain the reference clock"); - return -ENODEV; - } - - hpriv->clks[1] = clk; + if (hpriv->n_clks < 2) { + hpriv->clks = devm_kcalloc(dev, 2, sizeof(*hpriv->clks), GFP_KERNEL); + if (!hpriv->clks) + return -ENOMEM; + + hpriv->clks[0].id = "fck"; + hpriv->clks[1].id = "refclk"; + hpriv->n_clks = 2; + + rc = devm_clk_bulk_get(dev, hpriv->n_clks, hpriv->clks); + if (rc) + return rc; } - mpy = ahci_da850_calculate_mpy(clk_get_rate(hpriv->clks[1])); + mpy = ahci_da850_calculate_mpy(clk_get_rate(hpriv->clks[1].clk)); if (mpy == 0) { dev_err(dev, "invalid REFCLK multiplier value: 0x%x", mpy); return -EINVAL; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/ata/ahci_dm816.c +++ linux-aws-5.15-5.15.0/drivers/ata/ahci_dm816.c @@ -69,12 +69,12 @@ * keep-alive clock and the external reference clock. 
We need the * rate of the latter to calculate the correct value of MPY bits. */ - if (!hpriv->clks[1]) { + if (hpriv->n_clks < 2) { dev_err(dev, "reference clock not supplied\n"); return -EINVAL; } - refclk_rate = clk_get_rate(hpriv->clks[1]); + refclk_rate = clk_get_rate(hpriv->clks[1].clk); if ((refclk_rate % 100) != 0) { dev_err(dev, "reference clock rate must be divisible by 100\n"); return -EINVAL; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/base/regmap/internal.h +++ linux-aws-5.15-5.15.0/drivers/base/regmap/internal.h @@ -108,6 +108,10 @@ int (*reg_write)(void *context, unsigned int reg, unsigned int val); int (*reg_update_bits)(void *context, unsigned int reg, unsigned int mask, unsigned int val); + /* Bulk read/write */ + int (*read)(void *context, const void *reg_buf, size_t reg_size, + void *val_buf, size_t val_size); + int (*write)(void *context, const void *data, size_t count); bool defer_caching; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/block/aoe/aoecmd.c +++ linux-aws-5.15-5.15.0/drivers/block/aoe/aoecmd.c @@ -420,13 +420,16 @@ rcu_read_lock(); for_each_netdev_rcu(&init_net, ifp) { dev_hold(ifp); - if (!is_aoe_netif(ifp)) - goto cont; + if (!is_aoe_netif(ifp)) { + dev_put(ifp); + continue; + } skb = new_skb(sizeof *h + sizeof *ch); if (skb == NULL) { printk(KERN_INFO "aoe: skb alloc failure\n"); - goto cont; + dev_put(ifp); + continue; } skb_put(skb, sizeof *h + sizeof *ch); skb->dev = ifp; @@ -441,9 +444,6 @@ h->major = cpu_to_be16(aoemajor); h->minor = aoeminor; h->cmd = AOECMD_CFG; - -cont: - dev_put(ifp); } rcu_read_unlock(); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/block/aoe/aoenet.c +++ linux-aws-5.15-5.15.0/drivers/block/aoe/aoenet.c @@ -64,6 +64,7 @@ pr_warn("aoe: packet could not be sent on %s. %s\n", ifp ? 
ifp->name : "netif", "consider increasing tx_queue_len"); + dev_put(ifp); spin_lock_irq(&txlock); } return 0; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/bluetooth/btqca.c +++ linux-aws-5.15-5.15.0/drivers/bluetooth/btqca.c @@ -566,6 +566,9 @@ config.type = ELF_TYPE_PATCH; snprintf(config.fwname, sizeof(config.fwname), "qca/msbtfw%02x.mbn", rom_ver); + } else if (soc_type == QCA_WCN6855) { + snprintf(config.fwname, sizeof(config.fwname), + "qca/hpbtfw%02x.tlv", rom_ver); } else { snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", soc_ver); @@ -600,6 +603,9 @@ else if (soc_type == QCA_WCN6750) snprintf(config.fwname, sizeof(config.fwname), "qca/msnv%02x.bin", rom_ver); + else if (soc_type == QCA_WCN6855) + snprintf(config.fwname, sizeof(config.fwname), + "qca/hpnv%02x.bin", rom_ver); else snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", soc_ver); @@ -637,11 +643,17 @@ return err; } - if (soc_type == QCA_WCN3991 || soc_type == QCA_WCN6750) { + switch (soc_type) { + case QCA_WCN3991: + case QCA_WCN6750: + case QCA_WCN6855: /* get fw build info */ err = qca_read_fw_build_info(hdev); if (err < 0) return err; + break; + default: + break; } bt_dev_info(hdev, "QCA setup on UART is completed"); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/bluetooth/btqca.h +++ linux-aws-5.15-5.15.0/drivers/bluetooth/btqca.h @@ -145,6 +145,7 @@ QCA_WCN3991, QCA_QCA6390, QCA_WCN6750, + QCA_WCN6855, }; #if IS_ENABLED(CONFIG_BT_QCA) @@ -166,6 +167,10 @@ { return soc_type == QCA_WCN6750; } +static inline bool qca_is_wcn6855(enum qca_btsoc_type soc_type) +{ + return soc_type == QCA_WCN6855; +} #else @@ -203,6 +208,11 @@ { return false; } + +static inline bool qca_is_wcn6855(enum qca_btsoc_type soc_type) +{ + return false; +} static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/bus/Kconfig +++ linux-aws-5.15-5.15.0/drivers/bus/Kconfig @@ -175,11 +175,12 @@ config TEGRA_ACONNECT tristate "Tegra ACONNECT Bus Driver" - depends on ARCH_TEGRA_210_SOC + depends on ARCH_TEGRA depends on OF && PM help Driver for the Tegra ACONNECT bus which is used to interface with - the devices inside the Audio Processing Engine (APE) for Tegra210. + the devices inside the Audio Processing Engine (APE) for + Tegra210 and later. 
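
Stepping back from the individual hunks: the aoecmd.c and aoenet.c changes earlier in this block enforce a single rule, namely that a reference taken with dev_hold() inside the for_each_netdev_rcu() walk must either be dropped before the iteration moves on or stay attached to the queued skb, whose transmit path then drops it. A condensed sketch of that corrected shape (illustrative only, not part of the patch; the predicate and helper names are placeholders):

	struct net_device *ifp;
	struct sk_buff *skb;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!interface_is_wanted(ifp)) {	/* placeholder predicate */
			dev_put(ifp);		/* balance the hold when skipping */
			continue;
		}
		skb = build_query_frame(ifp);	/* placeholder; sets skb->dev = ifp */
		if (!skb) {
			dev_put(ifp);		/* balance the hold on failure */
			continue;
		}
		/* the queued skb now carries the reference; the transmit loop
		 * drops it with dev_put() after dev_queue_xmit(), as the
		 * aoenet.c hunk above does. */
		queue_for_tx(skb);		/* placeholder */
	}
	rcu_read_unlock();
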
config TEGRA_GMI tristate "Tegra Generic Memory Interface bus driver" only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/clk/hisilicon/clk-hi3519.c +++ linux-aws-5.15-5.15.0/drivers/clk/hisilicon/clk-hi3519.c @@ -130,7 +130,7 @@ of_clk_del_provider(pdev->dev.of_node); hisi_clk_unregister_gate(hi3519_gate_clks, - ARRAY_SIZE(hi3519_mux_clks), + ARRAY_SIZE(hi3519_gate_clks), crg->clk_data); hisi_clk_unregister_mux(hi3519_mux_clks, ARRAY_SIZE(hi3519_mux_clks), only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/clk/imx/clk.c +++ linux-aws-5.15-5.15.0/drivers/clk/imx/clk.c @@ -201,9 +201,10 @@ clk_disable_unprepare(imx_uart_clocks[i]); clk_put(imx_uart_clocks[i]); } - kfree(imx_uart_clocks); } + kfree(imx_uart_clocks); + return 0; } late_initcall_sync(imx_clk_disable_uart); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/clk/meson/axg.c +++ linux-aws-5.15-5.15.0/drivers/clk/meson/axg.c @@ -2144,7 +2144,9 @@ &axg_vclk_input, &axg_vclk2_input, &axg_vclk_div, + &axg_vclk_div1, &axg_vclk2_div, + &axg_vclk2_div1, &axg_vclk_div2_en, &axg_vclk_div4_en, &axg_vclk_div6_en, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/clk/qcom/dispcc-sdm845.c +++ linux-aws-5.15-5.15.0/drivers/clk/qcom/dispcc-sdm845.c @@ -759,6 +759,8 @@ static struct gdsc mdss_gdsc = { .gdscr = 0x3000, + .en_few_wait_val = 0x6, + .en_rest_wait_val = 0x5, .pd = { .name = "mdss_gdsc", }, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/clk/qcom/gcc-qcs404.c +++ linux-aws-5.15-5.15.0/drivers/clk/qcom/gcc-qcs404.c @@ -25,11 +25,9 @@ P_CORE_BI_PLL_TEST_SE, P_DSI0_PHY_PLL_OUT_BYTECLK, P_DSI0_PHY_PLL_OUT_DSICLK, - P_GPLL0_OUT_AUX, P_GPLL0_OUT_MAIN, P_GPLL1_OUT_MAIN, P_GPLL3_OUT_MAIN, - P_GPLL4_OUT_AUX, P_GPLL4_OUT_MAIN, P_GPLL6_OUT_AUX, P_HDMI_PHY_PLL_CLK, @@ -109,28 +107,24 @@ static const struct parent_map gcc_parent_map_5[] = { { P_XO, 0 }, { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 }, - { P_GPLL0_OUT_AUX, 2 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_5[] = { "cxo", - "dsi0pll_byteclk_src", - "gpll0_out_aux", + "dsi0pllbyte", "core_bi_pll_test_se", }; static const struct parent_map gcc_parent_map_6[] = { { P_XO, 0 }, { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 }, - { P_GPLL0_OUT_AUX, 3 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_6[] = { "cxo", - "dsi0_phy_pll_out_byteclk", - "gpll0_out_aux", + "dsi0pllbyte", "core_bi_pll_test_se", }; @@ -139,7 +133,6 @@ { P_GPLL0_OUT_MAIN, 1 }, { P_GPLL3_OUT_MAIN, 2 }, { P_GPLL6_OUT_AUX, 3 }, - { P_GPLL4_OUT_AUX, 4 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; @@ -148,7 +141,6 @@ "gpll0_out_main", "gpll3_out_main", "gpll6_out_aux", - "gpll4_out_aux", "core_bi_pll_test_se", }; @@ -175,7 +167,7 @@ static const char * const gcc_parent_names_9[] = { "cxo", "gpll0_out_main", - "dsi0_phy_pll_out_dsiclk", + "dsi0pll", "gpll6_out_aux", "core_bi_pll_test_se", }; @@ -207,14 +199,12 @@ static const struct parent_map gcc_parent_map_12[] = { { P_XO, 0 }, { P_DSI0_PHY_PLL_OUT_DSICLK, 1 }, - { P_GPLL0_OUT_AUX, 2 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_12[] = { "cxo", - "dsi0pll_pclk_src", - "gpll0_out_aux", + "dsi0pll", "core_bi_pll_test_se", }; @@ -237,40 +227,34 @@ static const struct parent_map gcc_parent_map_14[] = { { P_XO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, - { P_GPLL4_OUT_AUX, 2 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_14[] = { "cxo", "gpll0_out_main", - "gpll4_out_aux", "core_bi_pll_test_se", }; static const struct 
parent_map gcc_parent_map_15[] = { { P_XO, 0 }, - { P_GPLL0_OUT_AUX, 2 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_15[] = { "cxo", - "gpll0_out_aux", "core_bi_pll_test_se", }; static const struct parent_map gcc_parent_map_16[] = { { P_XO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, - { P_GPLL0_OUT_AUX, 2 }, { P_CORE_BI_PLL_TEST_SE, 7 }, }; static const char * const gcc_parent_names_16[] = { "cxo", "gpll0_out_main", - "gpll0_out_aux", "core_bi_pll_test_se", }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/clk/qcom/gpucc-sc7180.c +++ linux-aws-5.15-5.15.0/drivers/clk/qcom/gpucc-sc7180.c @@ -21,8 +21,6 @@ #define CX_GMU_CBCR_SLEEP_SHIFT 4 #define CX_GMU_CBCR_WAKE_MASK 0xF #define CX_GMU_CBCR_WAKE_SHIFT 8 -#define CLK_DIS_WAIT_SHIFT 12 -#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT) enum { P_BI_TCXO, @@ -160,6 +158,7 @@ static struct gdsc cx_gdsc = { .gdscr = 0x106c, .gds_hw_ctrl = 0x1540, + .clk_dis_wait_val = 8, .pd = { .name = "cx_gdsc", }, @@ -242,10 +241,6 @@ value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT; regmap_update_bits(regmap, 0x1098, mask, value); - /* Configure clk_dis_wait for gpu_cx_gdsc */ - regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK, - 8 << CLK_DIS_WAIT_SHIFT); - return qcom_cc_really_probe(pdev, &gpu_cc_sc7180_desc, regmap); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/clk/qcom/gpucc-sdm845.c +++ linux-aws-5.15-5.15.0/drivers/clk/qcom/gpucc-sdm845.c @@ -22,8 +22,6 @@ #define CX_GMU_CBCR_SLEEP_SHIFT 4 #define CX_GMU_CBCR_WAKE_MASK 0xf #define CX_GMU_CBCR_WAKE_SHIFT 8 -#define CLK_DIS_WAIT_SHIFT 12 -#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT) enum { P_BI_TCXO, @@ -121,6 +119,7 @@ static struct gdsc gpu_cx_gdsc = { .gdscr = 0x106c, .gds_hw_ctrl = 0x1540, + .clk_dis_wait_val = 0x8, .pd = { .name = "gpu_cx_gdsc", }, @@ -193,10 +192,6 @@ value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT; regmap_update_bits(regmap, 0x1098, mask, value); - /* Configure clk_dis_wait for gpu_cx_gdsc */ - regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK, - 8 << CLK_DIS_WAIT_SHIFT); - return qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/clk/renesas/renesas-cpg-mssr.c +++ linux-aws-5.15-5.15.0/drivers/clk/renesas/renesas-cpg-mssr.c @@ -911,9 +911,8 @@ } if (!i) - dev_warn(dev, "Failed to enable %s%u[0x%x]\n", - priv->reg_layout == CLK_REG_LAYOUT_RZ_A ? 
- "STB" : "SMSTP", reg, oldval & mask); + dev_warn(dev, "Failed to enable SMSTP%u[0x%x]\n", reg, + oldval & mask); } return 0; @@ -957,7 +956,6 @@ goto out_err; } - cpg_mssr_priv = priv; priv->num_core_clks = info->num_total_core_clks; priv->num_mod_clks = info->num_hw_mod_clks; priv->last_dt_core_clk = info->last_dt_core_clk; @@ -987,6 +985,8 @@ if (error) goto out_err; + cpg_mssr_priv = priv; + return 0; out_err: only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/clk/zynq/clkc.c +++ linux-aws-5.15-5.15.0/drivers/clk/zynq/clkc.c @@ -42,6 +42,7 @@ #define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204) #define NUM_MIO_PINS 54 +#define CLK_NAME_LEN 16 #define DBG_CLK_CTRL_CLKACT_TRC BIT(0) #define DBG_CLK_CTRL_CPU_1XCLKACT BIT(1) @@ -215,7 +216,7 @@ int i; u32 tmp; int ret; - char *clk_name; + char clk_name[CLK_NAME_LEN]; unsigned int fclk_enable = 0; const char *clk_output_name[clk_max]; const char *cpu_parents[4]; @@ -425,12 +426,10 @@ "gem1_emio_mux", CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); - tmp = strlen("mio_clk_00x"); - clk_name = kmalloc(tmp, GFP_KERNEL); for (i = 0; i < NUM_MIO_PINS; i++) { int idx; - snprintf(clk_name, tmp, "mio_clk_%2.2d", i); + snprintf(clk_name, CLK_NAME_LEN, "mio_clk_%2.2d", i); idx = of_property_match_string(np, "clock-names", clk_name); if (idx >= 0) can_mio_mux_parents[i] = of_clk_get_parent_name(np, @@ -438,7 +437,6 @@ else can_mio_mux_parents[i] = dummy_nm; } - kfree(clk_name); clk_register_mux(NULL, "can_mux", periph_parents, 4, CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0, &canclk_lock); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/comedi/drivers/comedi_test.c +++ linux-aws-5.15-5.15.0/drivers/comedi/drivers/comedi_test.c @@ -87,6 +87,8 @@ struct comedi_device *dev; /* parent comedi device */ u64 ao_last_scan_time; /* time of previous AO scan in usec */ unsigned int ao_scan_period; /* AO scan period in usec */ + bool ai_timer_enable:1; /* should AI timer be running? */ + bool ao_timer_enable:1; /* should AO timer be running? */ unsigned short ao_loopbacks[N_CHANS]; }; @@ -236,8 +238,12 @@ time_increment = devpriv->ai_convert_time - now; else time_increment = 1; - mod_timer(&devpriv->ai_timer, - jiffies + usecs_to_jiffies(time_increment)); + spin_lock(&dev->spinlock); + if (devpriv->ai_timer_enable) { + mod_timer(&devpriv->ai_timer, + jiffies + usecs_to_jiffies(time_increment)); + } + spin_unlock(&dev->spinlock); } overrun: @@ -393,9 +399,12 @@ * Seem to need an extra jiffy here, otherwise timer expires slightly * early! */ + spin_lock_bh(&dev->spinlock); + devpriv->ai_timer_enable = true; devpriv->ai_timer.expires = jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1; add_timer(&devpriv->ai_timer); + spin_unlock_bh(&dev->spinlock); return 0; } @@ -404,6 +413,9 @@ { struct waveform_private *devpriv = dev->private; + spin_lock_bh(&dev->spinlock); + devpriv->ai_timer_enable = false; + spin_unlock_bh(&dev->spinlock); if (in_softirq()) { /* Assume we were called from the timer routine itself. 
*/ del_timer(&devpriv->ai_timer); @@ -495,8 +507,12 @@ unsigned int time_inc = devpriv->ao_last_scan_time + devpriv->ao_scan_period - now; - mod_timer(&devpriv->ao_timer, - jiffies + usecs_to_jiffies(time_inc)); + spin_lock(&dev->spinlock); + if (devpriv->ao_timer_enable) { + mod_timer(&devpriv->ao_timer, + jiffies + usecs_to_jiffies(time_inc)); + } + spin_unlock(&dev->spinlock); } underrun: @@ -517,9 +533,12 @@ async->inttrig = NULL; devpriv->ao_last_scan_time = ktime_to_us(ktime_get()); + spin_lock_bh(&dev->spinlock); + devpriv->ao_timer_enable = true; devpriv->ao_timer.expires = jiffies + usecs_to_jiffies(devpriv->ao_scan_period); add_timer(&devpriv->ao_timer); + spin_unlock_bh(&dev->spinlock); return 1; } @@ -604,6 +623,9 @@ struct waveform_private *devpriv = dev->private; s->async->inttrig = NULL; + spin_lock_bh(&dev->spinlock); + devpriv->ao_timer_enable = false; + spin_unlock_bh(&dev->spinlock); if (in_softirq()) { /* Assume we were called from the timer routine itself. */ del_timer(&devpriv->ao_timer); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/cpufreq/mediatek-cpufreq-hw.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -10,8 +10,10 @@ #include #include #include -#include +#include #include +#include +#include #include #define LUT_MAX_ENTRIES 32U @@ -267,7 +269,23 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev) { const void *data; - int ret; + int ret, cpu; + struct device *cpu_dev; + struct regulator *cpu_reg; + + /* Make sure that all CPU supplies are available before proceeding. */ + for_each_possible_cpu(cpu) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + return dev_err_probe(&pdev->dev, -EPROBE_DEFER, + "Failed to get cpu%d device\n", cpu); + + cpu_reg = devm_regulator_get(cpu_dev, "cpu"); + if (IS_ERR(cpu_reg)) + return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg), + "CPU%d regulator get failed\n", cpu); + } + data = of_device_get_match_data(&pdev->dev); if (!data) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/cpufreq/ppc_cbe_cpufreq.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/ppc_cbe_cpufreq.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/cpufreq/scpi-cpufreq.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/scpi-cpufreq.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/cpufreq/sti-cpufreq.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/sti-cpufreq.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/cpufreq/ti-cpufreq.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/ti-cpufreq.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include #include only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/cpufreq/vexpress-spc-cpufreq.c +++ linux-aws-5.15-5.15.0/drivers/cpufreq/vexpress-spc-cpufreq.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/crypto/xilinx/zynqmp-aes-gcm.c +++ linux-aws-5.15-5.15.0/drivers/crypto/xilinx/zynqmp-aes-gcm.c @@ -231,7 
+231,10 @@ err = zynqmp_aes_aead_cipher(areq); } + local_bh_disable(); crypto_finalize_aead_request(engine, areq, err); + local_bh_enable(); + return 0; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/dma/ptdma/ptdma-dmaengine.c +++ linux-aws-5.15-5.15.0/drivers/dma/ptdma/ptdma-dmaengine.c @@ -361,8 +361,6 @@ chan->vc.desc_free = pt_do_cleanup; vchan_init(&chan->vc, dma_dev); - dma_set_mask_and_coherent(pt->dev, DMA_BIT_MASK(64)); - ret = dma_async_device_register(dma_dev); if (ret) goto err_reg; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/dma/sh/shdma.h +++ linux-aws-5.15-5.15.0/drivers/dma/sh/shdma.h @@ -25,7 +25,7 @@ const struct sh_dmae_slave_config *config; /* Slave DMA configuration */ int xmit_shift; /* log_2(bytes_per_xfer) */ void __iomem *base; - char dev_id[16]; /* unique name per DMAC of channel */ + char dev_id[32]; /* unique name per DMAC of channel */ int pm_error; dma_addr_t slave_addr; }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/firmware/efi/arm-runtime.c +++ linux-aws-5.15-5.15.0/drivers/firmware/efi/arm-runtime.c @@ -107,7 +107,7 @@ efi_memory_desc_t *md; for_each_efi_memory_desc(md) { - int md_size = md->num_pages << EFI_PAGE_SHIFT; + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; struct resource *res; if (!(md->attribute & EFI_MEMORY_SP)) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/firmware/efi/riscv-runtime.c +++ linux-aws-5.15-5.15.0/drivers/firmware/efi/riscv-runtime.c @@ -85,7 +85,7 @@ efi_memory_desc_t *md; for_each_efi_memory_desc(md) { - int md_size = md->num_pages << EFI_PAGE_SHIFT; + u64 md_size = md->num_pages << EFI_PAGE_SHIFT; struct resource *res; if (!(md->attribute & EFI_MEMORY_SP)) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/firmware/psci/psci.c +++ linux-aws-5.15-5.15.0/drivers/firmware/psci/psci.c @@ -77,6 +77,7 @@ static u32 psci_cpu_suspend_feature; static bool psci_system_reset2_supported; +static bool psci_system_off2_hibernate_supported; static inline bool psci_has_ext_power_state(void) { @@ -324,6 +325,31 @@ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); } +#ifdef CONFIG_HIBERNATION +static void (*psci_orig_pm_power_off)(void); + +static void psci_sys_hibernate(void) +{ + if (system_entering_hibernation()) + invoke_psci_fn(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2), + PSCI_1_3_HIBERNATE_TYPE_OFF, 0, 0); + if (psci_orig_pm_power_off) + psci_orig_pm_power_off(); + psci_sys_poweroff(); +} + +static int __init psci_hibernate_init(void) +{ + /* This runs *later* than efi_shutdown_init */ + if (psci_system_off2_hibernate_supported) { + psci_orig_pm_power_off = pm_power_off; + pm_power_off = psci_sys_hibernate; + } + return 0; +} +late_initcall(psci_hibernate_init); +#endif + static int __init psci_features(u32 psci_func_id) { return invoke_psci_fn(PSCI_1_0_FN_PSCI_FEATURES, @@ -385,6 +411,18 @@ psci_system_reset2_supported = true; } +static void __init psci_init_system_off2(void) +{ + int ret; + + ret = psci_features(PSCI_FN_NATIVE(1_3, SYSTEM_OFF2)); + if (ret < 0) + return; + + if (ret & BIT(PSCI_1_3_HIBERNATE_TYPE_OFF)) + psci_system_off2_hibernate_supported = true; +} + static void __init psci_init_system_suspend(void) { int ret; @@ -515,6 +553,7 @@ psci_init_cpu_suspend(); psci_init_system_suspend(); psci_init_system_reset2(); + psci_init_system_off2(); kvm_init_hyp_services(); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpio/gpio-74x164.c +++ linux-aws-5.15-5.15.0/drivers/gpio/gpio-74x164.c @@ -127,8 +127,6 @@ if 
(IS_ERR(chip->gpiod_oe)) return PTR_ERR(chip->gpiod_oe); - gpiod_set_value_cansleep(chip->gpiod_oe, 1); - spi_set_drvdata(spi, chip); chip->gpio_chip.label = spi->modalias; @@ -153,6 +151,8 @@ goto exit_destroy; } + gpiod_set_value_cansleep(chip->gpiod_oe, 1); + ret = gpiochip_add_data(&chip->gpio_chip, chip); if (!ret) return 0; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -169,6 +169,7 @@ return 0; err_free: + ttm_resource_fini(man, &node->base.base); kfree(node); err_out: @@ -200,6 +201,7 @@ if (!(res->placement & TTM_PL_FLAG_TEMPORARY)) atomic64_sub(res->num_pages, &mgr->used); + ttm_resource_fini(man, res); kfree(node); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c @@ -95,6 +95,7 @@ struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); atomic64_sub(res->num_pages, &mgr->used); + ttm_resource_fini(man, res); kfree(res); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -476,6 +476,7 @@ while (i--) drm_mm_remove_node(&node->mm_nodes[i]); spin_unlock(&mgr->lock); + ttm_resource_fini(man, &node->base); kvfree(node); error_sub: @@ -515,6 +516,7 @@ atomic64_sub(usage, &mgr->usage); atomic64_sub(vis_usage, &mgr->vis_usage); + ttm_resource_fini(man, res); kvfree(node); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/atom.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/atom.c @@ -311,7 +311,7 @@ DEBUG("IMM 0x%02X\n", val); return val; } - return 0; + break; case ATOM_ARG_PLL: idx = U8(*ptr); (*ptr)++; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -204,6 +204,12 @@ tmp = RREG32(mmIH_RB_CNTL); tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(mmIH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; + WREG32(mmIH_RB_CNTL, tmp); } return (wptr & ih->ptr_mask); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -216,6 +216,11 @@ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); out: return (wptr & ih->ptr_mask); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -215,6 +215,11 @@ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. 
+ */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); out: return (wptr & ih->ptr_mask); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -447,6 +447,12 @@ tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); out: return (wptr & ih->ptr_mask); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -119,6 +119,12 @@ tmp = RREG32(IH_RB_CNTL); tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(IH_RB_CNTL, tmp); + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; + WREG32(IH_RB_CNTL, tmp); } return (wptr & ih->ptr_mask); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -219,6 +219,12 @@ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + WREG32(mmIH_RB_CNTL, tmp); + out: return (wptr & ih->ptr_mask); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c @@ -52,7 +52,7 @@ * This function tells if the code is already under FPU protection or not. A * function that works as an API for a set of FPU operations can use this * function for checking if the caller invoked it after DC_FP_START(). For - * example, take a look at dcn2x.c file. + * example, take a look at dcn20_fpu.c file. */ inline void dc_assert_fp_enabled(void) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c @@ -199,7 +199,7 @@ uint32_t extended_size; /* size of the remaining partitioned address space */ uint32_t size_left_to_read; - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; /* size of the next partition to be read from */ uint32_t partition_size; uint32_t data_index = 0; @@ -228,7 +228,7 @@ { uint32_t partition_size; uint32_t data_index = 0; - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; while (size) { partition_size = dpcd_get_next_partition_size(address, size); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c @@ -0,0 +1,191 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "dm_services.h" +#include "bw_fixed.h" + + +#define MAX_I64 \ + (int64_t)((1ULL << 63) - 1) + +#define MIN_I64 \ + (-MAX_I64 - 1) + +#define FRACTIONAL_PART_MASK \ + ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1) + +#define GET_FRACTIONAL_PART(x) \ + (FRACTIONAL_PART_MASK & (x)) + +static uint64_t abs_i64(int64_t arg) +{ + if (arg >= 0) + return (uint64_t)(arg); + else + return (uint64_t)(-arg); +} + +struct bw_fixed bw_int_to_fixed_nonconst(int64_t value) +{ + struct bw_fixed res; + ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32); + res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART; + return res; +} + +struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator) +{ + struct bw_fixed res; + bool arg1_negative = numerator < 0; + bool arg2_negative = denominator < 0; + uint64_t arg1_value; + uint64_t arg2_value; + uint64_t remainder; + + /* determine integer part */ + uint64_t res_value; + + ASSERT(denominator != 0); + + arg1_value = abs_i64(numerator); + arg2_value = abs_i64(denominator); + res_value = div64_u64_rem(arg1_value, arg2_value, &remainder); + + ASSERT(res_value <= BW_FIXED_MAX_I32); + + /* determine fractional part */ + { + uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART; + + do + { + remainder <<= 1; + + res_value <<= 1; + + if (remainder >= arg2_value) + { + res_value |= 1; + remainder -= arg2_value; + } + } while (--i != 0); + } + + /* round up LSB */ + { + uint64_t summand = (remainder << 1) >= arg2_value; + + ASSERT(res_value <= MAX_I64 - summand); + + res_value += summand; + } + + res.value = (int64_t)(res_value); + + if (arg1_negative ^ arg2_negative) + res.value = -res.value; + return res; +} + +struct bw_fixed bw_floor2( + const struct bw_fixed arg, + const struct bw_fixed significance) +{ + struct bw_fixed result; + int64_t multiplicand; + + multiplicand = div64_s64(arg.value, abs_i64(significance.value)); + result.value = abs_i64(significance.value) * multiplicand; + ASSERT(abs_i64(result.value) <= abs_i64(arg.value)); + return result; +} + +struct bw_fixed bw_ceil2( + const struct bw_fixed arg, + const struct bw_fixed significance) +{ + struct bw_fixed result; + int64_t multiplicand; + + multiplicand = div64_s64(arg.value, abs_i64(significance.value)); + result.value = abs_i64(significance.value) * multiplicand; + if (abs_i64(result.value) < abs_i64(arg.value)) { + if (arg.value < 0) + result.value -= 
abs_i64(significance.value); + else + result.value += abs_i64(significance.value); + } + return result; +} + +struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2) +{ + struct bw_fixed res; + + bool arg1_negative = arg1.value < 0; + bool arg2_negative = arg2.value < 0; + + uint64_t arg1_value = abs_i64(arg1.value); + uint64_t arg2_value = abs_i64(arg2.value); + + uint64_t arg1_int = BW_FIXED_GET_INTEGER_PART(arg1_value); + uint64_t arg2_int = BW_FIXED_GET_INTEGER_PART(arg2_value); + + uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value); + uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value); + + uint64_t tmp; + + res.value = arg1_int * arg2_int; + + ASSERT(res.value <= BW_FIXED_MAX_I32); + + res.value <<= BW_FIXED_BITS_PER_FRACTIONAL_PART; + + tmp = arg1_int * arg2_fra; + + ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value)); + + res.value += tmp; + + tmp = arg2_int * arg1_fra; + + ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value)); + + res.value += tmp; + + tmp = arg1_fra * arg2_fra; + + tmp = (tmp >> BW_FIXED_BITS_PER_FRACTIONAL_PART) + + (tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value)); + + ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value)); + + res.value += tmp; + + if (arg1_negative ^ arg2_negative) + res.value = -res.value; + return res; +} + only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/calcs/calcs_logger.h +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/calcs/calcs_logger.h @@ -0,0 +1,578 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _CALCS_CALCS_LOGGER_H_ +#define _CALCS_CALCS_LOGGER_H_ +#define DC_LOGGER ctx->logger + +static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip) +{ + + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_dceip"); + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_calcs_version version %d", dceip->version); + DC_LOG_BANDWIDTH_CALCS(" [bool] large_cursor: %d", dceip->large_cursor); + DC_LOG_BANDWIDTH_CALCS(" [bool] dmif_pipe_en_fbc_chunk_tracker: %d", dceip->dmif_pipe_en_fbc_chunk_tracker); + DC_LOG_BANDWIDTH_CALCS(" [bool] display_write_back_supported: %d", dceip->display_write_back_supported); + DC_LOG_BANDWIDTH_CALCS(" [bool] argb_compression_support: %d", dceip->argb_compression_support); + DC_LOG_BANDWIDTH_CALCS(" [bool] pre_downscaler_enabled: %d", dceip->pre_downscaler_enabled); + DC_LOG_BANDWIDTH_CALCS(" [bool] underlay_downscale_prefetch_enabled: %d", + dceip->underlay_downscale_prefetch_enabled); + DC_LOG_BANDWIDTH_CALCS(" [bool] graphics_lb_nodownscaling_multi_line_prefetching: %d", + dceip->graphics_lb_nodownscaling_multi_line_prefetching); + DC_LOG_BANDWIDTH_CALCS(" [bool] limit_excessive_outstanding_dmif_requests: %d", + dceip->limit_excessive_outstanding_dmif_requests); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_max_outstanding_group_num: %d", + dceip->cursor_max_outstanding_group_num); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lines_interleaved_into_lb: %d", dceip->lines_interleaved_into_lb); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] low_power_tiling_mode: %d", dceip->low_power_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_width: %d", dceip->chunk_width); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_graphics_pipes: %d", dceip->number_of_graphics_pipes); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_pipes: %d", dceip->number_of_underlay_pipes); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_dmif_buffer_allocated: %d", dceip->max_dmif_buffer_allocated); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_dmif_size: %d", dceip->graphics_dmif_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_luma_dmif_size: %d", dceip->underlay_luma_dmif_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_chroma_dmif_size: %d", dceip->underlay_chroma_dmif_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_lines_of_pte_prefetching_in_linear_mode: %d", + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_luma_mcifwr_buffer_size: %d", + dceip->display_write_back420_luma_mcifwr_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_chroma_mcifwr_buffer_size: %d", + dceip->display_write_back420_chroma_mcifwr_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_pte_request_rows_in_tiling_mode: %d", + dceip->scatter_gather_pte_request_rows_in_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency10_bit_per_component: %d", + bw_fixed_to_int(dceip->underlay_vscaler_efficiency10_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency12_bit_per_component: %d", + bw_fixed_to_int(dceip->underlay_vscaler_efficiency12_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency6_bit_per_component: %d", + 
bw_fixed_to_int(dceip->graphics_vscaler_efficiency6_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency8_bit_per_component: %d", + bw_fixed_to_int(dceip->graphics_vscaler_efficiency8_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency10_bit_per_component: %d", + bw_fixed_to_int(dceip->graphics_vscaler_efficiency10_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency12_bit_per_component: %d", + bw_fixed_to_int(dceip->graphics_vscaler_efficiency12_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] alpha_vscaler_efficiency: %d", + bw_fixed_to_int(dceip->alpha_vscaler_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_write_pixels_per_dispclk: %d", + bw_fixed_to_int(dceip->lb_write_pixels_per_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component444: %d", + bw_fixed_to_int(dceip->lb_size_per_component444)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_and_dram_clock_state_change_gated_before_cursor: %d", + bw_fixed_to_int(dceip->stutter_and_dram_clock_state_change_gated_before_cursor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_luma_lb_size_per_component: %d", + bw_fixed_to_int(dceip->underlay420_luma_lb_size_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_chroma_lb_size_per_component: %d", + bw_fixed_to_int(dceip->underlay420_chroma_lb_size_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay422_lb_size_per_component: %d", + bw_fixed_to_int(dceip->underlay422_lb_size_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_chunk_width: %d", bw_fixed_to_int(dceip->cursor_chunk_width)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_dcp_buffer_lines: %d", + bw_fixed_to_int(dceip->cursor_dcp_buffer_lines)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_width_efficient_for_tiling: %d", + bw_fixed_to_int(dceip->underlay_maximum_width_efficient_for_tiling)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_height_efficient_for_tiling: %d", + bw_fixed_to_int(dceip->underlay_maximum_height_efficient_for_tiling)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display: %d", + bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation: %d", + bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_outstanding_pte_request_limit: %d", + bw_fixed_to_int(dceip->minimum_outstanding_pte_request_limit)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_total_outstanding_pte_requests_allowed_by_saw: %d", + bw_fixed_to_int(dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] linear_mode_line_request_alternation_slice: %d", + bw_fixed_to_int(dceip->linear_mode_line_request_alternation_slice)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_efficiency: %d", bw_fixed_to_int(dceip->request_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_per_request: %d", bw_fixed_to_int(dceip->dispclk_per_request)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_ramping_factor: %d", + bw_fixed_to_int(dceip->dispclk_ramping_factor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_throughput_factor: %d", + bw_fixed_to_int(dceip->display_pipe_throughput_factor)); + 
DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_all_surfaces_burst_time: %d", + bw_fixed_to_int(dceip->mcifwr_all_surfaces_burst_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_request_buffer_size: %d", + bw_fixed_to_int(dceip->dmif_request_buffer_size)); + + +} + +static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios) +{ + + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_vbios vbios"); + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] dram_channel_width_in_bits: %d", vbios->dram_channel_width_in_bits); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", vbios->number_of_dram_channels); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_banks: %d", vbios->number_of_dram_banks); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_yclk: %d", bw_fixed_to_int(vbios->low_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_yclk: %d", bw_fixed_to_int(vbios->mid_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_yclk: %d", bw_fixed_to_int(vbios->high_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_sclk: %d", bw_fixed_to_int(vbios->low_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid1_sclk: %d", bw_fixed_to_int(vbios->mid1_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid2_sclk: %d", bw_fixed_to_int(vbios->mid2_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid3_sclk: %d", bw_fixed_to_int(vbios->mid3_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid4_sclk: %d", bw_fixed_to_int(vbios->mid4_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid5_sclk: %d", bw_fixed_to_int(vbios->mid5_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid6_sclk: %d", bw_fixed_to_int(vbios->mid6_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_sclk: %d", bw_fixed_to_int(vbios->high_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_dispclk: %d", + bw_fixed_to_int(vbios->low_voltage_max_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_dispclk;: %d", + bw_fixed_to_int(vbios->mid_voltage_max_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_dispclk;: %d", + bw_fixed_to_int(vbios->high_voltage_max_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_phyclk: %d", + bw_fixed_to_int(vbios->low_voltage_max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_phyclk: %d", + bw_fixed_to_int(vbios->mid_voltage_max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_phyclk: %d", + bw_fixed_to_int(vbios->high_voltage_max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_return_bus_width: %d", bw_fixed_to_int(vbios->data_return_bus_width)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] trc: %d", bw_fixed_to_int(vbios->trc)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency: %d", bw_fixed_to_int(vbios->dmifmc_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_exit_latency: %d", + bw_fixed_to_int(vbios->stutter_self_refresh_exit_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_entry_latency: %d", + bw_fixed_to_int(vbios->stutter_self_refresh_entry_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_latency: %d", + bw_fixed_to_int(vbios->nbp_state_change_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
mcifwrmc_urgent_latency: %d", + bw_fixed_to_int(vbios->mcifwrmc_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable: %d", vbios->scatter_gather_enable); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] down_spread_percentage: %d", + bw_fixed_to_int(vbios->down_spread_percentage)); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_width: %d", vbios->cursor_width); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] average_compression_rate: %d", vbios->average_compression_rate); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_request_slots_gmc_reserves_for_dmif_per_channel: %d", + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration: %d", bw_fixed_to_int(vbios->blackout_duration)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_blackout_recovery_time: %d", + bw_fixed_to_int(vbios->maximum_blackout_recovery_time)); + + +} + +static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data) +{ + + int i, j, k; + + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_data data"); + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_displays: %d", data->number_of_displays); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_surface_type: %d", data->underlay_surface_type); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines panning_and_bezel_adjustment: %d", + data->panning_and_bezel_adjustment); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_tiling_mode: %d", data->graphics_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_lb_bpc: %d", data->graphics_lb_bpc); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_lb_bpc: %d", data->underlay_lb_bpc); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_tiling_mode: %d", data->underlay_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d0_underlay_mode: %d", data->d0_underlay_mode); + DC_LOG_BANDWIDTH_CALCS(" [bool] d1_display_write_back_dwb_enable: %d", data->d1_display_write_back_dwb_enable); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d1_underlay_mode: %d", data->d1_underlay_mode); + DC_LOG_BANDWIDTH_CALCS(" [bool] cpup_state_change_enable: %d", data->cpup_state_change_enable); + DC_LOG_BANDWIDTH_CALCS(" [bool] cpuc_state_change_enable: %d", data->cpuc_state_change_enable); + DC_LOG_BANDWIDTH_CALCS(" [bool] nbp_state_change_enable: %d", data->nbp_state_change_enable); + DC_LOG_BANDWIDTH_CALCS(" [bool] stutter_mode_enable: %d", data->stutter_mode_enable); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] y_clk_level: %d", data->y_clk_level); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] sclk_level: %d", data->sclk_level); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_surfaces: %d", data->number_of_underlay_surfaces); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_wrchannels: %d", data->number_of_dram_wrchannels); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_request_delay: %d", data->chunk_request_delay); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", data->number_of_dram_channels); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_micro_tile_mode: %d", data->underlay_micro_tile_mode); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_micro_tile_mode: %d", data->graphics_micro_tile_mode); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] max_phyclk: %d", bw_fixed_to_int(data->max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_efficiency: %d", 
bw_fixed_to_int(data->dram_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_surface_type: %d", + bw_fixed_to_int(data->src_width_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_surface_type: %d", + bw_fixed_to_int(data->src_height_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_surface_type: %d", + bw_fixed_to_int(data->hsr_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_surface_type: %d", bw_fixed_to_int(data->vsr_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_rotation: %d", + bw_fixed_to_int(data->src_width_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_rotation: %d", + bw_fixed_to_int(data->src_height_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_rotation: %d", bw_fixed_to_int(data->hsr_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_rotation: %d", bw_fixed_to_int(data->vsr_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_pixels: %d", bw_fixed_to_int(data->source_height_pixels)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_stereo: %d", bw_fixed_to_int(data->hsr_after_stereo)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_stereo: %d", bw_fixed_to_int(data->vsr_after_stereo)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_in_lb: %d", bw_fixed_to_int(data->source_width_in_lb)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_line_pitch: %d", bw_fixed_to_int(data->lb_line_pitch)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_source_efficient_for_tiling: %d", + bw_fixed_to_int(data->underlay_maximum_source_efficient_for_tiling)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] num_lines_at_frame_start: %d", + bw_fixed_to_int(data->num_lines_at_frame_start)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dmif_size_in_time: %d", bw_fixed_to_int(data->min_dmif_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_mcifwr_size_in_time: %d", + bw_fixed_to_int(data->min_mcifwr_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_dmif_size: %d", + bw_fixed_to_int(data->total_requests_for_dmif_size)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting: %d", + bw_fixed_to_int(data->peak_pte_request_to_eviction_ratio_limiting)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_pte_per_pte_request: %d", + bw_fixed_to_int(data->useful_pte_per_pte_request)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_rows: %d", + bw_fixed_to_int(data->scatter_gather_pte_request_rows)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_row_height: %d", + bw_fixed_to_int(data->scatter_gather_row_height)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_vblank: %d", + bw_fixed_to_int(data->scatter_gather_pte_requests_in_vblank)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] inefficient_linear_pitch_in_bytes: %d", + bw_fixed_to_int(data->inefficient_linear_pitch_in_bytes)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_data: %d", bw_fixed_to_int(data->cursor_total_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_request_groups: %d", + bw_fixed_to_int(data->cursor_total_request_groups)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_requests: %d", + bw_fixed_to_int(data->scatter_gather_total_pte_requests)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_request_groups: %d", + bw_fixed_to_int(data->scatter_gather_total_pte_request_groups)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
tile_width_in_pixels: %d", bw_fixed_to_int(data->tile_width_in_pixels)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_number_of_data_request_page_close_open: %d", + bw_fixed_to_int(data->dmif_total_number_of_data_request_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_number_of_data_request_page_close_open: %d", + bw_fixed_to_int(data->mcifwr_total_number_of_data_request_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_page_close_open: %d", + bw_fixed_to_int(data->bytes_per_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_page_close_open_time: %d", + bw_fixed_to_int(data->mcifwr_total_page_close_open_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_adjusted_dmif_size: %d", + bw_fixed_to_int(data->total_requests_for_adjusted_dmif_size)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_trips: %d", + bw_fixed_to_int(data->total_dmifmc_urgent_trips)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_latency: %d", + bw_fixed_to_int(data->total_dmifmc_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_data: %d", + bw_fixed_to_int(data->total_display_reads_required_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_dram_access_data: %d", + bw_fixed_to_int(data->total_display_reads_required_dram_access_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_data: %d", + bw_fixed_to_int(data->total_display_writes_required_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_dram_access_data: %d", + bw_fixed_to_int(data->total_display_writes_required_dram_access_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_data: %d", + bw_fixed_to_int(data->display_reads_required_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_dram_access_data: %d", + bw_fixed_to_int(data->display_reads_required_dram_access_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_page_close_open_time: %d", + bw_fixed_to_int(data->dmif_total_page_close_open_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_cursor_memory_interface_buffer_size_in_time: %d", + bw_fixed_to_int(data->min_cursor_memory_interface_buffer_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_read_buffer_size_in_time: %d", + bw_fixed_to_int(data->min_read_buffer_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer: %d", + bw_fixed_to_int(data->display_reads_time_for_data_transfer)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_writes_time_for_data_transfer: %d", + bw_fixed_to_int(data->display_writes_time_for_data_transfer)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_dram_bandwidth: %d", + bw_fixed_to_int(data->dmif_required_dram_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_dram_bandwidth: %d", + bw_fixed_to_int(data->mcifwr_required_dram_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dmifmc_urgent_latency_for_page_close_open: %d", + bw_fixed_to_int(data->required_dmifmc_urgent_latency_for_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_mcifmcwr_urgent_latency: %d", + bw_fixed_to_int(data->required_mcifmcwr_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dram_bandwidth_gbyte_per_second: %d", + bw_fixed_to_int(data->required_dram_bandwidth_gbyte_per_second)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_bandwidth: %d", bw_fixed_to_int(data->dram_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
dmif_required_sclk: %d", bw_fixed_to_int(data->dmif_required_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_sclk: %d", bw_fixed_to_int(data->mcifwr_required_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_sclk: %d", bw_fixed_to_int(data->required_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] downspread_factor: %d", bw_fixed_to_int(data->downspread_factor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scaler_efficiency: %d", bw_fixed_to_int(data->v_scaler_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scaler_limits_factor: %d", bw_fixed_to_int(data->scaler_limits_factor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_pixel_throughput: %d", + bw_fixed_to_int(data->display_pipe_pixel_throughput)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping: %d", + bw_fixed_to_int(data->total_dispclk_required_with_ramping)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping: %d", + bw_fixed_to_int(data->total_dispclk_required_without_ramping)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_read_request_bandwidth: %d", + bw_fixed_to_int(data->total_read_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_write_request_bandwidth: %d", + bw_fixed_to_int(data->total_write_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_total_read_request_bandwidth: %d", + bw_fixed_to_int(data->dispclk_required_for_total_read_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping_with_request_bandwidth: %d", + bw_fixed_to_int(data->total_dispclk_required_with_ramping_with_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping_with_request_bandwidth: %d", + bw_fixed_to_int(data->total_dispclk_required_without_ramping_with_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk: %d", bw_fixed_to_int(data->dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_recovery_time: %d", bw_fixed_to_int(data->blackout_recovery_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_pixels_per_data_fifo_entry: %d", + bw_fixed_to_int(data->min_pixels_per_data_fifo_entry)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] sclk_deep_sleep: %d", bw_fixed_to_int(data->sclk_deep_sleep)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] chunk_request_time: %d", bw_fixed_to_int(data->chunk_request_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_request_time: %d", bw_fixed_to_int(data->cursor_request_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] line_source_pixels_transfer_time: %d", + bw_fixed_to_int(data->line_source_pixels_transfer_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifdram_access_efficiency: %d", + bw_fixed_to_int(data->dmifdram_access_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrdram_access_efficiency: %d", + bw_fixed_to_int(data->mcifwrdram_access_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth_no_compression: %d", + bw_fixed_to_int(data->total_average_bandwidth_no_compression)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth: %d", + bw_fixed_to_int(data->total_average_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_stutter_cycle_duration: %d", + bw_fixed_to_int(data->total_stutter_cycle_duration)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_burst_time: %d", bw_fixed_to_int(data->stutter_burst_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] time_in_self_refresh: %d", bw_fixed_to_int(data->time_in_self_refresh)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_efficiency: 
%d", bw_fixed_to_int(data->stutter_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] worst_number_of_trips_to_memory: %d", + bw_fixed_to_int(data->worst_number_of_trips_to_memory)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] immediate_flip_time: %d", bw_fixed_to_int(data->immediate_flip_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_dmif_clients: %d", + bw_fixed_to_int(data->latency_for_non_dmif_clients)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_mcifwr_clients: %d", + bw_fixed_to_int(data->latency_for_non_mcifwr_clients)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency_supported_in_high_sclk_and_yclk: %d", + bw_fixed_to_int(data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_margin: %d", + bw_fixed_to_int(data->nbp_state_dram_speed_change_margin)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer_and_urgent_latency: %d", + bw_fixed_to_int(data->display_reads_time_for_data_transfer_and_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_margin: %d", + bw_fixed_to_int(data->dram_speed_change_margin)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_vblank_dram_speed_change_margin: %d", + bw_fixed_to_int(data->min_vblank_dram_speed_change_margin)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_stutter_refresh_duration: %d", + bw_fixed_to_int(data->min_stutter_refresh_duration)); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_stutter_dmif_buffer_size: %d", data->total_stutter_dmif_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_bytes_requested: %d", data->total_bytes_requested); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] min_stutter_dmif_buffer_size: %d", data->min_stutter_dmif_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] num_stutter_bursts: %d", data->num_stutter_bursts); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_nbp_state_dram_speed_change_latency_supported: %d", + bw_fixed_to_int(data->v_blank_nbp_state_dram_speed_change_latency_supported)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_latency_supported: %d", + bw_fixed_to_int(data->nbp_state_dram_speed_change_latency_supported)); + + for (i = 0; i < maximum_number_of_surfaces; i++) { + DC_LOG_BANDWIDTH_CALCS(" [bool] fbc_en[%d]:%d\n", i, data->fbc_en[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] lpt_en[%d]:%d", i, data->lpt_en[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] displays_match_flag[%d]:%d", i, data->displays_match_flag[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] use_alpha[%d]:%d", i, data->use_alpha[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] orthogonal_rotation[%d]:%d", i, data->orthogonal_rotation[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] enable[%d]:%d", i, data->enable[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] access_one_channel_only[%d]:%d", i, data->access_one_channel_only[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable_for_pipe[%d]:%d", + i, data->scatter_gather_enable_for_pipe[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] interlace_mode[%d]:%d", + i, data->interlace_mode[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] display_pstate_change_enable[%d]:%d", + i, data->display_pstate_change_enable[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] line_buffer_prefetch[%d]:%d", i, data->line_buffer_prefetch[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] bytes_per_pixel[%d]:%d", i, data->bytes_per_pixel[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_chunks_non_fbc_mode[%d]:%d", + i, data->max_chunks_non_fbc_mode[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lb_bpc[%d]:%d", i, data->lb_bpc[i]); 
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bpphdmi[%d]:%d", i, data->output_bpphdmi[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr[%d]:%d", i, data->output_bppdp4_lane_hbr[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr2[%d]:%d", + i, data->output_bppdp4_lane_hbr2[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr3[%d]:%d", + i, data->output_bppdp4_lane_hbr3[i]); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines stereo_mode[%d]:%d", i, data->stereo_mode[i]); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_buffer_transfer_time[%d]:%d", + i, bw_fixed_to_int(data->dmif_buffer_transfer_time[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] displays_with_same_mode[%d]:%d", + i, bw_fixed_to_int(data->displays_with_same_mode[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_dmif_buffer_size[%d]:%d", + i, bw_fixed_to_int(data->stutter_dmif_buffer_size[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_refresh_duration[%d]:%d", + i, bw_fixed_to_int(data->stutter_refresh_duration[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_exit_watermark[%d]:%d", + i, bw_fixed_to_int(data->stutter_exit_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_entry_watermark[%d]:%d", + i, bw_fixed_to_int(data->stutter_entry_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_total[%d]:%d", i, bw_fixed_to_int(data->h_total[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_total[%d]:%d", i, bw_fixed_to_int(data->v_total[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixel_rate[%d]:%d", i, bw_fixed_to_int(data->pixel_rate[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width[%d]:%d", i, bw_fixed_to_int(data->src_width[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels[%d]:%d", + i, bw_fixed_to_int(data->pitch_in_pixels[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels_after_surface_type[%d]:%d", + i, bw_fixed_to_int(data->pitch_in_pixels_after_surface_type[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height[%d]:%d", i, bw_fixed_to_int(data->src_height[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scale_ratio[%d]:%d", i, bw_fixed_to_int(data->scale_ratio[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_taps[%d]:%d", i, bw_fixed_to_int(data->h_taps[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_taps[%d]:%d", i, bw_fixed_to_int(data->v_taps[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->h_scale_ratio[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->v_scale_ratio[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] rotation_angle[%d]:%d", + i, bw_fixed_to_int(data->rotation_angle[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] compression_rate[%d]:%d", + i, bw_fixed_to_int(data->compression_rate[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr[%d]:%d", i, bw_fixed_to_int(data->hsr[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr[%d]:%d", i, bw_fixed_to_int(data->vsr[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_rounded_up_to_chunks[%d]:%d", + i, bw_fixed_to_int(data->source_width_rounded_up_to_chunks[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_pixels[%d]:%d", + i, bw_fixed_to_int(data->source_width_pixels[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_rounded_up_to_chunks[%d]:%d", + i, bw_fixed_to_int(data->source_height_rounded_up_to_chunks[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_bandwidth[%d]:%d", + i, bw_fixed_to_int(data->display_bandwidth[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_bandwidth[%d]:%d", + i, 
bw_fixed_to_int(data->request_bandwidth[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_request[%d]:%d", + i, bw_fixed_to_int(data->bytes_per_request[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_bytes_per_request[%d]:%d", + i, bw_fixed_to_int(data->useful_bytes_per_request[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lines_interleaved_in_mem_access[%d]:%d", + i, bw_fixed_to_int(data->lines_interleaved_in_mem_access[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_hiding_lines[%d]:%d", + i, bw_fixed_to_int(data->latency_hiding_lines[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions[%d]:%d", + i, bw_fixed_to_int(data->lb_partitions[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions_max[%d]:%d", + i, bw_fixed_to_int(data->lb_partitions_max[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_with_ramping[%d]:%d", + i, bw_fixed_to_int(data->dispclk_required_with_ramping[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_without_ramping[%d]:%d", + i, bw_fixed_to_int(data->dispclk_required_without_ramping[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_buffer_size[%d]:%d", + i, bw_fixed_to_int(data->data_buffer_size[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] outstanding_chunk_request_limit[%d]:%d", + i, bw_fixed_to_int(data->outstanding_chunk_request_limit[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] urgent_watermark[%d]:%d", + i, bw_fixed_to_int(data->urgent_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_watermark[%d]:%d", + i, bw_fixed_to_int(data->nbp_state_change_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_filter_init[%d]:%d", i, bw_fixed_to_int(data->v_filter_init[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_cycle_duration[%d]:%d", + i, bw_fixed_to_int(data->stutter_cycle_duration[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth[%d]:%d", + i, bw_fixed_to_int(data->average_bandwidth[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth_no_compression[%d]:%d", + i, bw_fixed_to_int(data->average_bandwidth_no_compression[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_limit[%d]:%d", + i, bw_fixed_to_int(data->scatter_gather_pte_request_limit[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component[%d]:%d", + i, bw_fixed_to_int(data->lb_size_per_component[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] memory_chunk_size_in_bytes[%d]:%d", + i, bw_fixed_to_int(data->memory_chunk_size_in_bytes[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pipe_chunk_size_in_bytes[%d]:%d", + i, bw_fixed_to_int(data->pipe_chunk_size_in_bytes[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] number_of_trips_to_memory_for_getting_apte_row[%d]:%d", + i, bw_fixed_to_int(data->number_of_trips_to_memory_for_getting_apte_row[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size[%d]:%d", + i, bw_fixed_to_int(data->adjusted_data_buffer_size[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size_in_memory[%d]:%d", + i, bw_fixed_to_int(data->adjusted_data_buffer_size_in_memory[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixels_per_data_fifo_entry[%d]:%d", + i, bw_fixed_to_int(data->pixels_per_data_fifo_entry[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_row[%d]:%d", + i, bw_fixed_to_int(data->scatter_gather_pte_requests_in_row[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pte_request_per_chunk[%d]:%d", + i, bw_fixed_to_int(data->pte_request_per_chunk[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_width[%d]:%d", + i, 
bw_fixed_to_int(data->scatter_gather_page_width[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_height[%d]:%d", + i, bw_fixed_to_int(data->scatter_gather_page_height[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_beginning_of_frame[%d]:%d", + i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_beginning_of_frame[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_middle_of_frame[%d]:%d", + i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_middle_of_frame[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_width_pixels[%d]:%d", + i, bw_fixed_to_int(data->cursor_width_pixels[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding[%d]:%d", + i, bw_fixed_to_int(data->minimum_latency_hiding[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding[%d]:%d", + i, bw_fixed_to_int(data->maximum_latency_hiding[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding_with_cursor[%d]:%d", + i, bw_fixed_to_int(data->minimum_latency_hiding_with_cursor[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding_with_cursor[%d]:%d", + i, bw_fixed_to_int(data->maximum_latency_hiding_with_cursor[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_first_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_pixels_for_first_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_last_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_pixels_for_last_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_first_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_data_for_first_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_last_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_data_for_last_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] active_time[%d]:%d", i, bw_fixed_to_int(data->active_time[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] horizontal_blank_and_chunk_granularity_factor[%d]:%d", + i, bw_fixed_to_int(data->horizontal_blank_and_chunk_granularity_factor[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_latency_hiding[%d]:%d", + i, bw_fixed_to_int(data->cursor_latency_hiding[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_dram_speed_change_margin[%d]:%d", + i, bw_fixed_to_int(data->v_blank_dram_speed_change_margin[i])); + } + + for (i = 0; i < maximum_number_of_surfaces; i++) { + for (j = 0; j < 3; j++) { + for (k = 0; k < 8; k++) { + + DC_LOG_BANDWIDTH_CALCS("\n [bw_fixed] line_source_transfer_time[%d][%d][%d]:%d", + i, j, k, bw_fixed_to_int(data->line_source_transfer_time[i][j][k])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_line_source_transfer_time[%d][%d][%d]:%d", + i, j, k, + bw_fixed_to_int(data->dram_speed_change_line_source_transfer_time[i][j][k])); + } + } + } + + for (i = 0; i < 3; i++) { + for (j = 0; j < 8; j++) { + + DC_LOG_BANDWIDTH_CALCS("\n [uint32_t] num_displays_with_margin[%d][%d]:%d", + i, j, data->num_displays_with_margin[i][j]); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_burst_time[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dmif_burst_time[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_burst_time[%d][%d]:%d", + i, j, bw_fixed_to_int(data->mcifwr_burst_time[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dram_speed_change_margin[%d][%d]:%d", + i, j, bw_fixed_to_int(data->min_dram_speed_change_margin[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_dram_speed_change[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dispclk_required_for_dram_speed_change[i][j])); + 
DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration_margin[%d][%d]:%d", + i, j, bw_fixed_to_int(data->blackout_duration_margin[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_duration[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_duration[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_recovery[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_recovery[i][j])); + } + } + + for (i = 0; i < 6; i++) { + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk_for_urgent_latency[%d]:%d", + i, bw_fixed_to_int(data->dmif_required_sclk_for_urgent_latency[i])); + } +} +; + +#endif /* _CALCS_CALCS_LOGGER_H_ */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c @@ -0,0 +1,197 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "dm_services.h" +#include "custom_float.h" + + +static bool build_custom_float( + struct fixed31_32 value, + const struct custom_float_format *format, + bool *negative, + uint32_t *mantissa, + uint32_t *exponenta) +{ + uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1; + + const struct fixed31_32 mantissa_constant_plus_max_fraction = + dc_fixpt_from_fraction( + (1LL << (format->mantissa_bits + 1)) - 1, + 1LL << format->mantissa_bits); + + struct fixed31_32 mantiss; + + if (dc_fixpt_eq( + value, + dc_fixpt_zero)) { + *negative = false; + *mantissa = 0; + *exponenta = 0; + return true; + } + + if (dc_fixpt_lt( + value, + dc_fixpt_zero)) { + *negative = format->sign; + value = dc_fixpt_neg(value); + } else { + *negative = false; + } + + if (dc_fixpt_lt( + value, + dc_fixpt_one)) { + uint32_t i = 1; + + do { + value = dc_fixpt_shl(value, 1); + ++i; + } while (dc_fixpt_lt( + value, + dc_fixpt_one)); + + --i; + + if (exp_offset <= i) { + *mantissa = 0; + *exponenta = 0; + return true; + } + + *exponenta = exp_offset - i; + } else if (dc_fixpt_le( + mantissa_constant_plus_max_fraction, + value)) { + uint32_t i = 1; + + do { + value = dc_fixpt_shr(value, 1); + ++i; + } while (dc_fixpt_lt( + mantissa_constant_plus_max_fraction, + value)); + + *exponenta = exp_offset + i - 1; + } else { + *exponenta = exp_offset; + } + + mantiss = dc_fixpt_sub( + value, + dc_fixpt_one); + + if (dc_fixpt_lt( + mantiss, + dc_fixpt_zero) || + dc_fixpt_lt( + dc_fixpt_one, + mantiss)) + mantiss = dc_fixpt_zero; + else + mantiss = dc_fixpt_shl( + mantiss, + format->mantissa_bits); + + *mantissa = dc_fixpt_floor(mantiss); + + return true; +} + +static bool setup_custom_float( + const struct custom_float_format *format, + bool negative, + uint32_t mantissa, + uint32_t exponenta, + uint32_t *result) +{ + uint32_t i = 0; + uint32_t j = 0; + + uint32_t value = 0; + + /* verification code: + * once calculation is ok we can remove it + */ + + const uint32_t mantissa_mask = + (1 << (format->mantissa_bits + 1)) - 1; + + const uint32_t exponenta_mask = + (1 << (format->exponenta_bits + 1)) - 1; + + if (mantissa & ~mantissa_mask) { + BREAK_TO_DEBUGGER(); + mantissa = mantissa_mask; + } + + if (exponenta & ~exponenta_mask) { + BREAK_TO_DEBUGGER(); + exponenta = exponenta_mask; + } + + /* end of verification code */ + + while (i < format->mantissa_bits) { + uint32_t mask = 1 << i; + + if (mantissa & mask) + value |= mask; + + ++i; + } + + while (j < format->exponenta_bits) { + uint32_t mask = 1 << j; + + if (exponenta & mask) + value |= mask << i; + + ++j; + } + + if (negative && format->sign) + value |= 1 << (i + j); + + *result = value; + + return true; +} + +bool convert_to_custom_float_format( + struct fixed31_32 value, + const struct custom_float_format *format, + uint32_t *result) +{ + uint32_t mantissa; + uint32_t exponenta; + bool negative; + + return build_custom_float( + value, format, &negative, &mantissa, &exponenta) && + setup_custom_float( + format, negative, mantissa, exponenta, result); +} + + only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c @@ -0,0 +1,3625 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include + +#include "resource.h" +#include "dm_services.h" +#include "dce_calcs.h" +#include "dc.h" +#include "core_types.h" +#include "dal_asic_id.h" +#include "calcs_logger.h" + +/* + * NOTE: + * This file is gcc-parseable HW gospel, coming straight from HW engineers. + * + * It doesn't adhere to Linux kernel style and sometimes will do things in odd + * ways. Unless there is something clearly wrong with it the code should + * remain as-is as it provides us with a guarantee from HW that it is correct. + */ + +/******************************************************************************* + * Private Functions + ******************************************************************************/ + +static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asic_id) +{ + switch (asic_id.chip_family) { + + case FAMILY_CZ: + if (ASIC_REV_IS_STONEY(asic_id.hw_internal_rev)) + return BW_CALCS_VERSION_STONEY; + return BW_CALCS_VERSION_CARRIZO; + + case FAMILY_VI: + if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) + return BW_CALCS_VERSION_POLARIS12; + if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev)) + return BW_CALCS_VERSION_POLARIS10; + if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev)) + return BW_CALCS_VERSION_POLARIS11; + if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) + return BW_CALCS_VERSION_VEGAM; + return BW_CALCS_VERSION_INVALID; + + case FAMILY_AI: + return BW_CALCS_VERSION_VEGA10; + + default: + return BW_CALCS_VERSION_INVALID; + } +} + +static void calculate_bandwidth( + const struct bw_calcs_dceip *dceip, + const struct bw_calcs_vbios *vbios, + struct bw_calcs_data *data) + +{ + const int32_t pixels_per_chunk = 512; + const int32_t high = 2; + const int32_t mid = 1; + const int32_t low = 0; + const uint32_t s_low = 0; + const uint32_t s_mid1 = 1; + const uint32_t s_mid2 = 2; + const uint32_t s_mid3 = 3; + const uint32_t s_mid4 = 4; + const uint32_t s_mid5 = 5; + const uint32_t s_mid6 = 6; + const uint32_t s_high = 7; + const uint32_t dmif_chunk_buff_margin = 1; + + uint32_t max_chunks_fbc_mode; + int32_t num_cursor_lines; + + int32_t i, j, k; + struct bw_fixed *yclk; + struct bw_fixed *sclk; + bool d0_underlay_enable; + bool d1_underlay_enable; + bool fbc_enabled; + bool lpt_enabled; + enum bw_defines sclk_message; + enum bw_defines yclk_message; + enum bw_defines *tiling_mode; + enum bw_defines *surface_type; + enum bw_defines voltage; + enum 
bw_defines pipe_check; + enum bw_defines hsr_check; + enum bw_defines vsr_check; + enum bw_defines lb_size_check; + enum bw_defines fbc_check; + enum bw_defines rotation_check; + enum bw_defines mode_check; + enum bw_defines nbp_state_change_enable_blank; + /*initialize variables*/ + int32_t number_of_displays_enabled = 0; + int32_t number_of_displays_enabled_with_margin = 0; + int32_t number_of_aligned_displays_with_no_margin = 0; + + yclk = kcalloc(3, sizeof(*yclk), GFP_KERNEL); + if (!yclk) + return; + + sclk = kcalloc(8, sizeof(*sclk), GFP_KERNEL); + if (!sclk) + goto free_yclk; + + tiling_mode = kcalloc(maximum_number_of_surfaces, sizeof(*tiling_mode), GFP_KERNEL); + if (!tiling_mode) + goto free_sclk; + + surface_type = kcalloc(maximum_number_of_surfaces, sizeof(*surface_type), GFP_KERNEL); + if (!surface_type) + goto free_tiling_mode; + + yclk[low] = vbios->low_yclk; + yclk[mid] = vbios->mid_yclk; + yclk[high] = vbios->high_yclk; + sclk[s_low] = vbios->low_sclk; + sclk[s_mid1] = vbios->mid1_sclk; + sclk[s_mid2] = vbios->mid2_sclk; + sclk[s_mid3] = vbios->mid3_sclk; + sclk[s_mid4] = vbios->mid4_sclk; + sclk[s_mid5] = vbios->mid5_sclk; + sclk[s_mid6] = vbios->mid6_sclk; + sclk[s_high] = vbios->high_sclk; + /*''''''''''''''''''*/ + /* surface assignment:*/ + /* 0: d0 underlay or underlay luma*/ + /* 1: d0 underlay chroma*/ + /* 2: d1 underlay or underlay luma*/ + /* 3: d1 underlay chroma*/ + /* 4: d0 graphics*/ + /* 5: d1 graphics*/ + /* 6: d2 graphics*/ + /* 7: d3 graphics, same mode as d2*/ + /* 8: d4 graphics, same mode as d2*/ + /* 9: d5 graphics, same mode as d2*/ + /* ...*/ + /* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/ + /* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/ + /* underlay luma and chroma surface parameters from spreadsheet*/ + + + + + if (data->d0_underlay_mode == bw_def_none) + d0_underlay_enable = false; + else + d0_underlay_enable = true; + if (data->d1_underlay_mode == bw_def_none) + d1_underlay_enable = false; + else + d1_underlay_enable = true; + data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable; + switch (data->underlay_surface_type) { + case bw_def_420: + surface_type[0] = bw_def_underlay420_luma; + surface_type[2] = bw_def_underlay420_luma; + data->bytes_per_pixel[0] = 1; + data->bytes_per_pixel[2] = 1; + surface_type[1] = bw_def_underlay420_chroma; + surface_type[3] = bw_def_underlay420_chroma; + data->bytes_per_pixel[1] = 2; + data->bytes_per_pixel[3] = 2; + data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component; + data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component; + data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component; + data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component; + break; + case bw_def_422: + surface_type[0] = bw_def_underlay422; + surface_type[2] = bw_def_underlay422; + data->bytes_per_pixel[0] = 2; + data->bytes_per_pixel[2] = 2; + data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component; + data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component; + break; + default: + surface_type[0] = bw_def_underlay444; + surface_type[2] = bw_def_underlay444; + data->bytes_per_pixel[0] = 4; + data->bytes_per_pixel[2] = 4; + data->lb_size_per_component[0] = dceip->lb_size_per_component444; + data->lb_size_per_component[2] = dceip->lb_size_per_component444; + break; + } + if (d0_underlay_enable) { + switch (data->underlay_surface_type) { + case bw_def_420: 
+ data->enable[0] = 1; + data->enable[1] = 1; + break; + default: + data->enable[0] = 1; + data->enable[1] = 0; + break; + } + } + else { + data->enable[0] = 0; + data->enable[1] = 0; + } + if (d1_underlay_enable) { + switch (data->underlay_surface_type) { + case bw_def_420: + data->enable[2] = 1; + data->enable[3] = 1; + break; + default: + data->enable[2] = 1; + data->enable[3] = 0; + break; + } + } + else { + data->enable[2] = 0; + data->enable[3] = 0; + } + data->use_alpha[0] = 0; + data->use_alpha[1] = 0; + data->use_alpha[2] = 0; + data->use_alpha[3] = 0; + data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable; + data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable; + data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable; + data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable; + /*underlay0 same and graphics display pipe0*/ + data->interlace_mode[0] = data->interlace_mode[4]; + data->interlace_mode[1] = data->interlace_mode[4]; + /*underlay1 same and graphics display pipe1*/ + data->interlace_mode[2] = data->interlace_mode[5]; + data->interlace_mode[3] = data->interlace_mode[5]; + /*underlay0 same and graphics display pipe0*/ + data->h_total[0] = data->h_total[4]; + data->v_total[0] = data->v_total[4]; + data->h_total[1] = data->h_total[4]; + data->v_total[1] = data->v_total[4]; + /*underlay1 same and graphics display pipe1*/ + data->h_total[2] = data->h_total[5]; + data->v_total[2] = data->v_total[5]; + data->h_total[3] = data->h_total[5]; + data->v_total[3] = data->v_total[5]; + /*underlay0 same and graphics display pipe0*/ + data->pixel_rate[0] = data->pixel_rate[4]; + data->pixel_rate[1] = data->pixel_rate[4]; + /*underlay1 same and graphics display pipe1*/ + data->pixel_rate[2] = data->pixel_rate[5]; + data->pixel_rate[3] = data->pixel_rate[5]; + if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) { + tiling_mode[0] = bw_def_linear; + tiling_mode[1] = bw_def_linear; + tiling_mode[2] = bw_def_linear; + tiling_mode[3] = bw_def_linear; + } + else { + tiling_mode[0] = bw_def_landscape; + tiling_mode[1] = bw_def_landscape; + tiling_mode[2] = bw_def_landscape; + tiling_mode[3] = bw_def_landscape; + } + data->lb_bpc[0] = data->underlay_lb_bpc; + data->lb_bpc[1] = data->underlay_lb_bpc; + data->lb_bpc[2] = data->underlay_lb_bpc; + data->lb_bpc[3] = data->underlay_lb_bpc; + data->compression_rate[0] = bw_int_to_fixed(1); + data->compression_rate[1] = bw_int_to_fixed(1); + data->compression_rate[2] = bw_int_to_fixed(1); + data->compression_rate[3] = bw_int_to_fixed(1); + data->access_one_channel_only[0] = 0; + data->access_one_channel_only[1] = 0; + data->access_one_channel_only[2] = 0; + data->access_one_channel_only[3] = 0; + data->cursor_width_pixels[0] = bw_int_to_fixed(0); + data->cursor_width_pixels[1] = bw_int_to_fixed(0); + data->cursor_width_pixels[2] = bw_int_to_fixed(0); + data->cursor_width_pixels[3] = bw_int_to_fixed(0); + /* graphics surface parameters from spreadsheet*/ + fbc_enabled = false; + lpt_enabled = false; + for (i = 4; i <= maximum_number_of_surfaces - 3; i++) { + if (i < data->number_of_displays + 4) { + if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) { + data->enable[i] = 0; + data->use_alpha[i] = 0; + } + else if (i == 4 && data->d0_underlay_mode == bw_def_blend) { + data->enable[i] = 1; + data->use_alpha[i] = 1; + } + else if (i == 4) { + data->enable[i] = 1; + data->use_alpha[i] = 0; + } + else if (i == 
5 && data->d1_underlay_mode == bw_def_underlay_only) { + data->enable[i] = 0; + data->use_alpha[i] = 0; + } + else if (i == 5 && data->d1_underlay_mode == bw_def_blend) { + data->enable[i] = 1; + data->use_alpha[i] = 1; + } + else { + data->enable[i] = 1; + data->use_alpha[i] = 0; + } + } + else { + data->enable[i] = 0; + data->use_alpha[i] = 0; + } + data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable; + surface_type[i] = bw_def_graphics; + data->lb_size_per_component[i] = dceip->lb_size_per_component444; + if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) { + tiling_mode[i] = bw_def_linear; + } + else { + tiling_mode[i] = bw_def_tiled; + } + data->lb_bpc[i] = data->graphics_lb_bpc; + if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) { + data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate); + data->access_one_channel_only[i] = data->lpt_en[i]; + } + else { + data->compression_rate[i] = bw_int_to_fixed(1); + data->access_one_channel_only[i] = 0; + } + if (data->fbc_en[i] == 1) { + fbc_enabled = true; + if (data->lpt_en[i] == 1) { + lpt_enabled = true; + } + } + data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width); + } + /* display_write_back420*/ + data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0; + data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0; + if (data->d1_display_write_back_dwb_enable == 1) { + data->enable[maximum_number_of_surfaces - 2] = 1; + data->enable[maximum_number_of_surfaces - 1] = 1; + } + else { + data->enable[maximum_number_of_surfaces - 2] = 0; + data->enable[maximum_number_of_surfaces - 1] = 0; + } + surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma; + surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma; + data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component; + data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component; + data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1; + data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2; + data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5]; + data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5]; + data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0); + data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0); + tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear; + tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear; + data->lb_bpc[maximum_number_of_surfaces - 2] = 8; + data->lb_bpc[maximum_number_of_surfaces - 1] = 8; + data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0; + data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0; + /*assume display pipe1 has dwb enabled*/ + data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5]; + 
data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5]; + data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5]; + data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5]; + data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5]; + data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5]; + data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5]; + data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5]; + data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5]; + data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5]; + data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5]; + data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5]; + data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono; + data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono; + data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0); + data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0); + data->use_alpha[maximum_number_of_surfaces - 2] = 0; + data->use_alpha[maximum_number_of_surfaces - 1] = 0; + /*mode check calculations:*/ + /* mode within dce ip capabilities*/ + /* fbc*/ + /* hsr*/ + /* vsr*/ + /* lb size*/ + /*effective scaling source and ratios:*/ + /*for graphics, non-stereo, non-interlace surfaces when the size of the source and destination are the same, only one tap is used*/ + /*420 chroma has half the width, height, horizontal and vertical scaling ratios than luma*/ + /*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/ + /*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/ + /*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/ + /*in interlace mode there is 2:1 vertical downscaling for each field*/ + /*in panning or bezel adjustment mode the source width has an extra 128 pixels*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) { + data->h_taps[i] = bw_int_to_fixed(1); + data->v_taps[i] = bw_int_to_fixed(1); + } + if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) { + data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2)); + data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2)); + data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2)); + data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2)); + data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2)); + } + else { + data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i]; + data->src_width_after_surface_type = data->src_width[i]; + data->src_height_after_surface_type = data->src_height[i]; + data->hsr_after_surface_type = data->h_scale_ratio[i]; + 
data->vsr_after_surface_type = data->v_scale_ratio[i]; + } + if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->src_width_after_rotation = data->src_height_after_surface_type; + data->src_height_after_rotation = data->src_width_after_surface_type; + data->hsr_after_rotation = data->vsr_after_surface_type; + data->vsr_after_rotation = data->hsr_after_surface_type; + } + else { + data->src_width_after_rotation = data->src_width_after_surface_type; + data->src_height_after_rotation = data->src_height_after_surface_type; + data->hsr_after_rotation = data->hsr_after_surface_type; + data->vsr_after_rotation = data->vsr_after_surface_type; + } + switch (data->stereo_mode[i]) { + case bw_def_top_bottom: + data->source_width_pixels[i] = data->src_width_after_rotation; + data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation); + data->hsr_after_stereo = data->hsr_after_rotation; + data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation); + break; + case bw_def_side_by_side: + data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation); + data->source_height_pixels = data->src_height_after_rotation; + data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation); + data->vsr_after_stereo = data->vsr_after_rotation; + break; + default: + data->source_width_pixels[i] = data->src_width_after_rotation; + data->source_height_pixels = data->src_height_after_rotation; + data->hsr_after_stereo = data->hsr_after_rotation; + data->vsr_after_stereo = data->vsr_after_rotation; + break; + } + data->hsr[i] = data->hsr_after_stereo; + if (data->interlace_mode[i]) { + data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2)); + } + else { + data->vsr[i] = data->vsr_after_stereo; + } + if (data->panning_and_bezel_adjustment != bw_def_none) { + data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256)); + } + else { + data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128)); + } + data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels; + } + } + /*mode support checks:*/ + /*the number of graphics and underlay pipes is limited by the ip support*/ + /*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/ + /*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/ + /*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/ + /*the number of lines in the line buffer has to exceed the number of vertical taps*/ + /*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/ + /*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/ + /*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/ + /*frame buffer compression is not supported with stereo mode, rotation, or non- 888 
formats*/ + /*rotation is not supported with linear of stereo modes*/ + if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) { + pipe_check = bw_def_ok; + } + else { + pipe_check = bw_def_notok; + } + hsr_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) { + if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) { + hsr_check = bw_def_hsr_mtn_4; + } + else { + if (bw_mtn(data->hsr[i], data->h_taps[i])) { + hsr_check = bw_def_hsr_mtn_h_taps; + } + else { + if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) { + hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr; + } + } + } + } + } + } + vsr_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) { + if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) { + vsr_check = bw_def_vsr_mtn_4; + } + else { + if (bw_mtn(data->vsr[i], data->v_taps[i])) { + vsr_check = bw_def_vsr_mtn_v_taps; + } + } + } + } + } + lb_size_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) { + data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]); + } + else { + data->source_width_in_lb = data->source_width_pixels[i]; + } + switch (data->lb_bpc[i]) { + case 8: + data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875ul, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48)); + break; + case 10: + data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48)); + break; + default: + data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48)); + break; + } + data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1)); + /*clamp the partitions to the maxium number supported by the lb*/ + if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) { + data->lb_partitions_max[i] = bw_int_to_fixed(10); + } + else { + data->lb_partitions_max[i] = bw_int_to_fixed(7); + } + data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]); + if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) { + lb_size_check = bw_def_notok; + } + } + } + fbc_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) { + fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo; + } + } + rotation_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && 
(tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) { + rotation_check = bw_def_invalid_linear_or_stereo_mode; + } + } + } + if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) { + mode_check = bw_def_ok; + } + else { + mode_check = bw_def_notok; + } + /*number of memory channels for write-back client*/ + data->number_of_dram_wrchannels = vbios->number_of_dram_channels; + data->number_of_dram_channels = vbios->number_of_dram_channels; + /*modify number of memory channels if lpt mode is enabled*/ + /* low power tiling mode register*/ + /* 0 = use channel 0*/ + /* 1 = use channel 0 and 1*/ + /* 2 = use channel 0,1,2,3*/ + if ((fbc_enabled == 1 && lpt_enabled == 1)) { + if (vbios->memory_type == bw_def_hbm) + data->dram_efficiency = bw_frc_to_fixed(5, 10); + else + data->dram_efficiency = bw_int_to_fixed(1); + + + if (dceip->low_power_tiling_mode == 0) { + data->number_of_dram_channels = 1; + } + else if (dceip->low_power_tiling_mode == 1) { + data->number_of_dram_channels = 2; + } + else if (dceip->low_power_tiling_mode == 2) { + data->number_of_dram_channels = 4; + } + else { + data->number_of_dram_channels = 1; + } + } + else { + if (vbios->memory_type == bw_def_hbm) + data->dram_efficiency = bw_frc_to_fixed(5, 10); + else + data->dram_efficiency = bw_frc_to_fixed(8, 10); + } + /*memory request size and latency hiding:*/ + /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/ + /*the display write-back requests are single line*/ + /*for tiled graphics surfaces, or undelay surfaces with width higher than the maximum size for full efficiency, request size is 32 byte in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. 
only half is useful of the bytes in the request size in 8 bpp or in 32 bpp if the rotation is orthogonal to the tiling grain.*/ + /*for undelay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) { + if ((i < 4)) { + /*underlay portrait tiling mode is not supported*/ + data->orthogonal_rotation[i] = 1; + } + else { + /*graphics portrait tiling mode*/ + if (data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling) { + data->orthogonal_rotation[i] = 0; + } + else { + data->orthogonal_rotation[i] = 1; + } + } + } + else { + if ((i < 4)) { + /*underlay landscape tiling mode is only supported*/ + if (data->underlay_micro_tile_mode == bw_def_display_micro_tiling) { + data->orthogonal_rotation[i] = 0; + } + else { + data->orthogonal_rotation[i] = 1; + } + } + else { + /*graphics landscape tiling mode*/ + if (data->graphics_micro_tile_mode == bw_def_display_micro_tiling) { + data->orthogonal_rotation[i] = 0; + } + else { + data->orthogonal_rotation[i] = 1; + } + } + } + if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) { + data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling; + } + else { + data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling; + } + if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) { + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1); + data->latency_hiding_lines[i] = bw_int_to_fixed(1); + } + else if (tiling_mode[i] == bw_def_linear) { + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + } + else { + if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) { + switch (data->bytes_per_pixel[i]) { + case 8: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + if (data->orthogonal_rotation[i]) { + data->bytes_per_request[i] = bw_int_to_fixed(32); + data->useful_bytes_per_request[i] = bw_int_to_fixed(32); + } + else { + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); + } + break; + case 4: + if (data->orthogonal_rotation[i]) { + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + data->bytes_per_request[i] = bw_int_to_fixed(32); + data->useful_bytes_per_request[i] = bw_int_to_fixed(16); + } + else { + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); 
+ } + break; + case 2: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + data->bytes_per_request[i] = bw_int_to_fixed(32); + data->useful_bytes_per_request[i] = bw_int_to_fixed(32); + break; + default: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + data->bytes_per_request[i] = bw_int_to_fixed(32); + data->useful_bytes_per_request[i] = bw_int_to_fixed(16); + break; + } + } + else { + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); + if (data->orthogonal_rotation[i]) { + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8); + data->latency_hiding_lines[i] = bw_int_to_fixed(4); + } + else { + switch (data->bytes_per_pixel[i]) { + case 4: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + break; + case 2: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4); + data->latency_hiding_lines[i] = bw_int_to_fixed(4); + break; + default: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8); + data->latency_hiding_lines[i] = bw_int_to_fixed(4); + break; + } + } + } + } + } + } + /*requested peak bandwidth:*/ + /*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/ + /*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/ + /*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/ + /**/ + /*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/ + /*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/ + /**/ + /*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/ + /*rounded up to even and divided by the line times for initialization, which is normally three.*/ + /*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/ + /*rounded up to line pairs if not doing line buffer prefetching.*/ + /**/ + /*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/ + /*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/ + /**/ + /*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/ + /*vertical scale ratio and the number of vertical taps increased by one. 
add one more for possible odd line*/ + /*panning/bezel adjustment mode.*/ + /**/ + /*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/ + /*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/ + /*furthermore, there is only one line time for initialization.*/ + /**/ + /*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/ + /*the ceiling of the vertical scale ratio.*/ + /**/ + /*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.'*/ + /**/ + /*the horizontal blank and chunk granularity factor is indirectly used indicate the interval of time required to transfer the source pixels.*/ + /*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/ + /*it applies when the lines in per line out is not 2 or 4. it does not apply when there is a line buffer between the scl and blnd.*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1)); + if (data->panning_and_bezel_adjustment == bw_def_any_lines) { + data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1)); + } + if (data->stereo_mode[i] == bw_def_top_bottom) { + data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4)); + } + if (data->stereo_mode[i] == bw_def_top_bottom) { + data->num_lines_at_frame_start = bw_int_to_fixed(1); + } + else { + data->num_lines_at_frame_start = bw_int_to_fixed(3); + } + if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) { + data->line_buffer_prefetch[i] = 0; + } + else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) { + data->line_buffer_prefetch[i] = 1; + } + else { + data->line_buffer_prefetch[i] = 0; + } + data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start); + if (data->line_buffer_prefetch[i] == 1) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]); + } + else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1); + } else if (bw_leq(data->vsr[i], + bw_frc_to_fixed(4, 3))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3)); + } else if (bw_leq(data->vsr[i], + bw_frc_to_fixed(6, 4))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4)); + } + else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2); + } + else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3); + } + else { + 
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4); + } + if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) { + data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1); + } + else { + data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2)))); + } + data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]); + data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]); + } + } + /*outstanding chunk request limit*/ + /*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/ + /*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/ + /**/ + /*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/ + /**/ + /*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/ + /*and underlay.*/ + /**/ + /*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/ + /*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/ + /*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/ + /*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/ + /*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/ + /*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) { + data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin; + } + else { + data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin; + } + } + if (data->fbc_en[i] == 1) { + max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin; + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + switch (surface_type[i]) { + case bw_def_display_write_back420_luma: + data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size); + break; + case bw_def_display_write_back420_chroma: + data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size); + break; + case bw_def_underlay420_luma: + data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size); + break; + case bw_def_underlay420_chroma: + data->data_buffer_size[i] = 
bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2)); + break; + case bw_def_underlay422:case bw_def_underlay444: + if (data->orthogonal_rotation[i] == 0) { + data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size); + } + else { + data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size)); + } + break; + default: + if (data->fbc_en[i] == 1) { + /*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/ + if (data->number_of_displays == 1) { + data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size))); + } + else { + data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size)); + } + } + else { + /*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/ + if (data->number_of_displays == 1) { + data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size))); + } + else { + data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size)); + } + } + break; + } + if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) { + data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024); + data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024); + } + else { + data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i])); + data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i])); + } + } + } + data->min_dmif_size_in_time = bw_int_to_fixed(9999); + data->min_mcifwr_size_in_time = bw_int_to_fixed(9999); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) { + data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]); + } + } + else { + if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) { + data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]); + } + } + } + } + data->total_requests_for_dmif_size = bw_int_to_fixed(0); + for (i 
= 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i])); + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) { + data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i])); + } + else { + data->adjusted_data_buffer_size[i] = data->data_buffer_size[i]; + } + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0) { + /*set maximum chunk limit if only one graphic pipe is enabled*/ + data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127); + } + else { + data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1)); + /*clamp maximum chunk limit in the graphic display pipe*/ + if (i >= 4) { + data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]); + } + } + } + } + /*outstanding pte request limit*/ + /*in tiling mode with no rotation the sg pte requests are 8 useful pt_es, the sg row height is the page height and the sg page width x height is 64x64 for 8bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/ + /*in tiling mode with rotation the sg pte requests are only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/ + /*in linear mode the pte requests are 8 useful pt_es, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/ + /*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/ + /*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/ + /*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/ + /*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/ + /*the pte requests per chunk is 256 divided by the scatter-gather page width and the useful pt_es per pte request*/ + if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) { + data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display; + } + else { + data->peak_pte_request_to_eviction_ratio_limiting = 
dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation; + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) { + if (tiling_mode[i] == bw_def_linear) { + data->useful_pte_per_pte_request = bw_int_to_fixed(8); + data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i])); + data->scatter_gather_page_height[i] = bw_int_to_fixed(1); + data->scatter_gather_pte_request_rows = bw_int_to_fixed(1); + data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode); + } + else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) { + data->useful_pte_per_pte_request = bw_int_to_fixed(8); + switch (data->bytes_per_pixel[i]) { + case 4: + data->scatter_gather_page_width[i] = bw_int_to_fixed(32); + data->scatter_gather_page_height[i] = bw_int_to_fixed(32); + break; + case 2: + data->scatter_gather_page_width[i] = bw_int_to_fixed(64); + data->scatter_gather_page_height[i] = bw_int_to_fixed(32); + break; + default: + data->scatter_gather_page_width[i] = bw_int_to_fixed(64); + data->scatter_gather_page_height[i] = bw_int_to_fixed(64); + break; + } + data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode); + data->scatter_gather_row_height = data->scatter_gather_page_height[i]; + } + else { + data->useful_pte_per_pte_request = bw_int_to_fixed(1); + switch (data->bytes_per_pixel[i]) { + case 4: + data->scatter_gather_page_width[i] = bw_int_to_fixed(32); + data->scatter_gather_page_height[i] = bw_int_to_fixed(32); + break; + case 2: + data->scatter_gather_page_width[i] = bw_int_to_fixed(32); + data->scatter_gather_page_height[i] = bw_int_to_fixed(64); + break; + default: + data->scatter_gather_page_width[i] = bw_int_to_fixed(64); + data->scatter_gather_page_height[i] = bw_int_to_fixed(64); + break; + } + data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode); + data->scatter_gather_row_height = data->scatter_gather_page_height[i]; + } + data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request); + data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]); + data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]); + if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) { + data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank; + } + else { + data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1)))); + } + } + } + /*pitch padding recommended for efficiency in linear mode*/ + /*in linear mode graphics or underlay with scatter gather, a 
pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/ + /*if that is the case it is recommended to pad the pitch by at least 256 pixels*/ + data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)); + + /*pixel transfer time*/ + /*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipe's data buffer size in memory in the time for data transfer*/ + /*for dmif, pte and cursor requests have to be included.*/ + /*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/ + /*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/ + /*the page close-open time is determined by trc and the number of page close-opens*/ + /*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/ + /*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/ + /*otherwise, the bytes page close-open is the chunk size because that is the arbitration slice.*/ + /*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/ + /*cursor requests outstanding are limited to a group of two source lines. 
each group costs a page close-open time for dmif reads*/ + /*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/ + /*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/ + /*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/ + /*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/ + /*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the fist pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/ + /*the source pixels for the first output pixel is 512 if the scaler vertical filter initialization value is greater than 2, and it is 4 times the source width if it is greater than 4.*/ + /*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/ + /*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/ + data->cursor_total_data = bw_int_to_fixed(0); + data->cursor_total_request_groups = bw_int_to_fixed(0); + data->scatter_gather_total_pte_requests = bw_int_to_fixed(0); + data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4))); + if (dceip->large_cursor == 1) { + data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1))); + } + else { + data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1))); + } + if (data->scatter_gather_enable_for_pipe[i]) { + data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]); + data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1))); + } + } + } + data->tile_width_in_pixels = bw_int_to_fixed(8); + data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0); + data->mcifwr_total_number_of_data_request_page_close_open = 
bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) { + data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i]))); + } + else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) { + data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice; + } + else { + data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i]; + } + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open)); + } + else { + data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open)); + } + } + } + data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000)); + data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000)); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]); + } + } + data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i])); + } + } + } + data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1)); + data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips); + data->total_display_reads_required_data = bw_int_to_fixed(0); + data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0); + data->total_display_writes_required_data = bw_int_to_fixed(0); + data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if 
(surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i]; + /*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width. each*/ + /*pseudo-channel may be read independently of one another.*/ + /*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/ + /*the 64 or 32 byte sized data is stored in one pseudo-channel.*/ + /*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/ + /*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/ + /*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/ + /*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/ + /*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/ + /*the memory efficiency will be 50% for the 32 byte sized data.*/ + if (vbios->memory_type == bw_def_hbm) { + data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i]; + } + else { + data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1))); + } + data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data); + data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data); + } + else { + data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]); + data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1)))); + } + } + } + data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64))); + data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64))); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) { + data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]); + } + else { + if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) { + data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512); + } + else { + data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0); + } + } + data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]); + data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], 
bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i]))); + data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]); + data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]); + } + } + for (i = 0; i <= 2; i++) { + for (j = 0; j <= 7; j++) { + data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))))); + if (data->d1_display_write_back_dwb_enable == 1) { + data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width)))); + } + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + for (j = 0; j <= 2; j++) { + for (k = 0; k <= 7; k++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + /*time to transfer data from the dmif buffer to the lb. since the mc to dmif transfer time overlaps*/ + /*with the dmif to lb transfer time, only time to transfer the last chunk is considered.*/ + data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor))))); + data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i])); + /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/ + /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/ + /*per memory channel. 
if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/ + /*immediately serviced without a gap in the urgent requests.*/ + /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/ + if (surface_type[i] == bw_def_graphics) { + switch (data->lb_bpc[i]) { + case 6: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component; + break; + case 8: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component; + break; + case 10: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component; + break; + default: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component; + break; + } + if (data->use_alpha[i] == 1) { + data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency); + } + } + else { + switch (data->lb_bpc[i]) { + case 6: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component; + break; + case 8: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component; + break; + case 10: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component; + break; + default: + data->v_scaler_efficiency = bw_int_to_fixed(3); + break; + } + } + if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) { + data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i])); + } + else { + data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1)))); + } + data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))))); + } + else { + data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])); + /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/ + /*after the mclk switch sequence and not incur an urgent latency penalty. 
it is assumed that the gmc/arb can hold up to 256 requests*/ + /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/ + /*immediately serviced without a gap in the urgent requests.*/ + /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/ + data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))); + } + } + } + } + } + /*cpu c-state and p-state change enable*/ + /*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/ + /*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/ + /*condition for the blackout duration:*/ + /* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/ + /*condition for the blackout recovery:*/ + /* recovery time > dmif burst time + 2 * urgent latency*/ + /* recovery time > (display bw * blackout duration + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/ + /* / (dispclk - display bw)*/ + /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/ + /*the minimum latency hiding is further limited by the cursor. 
the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/ + + /*initialize variables*/ + number_of_displays_enabled = 0; + number_of_displays_enabled_with_margin = 0; + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k]) { + number_of_displays_enabled = number_of_displays_enabled + 1; + } + data->display_pstate_change_enable[k] = 0; + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) { + if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) { + data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]); + } + else { + data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]); + } + } + else { + data->cursor_latency_hiding[i] = bw_int_to_fixed(9999); + } + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) { + if (number_of_displays_enabled > 2) + data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); + else + data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); + } + else { + data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); + } + data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]); + } + } + for (i = 0; i <= 2; i++) { + for (j = 0; j <= 7; j++) { + data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999); + data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0); + data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0); + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) { + if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { + data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j])); + 
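+ /*worked example with assumed figures (not taken from any vbios or dceip table): if a pipe hides 40 us of latency after the cursor limit,*/
+ /*the blackout lasts 6 us, the dmif burst takes 12 us and the line source transfer takes 8 us, then the margin for this yclk/sclk pair is*/
+ /*40 - 6 - 12 - 8 = 14 us; only a positive margin allows the cpu c-state/p-state change for that pair.*/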
data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k])))); + if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) { + data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999); + } + else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) { + data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k])); + } + } + else { + data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j])); + data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); + if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) { + data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999); + } + else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) { + data->dispclk_required_for_blackout_recovery[i][j] = 
bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k])); + } + } + } + } + } + } + if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) { + data->cpup_state_change_enable = bw_def_yes; + if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) { + data->cpuc_state_change_enable = bw_def_yes; + } + else { + data->cpuc_state_change_enable = bw_def_no; + } + } + else { + data->cpup_state_change_enable = bw_def_no; + data->cpuc_state_change_enable = bw_def_no; + } + /*nb p-state change enable*/ + /*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/ + /*below the maximum.*/ + /*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/ + /*minus the dmif burst time, minus the source line transfer time*/ + /*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency*/ + /*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) **/ + /* h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/ + data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], + bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency)); + data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]); + } + } + for (i = 0; i <= 2; i++) { + for (j = 0; j <= 7; j++) { + data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999); + data->dram_speed_change_margin = bw_int_to_fixed(9999); + data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0); + data->num_displays_with_margin[i][j] = 0; + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k]) { + if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { + data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]); + if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { + /*determine the minimum dram clock change margin for each set of clock frequencies*/ + data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); + /*compute the maximum clock frequuency required for the 
dram clock change at each set of clock frequencies*/ + data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k])))); + if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { + data->display_pstate_change_enable[k] = 1; + data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; + data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]); + } + } + } + else { + data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]); + if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { + /*determine the minimum dram clock change margin for each display pipe*/ + data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); + /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/ + data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); + if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { + data->display_pstate_change_enable[k] = 1; + data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; + data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]); + } + } + } + } + } + } + } + /*determine the number of displays with margin to switch in the v_active region*/ + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1) { + number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 
1; + } + } + /*determine the number of displays that don't have any dram clock change margin, but*/ + /*have the same resolution. these displays can switch in a common vblank region if*/ + /*their frames are aligned.*/ + data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999); + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k]) { + if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { + data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]); + data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]); + } + else { + data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]); + data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]); + } + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + data->displays_with_same_mode[i] = bw_int_to_fixed(0); + if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) { + for (j = 0; j <= maximum_number_of_surfaces - 1; j++) { + if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) { + data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1)); + } + } + } + } + /*compute the maximum number of aligned displays with no margin*/ + number_of_aligned_displays_with_no_margin = 0; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i])); + } + /*dram clock change is possible, if all displays have positive margin except for one display or a group of*/ + /*aligned displays with the same timing.*/ + /*the display(s) with the negative margin can be switched in the v_blank region while the other*/ + /*displays are in v_blank or v_active.*/ + if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) { + data->nbp_state_change_enable = bw_def_yes; + 
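+ /*illustrative case with assumed display counts: three displays enabled, one with positive v_active margin and the other two identically*/
+ /*timed with no margin gives 1 + 2 == 3, so the aligned pair can switch in their shared vblank while the third switches in v_active,*/
+ /*and the dram speed/p-state change stays enabled.*/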
} + else { + data->nbp_state_change_enable = bw_def_no; + } + /*dram clock change is possible only in vblank if all displays are aligned and have no margin*/ + if (number_of_aligned_displays_with_no_margin == number_of_displays_enabled) { + nbp_state_change_enable_blank = bw_def_yes; + } + else { + nbp_state_change_enable_blank = bw_def_no; + } + + /*average bandwidth*/ + /*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/ + /*the average bandwidth with compression is the same, divided by the compression ratio*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]); + data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]); + } + } + data->total_average_bandwidth_no_compression = bw_int_to_fixed(0); + data->total_average_bandwidth = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]); + data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]); + } + } + + /*required yclk(pclk)*/ + /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queque size*/ + /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/ + /*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/ + data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999); + /* number of cursor lines stored in the cursor data return buffer*/ + num_cursor_lines = 0; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) { + /*compute number of cursor lines stored in data return buffer*/ + if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) { + num_cursor_lines = 4; + } + else { + num_cursor_lines = 2; + } + data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i])); + } + } + } + /*compute minimum time to read one chunk from the dmif buffer*/ + if (number_of_displays_enabled > 2) { + data->chunk_request_delay = 0; + } + else { + data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk)); + } + data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time); + data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay)); + 
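+ /*illustrative arithmetic with assumed figures: a 20 us minimum read buffer size in time, 4 us of accumulated dmifmc urgent latency and*/
+ /*a chunk request delay of 1 leave 20 - 4 - 1 = 15 us of read transfer time; the dmif dram bandwidth requirement computed below is the*/
+ /*total display reads required dram access data divided by that window.*/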
data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency); + data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer); + data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer); + data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips); + data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time); + if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) { + data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999); + yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size; + data->y_clk_level = high; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) { + data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999); + yclk_message = bw_def_exceeded_allowed_page_close_open; + data->y_clk_level = high; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else { + data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000)); + if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels))) + && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == 
number_of_displays_enabled_with_margin))) { + yclk_message = bw_fixed_to_int(vbios->low_yclk); + data->y_clk_level = low; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels))) + && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) { + yclk_message = bw_fixed_to_int(vbios->mid_yclk); + data->y_clk_level = mid; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels))) + && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) { + yclk_message = bw_fixed_to_int(vbios->high_yclk); + data->y_clk_level = high; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else { + yclk_message = bw_def_exceeded_allowed_maximum_bw; + data->y_clk_level = high; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + } + /*required sclk*/ + /*sclk requirement only makes sense if the total pte 
requests fit in the scatter-gather saw queque size*/ + /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in foresaking dram speed/nb p-state change.*/ + /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/ + /*for dmif, pte and cursor requests have to be included.*/ + data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))); + data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width); + if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) { + data->required_sclk = bw_int_to_fixed(9999); + sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size; + data->sclk_level = s_high; + } + else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) { + data->required_sclk = bw_int_to_fixed(9999); + sclk_message = bw_def_exceeded_allowed_page_close_open; + data->sclk_level = s_high; + } + else { + data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk); + if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[low]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_low; + data->sclk_level = s_low; + data->required_sclk = vbios->low_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[mid]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && 
bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid1; + data->required_sclk = vbios->mid1_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid2; + data->required_sclk = vbios->mid2_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && 
(!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid3; + data->required_sclk = vbios->mid3_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid4; + data->required_sclk = vbios->mid4_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) { + 
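+ /*this rung of the sclk ladder applies the same checks as the lower rungs, evaluated at the mid5 operating point: average bandwidth within*/
+ /*the allowed fraction of the ideal port bandwidth, required sclk below the step, blackout duration/recovery dispclk within the limit, and,*/
+ /*when raising the voltage to support the mclk switch is allowed, the dram speed change margin and dispclk still intact.*/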
sclk_message = bw_def_mid; + data->sclk_level = s_mid5; + data->required_sclk = vbios->mid5_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid6; + data->required_sclk = vbios->mid6_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_high])) { + sclk_message = bw_def_high; + data->sclk_level = s_high; + data->required_sclk = vbios->high_sclk; + } + else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_high])) { + sclk_message = bw_def_high; + data->sclk_level = s_high; + data->required_sclk = vbios->high_sclk; + } + else { + sclk_message = bw_def_exceeded_allowed_maximum_sclk; + data->sclk_level = s_high; + /*required_sclk = high_sclk*/ + } + } + /*dispclk*/ + /*if dispclk is set to the maximum, ramping is not required. 
dispclk required without ramping is less than the dispclk required with ramping.*/
+	/*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/
+	/*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/
+	/*if that does not happen either, dispclk required is the dispclk required with ramping.*/
+	/*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request throughput and for dram/nb p-state change if enabled.*/
+	/*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/
+	/*the horizontal blank and chunk granularity factor is the ratio of the line time divided by the line time minus half the horizontal blank and chunk time. it applies when the lines in per line out is not 2 or 4.*/
+	/*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/
+	/*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/
+	/*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/
+	/*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/
+	/*the scaling limits factor itself is also clamped to at least 1*/
+	/*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/
+	data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100)));
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] == bw_def_graphics) {
+				switch (data->lb_bpc[i]) {
+				case 6:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+					break;
+				case 8:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+					break;
+				case 10:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+					break;
+				default:
+					data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+					break;
+				}
+				if (data->use_alpha[i] == 1) {
+					data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+				}
+			}
+			else {
+				switch (data->lb_bpc[i]) {
+				case 6:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+					break;
+				case 8:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+					break;
+				case 10:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+					break;
+				default:
+					data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component;
+					break;
+				}
+			}
+			if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i])); + } + else { + data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1)))); + } + data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk); + data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput))); + data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput)); + } + } + data->total_dispclk_required_with_ramping = bw_int_to_fixed(0); + data->total_dispclk_required_without_ramping = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) { + data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i]; + } + if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) { + data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i]; + } + } + } + data->total_read_request_bandwidth = bw_int_to_fixed(0); + data->total_write_request_bandwidth = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]); + } + else { + data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]); + } + } + } + data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency); + data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth); + data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth); + if (data->cpuc_state_change_enable == bw_def_yes) { + data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]); + data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], 
data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]); + } + if (data->cpup_state_change_enable == bw_def_yes) { + data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); + data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); + } + if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) { + data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); + data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); + } + if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) { + data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth; + } + else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) { + data->dispclk = vbios->high_voltage_max_dispclk; + } + else { + data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth; + } + /* required core voltage*/ + /* the core voltage required is low if sclk, yclk(pclk)and dispclk are within the low limits*/ + /* otherwise, the core voltage required is medium if yclk (pclk) is within the low limit and sclk and dispclk are within the medium limit*/ + /* otherwise, the core voltage required is high if the three clocks are within the high limits*/ + /* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/ + if (pipe_check == bw_def_notok) { + voltage = bw_def_na; + } + else if (mode_check == bw_def_notok) { + voltage = bw_def_notok; + } + else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) { + voltage = bw_def_0_72; + } + else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) { + voltage = bw_def_0_8; + } + else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) { + if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) { + voltage = bw_def_high_no_nbp_state_change; + } + else { + voltage = bw_def_0_9; + } + } + else { + voltage = bw_def_notok; + } + if (voltage == bw_def_0_72) { + data->max_phyclk = vbios->low_voltage_max_phyclk; + } + else if (voltage == bw_def_0_8) { + data->max_phyclk = vbios->mid_voltage_max_phyclk; + } + else { + data->max_phyclk = vbios->high_voltage_max_phyclk; + } + /*required blackout recovery 
time*/ + data->blackout_recovery_time = bw_int_to_fixed(0); + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) { + if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { + data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])); + if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) { + data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]))))); + } + } + else { + data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])); + if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) { + data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]))))); + } + } + } + } + /*sclk deep sleep*/ + /*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but shoudl not be set to less than the request bandwidth.*/ + /*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the 
parallel rotation underlay,*/
+	/*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/
+	/*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8 bpp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/
+	/*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+				data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+			}
+			else if (surface_type[i] == bw_def_graphics) {
+				data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i]));
+			}
+			else if (data->orthogonal_rotation[i] == 0) {
+				data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+			}
+			else {
+				data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i]));
+			}
+		}
+	}
+	data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) {
+				data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i];
+			}
+		}
+	}
+	data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth);
+	/*urgent, stutter and nb-p_state watermark*/
+	/*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/
+	/*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.
it does not apply to the writeback.*/ + /*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/ + /*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/ + /*blackout_duration is added to the urgent watermark*/ + data->chunk_request_time = bw_int_to_fixed(0); + data->cursor_request_time = bw_int_to_fixed(0); + /*compute total time to request one chunk from each active display pipe*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2)))))); + } + } + /*compute total time to request cursor data*/ + data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level])))); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i])); + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time); + data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]); + data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]); + /*unconditionally remove black out time from the nb p_state watermark*/ + if (data->display_pstate_change_enable[i] == 1) { + data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level])); + } + else { + /*maximize the watermark to force the switch in the vb_lank region of the frame*/ + data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000); + } + } + else { + data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time); + 
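+				/*
+				 * Editor's note (illustrative; not part of the original change):
+				 * this branch handles the display write-back surfaces, so the
+				 * urgent watermark above is built from the mcifwr urgent latency
+				 * and burst time rather than the dmif ones, and the stutter
+				 * watermarks that follow are forced to zero because, as noted
+				 * earlier, stutter does not apply to the write-back path.
+				 */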
+				data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
+				data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
+				if (data->display_pstate_change_enable[i] == 1) {
+					data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+				}
+				else {
+					/*maximize the watermark to force the switch in the v_blank region of the frame*/
+					data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+				}
+			}
+		}
+	}
+	/*stutter mode enable*/
+	/*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the*/
+	/*display pipe.*/
+	data->stutter_mode_enable = data->cpuc_state_change_enable;
+	if (data->number_of_displays > 1) {
+		for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+			if (data->enable[i]) {
+				if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) {
+					data->stutter_mode_enable = bw_def_no;
+				}
+			}
+		}
+	}
+	/*performance metrics*/
+	/* display read access efficiency (%)*/
+	/* display write back access efficiency (%)*/
+	/* stutter efficiency (%)*/
+	/* extra underlay pitch recommended for efficiency (pixels)*/
+	/* immediate flip time (us)*/
+	/* latency for other clients due to urgent display read (us)*/
+	/* latency for other clients due to urgent display write (us)*/
+	/* average bandwidth consumed by display (no compression) (gb/s)*/
+	/* required dram bandwidth (gb/s)*/
+	/* required sclk (m_hz)*/
+	/* required rd urgent latency (us)*/
+	/* nb p-state change margin (us)*/
+	/*dmif and mcifwr dram access efficiency*/
+	/*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time. but it cannot exceed the dram efficiency provided by the memory subsystem*/
+	data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1));
+	if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) {
+		data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1));
+	}
+	else {
+		data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
+	}
+	/*stutter efficiency*/
+	/*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration.
only applies if the display write-back is not enabled.*/ + /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/ + /*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/ + /*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/ + /*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/ + /*compute the time read all the data from the dmif buffer to the lb (dram refresh period)*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i])))); + data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]); + } + } + data->min_stutter_refresh_duration = bw_int_to_fixed(9999); + data->total_stutter_dmif_buffer_size = 0; + data->total_bytes_requested = 0; + data->min_stutter_dmif_buffer_size = 9999; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) { + data->min_stutter_refresh_duration = data->stutter_refresh_duration[i]; + data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i]))))); + data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]); + } + data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size))); + } + } + data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width)); + data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size; + data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time); + data->time_in_self_refresh = data->min_stutter_refresh_duration; + if (data->d1_display_write_back_dwb_enable == 1) { + data->stutter_efficiency = bw_int_to_fixed(0); + } + else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) { + data->stutter_efficiency = bw_int_to_fixed(0); + } + else { + /*compute stutter efficiency assuming 60 hz refresh rate*/ + data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100))); + } + /*immediate flip time*/ + /*if scatter 
gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/
+	/*otherwise, it may take just one urgent memory trip*/
+	data->worst_number_of_trips_to_memory = bw_int_to_fixed(1);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+			data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1));
+			if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) {
+				data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i];
+			}
+		}
+	}
+	data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency);
+	/*worst latency for other clients*/
+	/*it is the urgent latency plus the urgent burst time*/
+	data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]);
+	if (data->d1_display_write_back_dwb_enable == 1) {
+		data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time);
+	}
+	else {
+		data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0);
+	}
+	/*dmif mc urgent latency supported in high sclk and yclk*/
+	data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips);
+	/*dram speed/p-state change margin*/
+	/*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/
+	data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+	data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+	for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+		if (data->enable[i]) {
+			data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+			data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+		}
+	}
+	/*sclk required vs urgent latency*/
+	for (i = 1; i <= 5; i++) {
+		data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
+		if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
+			data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))));
+		}
+		else {
+			data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
+		}
+	}
+	/*output link bit per pixel
supported*/ + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + data->output_bpphdmi[k] = bw_def_na; + data->output_bppdp4_lane_hbr[k] = bw_def_na; + data->output_bppdp4_lane_hbr2[k] = bw_def_na; + data->output_bppdp4_lane_hbr3[k] = bw_def_na; + if (data->enable[k]) { + data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24))); + if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) { + data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8))); + } + if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) { + data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8))); + } + if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) { + data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8))); + } + } + } + + kfree(surface_type); +free_tiling_mode: + kfree(tiling_mode); +free_yclk: + kfree(yclk); +free_sclk: + kfree(sclk); +} + +/******************************************************************************* + * Public functions + ******************************************************************************/ +void bw_calcs_init(struct bw_calcs_dceip *bw_dceip, + struct bw_calcs_vbios *bw_vbios, + struct hw_asic_id asic_id) +{ + struct bw_calcs_dceip *dceip; + struct bw_calcs_vbios *vbios; + + enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id); + + dceip = kzalloc(sizeof(*dceip), GFP_KERNEL); + if (!dceip) + return; + + vbios = kzalloc(sizeof(*vbios), GFP_KERNEL); + if (!vbios) { + kfree(dceip); + return; + } + + dceip->version = version; + + switch (version) { + case BW_CALCS_VERSION_CARRIZO: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 64; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(1600); + vbios->mid_yclk = bw_int_to_fixed(1600); + vbios->low_yclk = bw_frc_to_fixed(66666, 100); + vbios->low_sclk = bw_int_to_fixed(200); + vbios->mid1_sclk = bw_int_to_fixed(300); + vbios->mid2_sclk = bw_int_to_fixed(300); + vbios->mid3_sclk = bw_int_to_fixed(300); + vbios->mid4_sclk = bw_int_to_fixed(300); + vbios->mid5_sclk = bw_int_to_fixed(300); + vbios->mid6_sclk = bw_int_to_fixed(300); + vbios->high_sclk = bw_frc_to_fixed(62609, 100); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(352); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(643); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(50); + vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); + vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_frc_to_fixed(19649, 1000); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = true; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + 
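+		/*
+		 * Editor's note (illustrative; not part of the original change):
+		 * bw_int_to_fixed(n) and bw_frc_to_fixed(n, d) build the fixed-point
+		 * values n and n/d used throughout these per-ASIC tables; for example
+		 * the low_yclk above, bw_frc_to_fixed(66666, 100), is 666.66.  The
+		 * clock entries appear to be in MHz and the latency entries in
+		 * microseconds, matching the "us" annotation on blackout_duration
+		 * below.
+		 */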
vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 3; + dceip->number_of_underlay_pipes = 1; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = false; + dceip->argb_compression_support = false; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 2; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(82176); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = false; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(0); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = 
bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/ + break; + case BW_CALCS_VERSION_POLARIS10: + /* TODO: Treat VEGAM the same as P10 for now + * Need to tune the para for VEGAM if needed */ + case BW_CALCS_VERSION_VEGAM: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 32; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(6000); + vbios->mid_yclk = bw_int_to_fixed(3200); + vbios->low_yclk = bw_int_to_fixed(1000); + vbios->low_sclk = bw_int_to_fixed(300); + vbios->mid1_sclk = bw_int_to_fixed(400); + vbios->mid2_sclk = bw_int_to_fixed(500); + vbios->mid3_sclk = bw_int_to_fixed(600); + vbios->mid4_sclk = bw_int_to_fixed(700); + vbios->mid5_sclk = bw_int_to_fixed(800); + vbios->mid6_sclk = bw_int_to_fixed(974); + vbios->high_sclk = bw_int_to_fixed(1154); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(459); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(48); + vbios->dmifmc_urgent_latency = bw_int_to_fixed(3); + vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_int_to_fixed(45); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = true; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 6; + dceip->number_of_underlay_pipes = 0; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = false; + dceip->argb_compression_support = true; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + 
dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 4; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(245952); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(1); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + case BW_CALCS_VERSION_POLARIS11: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 32; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(6000); + vbios->mid_yclk = bw_int_to_fixed(3200); + vbios->low_yclk = bw_int_to_fixed(1000); + vbios->low_sclk = bw_int_to_fixed(300); + vbios->mid1_sclk = bw_int_to_fixed(400); + vbios->mid2_sclk = bw_int_to_fixed(500); + vbios->mid3_sclk = bw_int_to_fixed(600); + vbios->mid4_sclk = bw_int_to_fixed(700); + vbios->mid5_sclk = bw_int_to_fixed(800); + vbios->mid6_sclk = bw_int_to_fixed(974); + vbios->high_sclk = bw_int_to_fixed(1154); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(459); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(48); + if (vbios->number_of_dram_channels == 2) // 64-bit + vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); + else + vbios->dmifmc_urgent_latency = 
bw_int_to_fixed(3); + vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_int_to_fixed(45); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = true; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 5; + dceip->number_of_underlay_pipes = 0; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = false; + dceip->argb_compression_support = true; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 4; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(245952); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(1); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + 
dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + case BW_CALCS_VERSION_POLARIS12: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 32; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(6000); + vbios->mid_yclk = bw_int_to_fixed(3200); + vbios->low_yclk = bw_int_to_fixed(1000); + vbios->low_sclk = bw_int_to_fixed(678); + vbios->mid1_sclk = bw_int_to_fixed(864); + vbios->mid2_sclk = bw_int_to_fixed(900); + vbios->mid3_sclk = bw_int_to_fixed(920); + vbios->mid4_sclk = bw_int_to_fixed(940); + vbios->mid5_sclk = bw_int_to_fixed(960); + vbios->mid6_sclk = bw_int_to_fixed(980); + vbios->high_sclk = bw_int_to_fixed(1049); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(459); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(48); + if (vbios->number_of_dram_channels == 2) // 64-bit + vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); + else + vbios->dmifmc_urgent_latency = bw_int_to_fixed(3); + vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_int_to_fixed(250); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = false; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 5; + dceip->number_of_underlay_pipes = 0; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = true; + dceip->argb_compression_support = true; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + 
dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 4; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(245952); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(1); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + case BW_CALCS_VERSION_STONEY: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 64; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(1866); + vbios->mid_yclk = bw_int_to_fixed(1866); + vbios->low_yclk = bw_int_to_fixed(1333); + vbios->low_sclk = bw_int_to_fixed(200); + vbios->mid1_sclk = bw_int_to_fixed(600); + vbios->mid2_sclk = bw_int_to_fixed(600); + vbios->mid3_sclk = bw_int_to_fixed(600); + vbios->mid4_sclk = bw_int_to_fixed(600); + vbios->mid5_sclk = bw_int_to_fixed(600); + vbios->mid6_sclk = bw_int_to_fixed(600); + vbios->high_sclk = bw_int_to_fixed(800); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(352); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467); + vbios->high_voltage_max_dispclk = 
bw_int_to_fixed(643); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(50); + vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); + vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_frc_to_fixed(2008, 100); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = true; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 2; + dceip->number_of_underlay_pipes = 1; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = false; + dceip->argb_compression_support = true; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 2; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(82176); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = false; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(0); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + 
bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + case BW_CALCS_VERSION_VEGA10: + vbios->memory_type = bw_def_hbm; + vbios->dram_channel_width_in_bits = 128; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 16; + vbios->high_yclk = bw_int_to_fixed(2400); + vbios->mid_yclk = bw_int_to_fixed(1700); + vbios->low_yclk = bw_int_to_fixed(1000); + vbios->low_sclk = bw_int_to_fixed(300); + vbios->mid1_sclk = bw_int_to_fixed(350); + vbios->mid2_sclk = bw_int_to_fixed(400); + vbios->mid3_sclk = bw_int_to_fixed(500); + vbios->mid4_sclk = bw_int_to_fixed(600); + vbios->mid5_sclk = bw_int_to_fixed(700); + vbios->mid6_sclk = bw_int_to_fixed(760); + vbios->high_sclk = bw_int_to_fixed(776); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(460); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(670); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(1133); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(48); + vbios->dmifmc_urgent_latency = bw_int_to_fixed(3); + vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10); + vbios->stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10); + vbios->nbp_state_change_latency = bw_int_to_fixed(39); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = false; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(2304); + dceip->dmif_pipe_en_fbc_chunk_tracker = true; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 6; + dceip->number_of_underlay_pipes = 0; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = true; + dceip->argb_compression_support = true; + 
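/*
 * For illustration only: the scaler-efficiency constants that follow are
 * fixed-point values built with the helpers already used above,
 * bw_frc_to_fixed(numerator, denominator) for fractions and
 * bw_int_to_fixed() for exact integers, e.g.
 *
 *   bw_frc_to_fixed(34286, 10000)   approximately 3.4286
 *   bw_frc_to_fixed(32, 10)         approximately 3.2
 *   bw_int_to_fixed(3)              exactly 3
 */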
dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 4; + dceip->graphics_dmif_size = 24576; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = false; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(245952); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(1); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + default: + break; + } + *bw_dceip = *dceip; + *bw_vbios = *vbios; + + kfree(dceip); + kfree(vbios); +} + +/* + * Compare calculated (required) clocks against the clocks available at + * maximum voltage (max Performance Level). 
+ */ +static bool is_display_configuration_supported( + const struct bw_calcs_vbios *vbios, + const struct dce_bw_output *calcs_output) +{ + uint32_t int_max_clk; + + int_max_clk = bw_fixed_to_int(vbios->high_voltage_max_dispclk); + int_max_clk *= 1000; /* MHz to kHz */ + if (calcs_output->dispclk_khz > int_max_clk) + return false; + + int_max_clk = bw_fixed_to_int(vbios->high_sclk); + int_max_clk *= 1000; /* MHz to kHz */ + if (calcs_output->sclk_khz > int_max_clk) + return false; + + return true; +} + +static void populate_initial_data( + const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data) +{ + int i, j; + int num_displays = 0; + + data->underlay_surface_type = bw_def_420; + data->panning_and_bezel_adjustment = bw_def_none; + data->graphics_lb_bpc = 10; + data->underlay_lb_bpc = 8; + data->underlay_tiling_mode = bw_def_tiled; + data->graphics_tiling_mode = bw_def_tiled; + data->underlay_micro_tile_mode = bw_def_display_micro_tiling; + data->graphics_micro_tile_mode = bw_def_display_micro_tiling; + data->increase_voltage_to_support_mclk_switch = true; + + /* Pipes with underlay first */ + for (i = 0; i < pipe_count; i++) { + if (!pipe[i].stream || !pipe[i].bottom_pipe) + continue; + + ASSERT(pipe[i].plane_state); + + if (num_displays == 0) { + if (!pipe[i].plane_state->visible) + data->d0_underlay_mode = bw_def_underlay_only; + else + data->d0_underlay_mode = bw_def_blend; + } else { + if (!pipe[i].plane_state->visible) + data->d1_underlay_mode = bw_def_underlay_only; + else + data->d1_underlay_mode = bw_def_blend; + } + + data->fbc_en[num_displays + 4] = false; + data->lpt_en[num_displays + 4] = false; + data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); + data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); + data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_100hz, 10000); + data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); + data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; + data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height); + data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps); + data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps); + data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value); + data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value); + switch (pipe[i].plane_state->rotation) { + case ROTATION_ANGLE_0: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); + break; + case ROTATION_ANGLE_90: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90); + break; + case ROTATION_ANGLE_180: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180); + break; + case ROTATION_ANGLE_270: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270); + break; + default: + break; + } + switch (pipe[i].plane_state->format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + data->bytes_per_pixel[num_displays + 4] = 2; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + case 
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + data->bytes_per_pixel[num_displays + 4] = 4; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + data->bytes_per_pixel[num_displays + 4] = 8; + break; + default: + data->bytes_per_pixel[num_displays + 4] = 4; + break; + } + data->interlace_mode[num_displays + 4] = false; + data->stereo_mode[num_displays + 4] = bw_def_mono; + + + for (j = 0; j < 2; j++) { + data->fbc_en[num_displays * 2 + j] = false; + data->lpt_en[num_displays * 2 + j] = false; + + data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height); + data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width); + data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed( + pipe[i].bottom_pipe->plane_state->plane_size.surface_pitch); + data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps); + data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps); + data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed( + pipe[i].bottom_pipe->plane_res.scl_data.ratios.horz.value); + data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed( + pipe[i].bottom_pipe->plane_res.scl_data.ratios.vert.value); + switch (pipe[i].bottom_pipe->plane_state->rotation) { + case ROTATION_ANGLE_0: + data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0); + break; + case ROTATION_ANGLE_90: + data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90); + break; + case ROTATION_ANGLE_180: + data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180); + break; + case ROTATION_ANGLE_270: + data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270); + break; + default: + break; + } + data->stereo_mode[num_displays * 2 + j] = bw_def_mono; + } + + num_displays++; + } + + /* Pipes without underlay after */ + for (i = 0; i < pipe_count; i++) { + unsigned int pixel_clock_100hz; + if (!pipe[i].stream || pipe[i].bottom_pipe) + continue; + + + data->fbc_en[num_displays + 4] = false; + data->lpt_en[num_displays + 4] = false; + data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); + data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); + pixel_clock_100hz = pipe[i].stream->timing.pix_clk_100hz; + if (pipe[i].stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clock_100hz *= 2; + data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_100hz, 10000); + if (pipe[i].plane_state) { + data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); + data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; + data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height); + data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps); + data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps); + data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value); + data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value); + switch (pipe[i].plane_state->rotation) { + case ROTATION_ANGLE_0: + 
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); + break; + case ROTATION_ANGLE_90: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90); + break; + case ROTATION_ANGLE_180: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180); + break; + case ROTATION_ANGLE_270: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270); + break; + default: + break; + } + switch (pipe[i].plane_state->format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + data->bytes_per_pixel[num_displays + 4] = 2; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + data->bytes_per_pixel[num_displays + 4] = 4; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + data->bytes_per_pixel[num_displays + 4] = 8; + break; + default: + data->bytes_per_pixel[num_displays + 4] = 4; + break; + } + } else if (pipe[i].stream->dst.width != 0 && + pipe[i].stream->dst.height != 0 && + pipe[i].stream->src.width != 0 && + pipe[i].stream->src.height != 0) { + data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.width); + data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; + data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.height); + data->h_taps[num_displays + 4] = pipe[i].stream->src.width == pipe[i].stream->dst.width ? bw_int_to_fixed(1) : bw_int_to_fixed(2); + data->v_taps[num_displays + 4] = pipe[i].stream->src.height == pipe[i].stream->dst.height ? 
bw_int_to_fixed(1) : bw_int_to_fixed(2); + data->h_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.width, pipe[i].stream->dst.width); + data->v_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.height, pipe[i].stream->dst.height); + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); + data->bytes_per_pixel[num_displays + 4] = 4; + } else { + data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable); + data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; + data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_addressable); + data->h_taps[num_displays + 4] = bw_int_to_fixed(1); + data->v_taps[num_displays + 4] = bw_int_to_fixed(1); + data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1); + data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1); + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); + data->bytes_per_pixel[num_displays + 4] = 4; + } + + data->interlace_mode[num_displays + 4] = false; + data->stereo_mode[num_displays + 4] = bw_def_mono; + num_displays++; + } + + data->number_of_displays = num_displays; +} + +static bool all_displays_in_sync(const struct pipe_ctx pipe[], + int pipe_count) +{ + const struct pipe_ctx *active_pipes[MAX_PIPES]; + int i, num_active_pipes = 0; + + for (i = 0; i < pipe_count; i++) { + if (!pipe[i].stream || pipe[i].top_pipe) + continue; + + active_pipes[num_active_pipes++] = &pipe[i]; + } + + if (!num_active_pipes) + return false; + + for (i = 1; i < num_active_pipes; ++i) { + if (!resource_are_streams_timing_synchronizable( + active_pipes[0]->stream, active_pipes[i]->stream)) { + return false; + } + } + + return true; +} + +/* + * Return: + * true - Display(s) configuration supported. + * In this case 'calcs_output' contains data for HW programming + * false - Display(s) configuration not supported (not enough bandwidth). 
+ */ +bool bw_calcs(struct dc_context *ctx, + const struct bw_calcs_dceip *dceip, + const struct bw_calcs_vbios *vbios, + const struct pipe_ctx pipe[], + int pipe_count, + struct dce_bw_output *calcs_output) +{ + struct bw_calcs_data *data = kzalloc(sizeof(struct bw_calcs_data), + GFP_KERNEL); + if (!data) + return false; + + populate_initial_data(pipe, pipe_count, data); + + if (ctx->dc->config.multi_mon_pp_mclk_switch) + calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count); + else + calcs_output->all_displays_in_sync = false; + + if (data->number_of_displays != 0) { + uint8_t yclk_lvl; + struct bw_fixed high_sclk = vbios->high_sclk; + struct bw_fixed mid1_sclk = vbios->mid1_sclk; + struct bw_fixed mid2_sclk = vbios->mid2_sclk; + struct bw_fixed mid3_sclk = vbios->mid3_sclk; + struct bw_fixed mid4_sclk = vbios->mid4_sclk; + struct bw_fixed mid5_sclk = vbios->mid5_sclk; + struct bw_fixed mid6_sclk = vbios->mid6_sclk; + struct bw_fixed low_sclk = vbios->low_sclk; + struct bw_fixed high_yclk = vbios->high_yclk; + struct bw_fixed mid_yclk = vbios->mid_yclk; + struct bw_fixed low_yclk = vbios->low_yclk; + + if (ctx->dc->debug.bandwidth_calcs_trace) { + print_bw_calcs_dceip(ctx, dceip); + print_bw_calcs_vbios(ctx, vbios); + print_bw_calcs_data(ctx, data); + } + calculate_bandwidth(dceip, vbios, data); + + yclk_lvl = data->y_clk_level; + + calcs_output->nbp_state_change_enable = + data->nbp_state_change_enable; + calcs_output->cpuc_state_change_enable = + data->cpuc_state_change_enable; + calcs_output->cpup_state_change_enable = + data->cpup_state_change_enable; + calcs_output->stutter_mode_enable = + data->stutter_mode_enable; + calcs_output->dispclk_khz = + bw_fixed_to_int(bw_mul(data->dispclk, + bw_int_to_fixed(1000))); + calcs_output->blackout_recovery_time_us = + bw_fixed_to_int(data->blackout_recovery_time); + calcs_output->sclk_khz = + bw_fixed_to_int(bw_mul(data->required_sclk, + bw_int_to_fixed(1000))); + calcs_output->sclk_deep_sleep_khz = + bw_fixed_to_int(bw_mul(data->sclk_deep_sleep, + bw_int_to_fixed(1000))); + if (yclk_lvl == 0) + calcs_output->yclk_khz = bw_fixed_to_int( + bw_mul(low_yclk, bw_int_to_fixed(1000))); + else if (yclk_lvl == 1) + calcs_output->yclk_khz = bw_fixed_to_int( + bw_mul(mid_yclk, bw_int_to_fixed(1000))); + else + calcs_output->yclk_khz = bw_fixed_to_int( + bw_mul(high_yclk, bw_int_to_fixed(1000))); + + /* units: nanosecond, 16bit storage. 
*/ + + calcs_output->nbp_state_change_wm_ns[0].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[4], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[1].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[5], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[2].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[6], bw_int_to_fixed(1000))); + + if (ctx->dc->caps.max_slave_planes) { + calcs_output->nbp_state_change_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[0], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->nbp_state_change_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[7], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->nbp_state_change_wm_ns[5].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[9], bw_int_to_fixed(1000))); + + + + calcs_output->stutter_exit_wm_ns[0].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[1].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[2].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_exit_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_exit_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_exit_wm_ns[5].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->stutter_entry_wm_ns[0].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->urgent_wm_ns[0].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[4], 
bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[1].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[5], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[2].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->urgent_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[0], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->urgent_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[7], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->urgent_wm_ns[5].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[9], bw_int_to_fixed(1000))); + + if (dceip->version != BW_CALCS_VERSION_CARRIZO) { + ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk; + calculate_bandwidth(dceip, vbios, data); + + calcs_output->nbp_state_change_wm_ns[0].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[4],bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[1].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[5], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[2].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[6], bw_int_to_fixed(1000))); + + if (ctx->dc->caps.max_slave_planes) { + calcs_output->nbp_state_change_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[0], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->nbp_state_change_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[7], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->nbp_state_change_wm_ns[5].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[9], bw_int_to_fixed(1000))); + + + + calcs_output->stutter_exit_wm_ns[0].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[1].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[2].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_exit_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_exit_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_exit_wm_ns[5].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[9], bw_int_to_fixed(1000))); + + 
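/*
 * For illustration: the a_mark values above come from the first
 * calculate_bandwidth() pass at the unmodified vbios clocks; the
 * b/c/d_mark sets are produced by temporarily overriding the vbios
 * sclk/yclk levels (as done just above) and re-running the calculation,
 * with the original levels restored afterwards.  Each watermark is
 * converted from the calculator's microsecond fixed-point value to the
 * integer nanoseconds stored here, e.g.
 *
 *   wm_ns = bw_fixed_to_int(bw_mul(wm_us, bw_int_to_fixed(1000)));
 *
 * where wm_us / wm_ns are placeholder names for this sketch only.
 */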
calcs_output->stutter_entry_wm_ns[0].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->urgent_wm_ns[0].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[4], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[1].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[5], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[2].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->urgent_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[0], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->urgent_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[7], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->urgent_wm_ns[5].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[9], bw_int_to_fixed(1000))); + + ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk; + ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk; + calculate_bandwidth(dceip, vbios, data); + + calcs_output->nbp_state_change_wm_ns[0].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[4], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[1].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[5], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[2].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->nbp_state_change_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[0], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->nbp_state_change_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[7], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->nbp_state_change_wm_ns[5].c_mark = + 
bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[9], bw_int_to_fixed(1000))); + + + calcs_output->stutter_exit_wm_ns[0].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[1].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[2].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_exit_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_exit_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_exit_wm_ns[5].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->stutter_entry_wm_ns[0].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->urgent_wm_ns[0].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[4], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[1].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[5], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[2].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->urgent_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[0], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->urgent_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[7], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->urgent_wm_ns[5].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[9], bw_int_to_fixed(1000))); + } + + if (dceip->version == BW_CALCS_VERSION_CARRIZO) { + ((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk; + ((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk; + 
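/*
 * Sketch of what the overrides around this point do for the d_mark
 * pass: on Carrizo the low/mid yclk levels and the low/mid1..mid6 sclk
 * levels are all pinned to the highest entries, while on the other DCE
 * versions low_yclk is raised to mid_yclk and the low/mid1/mid2 sclk
 * levels to mid3_sclk before calculate_bandwidth() runs again.
 */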
((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk; + } else { + ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk; + ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk; + } + + calculate_bandwidth(dceip, vbios, data); + + calcs_output->nbp_state_change_wm_ns[0].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[4], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[1].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[5], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[2].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->nbp_state_change_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[0], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->nbp_state_change_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[7], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->nbp_state_change_wm_ns[5].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->stutter_exit_wm_ns[0].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[1].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[2].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_exit_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_exit_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_exit_wm_ns[5].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->stutter_entry_wm_ns[0].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + 
stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->urgent_wm_ns[0].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[4], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[1].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[5], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[2].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->urgent_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[0], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->urgent_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[7], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->urgent_wm_ns[5].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[9], bw_int_to_fixed(1000))); + + ((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk; + ((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk; + ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk; + ((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk; + ((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk; + ((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk; + ((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk; + } else { + calcs_output->nbp_state_change_enable = true; + calcs_output->cpuc_state_change_enable = true; + calcs_output->cpup_state_change_enable = true; + calcs_output->stutter_mode_enable = true; + calcs_output->dispclk_khz = 0; + calcs_output->sclk_khz = 0; + } + + kfree(data); + + return is_display_configuration_supported(vbios, calcs_output); +} only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c @@ -0,0 +1,1933 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dcn_calc_auto.h" +#include "dcn_calc_math.h" + +/* + * NOTE: + * This file is gcc-parseable HW gospel, coming straight from HW engineers. + * + * It doesn't adhere to Linux kernel style and sometimes will do things in odd + * ways. Unless there is something clearly wrong with it the code should + * remain as-is as it provides us with a guarantee from HW that it is correct. + */ + +/*REVISION#250*/ +void scaler_settings_calculation(struct dcn_bw_internal_vars *v) +{ + int k; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->allow_different_hratio_vratio == dcn_bw_yes) { + if (v->source_scan[k] == dcn_bw_hor) { + v->h_ratio[k] = v->viewport_width[k] / v->scaler_rec_out_width[k]; + v->v_ratio[k] = v->viewport_height[k] / v->scaler_recout_height[k]; + } + else { + v->h_ratio[k] = v->viewport_height[k] / v->scaler_rec_out_width[k]; + v->v_ratio[k] = v->viewport_width[k] / v->scaler_recout_height[k]; + } + } + else { + if (v->source_scan[k] == dcn_bw_hor) { + v->h_ratio[k] =dcn_bw_max2(v->viewport_width[k] / v->scaler_rec_out_width[k], v->viewport_height[k] / v->scaler_recout_height[k]); + } + else { + v->h_ratio[k] =dcn_bw_max2(v->viewport_height[k] / v->scaler_rec_out_width[k], v->viewport_width[k] / v->scaler_recout_height[k]); + } + v->v_ratio[k] = v->h_ratio[k]; + } + if (v->interlace_output[k] == 1.0) { + v->v_ratio[k] = 2.0 * v->v_ratio[k]; + } + if (v->underscan_output[k] == 1.0) { + v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor; + v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor; + } + } + /*scaler taps calculation*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->h_ratio[k] > 1.0) { + v->acceptable_quality_hta_ps =dcn_bw_min2(v->max_hscl_taps, 2.0 *dcn_bw_ceil2(v->h_ratio[k], 1.0)); + } + else if (v->h_ratio[k] < 1.0) { + v->acceptable_quality_hta_ps = 4.0; + } + else { + v->acceptable_quality_hta_ps = 1.0; + } + if (v->ta_pscalculation == dcn_bw_override) { + v->htaps[k] = v->override_hta_ps[k]; + } + else { + v->htaps[k] = v->acceptable_quality_hta_ps; + } + if (v->v_ratio[k] > 1.0) { + v->acceptable_quality_vta_ps =dcn_bw_min2(v->max_vscl_taps, 2.0 *dcn_bw_ceil2(v->v_ratio[k], 1.0)); + } + else if (v->v_ratio[k] < 1.0) { + v->acceptable_quality_vta_ps = 4.0; + } + else { + v->acceptable_quality_vta_ps = 1.0; + } + if (v->ta_pscalculation == dcn_bw_override) { + v->vtaps[k] = v->override_vta_ps[k]; + } + else { + v->vtaps[k] = v->acceptable_quality_vta_ps; + } + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->vta_pschroma[k] = 0.0; + v->hta_pschroma[k] = 0.0; + } + else { + if (v->ta_pscalculation == dcn_bw_override) { + v->vta_pschroma[k] = v->override_vta_pschroma[k]; + v->hta_pschroma[k] = v->override_hta_pschroma[k]; + } + else { + v->vta_pschroma[k] = v->acceptable_quality_vta_ps; + v->hta_pschroma[k] = v->acceptable_quality_hta_ps; + } + } + } +} + +void 
mode_support_and_system_configuration(struct dcn_bw_internal_vars *v) +{ + int i; + int j; + int k; + /*mode support, voltage state and soc configuration*/ + + /*scale ratio support check*/ + + v->scale_ratio_support = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->h_ratio[k] > v->max_hscl_ratio || v->v_ratio[k] > v->max_vscl_ratio || v->h_ratio[k] > v->htaps[k] || v->v_ratio[k] > v->vtaps[k] || (v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16 && (v->h_ratio[k] / 2.0 > v->hta_pschroma[k] || v->v_ratio[k] / 2.0 > v->vta_pschroma[k]))) { + v->scale_ratio_support = dcn_bw_no; + } + } + /*source format, pixel format and scan support check*/ + + v->source_format_pixel_and_scan_support = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->source_surface_mode[k] == dcn_bw_sw_linear && v->source_scan[k] != dcn_bw_hor) || ((v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_var_d || v->source_surface_mode[k] == dcn_bw_sw_var_d_x) && v->source_pixel_format[k] != dcn_bw_rgb_sub_64)) { + v->source_format_pixel_and_scan_support = dcn_bw_no; + } + } + /*bandwidth support check*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_scan[k] == dcn_bw_hor) { + v->swath_width_ysingle_dpp[k] = v->viewport_width[k]; + } + else { + v->swath_width_ysingle_dpp[k] = v->viewport_height[k]; + } + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->byte_per_pixel_in_dety[k] = 8.0; + v->byte_per_pixel_in_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { + v->byte_per_pixel_in_dety[k] = 4.0; + v->byte_per_pixel_in_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->byte_per_pixel_in_dety[k] = 2.0; + v->byte_per_pixel_in_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->byte_per_pixel_in_dety[k] = 1.0; + v->byte_per_pixel_in_detc[k] = 2.0; + } + else { + v->byte_per_pixel_in_dety[k] = 4.0f / 3.0f; + v->byte_per_pixel_in_detc[k] = 8.0f / 3.0f; + } + } + v->total_read_bandwidth_consumed_gbyte_per_second = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->read_bandwidth[k] = v->swath_width_ysingle_dpp[k] * (dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) * v->v_ratio[k] +dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0 * v->v_ratio[k] / 2) / (v->htotal[k] / v->pixel_clock[k]); + if (v->dcc_enable[k] == dcn_bw_yes) { + v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256); + } + if (v->pte_enable == dcn_bw_yes && v->source_scan[k] != dcn_bw_hor && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x)) { + v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 64); + } + else if (v->pte_enable == dcn_bw_yes && v->source_scan[k] == dcn_bw_hor && (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32) && (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == 
dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x)) { + v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256); + } + else if (v->pte_enable == dcn_bw_yes) { + v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 512); + } + v->total_read_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->read_bandwidth[k] / 1000.0; + } + v->total_write_bandwidth_consumed_gbyte_per_second = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) { + v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0; + } + else if (v->output[k] == dcn_bw_writeback) { + v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5; + } + else { + v->write_bandwidth[k] = 0.0; + } + v->total_write_bandwidth_consumed_gbyte_per_second = v->total_write_bandwidth_consumed_gbyte_per_second + v->write_bandwidth[k] / 1000.0; + } + v->total_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->total_write_bandwidth_consumed_gbyte_per_second; + v->dcc_enabled_in_any_plane = dcn_bw_no; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->dcc_enabled_in_any_plane = dcn_bw_yes; + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0); + v->return_bw_per_state[i] = v->return_bw_todcn_per_state; + if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) { + v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency))); + } + v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); + if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) { + v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); + } + v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0); + if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) { + v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency))); + } + v->critical_point = 2.0 * v->return_bus_width * 
v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); + if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) { + v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + if ((v->total_read_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->return_bw_per_state[i]) && (v->total_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0)) { + v->bandwidth_support[i] = dcn_bw_yes; + } + else { + v->bandwidth_support[i] = dcn_bw_no; + } + } + /*writeback latency support check*/ + + v->writeback_latency_support = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444 && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0 > (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) { + v->writeback_latency_support = dcn_bw_no; + } + else if (v->output[k] == dcn_bw_writeback && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) >dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) { + v->writeback_latency_support = dcn_bw_no; + } + } + /*re-ordering buffer support check*/ + + for (i = 0; i <= number_of_states_plus_one; i++) { + v->urgent_round_trip_and_out_of_order_latency_per_state[i] = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk_per_state[i] + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw_per_state[i]; + if ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / v->return_bw_per_state[i] > v->urgent_round_trip_and_out_of_order_latency_per_state[i]) { + v->rob_support[i] = dcn_bw_yes; + } + else { + v->rob_support[i] = dcn_bw_no; + } + } + /*display io support check*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_dp && v->dsc_capability == dcn_bw_yes) { + if (v->output_format[k] == dcn_bw_420) { + v->required_output_bw = v->pixel_clock[k] / 2.0; + } + else { + v->required_output_bw = v->pixel_clock[k]; + } + } + else if (v->output_format[k] == dcn_bw_420) { + v->required_output_bw = v->pixel_clock[k] * 3.0 / 2.0; + } + else { + v->required_output_bw = v->pixel_clock[k] * 3.0; + } + if (v->output[k] == dcn_bw_hdmi) { + v->required_phyclk[k] = v->required_output_bw; + switch (v->output_deep_color[k]) { + case dcn_bw_encoder_10bpc: + v->required_phyclk[k] = v->required_phyclk[k] * 5.0 / 4; + break; + case dcn_bw_encoder_12bpc: + v->required_phyclk[k] = v->required_phyclk[k] * 3.0 / 2; + break; + default: + break; + } + v->required_phyclk[k] = v->required_phyclk[k] / 3.0; + } + else if (v->output[k] == dcn_bw_dp) { + v->required_phyclk[k] = v->required_output_bw / 4.0; + } + else { + v->required_phyclk[k] = 0.0; + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + v->dio_support[i] = dcn_bw_yes; + for (k = 0; k <= 
v->number_of_active_planes - 1; k++) { + if (v->required_phyclk[k] > v->phyclk_per_state[i] || (v->output[k] == dcn_bw_hdmi && v->required_phyclk[k] > 600.0)) { + v->dio_support[i] = dcn_bw_no; + } + } + } + /*total available writeback support check*/ + + v->total_number_of_active_writeback = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback) { + v->total_number_of_active_writeback = v->total_number_of_active_writeback + 1.0; + } + } + if (v->total_number_of_active_writeback <= v->max_num_writeback) { + v->total_available_writeback_support = dcn_bw_yes; + } + else { + v->total_available_writeback_support = dcn_bw_no; + } + /*maximum dispclk/dppclk support check*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->h_ratio[k] > 1.0) { + v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0)); + } + else { + v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); + } + if (v->byte_per_pixel_in_detc[k] == 0.0) { + v->pscl_factor_chroma[k] = 0.0; + v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], 1.0); + } + else { + if (v->h_ratio[k] / 2.0 > 1.0) { + v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0)); + } + else { + v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); + } + v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max5(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_factor_chroma[k], 1.0); + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->read256_block_height_y[k] = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->read256_block_height_y[k] = 4.0; + } + else { + v->read256_block_height_y[k] = 8.0; + } + v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k]; + v->read256_block_height_c[k] = 0.0; + v->read256_block_width_c[k] = 0.0; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->read256_block_height_y[k] = 1.0; + v->read256_block_height_c[k] = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->read256_block_height_y[k] = 16.0; + v->read256_block_height_c[k] = 8.0; + } + else { + v->read256_block_height_y[k] = 8.0; + v->read256_block_height_c[k] = 8.0; + } + v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k]; + v->read256_block_width_c[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->read256_block_height_c[k]; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->max_swath_height_y[k] = v->read256_block_height_y[k]; + v->max_swath_height_c[k] = v->read256_block_height_c[k]; + } + else { + v->max_swath_height_y[k] = v->read256_block_width_y[k]; + v->max_swath_height_c[k] = v->read256_block_width_c[k]; 
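/*
 * Worked example (numbers for illustration only): a 32bpp surface
 * (4 bytes per pixel) with a tiled swizzle gets
 * read256_block_height_y = 8, so read256_block_width_y =
 * 256 / 4 / 8 = 8 pixels, while the same surface in linear mode gets
 * height 1 and width 256 / 4 / 1 = 64 pixels.  Horizontal scan then
 * takes the swath height from the block height and vertical scan from
 * the block width, as assigned just above.
 */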
+ } + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) { + v->min_swath_height_y[k] = v->max_swath_height_y[k]; + } + else { + v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0; + } + v->min_swath_height_c[k] = v->max_swath_height_c[k]; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->min_swath_height_y[k] = v->max_swath_height_y[k]; + v->min_swath_height_c[k] = v->max_swath_height_c[k]; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) { + v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0; + if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { + v->min_swath_height_c[k] = v->max_swath_height_c[k]; + } + else { + v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0; + } + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) { + v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0; + if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { + v->min_swath_height_y[k] = v->max_swath_height_y[k]; + } + else { + v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0; + } + } + else { + v->min_swath_height_y[k] = v->max_swath_height_y[k]; + v->min_swath_height_c[k] = v->max_swath_height_c[k]; + } + } + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->maximum_swath_width = 8192.0; + } + else { + v->maximum_swath_width = 5120.0; + } + v->number_of_dpp_required_for_det_size =dcn_bw_ceil2(v->swath_width_ysingle_dpp[k] /dcn_bw_min2(v->maximum_swath_width, v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / (v->byte_per_pixel_in_dety[k] * v->min_swath_height_y[k] + v->byte_per_pixel_in_detc[k] / 2.0 * v->min_swath_height_c[k])), 1.0); + if (v->byte_per_pixel_in_detc[k] == 0.0) { + v->number_of_dpp_required_for_lb_size =dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0); + } + else { + v->number_of_dpp_required_for_lb_size =dcn_bw_max2(dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0),dcn_bw_ceil2((v->vta_pschroma[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k] / 2.0, 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0)); + } + v->number_of_dpp_required_for_det_and_lb_size[k] =dcn_bw_max2(v->number_of_dpp_required_for_det_size, v->number_of_dpp_required_for_lb_size); + } + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->total_number_of_active_dpp[i][j] = 0.0; + v->required_dispclk[i][j] = 0.0; + v->dispclk_dppclk_support[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + 
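/*
 * Sketch only, not from the driver: the number_of_dpp_required_for_det_size
 * expression above restated as a helper.  Argument names are invented;
 * det_kbytes corresponds to det_buffer_size_in_kbyte and max_swath_width to
 * the 5120/8192 limit chosen just before it.
 */
#include <math.h>

static double dpps_needed_for_det(double swath_width_single_dpp,
                                  double det_kbytes, double max_swath_width,
                                  double bpp_y, double min_swath_height_y,
                                  double bpp_c, double min_swath_height_c)
{
        /* The / 2.0 mirrors the halving in the expression above: only half
         * the DET is budgeted per swath. */
        double width_that_fits = det_kbytes * 1024.0 / 2.0 /
                                 (bpp_y * min_swath_height_y +
                                  bpp_c / 2.0 * min_swath_height_c);

        return ceil(swath_width_single_dpp /
                    fmin(max_swath_width, width_that_fits));
}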
v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0); + if (v->odm_capability == dcn_bw_yes) { + v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k] / 2.0, v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0); + } + else { + v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0); + } + if (i < number_of_states) { + v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); + v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); + } + if (v->min_dispclk_using_single_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i]) && v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) { + v->no_of_dpp[i][j][k] = 1.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp); + } + else if (v->min_dispclk_using_dual_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) { + v->no_of_dpp[i][j][k] = 2.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp); + } + else { + v->no_of_dpp[i][j][k] = 2.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp); + v->dispclk_dppclk_support[i][j] = dcn_bw_no; + } + v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k]; + } + if (v->total_number_of_active_dpp[i][j] > v->max_num_dpp) { + v->total_number_of_active_dpp[i][j] = 0.0; + v->required_dispclk[i][j] = 0.0; + v->dispclk_dppclk_support[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0); + v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0); + if (i < number_of_states) { + v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); + v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); + } + if (v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) { + v->no_of_dpp[i][j][k] = 1.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp); + if (v->min_dispclk_using_single_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) { + v->dispclk_dppclk_support[i][j] = dcn_bw_no; + } + } + else { + v->no_of_dpp[i][j][k] = 2.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp); + if (v->min_dispclk_using_dual_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) { + v->dispclk_dppclk_support[i][j] = dcn_bw_no; + } + } + v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k]; + } + } + } + } + /*viewport size check*/ + + v->viewport_size_support = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->number_of_dpp_required_for_det_and_lb_size[k] > 2.0) { + v->viewport_size_support = dcn_bw_no; + } + } + /*total available pipes support check*/ + + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + if 
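/*
 * Illustrative restatement (not the driver's code) of the 1-vs-2 DPP
 * decision in this loop, for one plane, one state and the 1:1
 * dispclk:dppclk case (j == 0).  For brevity the ODM special case is
 * dropped and the downspread and ramping margins are folded into a single
 * factor; the code above applies the ramping margin only below the top
 * state.
 */
#include <math.h>
#include <stdbool.h>

struct dpp_choice {
        int dpps;                 /* 1 or 2 */
        double required_dispclk;
        bool supported;
};

static struct dpp_choice pick_dpp_count(double pixel_clock,
                                        double min_dppclk_single_dpp,
                                        bool needs_two_dpps_for_det_or_lb,
                                        double max_dispclk, double max_dppclk,
                                        double downspread_pct, double ramping_pct)
{
        double margin = (1.0 + downspread_pct / 100.0) *
                        (1.0 + ramping_pct / 100.0);
        double single = fmax(pixel_clock, min_dppclk_single_dpp) * margin;
        double dual   = fmax(pixel_clock, min_dppclk_single_dpp / 2.0) * margin;
        double limit  = fmin(max_dispclk, max_dppclk);
        struct dpp_choice c;

        if (single <= limit && !needs_two_dpps_for_det_or_lb) {
                c.dpps = 1;
                c.required_dispclk = single;
                c.supported = true;
        } else {
                c.dpps = 2;
                c.required_dispclk = dual;
                c.supported = (dual <= limit);
        }
        return c;
}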
(v->total_number_of_active_dpp[i][j] <= v->max_num_dpp) { + v->total_available_pipes_support[i][j] = dcn_bw_yes; + } + else { + v->total_available_pipes_support[i][j] = dcn_bw_no; + } + } + } + /*urgent latency support check*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->swath_width_yper_state[i][j][k] = v->swath_width_ysingle_dpp[k] / v->no_of_dpp[i][j][k]; + v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->max_swath_height_y[k]; + v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pixel_in_dety[k] * v->max_swath_height_y[k]; + if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { + v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256; + } + if (v->max_swath_height_c[k] > 0.0) { + v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k]; + } + v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k]; + if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { + v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256; + } + if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) { + v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k]; + v->swath_height_cper_state[i][j][k] = v->max_swath_height_c[k]; + } + else { + v->swath_height_yper_state[i][j][k] = v->min_swath_height_y[k]; + v->swath_height_cper_state[i][j][k] = v->min_swath_height_c[k]; + } + if (v->byte_per_pixel_in_detc[k] == 0.0) { + v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k]; + v->lines_in_det_chroma = 0.0; + } + else if (v->swath_height_yper_state[i][j][k] <= v->swath_height_cper_state[i][j][k]) { + v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k]; + v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_detc[k] / (v->swath_width_yper_state[i][j][k] / 2.0); + } + else { + v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k]; + v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0); + } + v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0); + v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0); + v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], 
v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]); + v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]); + if (v->byte_per_pixel_in_detc[k] == 0.0) { + v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]); + } + else { + v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k])); + } + } + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->urgent_latency_support[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->urgent_latency_support_us_per_state[i][j][k] < v->urgent_latency / 1.0) { + v->urgent_latency_support[i][j] = dcn_bw_no; + } + } + } + } + /*prefetch check*/ + + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->total_number_of_dcc_active_dpp[i][j] = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->total_number_of_dcc_active_dpp[i][j] = v->total_number_of_dcc_active_dpp[i][j] + v->no_of_dpp[i][j][k]; + } + } + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->projected_dcfclk_deep_sleep = 8.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, v->pixel_clock[k] / 16.0); + if (v->byte_per_pixel_in_detc[k] == 0.0) { + if (v->v_ratio[k] <= 1.0) { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]); + } + else { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j)); + } + } + else { + if (v->v_ratio[k] <= 1.0) { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]); + } + else { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j)); + } + if (v->v_ratio[k] / 2.0 <= 1.0) { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->h_ratio[k] / 2.0 * v->pixel_clock[k] / 
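/*
 * A sketch of the luma-only urgent_latency_support_us_per_state term above,
 * not taken from the patch.  The lines sitting in DET+LB hide latency for
 * as long as they take to be consumed at the display rate, minus the time
 * needed to refetch them at this pipe's share of the return bandwidth.
 * Units follow the patch (MHz clocks, bytes, lines, microseconds).
 */
static double urgent_latency_support_us(double buffered_lines,
                                        double htotal, double pixel_clock_mhz,
                                        double v_ratio, double swath_width,
                                        double bytes_per_pixel,
                                        double return_bw_per_pipe)
{
        double line_time_us = htotal / pixel_clock_mhz;
        double drain_us     = buffered_lines * line_time_us / v_ratio;
        double refill_us    = buffered_lines * swath_width * bytes_per_pixel /
                              return_bw_per_pipe;

        return drain_us - refill_us;
}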
v->no_of_dpp[i][j][k]); + } + else { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->pscl_factor_chroma[k] * v->required_dispclk[i][j] / (1 + j)); + } + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->meta_req_height_y = 8.0 * v->read256_block_height_y[k]; + v->meta_req_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->meta_req_height_y; + v->meta_surface_width_y =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0, v->meta_req_width_y) + v->meta_req_width_y; + v->meta_surface_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, v->meta_req_height_y) + v->meta_req_height_y; + if (v->pte_enable == dcn_bw_yes) { + v->meta_pte_bytes_per_frame_y = (dcn_bw_ceil2((v->meta_surface_width_y * v->meta_surface_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; + } + else { + v->meta_pte_bytes_per_frame_y = 0.0; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->meta_row_bytes_y = v->meta_surface_width_y * v->meta_req_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0; + } + else { + v->meta_row_bytes_y = v->meta_surface_height_y * v->meta_req_width_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0; + } + } + else { + v->meta_pte_bytes_per_frame_y = 0.0; + v->meta_row_bytes_y = 0.0; + } + if (v->pte_enable == dcn_bw_yes) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->macro_tile_block_size_bytes_y = 256.0; + v->macro_tile_block_height_y = 1.0; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { + v->macro_tile_block_size_bytes_y = 4096.0; + v->macro_tile_block_height_y = 4.0 * v->read256_block_height_y[k]; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { + v->macro_tile_block_size_bytes_y = 64.0 * 1024; + v->macro_tile_block_height_y = 16.0 * v->read256_block_height_y[k]; + } + else { + v->macro_tile_block_size_bytes_y = 256.0 * 1024; + v->macro_tile_block_height_y = 32.0 * v->read256_block_height_y[k]; + } + if (v->macro_tile_block_size_bytes_y <= 65536.0) { + v->data_pte_req_height_y = v->macro_tile_block_height_y; + } + else { + v->data_pte_req_height_y = 16.0 * v->read256_block_height_y[k]; + } + v->data_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->data_pte_req_height_y * 8; + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_y / (v->viewport_width[k] / v->no_of_dpp[i][j][k]), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_y, 1.0) + 1); + } + else if (v->source_scan[k] == dcn_bw_hor) { + v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0) / v->data_pte_req_width_y, 1.0) + 1); + } + else { + v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->data_pte_req_height_y, 1.0) 
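/*
 * Sketch, not from the patch: the luma DCC metadata sizing used here, as one
 * standalone function.  viewport_width is the per-pipe width (the code above
 * divides by no_of_dpp first); the 256, 64, 8 and 4096 constants mirror the
 * expressions in the patch, which imply one metadata byte per 256-byte data
 * block, 64 bytes per PTE request, 8 PTEs per request and 4 KiB pages.
 */
#include <math.h>

static double meta_pte_bytes_per_frame(double viewport_width,
                                       double viewport_height,
                                       double bytes_per_pixel,
                                       double block_height_256b)
{
        double req_h  = 8.0 * block_height_256b;
        double req_w  = 64.0 * 256.0 / ceil(bytes_per_pixel) / req_h;
        double surf_w = ceil((viewport_width - 1.0) / req_w) * req_w + req_w;
        double surf_h = ceil((viewport_height - 1.0) / req_h) * req_h + req_h;
        double meta_bytes = surf_w * surf_h * ceil(bytes_per_pixel) / 256.0;

        return (ceil((meta_bytes - 4096.0) / 8.0 / 4096.0) + 1.0) * 64.0;
}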
+ 1); + } + } + else { + v->dpte_bytes_per_row_y = 0.0; + } + if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->meta_req_height_c = 8.0 * v->read256_block_height_c[k]; + v->meta_req_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->meta_req_height_c; + v->meta_surface_width_c =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0, v->meta_req_width_c) + v->meta_req_width_c; + v->meta_surface_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, v->meta_req_height_c) + v->meta_req_height_c; + if (v->pte_enable == dcn_bw_yes) { + v->meta_pte_bytes_per_frame_c = (dcn_bw_ceil2((v->meta_surface_width_c * v->meta_surface_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; + } + else { + v->meta_pte_bytes_per_frame_c = 0.0; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->meta_row_bytes_c = v->meta_surface_width_c * v->meta_req_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0; + } + else { + v->meta_row_bytes_c = v->meta_surface_height_c * v->meta_req_width_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0; + } + } + else { + v->meta_pte_bytes_per_frame_c = 0.0; + v->meta_row_bytes_c = 0.0; + } + if (v->pte_enable == dcn_bw_yes) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->macro_tile_block_size_bytes_c = 256.0; + v->macro_tile_block_height_c = 1.0; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { + v->macro_tile_block_size_bytes_c = 4096.0; + v->macro_tile_block_height_c = 4.0 * v->read256_block_height_c[k]; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { + v->macro_tile_block_size_bytes_c = 64.0 * 1024; + v->macro_tile_block_height_c = 16.0 * v->read256_block_height_c[k]; + } + else { + v->macro_tile_block_size_bytes_c = 256.0 * 1024; + v->macro_tile_block_height_c = 32.0 * v->read256_block_height_c[k]; + } + v->macro_tile_block_width_c = v->macro_tile_block_size_bytes_c /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->macro_tile_block_height_c; + if (v->macro_tile_block_size_bytes_c <= 65536.0) { + v->data_pte_req_height_c = v->macro_tile_block_height_c; + } + else { + v->data_pte_req_height_c = 16.0 * v->read256_block_height_c[k]; + } + v->data_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->data_pte_req_height_c * 8; + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_c / (v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_c, 1.0) + 1); + } + else if (v->source_scan[k] == dcn_bw_hor) { + v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0) / v->data_pte_req_width_c, 1.0) + 1); + } + else { + v->dpte_bytes_per_row_c = 
64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->data_pte_req_height_c, 1.0) + 1); + } + } + else { + v->dpte_bytes_per_row_c = 0.0; + } + } + else { + v->dpte_bytes_per_row_c = 0.0; + v->meta_pte_bytes_per_frame_c = 0.0; + v->meta_row_bytes_c = 0.0; + } + v->dpte_bytes_per_row[k] = v->dpte_bytes_per_row_y + v->dpte_bytes_per_row_c; + v->meta_pte_bytes_per_frame[k] = v->meta_pte_bytes_per_frame_y + v->meta_pte_bytes_per_frame_c; + v->meta_row_bytes[k] = v->meta_row_bytes_y + v->meta_row_bytes_c; + v->v_init_y = (v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0; + v->prefill_y[k] =dcn_bw_floor2(v->v_init_y, 1.0); + v->max_num_sw_y[k] =dcn_bw_ceil2((v->prefill_y[k] - 1.0) / v->swath_height_yper_state[i][j][k], 1.0) + 1; + if (v->prefill_y[k] > 1.0) { + v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] - 2.0), v->swath_height_yper_state[i][j][k]); + } + else { + v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] + v->swath_height_yper_state[i][j][k] - 2.0), v->swath_height_yper_state[i][j][k]); + } + v->max_partial_sw_y =dcn_bw_max2(1.0, v->max_partial_sw_y); + v->prefetch_lines_y[k] = v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k] + v->max_partial_sw_y; + if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { + v->v_init_c = (v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0; + v->prefill_c[k] =dcn_bw_floor2(v->v_init_c, 1.0); + v->max_num_sw_c[k] =dcn_bw_ceil2((v->prefill_c[k] - 1.0) / v->swath_height_cper_state[i][j][k], 1.0) + 1; + if (v->prefill_c[k] > 1.0) { + v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] - 2.0), v->swath_height_cper_state[i][j][k]); + } + else { + v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] + v->swath_height_cper_state[i][j][k] - 2.0), v->swath_height_cper_state[i][j][k]); + } + v->max_partial_sw_c =dcn_bw_max2(1.0, v->max_partial_sw_c); + v->prefetch_lines_c[k] = v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k] + v->max_partial_sw_c; + } + else { + v->prefetch_lines_c[k] = 0.0; + } + v->dst_x_after_scaler = 90.0 * v->pixel_clock[k] / (v->required_dispclk[i][j] / (j + 1)) + 42.0 * v->pixel_clock[k] / v->required_dispclk[i][j]; + if (v->no_of_dpp[i][j][k] > 1.0) { + v->dst_x_after_scaler = v->dst_x_after_scaler + v->scaler_rec_out_width[k] / 2.0; + } + if (v->output_format[k] == dcn_bw_420) { + v->dst_y_after_scaler = 1.0; + } + else { + v->dst_y_after_scaler = 0.0; + } + v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep; + v->v_update_offset[k][j] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0); + v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]); + v->v_update_width[k][j] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k]; + v->v_ready_offset[k][j] = dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k]; + v->time_setup = (v->v_update_offset[k][j] + v->v_update_width[k][j] + v->v_ready_offset[k][j]) / v->pixel_clock[k]; + v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / 
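/*
 * Illustration of the prefetch_lines_y derivation above (not driver code),
 * progressive output only; the interlace term is dropped for brevity.
 * v_init is the scaler's initial phase, prefill the whole source lines
 * needed before the first output line, and the swath rounding accounts for
 * DET fills happening in whole swaths.
 */
#include <math.h>

static double prefetch_lines(double v_ratio, double vtaps, double swath_height)
{
        double v_init  = (v_ratio + vtaps + 1.0) / 2.0;
        double prefill = floor(v_init);
        double swaths  = ceil((prefill - 1.0) / swath_height) + 1.0;
        double partial;

        if (prefill > 1.0)
                partial = fmod(prefill - 2.0, swath_height);
        else
                partial = fmod(prefill + swath_height - 2.0, swath_height);
        partial = fmax(1.0, partial);

        return swaths * swath_height + partial;
}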
v->return_bw_per_state[i]; + if (v->pte_enable == dcn_bw_yes) { + v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i]; + } + if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) { + v->maximum_vstartup = v->vtotal[k] - v->vactive[k] - 1.0; + } + else { + v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0; + } + + do { + v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]); + v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4; + v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]); + + if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_pte_without_immediate_flip = dcn_bw_max3( + v->meta_pte_bytes_frame[k] / v->prefetch_bw[k], + v->extra_latency, + v->htotal[k] / v->pixel_clock[k] / 4.0); + } else { + v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; + } + + if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max3(( + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], + v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, + v->extra_latency); + } else { + v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max2( + v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, + v->extra_latency - v->time_for_meta_pte_with_immediate_flip); + } + + v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->maximum_vstartup = v->maximum_vstartup - 1; + + if (v->lines_for_meta_pte_without_immediate_flip[k] < 32.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0) + break; + + } while(1); + } + v->bw_available_for_immediate_flip = v->return_bw_per_state[i]; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->bw_available_for_immediate_flip = v->bw_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth[k], v->prefetch_bw[k]); + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->total_immediate_flip_bytes[k] = 0.0; + if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->total_immediate_flip_bytes[k] = v->total_immediate_flip_bytes[k] + v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]; + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], 
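/*
 * Not part of the patch: the do/while just added walks maximum_vstartup down
 * one line at a time until the PTE and meta/DPTE row programming fit their
 * line budgets.  Two helpers restate its core arithmetic: the quarter-line
 * quantization used throughout, and the line times left for prefetch once
 * latency, calc/setup time and the scaler output delay are paid.
 * dst_x_fraction stands for dst_x_after_scaler / htotal.
 */
#include <math.h>

static double quantize_quarter_lines(double lines)
{
        return floor(4.0 * (lines + 0.125)) / 4.0;
}

static double prefetch_line_budget(double max_vstartup_lines,
                                   double urgent_latency_us,
                                   double calc_plus_setup_us,
                                   double line_time_us,
                                   double dst_y_after_scaler,
                                   double dst_x_fraction)
{
        double lines = max_vstartup_lines -
                       urgent_latency_us / line_time_us -
                       calc_plus_setup_us / line_time_us -
                       (dst_y_after_scaler + dst_x_fraction);

        return quantize_quarter_lines(lines);
}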
v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); + } + else { + v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; + } + if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency); + } + else { + v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip); + } + v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k]; + v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k]; + if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) { + v->v_ratio_pre_ywith_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip; + if ((v->swath_height_yper_state[i][j][k] > 4.0)) { + if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) { + v->v_ratio_pre_ywith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywith_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0; + } + } + v->v_ratio_pre_cwith_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip; + if ((v->swath_height_cper_state[i][j][k] > 4.0)) { + if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) { + v->v_ratio_pre_cwith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwith_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0; + } + } + v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / 
v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]); + } + else { + v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0; + v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0; + v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = 999999.0; + } + if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip > 0.0) { + v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip; + if ((v->swath_height_yper_state[i][j][k] > 4.0)) { + if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) { + v->v_ratio_pre_ywithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywithout_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0; + } + } + v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip; + if ((v->swath_height_cper_state[i][j][k] > 4.0)) { + if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) { + v->v_ratio_pre_cwithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwithout_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0; + } + } + v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]); + } + else { + v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0; + v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0; + v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = 999999.0; + } + } + v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k]) +dcn_bw_max2(v->meta_pte_bytes_per_frame[k] / (v->lines_for_meta_pte_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / (v->lines_for_meta_and_dpte_row_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k])); + } + else { + v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]); + } + } + 
v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = v->maximum_read_bandwidth_with_prefetch_without_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]); + } + v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes; + if (v->maximum_read_bandwidth_with_prefetch_with_immediate_flip > v->return_bw_per_state[i]) { + v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_with_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_with_immediate_flip[k] >= 16.0) { + v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no; + } + } + v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes; + if (v->maximum_read_bandwidth_with_prefetch_without_immediate_flip > v->return_bw_per_state[i]) { + v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_without_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_without_immediate_flip[k] >= 16.0) { + v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no; + } + } + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywith_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwith_immediate_flip[i][j][k] > 4.0)) || ((v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 || v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)))) { + v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no; + } + } + v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)) { + v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no; + } + } + } + } + /*mode support, voltage state and soc configuration*/ + + for (i = number_of_states_plus_one; i >= 0; i--) { + for (j = 0; j <= 1; j++) { + if (v->scale_ratio_support == dcn_bw_yes && v->source_format_pixel_and_scan_support == dcn_bw_yes && v->viewport_size_support == dcn_bw_yes && v->bandwidth_support[i] == dcn_bw_yes && v->dio_support[i] == dcn_bw_yes && v->urgent_latency_support[i][j] == dcn_bw_yes && v->rob_support[i] == dcn_bw_yes && v->dispclk_dppclk_support[i][j] == dcn_bw_yes && v->total_available_pipes_support[i][j] == dcn_bw_yes && v->total_available_writeback_support == dcn_bw_yes && v->writeback_latency_support == dcn_bw_yes) { + if (v->prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes) { + v->mode_support_with_immediate_flip[i][j] = dcn_bw_yes; + } + else { + v->mode_support_with_immediate_flip[i][j] = dcn_bw_no; + } + if (v->prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes && 
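/*
 * A sketch (not driver code) of the shape of the
 * prefetch_supported_without_immediate_flip check above: during prefetch
 * each plane needs the larger of its steady-state read bandwidth and its
 * prefetch bandwidth, and the sum must fit in the state's return bandwidth
 * (the with-flip variant also adds the meta/DPTE row terms).
 */
#include <math.h>
#include <stdbool.h>

static bool prefetch_bandwidth_ok(const double *read_bw,
                                  const double *prefetch_bw,
                                  int planes, double return_bw)
{
        double total = 0.0;
        int k;

        for (k = 0; k < planes; k++)
                total += fmax(read_bw[k], prefetch_bw[k]);

        return total <= return_bw;
}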
v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes) { + v->mode_support_without_immediate_flip[i][j] = dcn_bw_yes; + } + else { + v->mode_support_without_immediate_flip[i][j] = dcn_bw_no; + } + } + else { + v->mode_support_with_immediate_flip[i][j] = dcn_bw_no; + v->mode_support_without_immediate_flip[i][j] = dcn_bw_no; + } + } + } + for (i = number_of_states_plus_one; i >= 0; i--) { + if ((i == number_of_states_plus_one || v->mode_support_with_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_with_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) { + v->voltage_level_with_immediate_flip = i; + } + } + for (i = number_of_states_plus_one; i >= 0; i--) { + if ((i == number_of_states_plus_one || v->mode_support_without_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_without_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) { + v->voltage_level_without_immediate_flip = i; + } + } + if (v->voltage_level_with_immediate_flip == number_of_states_plus_one) { + v->immediate_flip_supported = dcn_bw_no; + v->voltage_level = v->voltage_level_without_immediate_flip; + } + else { + v->immediate_flip_supported = dcn_bw_yes; + v->voltage_level = v->voltage_level_with_immediate_flip; + } + v->dcfclk = v->dcfclk_per_state[v->voltage_level]; + v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level]; + for (j = 0; j <= 1; j++) { + v->required_dispclk_per_ratio[j] = v->required_dispclk[v->voltage_level][j]; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->dpp_per_plane_per_ratio[j][k] = v->no_of_dpp[v->voltage_level][j][k]; + } + v->dispclk_dppclk_support_per_ratio[j] = v->dispclk_dppclk_support[v->voltage_level][j]; + } + v->max_phyclk = v->phyclk_per_state[v->voltage_level]; +} +void display_pipe_configuration(struct dcn_bw_internal_vars *v) +{ + int j; + int k; + /*display pipe configuration*/ + + for (j = 0; j <= 1; j++) { + v->total_number_of_active_dpp_per_ratio[j] = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->total_number_of_active_dpp_per_ratio[j] = v->total_number_of_active_dpp_per_ratio[j] + v->dpp_per_plane_per_ratio[j][k]; + } + } + if ((v->dispclk_dppclk_support_per_ratio[0] == dcn_bw_yes && v->dispclk_dppclk_support_per_ratio[1] == dcn_bw_no) || (v->dispclk_dppclk_support_per_ratio[0] == v->dispclk_dppclk_support_per_ratio[1] && (v->total_number_of_active_dpp_per_ratio[0] < v->total_number_of_active_dpp_per_ratio[1] || (((v->total_number_of_active_dpp_per_ratio[0] == v->total_number_of_active_dpp_per_ratio[1]) && v->required_dispclk_per_ratio[0] <= 0.5 * v->required_dispclk_per_ratio[1]))))) { + v->dispclk_dppclk_ratio = 1; + v->final_error_message = v->error_message[0]; + } + else { + v->dispclk_dppclk_ratio = 2; + v->final_error_message = v->error_message[1]; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->dpp_per_plane[k] = v->dpp_per_plane_per_ratio[v->dispclk_dppclk_ratio - 1][k]; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->byte_per_pix_dety = 8.0; + v->byte_per_pix_detc = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { + v->byte_per_pix_dety = 4.0; + v->byte_per_pix_detc = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->byte_per_pix_dety = 2.0; + v->byte_per_pix_detc = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->byte_per_pix_dety = 1.0; + v->byte_per_pix_detc = 2.0; + } 
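/*
 * Illustration, not from the patch: the two descending voltage-level scans
 * earlier in this hunk end up selecting the lowest state, no lower than the
 * override level, whose mode-support flag is set, because every supporting
 * state overwrites the previous choice as i decreases.  num_states plays the
 * role of number_of_states_plus_one and is always accepted as the fallback.
 */
#include <stdbool.h>

static int lowest_supported_state(const bool *mode_support, int num_states,
                                  int override_level)
{
        int i, level = num_states;

        for (i = num_states; i >= 0; i--)
                if ((i == num_states || mode_support[i]) && i >= override_level)
                        level = i;

        return level;
}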
+ else { + v->byte_per_pix_dety = 4.0f / 3.0f; + v->byte_per_pix_detc = 8.0f / 3.0f; + } + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->read256_bytes_block_height_y = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->read256_bytes_block_height_y = 4.0; + } + else { + v->read256_bytes_block_height_y = 8.0; + } + v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y; + v->read256_bytes_block_height_c = 0.0; + v->read256_bytes_block_width_c = 0.0; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->read256_bytes_block_height_y = 1.0; + v->read256_bytes_block_height_c = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->read256_bytes_block_height_y = 16.0; + v->read256_bytes_block_height_c = 8.0; + } + else { + v->read256_bytes_block_height_y = 8.0; + v->read256_bytes_block_height_c = 8.0; + } + v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y; + v->read256_bytes_block_width_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->read256_bytes_block_height_c; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->maximum_swath_height_y = v->read256_bytes_block_height_y; + v->maximum_swath_height_c = v->read256_bytes_block_height_c; + } + else { + v->maximum_swath_height_y = v->read256_bytes_block_width_y; + v->maximum_swath_height_c = v->read256_bytes_block_width_c; + } + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) { + v->minimum_swath_height_y = v->maximum_swath_height_y; + } + else { + v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0; + } + v->minimum_swath_height_c = v->maximum_swath_height_c; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->minimum_swath_height_y = v->maximum_swath_height_y; + v->minimum_swath_height_c = v->maximum_swath_height_c; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) { + v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0; + if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { + v->minimum_swath_height_c = v->maximum_swath_height_c; + } + else { + v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0; + } + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) { + v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0; + if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { + v->minimum_swath_height_y = v->maximum_swath_height_y; + } + else { + v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0; + } + } + else { + v->minimum_swath_height_y = v->maximum_swath_height_y; + v->minimum_swath_height_c = 
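/*
 * Sketch of the per-format DET byte widths chosen at the top of this loop,
 * as a small lookup (enum and struct names are invented).  For the 4:2:0
 * formats the chroma plane carries two half-resolution components, giving
 * the 2:1 chroma:luma byte ratio; the 4/3 and 8/3 figures correspond to
 * 10-bit samples packed three per 32-bit word.
 */
enum px_fmt { RGB_64BPP, RGB_32BPP, RGB_16BPP, YUV420_8BIT, YUV420_10BIT };

struct det_bpp { double luma; double chroma; };

static struct det_bpp det_bytes_per_pixel(enum px_fmt f)
{
        switch (f) {
        case RGB_64BPP:   return (struct det_bpp){ 8.0, 0.0 };
        case RGB_32BPP:   return (struct det_bpp){ 4.0, 0.0 };
        case RGB_16BPP:   return (struct det_bpp){ 2.0, 0.0 };
        case YUV420_8BIT: return (struct det_bpp){ 1.0, 2.0 };
        default:          return (struct det_bpp){ 4.0 / 3.0, 8.0 / 3.0 };
        }
}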
v->maximum_swath_height_c; + } + } + if (v->source_scan[k] == dcn_bw_hor) { + v->swath_width = v->viewport_width[k] / v->dpp_per_plane[k]; + } + else { + v->swath_width = v->viewport_height[k] / v->dpp_per_plane[k]; + } + v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->maximum_swath_height_y; + v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pix_dety * v->maximum_swath_height_y; + if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { + v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256; + } + if (v->maximum_swath_height_c > 0.0) { + v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c; + } + v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c; + if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { + v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256; + } + if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) { + v->swath_height_y[k] = v->maximum_swath_height_y; + v->swath_height_c[k] = v->maximum_swath_height_c; + } + else { + v->swath_height_y[k] = v->minimum_swath_height_y; + v->swath_height_c[k] = v->minimum_swath_height_c; + } + if (v->swath_height_c[k] == 0.0) { + v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0; + v->det_buffer_size_c[k] = 0.0; + } + else if (v->swath_height_y[k] <= v->swath_height_c[k]) { + v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0; + v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0; + } + else { + v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0; + v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 3.0; + } + } +} +void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(struct dcn_bw_internal_vars *v) +{ + int k; + /*dispclk and dppclk calculation*/ + + v->dispclk_with_ramping = 0.0; + v->dispclk_without_ramping = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->h_ratio[k] > 1.0) { + v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0)); + } + else { + v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); + } + v->dppclk_using_single_dpp_luma = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_throughput[k], 1.0); + if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->pscl_throughput_chroma[k] = 0.0; + v->dppclk_using_single_dpp = v->dppclk_using_single_dpp_luma; + } + else { + if (v->h_ratio[k] > 1.0) { + v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0)); + } + else { + v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); + } + v->dppclk_using_single_dpp_chroma = v->pixel_clock[k] *dcn_bw_max3(v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), 
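/*
 * Not from the driver: the DET carving rule at the end of
 * display_pipe_configuration, restated.  A plane with no chroma swath gets
 * the whole DET; otherwise the split is half/half, or 2/3 luma and 1/3
 * chroma when the luma swath is the taller of the two.
 */
static void split_det(double det_bytes, double swath_height_y,
                      double swath_height_c, double *det_y, double *det_c)
{
        if (swath_height_c == 0.0) {
                *det_y = det_bytes;
                *det_c = 0.0;
        } else if (swath_height_y <= swath_height_c) {
                *det_y = det_bytes / 2.0;
                *det_c = det_bytes / 2.0;
        } else {
                *det_y = det_bytes * 2.0 / 3.0;
                *det_c = det_bytes / 3.0;
        }
}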
v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_throughput_chroma[k], 1.0); + v->dppclk_using_single_dpp =dcn_bw_max2(v->dppclk_using_single_dpp_luma, v->dppclk_using_single_dpp_chroma); + } + if (v->odm_capable == dcn_bw_yes) { + v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0)); + v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0)); + } + else { + v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0)); + v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0)); + } + } + if (v->dispclk_without_ramping > v->max_dispclk[number_of_states]) { + v->dispclk = v->dispclk_without_ramping; + } + else if (v->dispclk_with_ramping > v->max_dispclk[number_of_states]) { + v->dispclk = v->max_dispclk[number_of_states]; + } + else { + v->dispclk = v->dispclk_with_ramping; + } + v->dppclk = v->dispclk / v->dispclk_dppclk_ratio; + /*urgent watermark*/ + + v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0); + v->dcc_enabled_any_plane = dcn_bw_no; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->dcc_enabled_any_plane = dcn_bw_yes; + } + } + v->return_bw = v->return_bandwidth_to_dcn; + if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) { + v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency))); + } + v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); + if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) { + v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); + } + v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0); + if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) { + v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + 
v->urgent_latency))); + } + v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); + if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) { + v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_scan[k] == dcn_bw_hor) { + v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k]; + } + else { + v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k]; + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->byte_per_pixel_dety[k] = 8.0; + v->byte_per_pixel_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { + v->byte_per_pixel_dety[k] = 4.0; + v->byte_per_pixel_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->byte_per_pixel_dety[k] = 2.0; + v->byte_per_pixel_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->byte_per_pixel_dety[k] = 1.0; + v->byte_per_pixel_detc[k] = 2.0; + } + else { + v->byte_per_pixel_dety[k] = 4.0f / 3.0f; + v->byte_per_pixel_detc[k] = 8.0f / 3.0f; + } + } + v->total_data_read_bandwidth = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k]; + v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0; + v->total_data_read_bandwidth = v->total_data_read_bandwidth + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k]; + } + v->total_active_dpp = 0.0; + v->total_dcc_active_dpp = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->total_active_dpp = v->total_active_dpp + v->dpp_per_plane[k]; + if (v->dcc_enable[k] == dcn_bw_yes) { + v->total_dcc_active_dpp = v->total_dcc_active_dpp + v->dpp_per_plane[k]; + } + } + v->urgent_round_trip_and_out_of_order_latency = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw; + v->last_pixel_of_line_extra_watermark = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->v_ratio[k] <= 1.0) { + v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k]; + } + else { + v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk; + } + v->data_fabric_line_delivery_time_luma = v->swath_width_y[k] * v->swath_height_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->return_bw * v->read_bandwidth_plane_luma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth); + v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_luma - v->display_pipe_line_delivery_time_luma[k]); + if 
(v->byte_per_pixel_detc[k] == 0.0) { + v->display_pipe_line_delivery_time_chroma[k] = 0.0; + } + else { + if (v->v_ratio[k] / 2.0 <= 1.0) { + v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] / (v->h_ratio[k] / 2.0) / v->pixel_clock[k]; + } + else { + v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 / v->pscl_throughput_chroma[k] / v->dppclk; + } + v->data_fabric_line_delivery_time_chroma = v->swath_width_y[k] / 2.0 * v->swath_height_c[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->return_bw * v->read_bandwidth_plane_chroma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth); + v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_chroma - v->display_pipe_line_delivery_time_chroma[k]); + } + } + v->urgent_extra_latency = v->urgent_round_trip_and_out_of_order_latency + (v->total_active_dpp * v->pixel_chunk_size_in_kbyte + v->total_dcc_active_dpp * v->meta_chunk_size) * 1024.0 / v->return_bw; + if (v->pte_enable == dcn_bw_yes) { + v->urgent_extra_latency = v->urgent_extra_latency + v->total_active_dpp * v->pte_chunk_size * 1024.0 / v->return_bw; + } + v->urgent_watermark = v->urgent_latency + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency; + v->ptemeta_urgent_watermark = v->urgent_watermark + 2.0 * v->urgent_latency; + /*nb p-state/dram clock change watermark*/ + + v->dram_clock_change_watermark = v->dram_clock_change_latency + v->urgent_watermark; + v->total_active_writeback = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback) { + v->total_active_writeback = v->total_active_writeback + 1.0; + } + } + if (v->total_active_writeback <= 1.0) { + v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency; + } + else { + v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency + v->writeback_chunk_size * 1024.0 / 32.0 / v->socclk; + } + /*stutter efficiency*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->lines_in_dety[k] = v->det_buffer_size_y[k] / v->byte_per_pixel_dety[k] / v->swath_width_y[k]; + v->lines_in_dety_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_dety[k], v->swath_height_y[k]); + v->full_det_buffering_time_y[k] = v->lines_in_dety_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k]; + if (v->byte_per_pixel_detc[k] > 0.0) { + v->lines_in_detc[k] = v->det_buffer_size_c[k] / v->byte_per_pixel_detc[k] / (v->swath_width_y[k] / 2.0); + v->lines_in_detc_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_detc[k], v->swath_height_c[k]); + v->full_det_buffering_time_c[k] = v->lines_in_detc_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0); + } + else { + v->lines_in_detc[k] = 0.0; + v->lines_in_detc_rounded_down_to_swath[k] = 0.0; + v->full_det_buffering_time_c[k] = 999999.0; + } + } + v->min_full_det_buffering_time = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->full_det_buffering_time_y[k] < v->min_full_det_buffering_time) { + v->min_full_det_buffering_time = v->full_det_buffering_time_y[k]; + v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k]; + } + if (v->full_det_buffering_time_c[k] < v->min_full_det_buffering_time) { + v->min_full_det_buffering_time = v->full_det_buffering_time_c[k]; + v->frame_time_for_min_full_det_buffering_time = 
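/*
 * A sketch, not taken from the patch, of how the watermarks in this block
 * compose.  The urgent watermark is the raw urgent latency plus the
 * last-pixel-of-line slack plus the extra latency of pushing one chunk per
 * active pipe through the return path; the PTE/meta and DRAM-clock-change
 * watermarks are built on top of it.  Struct name is invented.
 */
struct example_watermarks { double urgent; double pte_meta; double dram_change; };

static struct example_watermarks build_watermarks(double urgent_latency_us,
                                                  double last_pixel_extra_us,
                                                  double extra_latency_us,
                                                  double dram_change_latency_us)
{
        struct example_watermarks w;

        w.urgent      = urgent_latency_us + last_pixel_extra_us + extra_latency_us;
        w.pte_meta    = w.urgent + 2.0 * urgent_latency_us;
        w.dram_change = dram_change_latency_us + w.urgent;
        return w;
}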
v->vtotal[k] * v->htotal[k] / v->pixel_clock[k]; + } + } + v->average_read_bandwidth_gbyte_per_second = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / v->dcc_rate[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / v->dcc_rate[k] / 1000.0; + } + else { + v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / 1000.0; + } + if (v->dcc_enable[k] == dcn_bw_yes) { + v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 256.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 256.0; + } + if (v->pte_enable == dcn_bw_yes) { + v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 512.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 512.0; + } + } + v->part_of_burst_that_fits_in_rob =dcn_bw_min2(v->min_full_det_buffering_time * v->total_data_read_bandwidth, v->rob_buffer_size_in_kbyte * 1024.0 * v->total_data_read_bandwidth / (v->average_read_bandwidth_gbyte_per_second * 1000.0)); + v->stutter_burst_time = v->part_of_burst_that_fits_in_rob * (v->average_read_bandwidth_gbyte_per_second * 1000.0) / v->total_data_read_bandwidth / v->return_bw + (v->min_full_det_buffering_time * v->total_data_read_bandwidth - v->part_of_burst_that_fits_in_rob) / (v->dcfclk * 64.0); + if (v->total_active_writeback == 0.0) { + v->stutter_efficiency_not_including_vblank = (1.0 - (v->sr_exit_time + v->stutter_burst_time) / v->min_full_det_buffering_time) * 100.0; + } + else { + v->stutter_efficiency_not_including_vblank = 0.0; + } + v->smallest_vblank = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) { + v->v_blank_time = (v->vtotal[k] - v->vactive[k]) * v->htotal[k] / v->pixel_clock[k]; + } + else { + v->v_blank_time = 0.0; + } + v->smallest_vblank =dcn_bw_min2(v->smallest_vblank, v->v_blank_time); + } + v->stutter_efficiency = (v->stutter_efficiency_not_including_vblank / 100.0 * (v->frame_time_for_min_full_det_buffering_time - v->smallest_vblank) + v->smallest_vblank) / v->frame_time_for_min_full_det_buffering_time * 100.0; + /*dcfclk deep sleep*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->byte_per_pixel_detc[k] > 0.0) { + v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 32.0 / v->display_pipe_line_delivery_time_luma[k], 1.1 * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 32.0 / v->display_pipe_line_delivery_time_chroma[k]); + } + else { + v->dcfclk_deep_sleep_per_plane[k] = 1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 64.0 / v->display_pipe_line_delivery_time_luma[k]; + } + v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(v->dcfclk_deep_sleep_per_plane[k], v->pixel_clock[k] / 16.0); + } + v->dcf_clk_deep_sleep = 8.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->dcf_clk_deep_sleep =dcn_bw_max2(v->dcf_clk_deep_sleep, v->dcfclk_deep_sleep_per_plane[k]); + } + /*stutter watermark*/ + + v->stutter_exit_watermark = v->sr_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency + 10.0 / 
v->dcf_clk_deep_sleep; + v->stutter_enter_plus_exit_watermark = v->sr_enter_plus_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency; + /*urgent latency supported*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->effective_det_plus_lb_lines_luma =dcn_bw_floor2(v->lines_in_dety[k] +dcn_bw_min2(v->lines_in_dety[k] * v->dppclk * v->byte_per_pixel_dety[k] * v->pscl_throughput[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_y[k]); + v->urgent_latency_support_us_luma = v->effective_det_plus_lb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_det_plus_lb_lines_luma * v->swath_width_y[k] * v->byte_per_pixel_dety[k] / (v->return_bw / v->dpp_per_plane[k]); + if (v->byte_per_pixel_detc[k] > 0.0) { + v->effective_det_plus_lb_lines_chroma =dcn_bw_floor2(v->lines_in_detc[k] +dcn_bw_min2(v->lines_in_detc[k] * v->dppclk * v->byte_per_pixel_detc[k] * v->pscl_throughput_chroma[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_c[k]); + v->urgent_latency_support_us_chroma = v->effective_det_plus_lb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_det_plus_lb_lines_chroma * (v->swath_width_y[k] / 2.0) * v->byte_per_pixel_detc[k] / (v->return_bw / v->dpp_per_plane[k]); + v->urgent_latency_support_us[k] =dcn_bw_min2(v->urgent_latency_support_us_luma, v->urgent_latency_support_us_chroma); + } + else { + v->urgent_latency_support_us[k] = v->urgent_latency_support_us_luma; + } + } + v->min_urgent_latency_support_us = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->min_urgent_latency_support_us =dcn_bw_min2(v->min_urgent_latency_support_us, v->urgent_latency_support_us[k]); + } + /*non-urgent latency tolerance*/ + + v->non_urgent_latency_tolerance = v->min_urgent_latency_support_us - v->urgent_watermark; + /*prefetch*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->block_height256_bytes_y = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->block_height256_bytes_y = 4.0; + } + else { + v->block_height256_bytes_y = 8.0; + } + v->block_height256_bytes_c = 0.0; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->block_height256_bytes_y = 1.0; + v->block_height256_bytes_c = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->block_height256_bytes_y = 16.0; + v->block_height256_bytes_c = 8.0; + } + else { + v->block_height256_bytes_y = 8.0; + v->block_height256_bytes_c = 8.0; + } + } + if (v->dcc_enable[k] == dcn_bw_yes) { + v->meta_request_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (8.0 * v->block_height256_bytes_y); + v->meta_surf_width_y =dcn_bw_ceil2(v->swath_width_y[k] - 1.0, v->meta_request_width_y) + v->meta_request_width_y; + v->meta_surf_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, 8.0 * v->block_height256_bytes_y) + 8.0 * v->block_height256_bytes_y; + if (v->pte_enable == dcn_bw_yes) { + v->meta_pte_bytes_frame_y = (dcn_bw_ceil2((v->meta_surf_width_y * v->meta_surf_height_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; + } + else { + v->meta_pte_bytes_frame_y = 0.0; + } + if 
(v->source_scan[k] == dcn_bw_hor) { + v->meta_row_byte_y = v->meta_surf_width_y * 8.0 * v->block_height256_bytes_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0; + } + else { + v->meta_row_byte_y = v->meta_surf_height_y * v->meta_request_width_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0; + } + } + else { + v->meta_pte_bytes_frame_y = 0.0; + v->meta_row_byte_y = 0.0; + } + if (v->pte_enable == dcn_bw_yes) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->macro_tile_size_byte_y = 256.0; + v->macro_tile_height_y = 1.0; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { + v->macro_tile_size_byte_y = 4096.0; + v->macro_tile_height_y = 4.0 * v->block_height256_bytes_y; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { + v->macro_tile_size_byte_y = 64.0 * 1024; + v->macro_tile_height_y = 16.0 * v->block_height256_bytes_y; + } + else { + v->macro_tile_size_byte_y = 256.0 * 1024; + v->macro_tile_height_y = 32.0 * v->block_height256_bytes_y; + } + if (v->macro_tile_size_byte_y <= 65536.0) { + v->pixel_pte_req_height_y = v->macro_tile_height_y; + } + else { + v->pixel_pte_req_height_y = 16.0 * v->block_height256_bytes_y; + } + v->pixel_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / v->pixel_pte_req_height_y * 8; + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_y / v->swath_width_y[k], 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1); + } + else if (v->source_scan[k] == dcn_bw_hor) { + v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1); + } + else { + v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->pixel_pte_req_height_y, 1.0) + 1); + } + } + else { + v->pixel_pte_bytes_per_row_y = 0.0; + } + if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->meta_request_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (8.0 * v->block_height256_bytes_c); + v->meta_surf_width_c =dcn_bw_ceil2(v->swath_width_y[k] / 2.0 - 1.0, v->meta_request_width_c) + v->meta_request_width_c; + v->meta_surf_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, 8.0 * v->block_height256_bytes_c) + 8.0 * v->block_height256_bytes_c; + if (v->pte_enable == dcn_bw_yes) { + v->meta_pte_bytes_frame_c = (dcn_bw_ceil2((v->meta_surf_width_c * v->meta_surf_height_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; + } + else { + v->meta_pte_bytes_frame_c = 0.0; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->meta_row_byte_c = v->meta_surf_width_c * 8.0 * v->block_height256_bytes_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0; + } + else { + v->meta_row_byte_c = v->meta_surf_height_c * v->meta_request_width_c 
*dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0; + } + } + else { + v->meta_pte_bytes_frame_c = 0.0; + v->meta_row_byte_c = 0.0; + } + if (v->pte_enable == dcn_bw_yes) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->macro_tile_size_bytes_c = 256.0; + v->macro_tile_height_c = 1.0; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { + v->macro_tile_size_bytes_c = 4096.0; + v->macro_tile_height_c = 4.0 * v->block_height256_bytes_c; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { + v->macro_tile_size_bytes_c = 64.0 * 1024; + v->macro_tile_height_c = 16.0 * v->block_height256_bytes_c; + } + else { + v->macro_tile_size_bytes_c = 256.0 * 1024; + v->macro_tile_height_c = 32.0 * v->block_height256_bytes_c; + } + if (v->macro_tile_size_bytes_c <= 65536.0) { + v->pixel_pte_req_height_c = v->macro_tile_height_c; + } + else { + v->pixel_pte_req_height_c = 16.0 * v->block_height256_bytes_c; + } + v->pixel_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / v->pixel_pte_req_height_c * 8; + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_c / (v->swath_width_y[k] / 2.0), 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1); + } + else if (v->source_scan[k] == dcn_bw_hor) { + v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1); + } + else { + v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->pixel_pte_req_height_c, 1.0) + 1); + } + } + else { + v->pixel_pte_bytes_per_row_c = 0.0; + } + } + else { + v->pixel_pte_bytes_per_row_c = 0.0; + v->meta_pte_bytes_frame_c = 0.0; + v->meta_row_byte_c = 0.0; + } + v->pixel_pte_bytes_per_row[k] = v->pixel_pte_bytes_per_row_y + v->pixel_pte_bytes_per_row_c; + v->meta_pte_bytes_frame[k] = v->meta_pte_bytes_frame_y + v->meta_pte_bytes_frame_c; + v->meta_row_byte[k] = v->meta_row_byte_y + v->meta_row_byte_c; + v->v_init_pre_fill_y[k] =dcn_bw_floor2((v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0, 1.0); + v->max_num_swath_y[k] =dcn_bw_ceil2((v->v_init_pre_fill_y[k] - 1.0) / v->swath_height_y[k], 1.0) + 1; + if (v->v_init_pre_fill_y[k] > 1.0) { + v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] - 2.0), v->swath_height_y[k]); + } + else { + v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] + v->swath_height_y[k] - 2.0), v->swath_height_y[k]); + } + v->max_partial_swath_y =dcn_bw_max2(1.0, v->max_partial_swath_y); + v->prefetch_source_lines_y[k] = v->max_num_swath_y[k] * v->swath_height_y[k] + v->max_partial_swath_y; + if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { + v->v_init_pre_fill_c[k] =dcn_bw_floor2((v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0, 1.0); + 
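The luma path just above rounds the prefetch fill to whole swaths: v_init_pre_fill is roughly the number of source lines the vertical scaler needs before its first output line, and it is then expanded to full swaths plus one partial swath. A minimal standalone restatement of that rounding, using libm floor/ceil/fmod as stand-ins for the dcn_bw_* helpers; all input values are made up for the example:

#include <math.h>
#include <stdio.h>

/* Rounding of the luma prefetch fill to whole swaths, restated with libm
 * floor/ceil/fmod standing in for dcn_bw_floor2/ceil2/mod.  All input
 * values below are assumptions, chosen only to illustrate the arithmetic. */
int main(void)
{
	double v_ratio = 1.0, vtaps = 4.0, interlace = 0.0;
	double swath_height_y = 4.0;

	double v_init_pre_fill =
		floor((v_ratio + vtaps + 1.0 + interlace * 0.5 * v_ratio) / 2.0);
	double max_num_swath = ceil((v_init_pre_fill - 1.0) / swath_height_y) + 1.0;
	double max_partial_swath = (v_init_pre_fill > 1.0) ?
		fmod(v_init_pre_fill - 2.0, swath_height_y) :
		fmod(v_init_pre_fill + swath_height_y - 2.0, swath_height_y);

	if (max_partial_swath < 1.0)		/* clamp, as dcn_bw_max2(1.0, ...) does */
		max_partial_swath = 1.0;

	double prefetch_source_lines_y =
		max_num_swath * swath_height_y + max_partial_swath;

	printf("v_init_pre_fill=%.0f  prefetch_source_lines_y=%.0f\n",
	       v_init_pre_fill, prefetch_source_lines_y);
	return 0;
}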
v->max_num_swath_c[k] =dcn_bw_ceil2((v->v_init_pre_fill_c[k] - 1.0) / v->swath_height_c[k], 1.0) + 1; + if (v->v_init_pre_fill_c[k] > 1.0) { + v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] - 2.0), v->swath_height_c[k]); + } + else { + v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] + v->swath_height_c[k] - 2.0), v->swath_height_c[k]); + } + v->max_partial_swath_c =dcn_bw_max2(1.0, v->max_partial_swath_c); + } + else { + v->max_num_swath_c[k] = 0.0; + v->max_partial_swath_c = 0.0; + } + v->prefetch_source_lines_c[k] = v->max_num_swath_c[k] * v->swath_height_c[k] + v->max_partial_swath_c; + } + v->t_calc = 24.0 / v->dcf_clk_deep_sleep; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) { + v->max_vstartup_lines[k] = v->vtotal[k] - v->vactive[k] - 1.0; + } + else { + v->max_vstartup_lines[k] = v->v_sync_plus_back_porch[k] - 1.0; + } + } + v->next_prefetch_mode = 0.0; + do { + v->v_startup_lines = 13.0; + do { + v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_yes; + v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_no; + v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_no; + v->v_ratio_prefetch_more_than4 = dcn_bw_no; + v->destination_line_times_for_prefetch_less_than2 = dcn_bw_no; + v->prefetch_mode = v->next_prefetch_mode; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->dstx_after_scaler = 90.0 * v->pixel_clock[k] / v->dppclk + 42.0 * v->pixel_clock[k] / v->dispclk; + if (v->dpp_per_plane[k] > 1.0) { + v->dstx_after_scaler = v->dstx_after_scaler + v->scaler_rec_out_width[k] / 2.0; + } + if (v->output_format[k] == dcn_bw_420) { + v->dsty_after_scaler = 1.0; + } + else { + v->dsty_after_scaler = 0.0; + } + v->v_update_offset_pix[k] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0); + v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk); + v->v_update_width_pix[k] = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k]; + v->v_ready_offset_pix[k] = dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k]; + v->t_setup = (v->v_update_offset_pix[k] + v->v_update_width_pix[k] + v->v_ready_offset_pix[k]) / v->pixel_clock[k]; + v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]); + if (v->prefetch_mode == 0.0) { + v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency); + } + else if (v->prefetch_mode == 1.0) { + v->t_wait =dcn_bw_max2(v->sr_enter_plus_exit_time, v->urgent_latency); + } + else { + v->t_wait = v->urgent_latency; + } + v->destination_lines_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->v_startup[k] - v->t_wait / (v->htotal[k] / v->pixel_clock[k]) - (v->t_calc + v->t_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dsty_after_scaler + v->dstx_after_scaler / v->htotal[k]) + 0.125), 1.0) / 4; + if (v->destination_lines_for_prefetch[k] > 0.0) { + v->prefetch_bandwidth[k] = (v->meta_pte_bytes_frame[k] + 2.0 * v->meta_row_byte[k] + 2.0 * v->pixel_pte_bytes_per_row[k] + v->prefetch_source_lines_y[k] * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0)) / (v->destination_lines_for_prefetch[k] * 
v->htotal[k] / v->pixel_clock[k]); + } + else { + v->prefetch_bandwidth[k] = 999999.0; + } + } + v->bandwidth_available_for_immediate_flip = v->return_bw; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->bandwidth_available_for_immediate_flip = v->bandwidth_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->prefetch_bandwidth[k]); + } + v->tot_immediate_flip_bytes = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->tot_immediate_flip_bytes = v->tot_immediate_flip_bytes + v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]; + } + } + v->max_rd_bandwidth = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { + if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->time_for_fetching_meta_pte =dcn_bw_max5(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->meta_pte_bytes_frame[k] * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); + } + else { + v->time_for_fetching_meta_pte =dcn_bw_max3(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); + } + } + else { + v->time_for_fetching_meta_pte = v->htotal[k] / v->pixel_clock[k] / 4.0; + } + v->destination_lines_to_request_vm_inv_blank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_meta_pte / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + if ((v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes)) { + if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->time_for_fetching_row_in_vblank =dcn_bw_max5((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, 2.0 * v->urgent_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte); + } + else { + v->time_for_fetching_row_in_vblank =dcn_bw_max3((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte); + } + } + else { + v->time_for_fetching_row_in_vblank =dcn_bw_max2(v->urgent_extra_latency - v->time_for_fetching_meta_pte, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte); + } + v->destination_lines_to_request_row_in_vblank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_row_in_vblank / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->lines_to_request_prefetch_pixel_data = v->destination_lines_for_prefetch[k] - v->destination_lines_to_request_vm_inv_blank[k] - v->destination_lines_to_request_row_in_vblank[k]; + if (v->lines_to_request_prefetch_pixel_data > 0.0) { + v->v_ratio_prefetch_y[k] = v->prefetch_source_lines_y[k] / 
v->lines_to_request_prefetch_pixel_data; + if ((v->swath_height_y[k] > 4.0)) { + if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_y[k] - 3.0) / 2.0) { + v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], v->max_num_swath_y[k] * v->swath_height_y[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_y[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_prefetch_y[k] = 999999.0; + } + } + } + else { + v->v_ratio_prefetch_y[k] = 999999.0; + } + v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], 1.0); + if (v->lines_to_request_prefetch_pixel_data > 0.0) { + v->v_ratio_prefetch_c[k] = v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data; + if ((v->swath_height_c[k] > 4.0)) { + if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_c[k] - 3.0) / 2.0) { + v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], v->max_num_swath_c[k] * v->swath_height_c[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_c[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_prefetch_c[k] = 999999.0; + } + } + } + else { + v->v_ratio_prefetch_c[k] = 999999.0; + } + v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], 1.0); + if (v->lines_to_request_prefetch_pixel_data > 0.0) { + v->required_prefetch_pix_data_bw = v->dpp_per_plane[k] * (v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 2.0) * v->swath_width_y[k] / (v->htotal[k] / v->pixel_clock[k]); + } + else { + v->required_prefetch_pix_data_bw = 999999.0; + } + v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->required_prefetch_pix_data_bw); + if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->meta_pte_bytes_frame[k] / (v->destination_lines_to_request_vm_inv_blank[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / (v->destination_lines_to_request_row_in_vblank[k] * v->htotal[k] / v->pixel_clock[k])); + } + if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) { + v->v_ratio_prefetch_more_than4 = dcn_bw_yes; + } + if (v->destination_lines_for_prefetch[k] < 2.0) { + v->destination_line_times_for_prefetch_less_than2 = dcn_bw_yes; + } + if (v->max_vstartup_lines[k] > v->v_startup_lines) { + if (v->required_prefetch_pix_data_bw > (v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k])) { + v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_no; + } + if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) { + v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_yes; + } + if (v->destination_lines_for_prefetch[k] < 2.0) { + v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_yes; + } + } + } + if (v->max_rd_bandwidth <= v->return_bw && v->v_ratio_prefetch_more_than4 == dcn_bw_no && v->destination_line_times_for_prefetch_less_than2 == dcn_bw_no) { + v->prefetch_mode_supported = dcn_bw_yes; + } + else { + v->prefetch_mode_supported = dcn_bw_no; + } + v->v_startup_lines = v->v_startup_lines + 1.0; + } while (!(v->prefetch_mode_supported == dcn_bw_yes || 
(v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw == dcn_bw_yes && v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 == dcn_bw_no && v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 == dcn_bw_no))); + v->next_prefetch_mode = v->next_prefetch_mode + 1.0; + } while (!(v->prefetch_mode_supported == dcn_bw_yes || v->prefetch_mode == 2.0)); + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->v_ratio_prefetch_y[k] <= 1.0) { + v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k]; + } + else { + v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk; + } + if (v->byte_per_pixel_detc[k] == 0.0) { + v->display_pipe_line_delivery_time_chroma_prefetch[k] = 0.0; + } + else { + if (v->v_ratio_prefetch_c[k] <= 1.0) { + v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k]; + } + else { + v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk; + } + } + } + /*min ttuv_blank*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->prefetch_mode == 0.0) { + v->allow_dram_clock_change_during_vblank[k] = dcn_bw_yes; + v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes; + v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max3(v->dram_clock_change_watermark, v->stutter_enter_plus_exit_watermark, v->urgent_watermark); + } + else if (v->prefetch_mode == 1.0) { + v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no; + v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes; + v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max2(v->stutter_enter_plus_exit_watermark, v->urgent_watermark); + } + else { + v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no; + v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_no; + v->min_ttuv_blank[k] = v->t_calc + v->urgent_watermark; + } + } + /*nb p-state/dram clock change support*/ + + v->active_dp_ps = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->active_dp_ps = v->active_dp_ps + v->dpp_per_plane[k]; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->lb_latency_hiding_source_lines_y =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0); + v->lb_latency_hiding_source_lines_c =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0); + v->effective_lb_latency_hiding_y = v->lb_latency_hiding_source_lines_y / v->v_ratio[k] * (v->htotal[k] / v->pixel_clock[k]); + v->effective_lb_latency_hiding_c = v->lb_latency_hiding_source_lines_c / (v->v_ratio[k] / 2.0) * (v->htotal[k] / v->pixel_clock[k]); + if (v->swath_width_y[k] > 2.0 * v->dpp_output_buffer_pixels) { + v->dpp_output_buffer_lines_y = v->dpp_output_buffer_pixels / v->swath_width_y[k]; + } + else if (v->swath_width_y[k] > v->dpp_output_buffer_pixels) { + v->dpp_output_buffer_lines_y = 0.5; + } + else { + v->dpp_output_buffer_lines_y = 1.0; + } + if (v->swath_width_y[k] / 2.0 > 2.0 * v->dpp_output_buffer_pixels) { + v->dpp_output_buffer_lines_c = v->dpp_output_buffer_pixels / (v->swath_width_y[k] / 2.0); + } + else if (v->swath_width_y[k] / 2.0 > 
v->dpp_output_buffer_pixels) { + v->dpp_output_buffer_lines_c = 0.5; + } + else { + v->dpp_output_buffer_lines_c = 1.0; + } + v->dppopp_buffering_y = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_y + v->opp_output_buffer_lines); + v->max_det_buffering_time_y = v->full_det_buffering_time_y[k] + (v->lines_in_dety[k] - v->lines_in_dety_rounded_down_to_swath[k]) / v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]); + v->active_dram_clock_change_latency_margin_y = v->dppopp_buffering_y + v->effective_lb_latency_hiding_y + v->max_det_buffering_time_y - v->dram_clock_change_watermark; + if (v->active_dp_ps > 1.0) { + v->active_dram_clock_change_latency_margin_y = v->active_dram_clock_change_latency_margin_y - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]); + } + if (v->byte_per_pixel_detc[k] > 0.0) { + v->dppopp_buffering_c = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_c + v->opp_output_buffer_lines); + v->max_det_buffering_time_c = v->full_det_buffering_time_c[k] + (v->lines_in_detc[k] - v->lines_in_detc_rounded_down_to_swath[k]) / v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]); + v->active_dram_clock_change_latency_margin_c = v->dppopp_buffering_c + v->effective_lb_latency_hiding_c + v->max_det_buffering_time_c - v->dram_clock_change_watermark; + if (v->active_dp_ps > 1.0) { + v->active_dram_clock_change_latency_margin_c = v->active_dram_clock_change_latency_margin_c - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]); + } + v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin_y, v->active_dram_clock_change_latency_margin_c); + } + else { + v->active_dram_clock_change_latency_margin[k] = v->active_dram_clock_change_latency_margin_y; + } + if (v->output_format[k] == dcn_bw_444) { + v->writeback_dram_clock_change_latency_margin = (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0) - v->writeback_dram_clock_change_watermark; + } + else { + v->writeback_dram_clock_change_latency_margin =dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k])) - v->writeback_dram_clock_change_watermark; + } + if (v->output[k] == dcn_bw_writeback) { + v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin[k], v->writeback_dram_clock_change_latency_margin); + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->allow_dram_clock_change_during_vblank[k] == dcn_bw_yes) { + v->v_blank_dram_clock_change_latency_margin[k] = (v->vtotal[k] - v->scaler_recout_height[k]) * (v->htotal[k] / v->pixel_clock[k]) -dcn_bw_max2(v->dram_clock_change_watermark, v->writeback_dram_clock_change_watermark); + } + else { + v->v_blank_dram_clock_change_latency_margin[k] = 0.0; + } + } + v->min_active_dram_clock_change_margin = 999999.0; + v->v_blank_of_min_active_dram_clock_change_margin = 999999.0; + v->second_min_active_dram_clock_change_margin = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->active_dram_clock_change_latency_margin[k] < v->min_active_dram_clock_change_margin) { + v->second_min_active_dram_clock_change_margin = v->min_active_dram_clock_change_margin; + v->min_active_dram_clock_change_margin = 
v->active_dram_clock_change_latency_margin[k]; + v->v_blank_of_min_active_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k]; + } + else if (v->active_dram_clock_change_latency_margin[k] < v->second_min_active_dram_clock_change_margin) { + v->second_min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k]; + } + } + v->min_vblank_dram_clock_change_margin = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->min_vblank_dram_clock_change_margin > v->v_blank_dram_clock_change_latency_margin[k]) { + v->min_vblank_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k]; + } + } + if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) { + v->dram_clock_change_margin =dcn_bw_max2(v->min_active_dram_clock_change_margin, v->min_vblank_dram_clock_change_margin); + } + else if (v->v_blank_of_min_active_dram_clock_change_margin > v->min_active_dram_clock_change_margin) { + v->dram_clock_change_margin =dcn_bw_min2(v->second_min_active_dram_clock_change_margin, v->v_blank_of_min_active_dram_clock_change_margin); + } + else { + v->dram_clock_change_margin = v->min_active_dram_clock_change_margin; + } + if (v->min_active_dram_clock_change_margin > 0.0) { + v->dram_clock_change_support = dcn_bw_supported_in_v_active; + } + else if (v->dram_clock_change_margin > 0.0) { + v->dram_clock_change_support = dcn_bw_supported_in_v_blank; + } + else { + v->dram_clock_change_support = dcn_bw_not_supported; + } + /*maximum bandwidth used*/ + + v->wr_bandwidth = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) { + v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0; + } + else if (v->output[k] == dcn_bw_writeback) { + v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5; + } + } + v->max_used_bw = v->max_rd_bandwidth + v->wr_bandwidth; +} only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h @@ -0,0 +1,38 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN_CALC_AUTO_H_
+#define _DCN_CALC_AUTO_H_
+
+#include "dc.h"
+#include "dcn_calcs.h"
+
+void scaler_settings_calculation(struct dcn_bw_internal_vars *v);
+void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v);
+void display_pipe_configuration(struct dcn_bw_internal_vars *v);
+void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(
+		struct dcn_bw_internal_vars *v);
+
+#endif /* _DCN_CALC_AUTO_H_ */
only in patch2: unchanged:
--- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
+++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn_calc_math.h"
+
+#define isNaN(number) ((number) != (number))
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
+float dcn_bw_mod(const float arg1, const float arg2)
+{
+	if (isNaN(arg1))
+		return arg2;
+	if (isNaN(arg2))
+		return arg1;
+	return arg1 - arg1 * ((int) (arg1 / arg2));
+}
+
+float dcn_bw_min2(const float arg1, const float arg2)
+{
+	if (isNaN(arg1))
+		return arg2;
+	if (isNaN(arg2))
+		return arg1;
+	return arg1 < arg2 ? arg1 : arg2;
+}
+
+unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2)
+{
+	return arg1 > arg2 ? arg1 : arg2;
+}
+float dcn_bw_max2(const float arg1, const float arg2)
+{
+	if (isNaN(arg1))
+		return arg2;
+	if (isNaN(arg2))
+		return arg1;
+	return arg1 > arg2 ? arg1 : arg2;
+}
+
+float dcn_bw_floor2(const float arg, const float significance)
+{
+	if (significance == 0)
+		return 0;
+	return ((int) (arg / significance)) * significance;
+}
+float dcn_bw_floor(const float arg)
+{
+	return ((int) (arg));
+}
+
+float dcn_bw_ceil(const float arg)
+{
+	float flr = dcn_bw_floor2(arg, 1);
+
+	return flr + 0.00001 >= arg ? arg : flr + 1;
+}
+
+float dcn_bw_ceil2(const float arg, const float significance)
+{
+	float flr = dcn_bw_floor2(arg, significance);
+	if (significance == 0)
+		return 0;
+	return flr + 0.00001 >= arg ? arg : flr + significance;
+}
+
+float dcn_bw_max3(float v1, float v2, float v3)
+{
+	return v3 > dcn_bw_max2(v1, v2) ? v3 : dcn_bw_max2(v1, v2);
+}
+
+float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5)
+{
+	return dcn_bw_max3(v1, v2, v3) > dcn_bw_max2(v4, v5) ? dcn_bw_max3(v1, v2, v3) : dcn_bw_max2(v4, v5);
+}
+
+float dcn_bw_pow(float a, float exp)
+{
+	float temp;
+	/*ASSERT(exp == (int)exp);*/
+	if ((int)exp == 0)
+		return 1;
+	temp = dcn_bw_pow(a, (int)(exp / 2));
+	if (((int)exp % 2) == 0) {
+		return temp * temp;
+	} else {
+		if ((int)exp > 0)
+			return a * temp * temp;
+		else
+			return (temp * temp) / a;
+	}
+}
+
+double dcn_bw_fabs(double a)
+{
+	if (a > 0)
+		return (a);
+	else
+		return (-a);
+}
+
+
+float dcn_bw_log(float a, float b)
+{
+	int * const exp_ptr = (int *)(&a);
+	int x = *exp_ptr;
+	const int log_2 = ((x >> 23) & 255) - 128;
+	x &= ~(255 << 23);
+	x += 127 << 23;
+	*exp_ptr = x;
+
+	a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3;
+
+	if (b > 2.00001 || b < 1.99999)
+		return (a + log_2) / dcn_bw_log(b, 2);
+	else
+		return (a + log_2);
+}
only in patch2: unchanged:
--- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -0,0 +1,1803 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2019 Raptor Engineering, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "dcn_calcs.h"
+#include "dcn_calc_auto.h"
+#include "dal_asic_id.h"
+#include "resource.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn10/dcn10_hubbub.h"
+#include "dml/dml1_display_rq_dlg_calc.h"
+
+#include "dcn_calc_math.h"
+
+#define DC_LOGGER \
+	dc->ctx->logger
+
+#define WM_SET_COUNT 4
+#define WM_A 0
+#define WM_B 1
+#define WM_C 2
+#define WM_D 3
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
+/* Defaults from spreadsheet rev#247.
+ * RV2 delta: dram_clock_change_latency, max_num_dpp + */ +const struct dcn_soc_bounding_box dcn10_soc_defaults = { + /* latencies */ + .sr_exit_time = 17, /*us*/ + .sr_enter_plus_exit_time = 19, /*us*/ + .urgent_latency = 4, /*us*/ + .dram_clock_change_latency = 17, /*us*/ + .write_back_latency = 12, /*us*/ + .percent_of_ideal_drambw_received_after_urg_latency = 80, /*%*/ + + /* below default clocks derived from STA target base on + * slow-slow corner + 10% margin with voltages aligned to FCLK. + * + * Use these value if fused value doesn't make sense as earlier + * part don't have correct value fused */ + /* default DCF CLK DPM on RV*/ + .dcfclkv_max0p9 = 655, /* MHz, = 3600/5.5 */ + .dcfclkv_nom0p8 = 626, /* MHz, = 3600/5.75 */ + .dcfclkv_mid0p72 = 600, /* MHz, = 3600/6, bypass */ + .dcfclkv_min0p65 = 300, /* MHz, = 3600/12, bypass */ + + /* default DISP CLK voltage state on RV */ + .max_dispclk_vmax0p9 = 1108, /* MHz, = 3600/3.25 */ + .max_dispclk_vnom0p8 = 1029, /* MHz, = 3600/3.5 */ + .max_dispclk_vmid0p72 = 960, /* MHz, = 3600/3.75 */ + .max_dispclk_vmin0p65 = 626, /* MHz, = 3600/5.75 */ + + /* default DPP CLK voltage state on RV */ + .max_dppclk_vmax0p9 = 720, /* MHz, = 3600/5 */ + .max_dppclk_vnom0p8 = 686, /* MHz, = 3600/5.25 */ + .max_dppclk_vmid0p72 = 626, /* MHz, = 3600/5.75 */ + .max_dppclk_vmin0p65 = 400, /* MHz, = 3600/9 */ + + /* default PHY CLK voltage state on RV */ + .phyclkv_max0p9 = 900, /*MHz*/ + .phyclkv_nom0p8 = 847, /*MHz*/ + .phyclkv_mid0p72 = 800, /*MHz*/ + .phyclkv_min0p65 = 600, /*MHz*/ + + /* BW depend on FCLK, MCLK, # of channels */ + /* dual channel BW */ + .fabric_and_dram_bandwidth_vmax0p9 = 38.4f, /*GB/s*/ + .fabric_and_dram_bandwidth_vnom0p8 = 34.133f, /*GB/s*/ + .fabric_and_dram_bandwidth_vmid0p72 = 29.866f, /*GB/s*/ + .fabric_and_dram_bandwidth_vmin0p65 = 12.8f, /*GB/s*/ + /* single channel BW + .fabric_and_dram_bandwidth_vmax0p9 = 19.2f, + .fabric_and_dram_bandwidth_vnom0p8 = 17.066f, + .fabric_and_dram_bandwidth_vmid0p72 = 14.933f, + .fabric_and_dram_bandwidth_vmin0p65 = 12.8f, + */ + + .number_of_channels = 2, + + .socclk = 208, /*MHz*/ + .downspreading = 0.5f, /*%*/ + .round_trip_ping_latency_cycles = 128, /*DCFCLK Cycles*/ + .urgent_out_of_order_return_per_channel = 256, /*bytes*/ + .vmm_page_size = 4096, /*bytes*/ + .return_bus_width = 64, /*bytes*/ + .max_request_size = 256, /*bytes*/ + + /* Depends on user class (client vs embedded, workstation, etc) */ + .percent_disp_bw_limit = 0.3f /*%*/ +}; + +const struct dcn_ip_params dcn10_ip_defaults = { + .rob_buffer_size_in_kbyte = 64, + .det_buffer_size_in_kbyte = 164, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_in_kbyte = 8, + .pte_enable = dcn_bw_yes, + .pte_chunk_size = 2, /*kbytes*/ + .meta_chunk_size = 2, /*kbytes*/ + .writeback_chunk_size = 2, /*kbytes*/ + .odm_capability = dcn_bw_no, + .dsc_capability = dcn_bw_no, + .line_buffer_size = 589824, /*bit*/ + .max_line_buffer_lines = 12, + .is_line_buffer_bpp_fixed = dcn_bw_no, + .line_buffer_fixed_bpp = dcn_bw_na, + .writeback_luma_buffer_size = 12, /*kbytes*/ + .writeback_chroma_buffer_size = 8, /*kbytes*/ + .max_num_dpp = 4, + .max_num_writeback = 2, + .max_dchub_topscl_throughput = 4, /*pixels/dppclk*/ + .max_pscl_tolb_throughput = 2, /*pixels/dppclk*/ + .max_lb_tovscl_throughput = 4, /*pixels/dppclk*/ + .max_vscl_tohscl_throughput = 4, /*pixels/dppclk*/ + .max_hscl_ratio = 4, + .max_vscl_ratio = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .pte_buffer_size_in_requests = 42, + 
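The clock comments in dcn10_soc_defaults above derive each DPM entry from a 3600 MHz reference divided by a fixed divider (e.g. 655 MHz = 3600/5.5). A quick standalone check of a few of those entries; only the 3600 MHz reference and the dividers quoted in the comments are used, nothing else is implied:

#include <stdio.h>

/* Spot-check of the "MHz, = 3600/x" comments in dcn10_soc_defaults. */
int main(void)
{
	const double ref_mhz = 3600.0;

	printf("dcfclkv_max0p9      ~ %.1f MHz (table: 655)\n", ref_mhz / 5.5);
	printf("dcfclkv_nom0p8      ~ %.1f MHz (table: 626)\n", ref_mhz / 5.75);
	printf("max_dispclk_vmax0p9 ~ %.1f MHz (table: 1108)\n", ref_mhz / 3.25);
	printf("max_dppclk_vmax0p9  ~ %.1f MHz (table: 720)\n", ref_mhz / 5.0);
	return 0;
}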
.dispclk_ramping_margin = 1, /*%*/ + .under_scan_factor = 1.11f, + .max_inter_dcn_tile_repeaters = 8, + .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = dcn_bw_no, + .bug_forcing_luma_and_chroma_request_to_same_size_fixed = dcn_bw_no, + .dcfclk_cstate_latency = 10 /*TODO clone of something else? sr_enter_plus_exit_time?*/ +}; + +static enum dcn_bw_defs tl_sw_mode_to_bw_defs(enum swizzle_mode_values sw_mode) +{ + switch (sw_mode) { + case DC_SW_LINEAR: + return dcn_bw_sw_linear; + case DC_SW_4KB_S: + return dcn_bw_sw_4_kb_s; + case DC_SW_4KB_D: + return dcn_bw_sw_4_kb_d; + case DC_SW_64KB_S: + return dcn_bw_sw_64_kb_s; + case DC_SW_64KB_D: + return dcn_bw_sw_64_kb_d; + case DC_SW_VAR_S: + return dcn_bw_sw_var_s; + case DC_SW_VAR_D: + return dcn_bw_sw_var_d; + case DC_SW_64KB_S_T: + return dcn_bw_sw_64_kb_s_t; + case DC_SW_64KB_D_T: + return dcn_bw_sw_64_kb_d_t; + case DC_SW_4KB_S_X: + return dcn_bw_sw_4_kb_s_x; + case DC_SW_4KB_D_X: + return dcn_bw_sw_4_kb_d_x; + case DC_SW_64KB_S_X: + return dcn_bw_sw_64_kb_s_x; + case DC_SW_64KB_D_X: + return dcn_bw_sw_64_kb_d_x; + case DC_SW_VAR_S_X: + return dcn_bw_sw_var_s_x; + case DC_SW_VAR_D_X: + return dcn_bw_sw_var_d_x; + case DC_SW_256B_S: + case DC_SW_256_D: + case DC_SW_256_R: + case DC_SW_4KB_R: + case DC_SW_64KB_R: + case DC_SW_VAR_R: + case DC_SW_4KB_R_X: + case DC_SW_64KB_R_X: + case DC_SW_VAR_R_X: + default: + BREAK_TO_DEBUGGER(); /*not in formula*/ + return dcn_bw_sw_4_kb_s; + } +} + +static int tl_lb_bpp_to_int(enum lb_pixel_depth depth) +{ + switch (depth) { + case LB_PIXEL_DEPTH_18BPP: + return 18; + case LB_PIXEL_DEPTH_24BPP: + return 24; + case LB_PIXEL_DEPTH_30BPP: + return 30; + case LB_PIXEL_DEPTH_36BPP: + return 36; + default: + return 30; + } +} + +static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format format) +{ + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + return dcn_bw_rgb_sub_16; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + return dcn_bw_rgb_sub_32; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + return dcn_bw_rgb_sub_64; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + return dcn_bw_yuv420_sub_8; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + return dcn_bw_yuv420_sub_10; + default: + return dcn_bw_rgb_sub_32; + } +} + +enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode) +{ + switch (sw_mode) { + /* for 4/8/16 high tiles */ + case DC_SW_LINEAR: + return dm_4k_tile; + case DC_SW_4KB_S: + case DC_SW_4KB_S_X: + return dm_4k_tile; + case DC_SW_64KB_S: + case DC_SW_64KB_S_X: + case DC_SW_64KB_S_T: + return dm_64k_tile; + case DC_SW_VAR_S: + case DC_SW_VAR_S_X: + return dm_256k_tile; + + /* For 64bpp 2 high tiles */ + case DC_SW_4KB_D: + case DC_SW_4KB_D_X: + return dm_4k_tile; + case DC_SW_64KB_D: + case DC_SW_64KB_D_X: + case DC_SW_64KB_D_T: + return dm_64k_tile; + case DC_SW_VAR_D: + case DC_SW_VAR_D_X: + return dm_256k_tile; + + case DC_SW_4KB_R: + case DC_SW_4KB_R_X: + return dm_4k_tile; + case DC_SW_64KB_R: + case DC_SW_64KB_R_X: + return dm_64k_tile; + case 
DC_SW_VAR_R: + case DC_SW_VAR_R_X: + return dm_256k_tile; + + /* Unsupported swizzle modes for dcn */ + case DC_SW_256B_S: + default: + ASSERT(0); /* Not supported */ + return 0; + } +} + +static void pipe_ctx_to_e2e_pipe_params ( + const struct pipe_ctx *pipe, + struct _vcs_dpi_display_pipe_params_st *input) +{ + input->src.is_hsplit = false; + + /* stereo can never be split */ + if (pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE || + pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) { + /* reset the split group if it was already considered split. */ + input->src.hsplit_grp = pipe->pipe_idx; + } else if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state) { + input->src.is_hsplit = true; + } else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state) { + input->src.is_hsplit = true; + } + + if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) { + /* + * this method requires us to always re-calculate watermark when dcc change + * between flip. + */ + input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0; + } else { + /* + * allow us to disable dcc on the fly without re-calculating WM + * + * extra overhead for DCC is quite small. for 1080p WM without + * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us) + */ + unsigned int bpe; + + input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs-> + dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0; + } + input->src.dcc_rate = 1; + input->src.meta_pitch = pipe->plane_state->dcc.meta_pitch; + input->src.source_scan = dm_horz; + input->src.sw_mode = pipe->plane_state->tiling_info.gfx9.swizzle; + + input->src.viewport_width = pipe->plane_res.scl_data.viewport.width; + input->src.viewport_height = pipe->plane_res.scl_data.viewport.height; + input->src.data_pitch = pipe->plane_res.scl_data.viewport.width; + input->src.data_pitch_c = pipe->plane_res.scl_data.viewport.width; + input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not curently stored */ + input->src.cur0_bpp = 32; + + input->src.macro_tile_size = swizzle_mode_to_macro_tile_size(pipe->plane_state->tiling_info.gfx9.swizzle); + + switch (pipe->plane_state->rotation) { + case ROTATION_ANGLE_0: + case ROTATION_ANGLE_180: + input->src.source_scan = dm_horz; + break; + case ROTATION_ANGLE_90: + case ROTATION_ANGLE_270: + input->src.source_scan = dm_vert; + break; + default: + ASSERT(0); /* Not supported */ + break; + } + + /* TODO: Fix pixel format mappings */ + switch (pipe->plane_state->format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + input->src.source_format = dm_420_8; + input->src.viewport_width_c = input->src.viewport_width / 2; + input->src.viewport_height_c = input->src.viewport_height / 2; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + input->src.source_format = dm_420_10; + input->src.viewport_width_c = input->src.viewport_width / 2; + input->src.viewport_height_c = input->src.viewport_height / 2; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + input->src.source_format = dm_444_64; + input->src.viewport_width_c = input->src.viewport_width; + input->src.viewport_height_c = input->src.viewport_height; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + 
input->src.source_format = dm_rgbe_alpha; + input->src.viewport_width_c = input->src.viewport_width; + input->src.viewport_height_c = input->src.viewport_height; + break; + default: + input->src.source_format = dm_444_32; + input->src.viewport_width_c = input->src.viewport_width; + input->src.viewport_height_c = input->src.viewport_height; + break; + } + + input->scale_taps.htaps = pipe->plane_res.scl_data.taps.h_taps; + input->scale_ratio_depth.hscl_ratio = pipe->plane_res.scl_data.ratios.horz.value/4294967296.0; + input->scale_ratio_depth.vscl_ratio = pipe->plane_res.scl_data.ratios.vert.value/4294967296.0; + input->scale_ratio_depth.vinit = pipe->plane_res.scl_data.inits.v.value/4294967296.0; + if (input->scale_ratio_depth.vinit < 1.0) + input->scale_ratio_depth.vinit = 1; + input->scale_taps.vtaps = pipe->plane_res.scl_data.taps.v_taps; + input->scale_taps.vtaps_c = pipe->plane_res.scl_data.taps.v_taps_c; + input->scale_taps.htaps_c = pipe->plane_res.scl_data.taps.h_taps_c; + input->scale_ratio_depth.hscl_ratio_c = pipe->plane_res.scl_data.ratios.horz_c.value/4294967296.0; + input->scale_ratio_depth.vscl_ratio_c = pipe->plane_res.scl_data.ratios.vert_c.value/4294967296.0; + input->scale_ratio_depth.vinit_c = pipe->plane_res.scl_data.inits.v_c.value/4294967296.0; + if (input->scale_ratio_depth.vinit_c < 1.0) + input->scale_ratio_depth.vinit_c = 1; + switch (pipe->plane_res.scl_data.lb_params.depth) { + case LB_PIXEL_DEPTH_30BPP: + input->scale_ratio_depth.lb_depth = 30; break; + case LB_PIXEL_DEPTH_36BPP: + input->scale_ratio_depth.lb_depth = 36; break; + default: + input->scale_ratio_depth.lb_depth = 24; break; + } + + + input->dest.vactive = pipe->stream->timing.v_addressable + pipe->stream->timing.v_border_top + + pipe->stream->timing.v_border_bottom; + + input->dest.recout_width = pipe->plane_res.scl_data.recout.width; + input->dest.recout_height = pipe->plane_res.scl_data.recout.height; + + input->dest.full_recout_width = pipe->plane_res.scl_data.recout.width; + input->dest.full_recout_height = pipe->plane_res.scl_data.recout.height; + + input->dest.htotal = pipe->stream->timing.h_total; + input->dest.hblank_start = input->dest.htotal - pipe->stream->timing.h_front_porch; + input->dest.hblank_end = input->dest.hblank_start + - pipe->stream->timing.h_addressable + - pipe->stream->timing.h_border_left + - pipe->stream->timing.h_border_right; + + input->dest.vtotal = pipe->stream->timing.v_total; + input->dest.vblank_start = input->dest.vtotal - pipe->stream->timing.v_front_porch; + input->dest.vblank_end = input->dest.vblank_start + - pipe->stream->timing.v_addressable + - pipe->stream->timing.v_border_bottom + - pipe->stream->timing.v_border_top; + input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_100hz/10000.0; + input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start; + input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; + input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; + input->dest.vupdate_width = pipe->pipe_dlg_param.vupdate_width; + +} + +static void dcn_bw_calc_rq_dlg_ttu( + const struct dc *dc, + const struct dcn_bw_internal_vars *v, + struct pipe_ctx *pipe, + int in_idx) +{ + struct display_mode_lib *dml = (struct display_mode_lib *)(&dc->dml); + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs; + struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs; + struct _vcs_dpi_display_rq_params_st rq_param = {0}; + struct 
_vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0}; + struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } }; + float total_active_bw = 0; + float total_prefetch_bw = 0; + int total_flip_bytes = 0; + int i; + + memset(dlg_regs, 0, sizeof(*dlg_regs)); + memset(ttu_regs, 0, sizeof(*ttu_regs)); + memset(rq_regs, 0, sizeof(*rq_regs)); + + for (i = 0; i < number_of_planes; i++) { + total_active_bw += v->read_bandwidth[i]; + total_prefetch_bw += v->prefetch_bandwidth[i]; + total_flip_bytes += v->total_immediate_flip_bytes[i]; + } + dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw); + if (dlg_sys_param.total_flip_bw < 0.0) + dlg_sys_param.total_flip_bw = 0; + + dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark; + dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark; + dlg_sys_param.t_urg_wm_us = v->urgent_watermark; + dlg_sys_param.t_extra_us = v->urgent_extra_latency; + dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep; + dlg_sys_param.total_flip_bytes = total_flip_bytes; + + pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe); + input.clks_cfg.dcfclk_mhz = v->dcfclk; + input.clks_cfg.dispclk_mhz = v->dispclk; + input.clks_cfg.dppclk_mhz = v->dppclk; + input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + input.clks_cfg.socclk_mhz = v->socclk; + input.clks_cfg.voltage = v->voltage_level; +// dc->dml.logger = pool->base.logger; + input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444; + input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp; + //input[in_idx].dout.output_standard; + + /*todo: soc->sr_enter_plus_exit_time??*/ + dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep; + + dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src); + dml1_extract_rq_regs(dml, rq_regs, rq_param); + dml1_rq_dlg_get_dlg_params( + dml, + dlg_regs, + ttu_regs, + rq_param.dlg, + dlg_sys_param, + input, + true, + true, + v->pte_enable == dcn_bw_yes, + pipe->plane_state->flip_immediate); +} + +static void split_stream_across_pipes( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *primary_pipe, + struct pipe_ctx *secondary_pipe) +{ + int pipe_idx = secondary_pipe->pipe_idx; + + if (!primary_pipe->plane_state) + return; + + *secondary_pipe = *primary_pipe; + + secondary_pipe->pipe_idx = pipe_idx; + secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; + if (primary_pipe->bottom_pipe) { + ASSERT(primary_pipe->bottom_pipe != secondary_pipe); + secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; + secondary_pipe->bottom_pipe->top_pipe = secondary_pipe; + } + primary_pipe->bottom_pipe = secondary_pipe; + secondary_pipe->top_pipe = primary_pipe; + + resource_build_scaling_params(primary_pipe); + resource_build_scaling_params(secondary_pipe); +} + +#if 0 +static void calc_wm_sets_and_perf_params( + struct dc_state *context, + struct dcn_bw_internal_vars *v) +{ + /* Calculate set A last to keep internal var state consistent for required config */ + if (v->voltage_level < 2) { + 
v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8; + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000; + + v->dcfclk_per_state[1] = v->dcfclkv_nom0p8; + v->dcfclk_per_state[0] = v->dcfclkv_nom0p8; + v->dcfclk = v->dcfclkv_nom0p8; + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000; + } + + if (v->voltage_level < 3) { + v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vmax0p9; + v->dcfclk_per_state[2] = v->dcfclkv_max0p9; + v->dcfclk_per_state[1] = v->dcfclkv_max0p9; + v->dcfclk_per_state[0] = v->dcfclkv_max0p9; + v->dcfclk = v->dcfclkv_max0p9; + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000; + } + + v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72; + v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65; + v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level]; + v->dcfclk_per_state[2] = v->dcfclkv_nom0p8; + v->dcfclk_per_state[1] = v->dcfclkv_mid0p72; + v->dcfclk_per_state[0] = v->dcfclkv_min0p65; + v->dcfclk = v->dcfclk_per_state[v->voltage_level]; + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + 
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; + if (v->voltage_level >= 2) { + context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; + } + if (v->voltage_level >= 3) + context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; +} +#endif + +static bool dcn_bw_apply_registry_override(struct dc *dc) +{ + bool updated = false; + + DC_FP_START(); + if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns + && dc->debug.sr_exit_time_ns) { + updated = true; + dc->dcn_soc->sr_exit_time = dc->debug.sr_exit_time_ns / 1000.0; + } + + if ((int)(dc->dcn_soc->sr_enter_plus_exit_time * 1000) + != dc->debug.sr_enter_plus_exit_time_ns + && dc->debug.sr_enter_plus_exit_time_ns) { + updated = true; + dc->dcn_soc->sr_enter_plus_exit_time = + dc->debug.sr_enter_plus_exit_time_ns / 1000.0; + } + + if ((int)(dc->dcn_soc->urgent_latency * 1000) != dc->debug.urgent_latency_ns + && dc->debug.urgent_latency_ns) { + updated = true; + dc->dcn_soc->urgent_latency = dc->debug.urgent_latency_ns / 1000.0; + } + + if ((int)(dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency * 1000) + != dc->debug.percent_of_ideal_drambw + && dc->debug.percent_of_ideal_drambw) { + updated = true; + dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency = + dc->debug.percent_of_ideal_drambw; + } + + if ((int)(dc->dcn_soc->dram_clock_change_latency * 1000) + != dc->debug.dram_clock_change_latency_ns + && dc->debug.dram_clock_change_latency_ns) { + updated = true; + dc->dcn_soc->dram_clock_change_latency = + dc->debug.dram_clock_change_latency_ns / 1000.0; + } + DC_FP_END(); + + return updated; +} + +static void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v) +{ + /* + * disable optional pipe split by lower dispclk bounding box + * at DPM0 + */ + v->max_dispclk[0] = v->max_dppclk_vmin0p65; +} + +static void hack_force_pipe_split(struct dcn_bw_internal_vars *v, + unsigned int pixel_rate_100hz) +{ + float pixel_rate_mhz = pixel_rate_100hz / 10000; + + /* + * force enabling pipe split by lower dpp clock for DPM0 to just + * below the specify pixel_rate, so bw calc would split pipe. + */ + if (pixel_rate_mhz < v->max_dppclk[0]) + v->max_dppclk[0] = pixel_rate_mhz; +} + +static void hack_bounding_box(struct dcn_bw_internal_vars *v, + struct dc_debug_options *dbg, + struct dc_state *context) +{ + int i; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /** + * Workaround for avoiding pipe-split in cases where we'd split + * planes that are too small, resulting in splits that aren't + * valid for the scaler. 
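 * (Illustrative case only: a plane whose dst_rect is 16 pixels wide, if
 * split across two pipes, would leave each pipe roughly an 8-pixel-wide
 * recout, which is why the <= 16 checks on src_rect/dst_rect below fall
 * back to disabling optional pipe-split instead.)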
+ */ + if (pipe->plane_state && + (pipe->plane_state->dst_rect.width <= 16 || + pipe->plane_state->dst_rect.height <= 16 || + pipe->plane_state->src_rect.width <= 16 || + pipe->plane_state->src_rect.height <= 16)) { + hack_disable_optional_pipe_split(v); + return; + } + } + + if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) + hack_disable_optional_pipe_split(v); + + if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP && + context->stream_count >= 2) + hack_disable_optional_pipe_split(v); + + if (context->stream_count == 1 && + dbg->force_single_disp_pipe_split) + hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz); +} + +unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw_internal_rev, uint32_t pci_revision_id) +{ + /* for low power RV2 variants, the highest voltage level we want is 0 */ + if ((chip_family == FAMILY_RV) && + ASICREV_IS_RAVEN2(hw_internal_rev)) + switch (pci_revision_id) { + case PRID_DALI_DE: + case PRID_DALI_DF: + case PRID_DALI_E3: + case PRID_DALI_E4: + case PRID_POLLOCK_94: + case PRID_POLLOCK_95: + case PRID_POLLOCK_E9: + case PRID_POLLOCK_EA: + case PRID_POLLOCK_EB: + return 0; + default: + break; + } + + /* we are ok with all levels */ + return 4; +} + +bool dcn_validate_bandwidth( + struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + /* + * we want a breakdown of the various stages of validation, which the + * perf_trace macro doesn't support + */ + BW_VAL_TRACE_SETUP(); + + const struct resource_pool *pool = dc->res_pool; + struct dcn_bw_internal_vars *v = &context->dcn_bw_vars; + int i, input_idx, k; + int vesa_sync_start, asic_blank_end, asic_blank_start; + bool bw_limit_pass; + float bw_limit; + + PERFORMANCE_TRACE_START(); + + BW_VAL_TRACE_COUNT(); + + if (dcn_bw_apply_registry_override(dc)) + dcn_bw_sync_calcs_and_dml(dc); + + memset(v, 0, sizeof(*v)); + DC_FP_START(); + + v->sr_exit_time = dc->dcn_soc->sr_exit_time; + v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time; + v->urgent_latency = dc->dcn_soc->urgent_latency; + v->write_back_latency = dc->dcn_soc->write_back_latency; + v->percent_of_ideal_drambw_received_after_urg_latency = + dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency; + + v->dcfclkv_min0p65 = dc->dcn_soc->dcfclkv_min0p65; + v->dcfclkv_mid0p72 = dc->dcn_soc->dcfclkv_mid0p72; + v->dcfclkv_nom0p8 = dc->dcn_soc->dcfclkv_nom0p8; + v->dcfclkv_max0p9 = dc->dcn_soc->dcfclkv_max0p9; + + v->max_dispclk_vmin0p65 = dc->dcn_soc->max_dispclk_vmin0p65; + v->max_dispclk_vmid0p72 = dc->dcn_soc->max_dispclk_vmid0p72; + v->max_dispclk_vnom0p8 = dc->dcn_soc->max_dispclk_vnom0p8; + v->max_dispclk_vmax0p9 = dc->dcn_soc->max_dispclk_vmax0p9; + + v->max_dppclk_vmin0p65 = dc->dcn_soc->max_dppclk_vmin0p65; + v->max_dppclk_vmid0p72 = dc->dcn_soc->max_dppclk_vmid0p72; + v->max_dppclk_vnom0p8 = dc->dcn_soc->max_dppclk_vnom0p8; + v->max_dppclk_vmax0p9 = dc->dcn_soc->max_dppclk_vmax0p9; + + v->socclk = dc->dcn_soc->socclk; + + v->fabric_and_dram_bandwidth_vmin0p65 = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65; + v->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72; + v->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9; + + v->phyclkv_min0p65 = dc->dcn_soc->phyclkv_min0p65; + v->phyclkv_mid0p72 = dc->dcn_soc->phyclkv_mid0p72; + v->phyclkv_nom0p8 = dc->dcn_soc->phyclkv_nom0p8; + v->phyclkv_max0p9 = 
dc->dcn_soc->phyclkv_max0p9; + + v->downspreading = dc->dcn_soc->downspreading; + v->round_trip_ping_latency_cycles = dc->dcn_soc->round_trip_ping_latency_cycles; + v->urgent_out_of_order_return_per_channel = dc->dcn_soc->urgent_out_of_order_return_per_channel; + v->number_of_channels = dc->dcn_soc->number_of_channels; + v->vmm_page_size = dc->dcn_soc->vmm_page_size; + v->dram_clock_change_latency = dc->dcn_soc->dram_clock_change_latency; + v->return_bus_width = dc->dcn_soc->return_bus_width; + + v->rob_buffer_size_in_kbyte = dc->dcn_ip->rob_buffer_size_in_kbyte; + v->det_buffer_size_in_kbyte = dc->dcn_ip->det_buffer_size_in_kbyte; + v->dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels; + v->opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines; + v->pixel_chunk_size_in_kbyte = dc->dcn_ip->pixel_chunk_size_in_kbyte; + v->pte_enable = dc->dcn_ip->pte_enable; + v->pte_chunk_size = dc->dcn_ip->pte_chunk_size; + v->meta_chunk_size = dc->dcn_ip->meta_chunk_size; + v->writeback_chunk_size = dc->dcn_ip->writeback_chunk_size; + v->odm_capability = dc->dcn_ip->odm_capability; + v->dsc_capability = dc->dcn_ip->dsc_capability; + v->line_buffer_size = dc->dcn_ip->line_buffer_size; + v->is_line_buffer_bpp_fixed = dc->dcn_ip->is_line_buffer_bpp_fixed; + v->line_buffer_fixed_bpp = dc->dcn_ip->line_buffer_fixed_bpp; + v->max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines; + v->writeback_luma_buffer_size = dc->dcn_ip->writeback_luma_buffer_size; + v->writeback_chroma_buffer_size = dc->dcn_ip->writeback_chroma_buffer_size; + v->max_num_dpp = dc->dcn_ip->max_num_dpp; + v->max_num_writeback = dc->dcn_ip->max_num_writeback; + v->max_dchub_topscl_throughput = dc->dcn_ip->max_dchub_topscl_throughput; + v->max_pscl_tolb_throughput = dc->dcn_ip->max_pscl_tolb_throughput; + v->max_lb_tovscl_throughput = dc->dcn_ip->max_lb_tovscl_throughput; + v->max_vscl_tohscl_throughput = dc->dcn_ip->max_vscl_tohscl_throughput; + v->max_hscl_ratio = dc->dcn_ip->max_hscl_ratio; + v->max_vscl_ratio = dc->dcn_ip->max_vscl_ratio; + v->max_hscl_taps = dc->dcn_ip->max_hscl_taps; + v->max_vscl_taps = dc->dcn_ip->max_vscl_taps; + v->under_scan_factor = dc->dcn_ip->under_scan_factor; + v->pte_buffer_size_in_requests = dc->dcn_ip->pte_buffer_size_in_requests; + v->dispclk_ramping_margin = dc->dcn_ip->dispclk_ramping_margin; + v->max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters; + v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = + dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one; + v->bug_forcing_luma_and_chroma_request_to_same_size_fixed = + dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed; + + v->voltage[5] = dcn_bw_no_support; + v->voltage[4] = dcn_bw_v_max0p9; + v->voltage[3] = dcn_bw_v_max0p9; + v->voltage[2] = dcn_bw_v_nom0p8; + v->voltage[1] = dcn_bw_v_mid0p72; + v->voltage[0] = dcn_bw_v_min0p65; + v->fabric_and_dram_bandwidth_per_state[5] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[4] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[3] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72; + v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65; + v->dcfclk_per_state[5] = v->dcfclkv_max0p9; + v->dcfclk_per_state[4] = v->dcfclkv_max0p9; + v->dcfclk_per_state[3] = 
v->dcfclkv_max0p9; + v->dcfclk_per_state[2] = v->dcfclkv_nom0p8; + v->dcfclk_per_state[1] = v->dcfclkv_mid0p72; + v->dcfclk_per_state[0] = v->dcfclkv_min0p65; + v->max_dispclk[5] = v->max_dispclk_vmax0p9; + v->max_dispclk[4] = v->max_dispclk_vmax0p9; + v->max_dispclk[3] = v->max_dispclk_vmax0p9; + v->max_dispclk[2] = v->max_dispclk_vnom0p8; + v->max_dispclk[1] = v->max_dispclk_vmid0p72; + v->max_dispclk[0] = v->max_dispclk_vmin0p65; + v->max_dppclk[5] = v->max_dppclk_vmax0p9; + v->max_dppclk[4] = v->max_dppclk_vmax0p9; + v->max_dppclk[3] = v->max_dppclk_vmax0p9; + v->max_dppclk[2] = v->max_dppclk_vnom0p8; + v->max_dppclk[1] = v->max_dppclk_vmid0p72; + v->max_dppclk[0] = v->max_dppclk_vmin0p65; + v->phyclk_per_state[5] = v->phyclkv_max0p9; + v->phyclk_per_state[4] = v->phyclkv_max0p9; + v->phyclk_per_state[3] = v->phyclkv_max0p9; + v->phyclk_per_state[2] = v->phyclkv_nom0p8; + v->phyclk_per_state[1] = v->phyclkv_mid0p72; + v->phyclk_per_state[0] = v->phyclkv_min0p65; + v->synchronized_vblank = dcn_bw_no; + v->ta_pscalculation = dcn_bw_override; + v->allow_different_hratio_vratio = dcn_bw_yes; + + for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->stream) + continue; + /* skip all but first of split pipes */ + if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) + continue; + + v->underscan_output[input_idx] = false; /* taken care of in recout already*/ + v->interlace_output[input_idx] = false; + + v->htotal[input_idx] = pipe->stream->timing.h_total; + v->vtotal[input_idx] = pipe->stream->timing.v_total; + v->vactive[input_idx] = pipe->stream->timing.v_addressable + + pipe->stream->timing.v_border_top + pipe->stream->timing.v_border_bottom; + v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total + - v->vactive[input_idx] + - pipe->stream->timing.v_front_porch; + v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_100hz/10000.0; + if (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + v->pixel_clock[input_idx] *= 2; + if (!pipe->plane_state) { + v->dcc_enable[input_idx] = dcn_bw_yes; + v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32; + v->source_surface_mode[input_idx] = dcn_bw_sw_4_kb_s; + v->lb_bit_per_pixel[input_idx] = 30; + v->viewport_width[input_idx] = pipe->stream->timing.h_addressable; + v->viewport_height[input_idx] = pipe->stream->timing.v_addressable; + /* + * for cases where we have no plane, we want to validate up to 1080p + * source size because here we are only interested in if the output + * timing is supported or not. 
if we cannot support native resolution + * of the high res display, we still want to support lower res up scale + * to native + */ + if (v->viewport_width[input_idx] > 1920) + v->viewport_width[input_idx] = 1920; + if (v->viewport_height[input_idx] > 1080) + v->viewport_height[input_idx] = 1080; + v->scaler_rec_out_width[input_idx] = v->viewport_width[input_idx]; + v->scaler_recout_height[input_idx] = v->viewport_height[input_idx]; + v->override_hta_ps[input_idx] = 1; + v->override_vta_ps[input_idx] = 1; + v->override_hta_pschroma[input_idx] = 1; + v->override_vta_pschroma[input_idx] = 1; + v->source_scan[input_idx] = dcn_bw_hor; + + } else { + v->viewport_height[input_idx] = pipe->plane_res.scl_data.viewport.height; + v->viewport_width[input_idx] = pipe->plane_res.scl_data.viewport.width; + v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width; + v->scaler_recout_height[input_idx] = pipe->plane_res.scl_data.recout.height; + if (pipe->bottom_pipe && pipe->bottom_pipe->plane_state == pipe->plane_state) { + if (pipe->plane_state->rotation % 2 == 0) { + int viewport_end = pipe->plane_res.scl_data.viewport.width + + pipe->plane_res.scl_data.viewport.x; + int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.width + + pipe->bottom_pipe->plane_res.scl_data.viewport.x; + + if (viewport_end > viewport_b_end) + v->viewport_width[input_idx] = viewport_end + - pipe->bottom_pipe->plane_res.scl_data.viewport.x; + else + v->viewport_width[input_idx] = viewport_b_end + - pipe->plane_res.scl_data.viewport.x; + } else { + int viewport_end = pipe->plane_res.scl_data.viewport.height + + pipe->plane_res.scl_data.viewport.y; + int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.height + + pipe->bottom_pipe->plane_res.scl_data.viewport.y; + + if (viewport_end > viewport_b_end) + v->viewport_height[input_idx] = viewport_end + - pipe->bottom_pipe->plane_res.scl_data.viewport.y; + else + v->viewport_height[input_idx] = viewport_b_end + - pipe->plane_res.scl_data.viewport.y; + } + v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width + + pipe->bottom_pipe->plane_res.scl_data.recout.width; + } + + if (pipe->plane_state->rotation % 2 == 0) { + ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value + || v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]); + ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value + || v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]); + } else { + ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value + || v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]); + ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value + || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]); + } + + if (dc->debug.optimized_watermark) { + /* + * this method requires us to always re-calculate watermark when dcc change + * between flip. + */ + v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no; + } else { + /* + * allow us to disable dcc on the fly without re-calculating WM + * + * extra overhead for DCC is quite small. for 1080p WM without + * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us) + */ + unsigned int bpe; + + v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format( + pipe->plane_state->format, &bpe) ? 
dcn_bw_yes : dcn_bw_no; + } + + v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs( + pipe->plane_state->format); + v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs( + pipe->plane_state->tiling_info.gfx9.swizzle); + v->lb_bit_per_pixel[input_idx] = tl_lb_bpp_to_int(pipe->plane_res.scl_data.lb_params.depth); + v->override_hta_ps[input_idx] = pipe->plane_res.scl_data.taps.h_taps; + v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps; + v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c; + v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c; + /* + * Spreadsheet doesn't handle taps_c is one properly, + * need to force Chroma to always be scaled to pass + * bandwidth validation. + */ + if (v->override_hta_pschroma[input_idx] == 1) + v->override_hta_pschroma[input_idx] = 2; + if (v->override_vta_pschroma[input_idx] == 1) + v->override_vta_pschroma[input_idx] = 2; + v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor; + } + if (v->is_line_buffer_bpp_fixed == dcn_bw_yes) + v->lb_bit_per_pixel[input_idx] = v->line_buffer_fixed_bpp; + v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/ + v->output_format[input_idx] = pipe->stream->timing.pixel_encoding == + PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444; + v->output[input_idx] = pipe->stream->signal == + SIGNAL_TYPE_HDMI_TYPE_A ? dcn_bw_hdmi : dcn_bw_dp; + v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc; + if (v->output[input_idx] == dcn_bw_hdmi) { + switch (pipe->stream->timing.display_color_depth) { + case COLOR_DEPTH_101010: + v->output_deep_color[input_idx] = dcn_bw_encoder_10bpc; + break; + case COLOR_DEPTH_121212: + v->output_deep_color[input_idx] = dcn_bw_encoder_12bpc; + break; + case COLOR_DEPTH_161616: + v->output_deep_color[input_idx] = dcn_bw_encoder_16bpc; + break; + default: + break; + } + } + + input_idx++; + } + v->number_of_active_planes = input_idx; + + scaler_settings_calculation(v); + + hack_bounding_box(v, &dc->debug, context); + + mode_support_and_system_configuration(v); + + /* Unhack dppclk: dont bother with trying to pipe split if we cannot maintain dpm0 */ + if (v->voltage_level != 0 + && context->stream_count == 1 + && dc->debug.force_single_disp_pipe_split) { + v->max_dppclk[0] = v->max_dppclk_vmin0p65; + mode_support_and_system_configuration(v); + } + + if (v->voltage_level == 0 && + (dc->debug.sr_exit_time_dpm0_ns + || dc->debug.sr_enter_plus_exit_time_dpm0_ns)) { + + if (dc->debug.sr_enter_plus_exit_time_dpm0_ns) + v->sr_enter_plus_exit_time = + dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f; + if (dc->debug.sr_exit_time_dpm0_ns) + v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time; + context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time; + mode_support_and_system_configuration(v); + } + + display_pipe_configuration(v); + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_scan[k] == dcn_bw_hor) + v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k]; + else + v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k]; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->byte_per_pixel_dety[k] = 8.0; + v->byte_per_pixel_detc[k] = 0.0; + } else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { + v->byte_per_pixel_dety[k] = 4.0; + v->byte_per_pixel_detc[k] = 
0.0; + } else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->byte_per_pixel_dety[k] = 2.0; + v->byte_per_pixel_detc[k] = 0.0; + } else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->byte_per_pixel_dety[k] = 1.0; + v->byte_per_pixel_detc[k] = 2.0; + } else { + v->byte_per_pixel_dety[k] = 4.0f / 3.0f; + v->byte_per_pixel_detc[k] = 8.0f / 3.0f; + } + } + + v->total_data_read_bandwidth = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] * + dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k]; + v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] * + dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0; + v->total_data_read_bandwidth = v->total_data_read_bandwidth + + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k]; + } + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (v->voltage_level != number_of_states_plus_one && !fast_validate) { + float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; + + if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) + bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65; + else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72) + bw_consumed = v->fabric_and_dram_bandwidth_vmid0p72; + else if (bw_consumed < v->fabric_and_dram_bandwidth_vnom0p8) + bw_consumed = v->fabric_and_dram_bandwidth_vnom0p8; + else + bw_consumed = v->fabric_and_dram_bandwidth_vmax0p9; + + if (bw_consumed < v->fabric_and_dram_bandwidth) + if (dc->debug.voltage_align_fclk) + bw_consumed = v->fabric_and_dram_bandwidth; + + display_pipe_configuration(v); + /*calc_wm_sets_and_perf_params(context, v);*/ + /* Only 1 set is used by dcn since no noticeable + * performance improvement was measured and due to hw bug DEGVIDCN10-254 + */ + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; + + context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / + (ddr4_dram_factor_single_Channel * v->number_of_channels)); + if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) + context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32); + + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000); + context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000); + + context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000); + if (dc->debug.max_disp_clk == true) + context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000); + + if (context->bw_ctx.bw.dcn.clk.dispclk_khz < + dc->debug.min_disp_clk_khz) { + context->bw_ctx.bw.dcn.clk.dispclk_khz = + 
dc->debug.min_disp_clk_khz; + } + + context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / + v->dispclk_dppclk_ratio; + context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level]; + switch (v->voltage_level) { + case 0: + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = + (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000); + break; + case 1: + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = + (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000); + break; + case 2: + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = + (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000); + break; + default: + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = + (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000); + break; + } + + BW_VAL_TRACE_END_WATERMARKS(); + + for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /* skip inactive pipe */ + if (!pipe->stream) + continue; + /* skip all but first of split pipes */ + if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) + continue; + + pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx]; + pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx]; + pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx]; + pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx]; + + pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total; + pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total; + vesa_sync_start = pipe->stream->timing.v_addressable + + pipe->stream->timing.v_border_bottom + + pipe->stream->timing.v_front_porch; + + asic_blank_end = (pipe->stream->timing.v_total - + vesa_sync_start - + pipe->stream->timing.v_border_top) + * (pipe->stream->timing.flags.INTERLACE ? 1 : 0); + + asic_blank_start = asic_blank_end + + (pipe->stream->timing.v_border_top + + pipe->stream->timing.v_addressable + + pipe->stream->timing.v_border_bottom) + * (pipe->stream->timing.flags.INTERLACE ? 
1 : 0); + + pipe->pipe_dlg_param.vblank_start = asic_blank_start; + pipe->pipe_dlg_param.vblank_end = asic_blank_end; + + if (pipe->plane_state) { + struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; + + pipe->plane_state->update_flags.bits.full_update = 1; + + if (v->dpp_per_plane[input_idx] == 2 || + ((pipe->stream->view_format == + VIEW_3D_FORMAT_SIDE_BY_SIDE || + pipe->stream->view_format == + VIEW_3D_FORMAT_TOP_AND_BOTTOM) && + (pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM || + pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE))) { + if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { + /* update previously split pipe */ + hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx]; + hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx]; + hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx]; + hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx]; + + hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total; + hsplit_pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total; + hsplit_pipe->pipe_dlg_param.vblank_start = pipe->pipe_dlg_param.vblank_start; + hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end; + } else { + /* pipe not split previously needs split */ + hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe); + ASSERT(hsplit_pipe); + split_stream_across_pipes(&context->res_ctx, pool, pipe, hsplit_pipe); + } + + dcn_bw_calc_rq_dlg_ttu(dc, v, hsplit_pipe, input_idx); + } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { + /* merge previously split pipe */ + pipe->bottom_pipe = hsplit_pipe->bottom_pipe; + if (hsplit_pipe->bottom_pipe) + hsplit_pipe->bottom_pipe->top_pipe = pipe; + hsplit_pipe->plane_state = NULL; + hsplit_pipe->stream = NULL; + hsplit_pipe->top_pipe = NULL; + hsplit_pipe->bottom_pipe = NULL; + /* Clear plane_res and stream_res */ + memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res)); + memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res)); + resource_build_scaling_params(pipe); + } + /* for now important to do this after pipe split for building e2e params */ + dcn_bw_calc_rq_dlg_ttu(dc, v, pipe, input_idx); + } + + input_idx++; + } + } else if (v->voltage_level == number_of_states_plus_one) { + BW_VAL_TRACE_SKIP(fail); + } else if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + } + + if (v->voltage_level == 0) { + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = + dc->dcn_soc->sr_enter_plus_exit_time; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time; + } + + /* + * BW limit is set to prevent display from impacting other system functions + */ + + bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9; + bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit; + + DC_FP_END(); + + PERFORMANCE_TRACE_END(); + BW_VAL_TRACE_FINISH(); + + if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level( + dc->ctx->asic_id.chip_family, + dc->ctx->asic_id.hw_internal_rev, + dc->ctx->asic_id.pci_revision_id)) + return true; + else + return false; +} + +static unsigned int dcn_find_normalized_clock_vdd_Level( + const struct dc *dc, + enum dm_pp_clock_type clocks_type, + int clocks_in_khz) +{ + int vdd_level = dcn_bw_v_min0p65; + + if (clocks_in_khz == 0)/*todo some clock not in the considerations*/ + return vdd_level; + + switch (clocks_type) { + case 
DM_PP_CLOCK_TYPE_DISPLAY_CLK: + if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + break; + case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: + if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + break; + + case DM_PP_CLOCK_TYPE_DPPCLK: + if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + break; + + case DM_PP_CLOCK_TYPE_MEMORY_CLK: + { + unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels); + + if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + } + break; + + case DM_PP_CLOCK_TYPE_DCFCLK: + if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + break; + + default: + break; + } + return vdd_level; +} + +unsigned int dcn_find_dcfclk_suits_all( + const struct dc *dc, + struct dc_clocks *clocks) +{ + unsigned vdd_level, vdd_level_temp; + unsigned dcf_clk; + + /*find a common supported voltage level*/ + vdd_level = dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz); + vdd_level_temp = dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz); + + vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); + vdd_level_temp = dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz); + vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); + + vdd_level_temp = dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz); + vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); + 
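	/*
	 * Worked example with illustrative clock values: if the dispclk
	 * request alone maps to dcn_bw_v_mid0p72 while the phyclk, dppclk,
	 * fclk and dcfclk requests all fit within dcn_bw_v_min0p65, the
	 * repeated dcn_bw_max() calls keep vdd_level at dcn_bw_v_mid0p72,
	 * and the mapping below then returns dcfclkv_mid0p72 * 1000 as the
	 * common DCF clock in kHz.
	 */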
vdd_level_temp = dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz); + + /*find that level conresponding dcfclk*/ + vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); + if (vdd_level == dcn_bw_v_max0p91) { + BREAK_TO_DEBUGGER(); + dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000; + } else if (vdd_level == dcn_bw_v_max0p9) + dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000; + else if (vdd_level == dcn_bw_v_nom0p8) + dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000; + else if (vdd_level == dcn_bw_v_mid0p72) + dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000; + else + dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000; + + DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk); + return dcf_clk; +} + +static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks) +{ + int i; + + if (clks->num_levels == 0) + return false; + + for (i = 0; i < clks->num_levels; i++) + /* Ensure that the result is sane */ + if (clks->data[i].clocks_in_khz == 0) + return false; + + return true; +} + +void dcn_bw_update_from_pplib(struct dc *dc) +{ + struct dc_context *ctx = dc->ctx; + struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; + bool res; + unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx; + + /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ + res = dm_pp_get_clock_levels_by_type_with_voltage( + ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); + + DC_FP_START(); + + if (res) + res = verify_clock_values(&fclks); + + if (res) { + ASSERT(fclks.num_levels); + + vmin0p65_idx = 0; + vmid0p72_idx = fclks.num_levels - + (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1)); + vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 2 : 1); + vmax0p9_idx = fclks.num_levels - 1; + + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = + 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = + dc->dcn_soc->number_of_channels * + (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + } else + BREAK_TO_DEBUGGER(); + + DC_FP_END(); + + res = dm_pp_get_clock_levels_by_type_with_voltage( + ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); + + DC_FP_START(); + + if (res) + res = verify_clock_values(&dcfclks); + + if (res && dcfclks.num_levels >= 3) { + dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0; + dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0; + dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0; + dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0; + } else + BREAK_TO_DEBUGGER(); + + DC_FP_END(); +} + +void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) +{ + struct pp_smu_funcs_rv *pp = NULL; + struct pp_smu_wm_range_sets ranges = {0}; + int min_fclk_khz, min_dcfclk_khz, socclk_khz; + const int overdrive = 5000000; /* 5 GHz to cover Overdrive */ + + if (dc->res_pool->pp_smu) + pp = &dc->res_pool->pp_smu->rv_funcs; + if (!pp || !pp->set_wm_ranges) + return; + + DC_FP_START(); + min_fclk_khz = 
dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32; + min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000; + socclk_khz = dc->dcn_soc->socclk * 1000; + DC_FP_END(); + + /* Now notify PPLib/SMU about which Watermarks sets they should select + * depending on DPM state they are in. And update BW MGR GFX Engine and + * Memory clock member variables for Watermarks calculations for each + * Watermark Set. Only one watermark set for dcn1 due to hw bug DEGVIDCN10-254. + */ + /* SOCCLK does not affect anytihng but writeback for DCN so for now we dont + * care what the value is, hence min to overdrive level + */ + ranges.num_reader_wm_sets = WM_SET_COUNT; + ranges.num_writer_wm_sets = WM_SET_COUNT; + ranges.reader_wm_sets[0].wm_inst = WM_A; + ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000; + ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; + ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000; + ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; + ranges.writer_wm_sets[0].wm_inst = WM_A; + ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000; + ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; + ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000; + ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; + + if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) { + ranges.reader_wm_sets[0].wm_inst = WM_A; + ranges.reader_wm_sets[0].min_drain_clk_mhz = 300; + ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000; + ranges.reader_wm_sets[0].min_fill_clk_mhz = 800; + ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000; + ranges.writer_wm_sets[0].wm_inst = WM_A; + ranges.writer_wm_sets[0].min_fill_clk_mhz = 200; + ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000; + ranges.writer_wm_sets[0].min_drain_clk_mhz = 800; + ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000; + } + + ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0]; + ranges.reader_wm_sets[1].wm_inst = WM_B; + + ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0]; + ranges.reader_wm_sets[2].wm_inst = WM_C; + + ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0]; + ranges.reader_wm_sets[3].wm_inst = WM_D; + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + pp->set_wm_ranges(&pp->pp_smu, &ranges); +} + +void dcn_bw_sync_calcs_and_dml(struct dc *dc) +{ + DC_FP_START(); + DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n" + "sr_enter_plus_exit_time: %f ns\n" + "urgent_latency: %f ns\n" + "write_back_latency: %f ns\n" + "percent_of_ideal_drambw_received_after_urg_latency: %f %%\n" + "max_request_size: %d bytes\n" + "dcfclkv_max0p9: %f kHz\n" + "dcfclkv_nom0p8: %f kHz\n" + "dcfclkv_mid0p72: %f kHz\n" + "dcfclkv_min0p65: %f kHz\n" + "max_dispclk_vmax0p9: %f kHz\n" + "max_dispclk_vnom0p8: %f kHz\n" + "max_dispclk_vmid0p72: %f kHz\n" + "max_dispclk_vmin0p65: %f kHz\n" + "max_dppclk_vmax0p9: %f kHz\n" + "max_dppclk_vnom0p8: %f kHz\n" + "max_dppclk_vmid0p72: %f kHz\n" + "max_dppclk_vmin0p65: %f kHz\n" + "socclk: %f kHz\n" + "fabric_and_dram_bandwidth_vmax0p9: %f MB/s\n" + "fabric_and_dram_bandwidth_vnom0p8: %f MB/s\n" + "fabric_and_dram_bandwidth_vmid0p72: %f MB/s\n" + "fabric_and_dram_bandwidth_vmin0p65: %f MB/s\n" + "phyclkv_max0p9: %f kHz\n" + "phyclkv_nom0p8: %f kHz\n" + "phyclkv_mid0p72: %f kHz\n" + "phyclkv_min0p65: %f kHz\n" + "downspreading: %f %%\n" + "round_trip_ping_latency_cycles: %d DCFCLK Cycles\n" + "urgent_out_of_order_return_per_channel: %d Bytes\n" + "number_of_channels: %d\n" + "vmm_page_size: %d Bytes\n" + 
"dram_clock_change_latency: %f ns\n" + "return_bus_width: %d Bytes\n", + dc->dcn_soc->sr_exit_time * 1000, + dc->dcn_soc->sr_enter_plus_exit_time * 1000, + dc->dcn_soc->urgent_latency * 1000, + dc->dcn_soc->write_back_latency * 1000, + dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency, + dc->dcn_soc->max_request_size, + dc->dcn_soc->dcfclkv_max0p9 * 1000, + dc->dcn_soc->dcfclkv_nom0p8 * 1000, + dc->dcn_soc->dcfclkv_mid0p72 * 1000, + dc->dcn_soc->dcfclkv_min0p65 * 1000, + dc->dcn_soc->max_dispclk_vmax0p9 * 1000, + dc->dcn_soc->max_dispclk_vnom0p8 * 1000, + dc->dcn_soc->max_dispclk_vmid0p72 * 1000, + dc->dcn_soc->max_dispclk_vmin0p65 * 1000, + dc->dcn_soc->max_dppclk_vmax0p9 * 1000, + dc->dcn_soc->max_dppclk_vnom0p8 * 1000, + dc->dcn_soc->max_dppclk_vmid0p72 * 1000, + dc->dcn_soc->max_dppclk_vmin0p65 * 1000, + dc->dcn_soc->socclk * 1000, + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000, + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000, + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000, + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000, + dc->dcn_soc->phyclkv_max0p9 * 1000, + dc->dcn_soc->phyclkv_nom0p8 * 1000, + dc->dcn_soc->phyclkv_mid0p72 * 1000, + dc->dcn_soc->phyclkv_min0p65 * 1000, + dc->dcn_soc->downspreading * 100, + dc->dcn_soc->round_trip_ping_latency_cycles, + dc->dcn_soc->urgent_out_of_order_return_per_channel, + dc->dcn_soc->number_of_channels, + dc->dcn_soc->vmm_page_size, + dc->dcn_soc->dram_clock_change_latency * 1000, + dc->dcn_soc->return_bus_width); + DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %f\n" + "det_buffer_size_in_kbyte: %f\n" + "dpp_output_buffer_pixels: %f\n" + "opp_output_buffer_lines: %f\n" + "pixel_chunk_size_in_kbyte: %f\n" + "pte_enable: %d\n" + "pte_chunk_size: %d kbytes\n" + "meta_chunk_size: %d kbytes\n" + "writeback_chunk_size: %d kbytes\n" + "odm_capability: %d\n" + "dsc_capability: %d\n" + "line_buffer_size: %d bits\n" + "max_line_buffer_lines: %d\n" + "is_line_buffer_bpp_fixed: %d\n" + "line_buffer_fixed_bpp: %d\n" + "writeback_luma_buffer_size: %d kbytes\n" + "writeback_chroma_buffer_size: %d kbytes\n" + "max_num_dpp: %d\n" + "max_num_writeback: %d\n" + "max_dchub_topscl_throughput: %d pixels/dppclk\n" + "max_pscl_tolb_throughput: %d pixels/dppclk\n" + "max_lb_tovscl_throughput: %d pixels/dppclk\n" + "max_vscl_tohscl_throughput: %d pixels/dppclk\n" + "max_hscl_ratio: %f\n" + "max_vscl_ratio: %f\n" + "max_hscl_taps: %d\n" + "max_vscl_taps: %d\n" + "pte_buffer_size_in_requests: %d\n" + "dispclk_ramping_margin: %f %%\n" + "under_scan_factor: %f %%\n" + "max_inter_dcn_tile_repeaters: %d\n" + "can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n" + "bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n" + "dcfclk_cstate_latency: %d\n", + dc->dcn_ip->rob_buffer_size_in_kbyte, + dc->dcn_ip->det_buffer_size_in_kbyte, + dc->dcn_ip->dpp_output_buffer_pixels, + dc->dcn_ip->opp_output_buffer_lines, + dc->dcn_ip->pixel_chunk_size_in_kbyte, + dc->dcn_ip->pte_enable, + dc->dcn_ip->pte_chunk_size, + dc->dcn_ip->meta_chunk_size, + dc->dcn_ip->writeback_chunk_size, + dc->dcn_ip->odm_capability, + dc->dcn_ip->dsc_capability, + dc->dcn_ip->line_buffer_size, + dc->dcn_ip->max_line_buffer_lines, + dc->dcn_ip->is_line_buffer_bpp_fixed, + dc->dcn_ip->line_buffer_fixed_bpp, + dc->dcn_ip->writeback_luma_buffer_size, + dc->dcn_ip->writeback_chroma_buffer_size, + dc->dcn_ip->max_num_dpp, + dc->dcn_ip->max_num_writeback, + dc->dcn_ip->max_dchub_topscl_throughput, + dc->dcn_ip->max_pscl_tolb_throughput, + 
dc->dcn_ip->max_lb_tovscl_throughput, + dc->dcn_ip->max_vscl_tohscl_throughput, + dc->dcn_ip->max_hscl_ratio, + dc->dcn_ip->max_vscl_ratio, + dc->dcn_ip->max_hscl_taps, + dc->dcn_ip->max_vscl_taps, + dc->dcn_ip->pte_buffer_size_in_requests, + dc->dcn_ip->dispclk_ramping_margin, + dc->dcn_ip->under_scan_factor * 100, + dc->dcn_ip->max_inter_dcn_tile_repeaters, + dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one, + dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed, + dc->dcn_ip->dcfclk_cstate_latency); + + dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time; + dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time; + dc->dml.soc.urgent_latency_us = dc->dcn_soc->urgent_latency; + dc->dml.soc.writeback_latency_us = dc->dcn_soc->write_back_latency; + dc->dml.soc.ideal_dram_bw_after_urgent_percent = + dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency; + dc->dml.soc.max_request_size_bytes = dc->dcn_soc->max_request_size; + dc->dml.soc.downspread_percent = dc->dcn_soc->downspreading; + dc->dml.soc.round_trip_ping_latency_dcfclk_cycles = + dc->dcn_soc->round_trip_ping_latency_cycles; + dc->dml.soc.urgent_out_of_order_return_per_channel_bytes = + dc->dcn_soc->urgent_out_of_order_return_per_channel; + dc->dml.soc.num_chans = dc->dcn_soc->number_of_channels; + dc->dml.soc.vmm_page_size_bytes = dc->dcn_soc->vmm_page_size; + dc->dml.soc.dram_clock_change_latency_us = dc->dcn_soc->dram_clock_change_latency; + dc->dml.soc.return_bus_width_bytes = dc->dcn_soc->return_bus_width; + + dc->dml.ip.rob_buffer_size_kbytes = dc->dcn_ip->rob_buffer_size_in_kbyte; + dc->dml.ip.det_buffer_size_kbytes = dc->dcn_ip->det_buffer_size_in_kbyte; + dc->dml.ip.dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels; + dc->dml.ip.opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines; + dc->dml.ip.pixel_chunk_size_kbytes = dc->dcn_ip->pixel_chunk_size_in_kbyte; + dc->dml.ip.pte_enable = dc->dcn_ip->pte_enable == dcn_bw_yes; + dc->dml.ip.pte_chunk_size_kbytes = dc->dcn_ip->pte_chunk_size; + dc->dml.ip.meta_chunk_size_kbytes = dc->dcn_ip->meta_chunk_size; + dc->dml.ip.writeback_chunk_size_kbytes = dc->dcn_ip->writeback_chunk_size; + dc->dml.ip.line_buffer_size_bits = dc->dcn_ip->line_buffer_size; + dc->dml.ip.max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines; + dc->dml.ip.IsLineBufferBppFixed = dc->dcn_ip->is_line_buffer_bpp_fixed == dcn_bw_yes; + dc->dml.ip.LineBufferFixedBpp = dc->dcn_ip->line_buffer_fixed_bpp; + dc->dml.ip.writeback_luma_buffer_size_kbytes = dc->dcn_ip->writeback_luma_buffer_size; + dc->dml.ip.writeback_chroma_buffer_size_kbytes = dc->dcn_ip->writeback_chroma_buffer_size; + dc->dml.ip.max_num_dpp = dc->dcn_ip->max_num_dpp; + dc->dml.ip.max_num_wb = dc->dcn_ip->max_num_writeback; + dc->dml.ip.max_dchub_pscl_bw_pix_per_clk = dc->dcn_ip->max_dchub_topscl_throughput; + dc->dml.ip.max_pscl_lb_bw_pix_per_clk = dc->dcn_ip->max_pscl_tolb_throughput; + dc->dml.ip.max_lb_vscl_bw_pix_per_clk = dc->dcn_ip->max_lb_tovscl_throughput; + dc->dml.ip.max_vscl_hscl_bw_pix_per_clk = dc->dcn_ip->max_vscl_tohscl_throughput; + dc->dml.ip.max_hscl_ratio = dc->dcn_ip->max_hscl_ratio; + dc->dml.ip.max_vscl_ratio = dc->dcn_ip->max_vscl_ratio; + dc->dml.ip.max_hscl_taps = dc->dcn_ip->max_hscl_taps; + dc->dml.ip.max_vscl_taps = dc->dcn_ip->max_vscl_taps; + /*pte_buffer_size_in_requests missing in dml*/ + dc->dml.ip.dispclk_ramp_margin_percent = dc->dcn_ip->dispclk_ramping_margin; + dc->dml.ip.underscan_factor = 
dc->dcn_ip->under_scan_factor; + dc->dml.ip.max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters; + dc->dml.ip.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = + dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes; + dc->dml.ip.bug_forcing_LC_req_same_size_fixed = + dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes; + dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency; + DC_FP_END(); +} only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "resource.h" + +#include "dcn20_fpu.h" + +/** + * DOC: DCN2x FPU manipulation Overview + * + * The DCN architecture relies on FPU operations, which require special + * compilation flags and the use of kernel_fpu_begin/end functions; ideally, we + * want to avoid spreading FPU access across multiple files. With this idea in + * mind, this file aims to centralize all DCN20 and DCN2.1 (DCN2x) functions + * that require FPU access in a single place. Code in this file follows the + * following code pattern: + * + * 1. Functions that use FPU operations should be isolated in static functions. + * 2. The FPU functions should have the noinline attribute to ensure anything + * that deals with FP register is contained within this call. + * 3. All function that needs to be accessed outside this file requires a + * public interface that not uses any FPU reference. + * 4. Developers **must not** use DC_FP_START/END in this file, but they need + * to ensure that the caller invokes it before access any function available + * in this file. For this reason, public functions in this file must invoke + * dc_assert_fp_enabled(); + * + * Let's expand a little bit more the idea in the code pattern. To fully + * isolate FPU operations in a single place, we must avoid situations where + * compilers spill FP values to registers due to FP enable in a specific C + * file. Note that even if we isolate all FPU functions in a single file and + * call its interface from other files, the compiler might enable the use of + * FPU before we call DC_FP_START. 
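 * As a minimal sketch of the calling pattern this implies (assuming only
 * the DC_FP_START()/DC_FP_END() macros and the
 * dcn20_populate_dml_writeback_from_context() interface declared in this
 * file; caller-side names such as context and pipes are illustrative):
 *
 *	DC_FP_START();
 *	dcn20_populate_dml_writeback_from_context(dc, &context->res_ctx, pipes);
 *	DC_FP_END();
 *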
Nevertheless, it is the programmer's + * responsibility to invoke DC_FP_START/END in the correct place. To highlight + * situations where developers forgot to use the FP protection before calling + * the DC FPU interface functions, we introduce a helper that checks if the + * function is invoked under FP protection. If not, it will trigger a kernel + * warning. + */ + +void dcn20_populate_dml_writeback_from_context(struct dc *dc, + struct resource_context *res_ctx, + display_e2e_pipe_params_st *pipes) +{ + int pipe_cnt, i; + + dc_assert_fp_enabled(); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0]; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + + /* Set writeback information */ + pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0; + pipes[pipe_cnt].dout.num_active_wb++; + pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height; + pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width; + pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width; + pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height; + pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1; + pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1; + pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c; + pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c; + pipes[pipe_cnt].dout.wb.wb_hratio = 1.0; + pipes[pipe_cnt].dout.wb.wb_vratio = 1.0; + if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) { + if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC) + pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8; + else + pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10; + } else { + pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32; + } + + pipe_cnt++; + } +} only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN20_FPU_H__ +#define __DCN20_FPU_H__ + +void dcn20_populate_dml_writeback_from_context(struct dc *dc, + struct resource_context *res_ctx, + display_e2e_pipe_params_st *pipes); + +#endif /* __DCN20_FPU_H__ */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c @@ -109,6 +109,7 @@ i915_buddy_free_list(mm, &bman_res->blocks); mutex_unlock(&bman->lock); err_free_res: + ttm_resource_fini(man, &bman_res->base); kfree(bman_res); return err; } @@ -123,6 +124,7 @@ i915_buddy_free_list(&bman->mm, &bman_res->blocks); mutex_unlock(&bman->lock); + ttm_resource_fini(man, res); kfree(bman_res); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -267,12 +267,14 @@ mode.htotal >>= 1; mode.hsync_start >>= 1; mode.hsync_end >>= 1; + mode.hskew >>= 1; DPU_DEBUG_VIDENC(phys_enc, - "split_role %d, halve horizontal %d %d %d %d\n", + "split_role %d, halve horizontal %d %d %d %d %d\n", phys_enc->split_role, mode.hdisplay, mode.htotal, - mode.hsync_start, mode.hsync_end); + mode.hsync_start, mode.hsync_end, + mode.hskew); } drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/nouveau/nouveau_mem.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -162,11 +162,12 @@ } void -nouveau_mem_del(struct ttm_resource *reg) +nouveau_mem_del(struct ttm_resource_manager *man, struct ttm_resource *reg) { struct nouveau_mem *mem = nouveau_mem(reg); nouveau_mem_fini(mem); + ttm_resource_fini(man, reg); kfree(mem); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/nouveau/nouveau_mem.h +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/nouveau/nouveau_mem.h @@ -23,7 +23,8 @@ int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp, struct ttm_resource **); -void nouveau_mem_del(struct ttm_resource *); +void nouveau_mem_del(struct ttm_resource_manager *man, + struct ttm_resource *); int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page); int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *); void nouveau_mem_fini(struct nouveau_mem *); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -36,9 +36,10 @@ #include static void -nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg) +nouveau_manager_del(struct ttm_resource_manager *man, + struct ttm_resource *reg) { - nouveau_mem_del(reg); + nouveau_mem_del(man, reg); } static int @@ -62,7 +63,7 @@ ret = nouveau_mem_vram(*res, nvbo->contig, nvbo->page); if (ret) { - nouveau_mem_del(*res); + nouveau_mem_del(man, *res); return ret; } @@ -118,7 +119,7 @@ ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0, (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]); if (ret) { - nouveau_mem_del(*res); + nouveau_mem_del(man, *res); return ret; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -154,11 +154,17 @@ return (void *)fw; } +static void +shadow_fw_release(void *fw) +{ + release_firmware(fw); +} + static const struct nvbios_source shadow_fw = { .name = "firmware", .init 
= shadow_fw_init, - .fini = (void(*)(void *))release_firmware, + .fini = shadow_fw_release, .read = shadow_fw_read, .rw = false, }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -313,7 +313,7 @@ struct nv50_instobj *iobj = nv50_instobj(memory); struct nvkm_instmem *imem = &iobj->imem->base; struct nvkm_vma *bar; - void *map = map; + void *map; mutex_lock(&imem->mutex); if (likely(iobj->lru.next)) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/radeon/ni.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/radeon/ni.c @@ -813,7 +813,7 @@ err = 0; } else if (rdev->smc_fw->size != smc_req_size) { pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n", - rdev->mc_fw->size, fw_name); + rdev->smc_fw->size, fw_name); err = -EINVAL; } } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/tegra/Kconfig +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/Kconfig @@ -5,6 +5,7 @@ depends on COMMON_CLK depends on DRM depends on OF + select DRM_DP_AUX_BUS select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/tegra/dc.h +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/dc.h @@ -76,6 +76,7 @@ bool has_win_b_vfilter_mem_client; bool has_win_c_without_vert_filter; bool plane_tiled_memory_bandwidth_x2; + bool has_pll_d2_out0; }; struct tegra_dc { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/tegra/fb.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/fb.c @@ -165,6 +165,7 @@ if (gem->size < size) { err = -EINVAL; + drm_gem_object_put(gem); goto unreference; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/tegra/output.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/output.c @@ -139,8 +139,10 @@ GPIOD_IN, "HDMI hotplug detect"); if (IS_ERR(output->hpd_gpio)) { - if (PTR_ERR(output->hpd_gpio) != -ENOENT) - return PTR_ERR(output->hpd_gpio); + if (PTR_ERR(output->hpd_gpio) != -ENOENT) { + err = PTR_ERR(output->hpd_gpio); + goto put_i2c; + } output->hpd_gpio = NULL; } @@ -149,7 +151,7 @@ err = gpiod_to_irq(output->hpd_gpio); if (err < 0) { dev_err(output->dev, "gpiod_to_irq(): %d\n", err); - return err; + goto put_i2c; } output->hpd_irq = err; @@ -162,7 +164,7 @@ if (err < 0) { dev_err(output->dev, "failed to request IRQ#%u: %d\n", output->hpd_irq, err); - return err; + goto put_i2c; } output->connector.polled = DRM_CONNECTOR_POLL_HPD; @@ -176,6 +178,12 @@ } return 0; + +put_i2c: + if (output->ddc) + i2c_put_adapter(output->ddc); + + return err; } void tegra_output_remove(struct tegra_output *output) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/tegra/rgb.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/tegra/rgb.c @@ -17,6 +17,8 @@ struct tegra_output output; struct tegra_dc *dc; + struct clk *pll_d_out0; + struct clk *pll_d2_out0; struct clk *clk_parent; struct clk *clk; }; @@ -123,6 +125,18 @@ tegra_dc_commit(rgb->dc); } +static bool tegra_rgb_pll_rate_change_allowed(struct tegra_rgb *rgb) +{ + if (!rgb->pll_d2_out0) + return false; + + if (!clk_is_match(rgb->clk_parent, rgb->pll_d_out0) && + !clk_is_match(rgb->clk_parent, rgb->pll_d2_out0)) + return false; + + return true; +} + static int tegra_rgb_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -151,8 +165,17 @@ * and hope that the desired frequency can be matched (or at least * matched 
sufficiently close that the panel will still work). */ - div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2; - pclk = 0; + if (tegra_rgb_pll_rate_change_allowed(rgb)) { + /* + * Set display controller clock to x2 of PCLK in order to + * produce higher resolution pulse positions. + */ + div = 2; + pclk *= 2; + } else { + div = ((clk_get_rate(rgb->clk) * 2) / pclk) - 2; + pclk = 0; + } err = tegra_dc_state_setup_clock(dc, crtc_state, rgb->clk_parent, pclk, div); @@ -195,31 +218,61 @@ rgb->clk = devm_clk_get(dc->dev, NULL); if (IS_ERR(rgb->clk)) { dev_err(dc->dev, "failed to get clock\n"); - return PTR_ERR(rgb->clk); + err = PTR_ERR(rgb->clk); + goto remove; } rgb->clk_parent = devm_clk_get(dc->dev, "parent"); if (IS_ERR(rgb->clk_parent)) { dev_err(dc->dev, "failed to get parent clock\n"); - return PTR_ERR(rgb->clk_parent); + err = PTR_ERR(rgb->clk_parent); + goto remove; } err = clk_set_parent(rgb->clk, rgb->clk_parent); if (err < 0) { dev_err(dc->dev, "failed to set parent clock: %d\n", err); - return err; + goto remove; + } + + rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0"); + if (IS_ERR(rgb->pll_d_out0)) { + err = PTR_ERR(rgb->pll_d_out0); + dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err); + goto remove; + } + + if (dc->soc->has_pll_d2_out0) { + rgb->pll_d2_out0 = clk_get_sys(NULL, "pll_d2_out0"); + if (IS_ERR(rgb->pll_d2_out0)) { + err = PTR_ERR(rgb->pll_d2_out0); + dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err); + goto put_pll; + } } dc->rgb = &rgb->output; return 0; + +put_pll: + clk_put(rgb->pll_d_out0); +remove: + tegra_output_remove(&rgb->output); + return err; } int tegra_dc_rgb_remove(struct tegra_dc *dc) { + struct tegra_rgb *rgb; + if (!dc->rgb) return 0; + rgb = to_rgb(dc->rgb); + clk_put(rgb->pll_d2_out0); + clk_put(rgb->pll_d_out0); + tegra_output_remove(dc->rgb); dc->rgb = NULL; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/tidss/tidss_plane.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/tidss/tidss_plane.c @@ -210,7 +210,7 @@ drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs); - drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0, + drm_plane_create_zpos_property(&tplane->plane, tidss->num_planes, 0, num_planes - 1); ret = drm_plane_create_color_properties(&tplane->plane, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/ttm/ttm_range_manager.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -89,6 +89,7 @@ spin_unlock(&rman->lock); if (unlikely(ret)) { + ttm_resource_fini(man, *res); kfree(node); return ret; } @@ -108,6 +109,7 @@ drm_mm_remove_node(&node->mm_nodes[0]); spin_unlock(&rman->lock); + ttm_resource_fini(man, res); kfree(node); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/ttm/ttm_resource.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/ttm/ttm_resource.c @@ -29,6 +29,14 @@ #include #include +/** + * ttm_resource_init - resource object constructure + * @bo: buffer object this resources is allocated for + * @place: placement of the resource + * @res: the resource object to inistilize + * + * Initialize a new resource object. Counterpart of &ttm_resource_fini. 
+ */ void ttm_resource_init(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource *res) @@ -44,6 +52,21 @@ } EXPORT_SYMBOL(ttm_resource_init); +/** + * ttm_resource_fini - resource destructor + * @man: the resource manager this resource belongs to + * @res: the resource to clean up + * + * Should be used by resource manager backends to clean up the TTM resource + * objects before freeing the underlying structure. Counterpart of + * &ttm_resource_init + */ +void ttm_resource_fini(struct ttm_resource_manager *man, + struct ttm_resource *res) +{ +} +EXPORT_SYMBOL(ttm_resource_fini); + int ttm_resource_alloc(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource **res_ptr) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/ttm/ttm_sys_manager.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/ttm/ttm_sys_manager.c @@ -23,6 +23,7 @@ static void ttm_sys_man_free(struct ttm_resource_manager *man, struct ttm_resource *res) { + ttm_resource_fini(man, res); kfree(res); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ linux-aws-5.15-5.15.0/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -64,8 +64,11 @@ ttm_resource_init(bo, place, *res); id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); - if (id < 0) + if (id < 0) { + ttm_resource_fini(man, *res); + kfree(*res); return id; + } spin_lock(&gman->lock); @@ -116,6 +119,7 @@ gman->used_gmr_pages -= (*res)->num_pages; spin_unlock(&gman->lock); ida_free(&gman->gmr_ida, id); + ttm_resource_fini(man, *res); kfree(*res); return -ENOSPC; } @@ -129,6 +133,7 @@ spin_lock(&gman->lock); gman->used_gmr_pages -= res->num_pages; spin_unlock(&gman->lock); + ttm_resource_fini(man, res); kfree(res); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/infiniband/hw/mlx5/wr.c +++ linux-aws-5.15-5.15.0/drivers/infiniband/hw/mlx5/wr.c @@ -128,7 +128,7 @@ */ copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start, left); - memcpy(eseg->inline_hdr.start, pdata, copysz); + memcpy(eseg->inline_hdr.data, pdata, copysz); stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) - sizeof(eseg->inline_hdr.start) + copysz, 16); *size += stride / 16; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/input/keyboard/gpio_keys_polled.c +++ linux-aws-5.15-5.15.0/drivers/input/keyboard/gpio_keys_polled.c @@ -319,12 +319,10 @@ error = devm_gpio_request_one(dev, button->gpio, flags, button->desc ? : DRV_NAME); - if (error) { - dev_err(dev, - "unable to claim gpio %u, err=%d\n", - button->gpio, error); - return error; - } + if (error) + return dev_err_probe(dev, error, + "unable to claim gpio %u\n", + button->gpio); bdata->gpiod = gpio_to_desc(button->gpio); if (!bdata->gpiod) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/input/misc/iqs269a.c +++ linux-aws-5.15-5.15.0/drivers/input/misc/iqs269a.c @@ -9,6 +9,7 @@ * axial sliders presented by the device. 
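A minimal sketch of the init/fini pairing these hunks establish for TTM resource manager backends, assuming a hypothetical backend wrapper (struct my_resource) and callbacks (my_man_alloc, my_man_free); ttm_resource_init() and ttm_resource_fini() are the functions documented and wired up by the patch:

	#include <linux/slab.h>
	#include <drm/ttm/ttm_resource.h>

	struct my_resource {			/* hypothetical backend wrapper */
		struct ttm_resource base;
		/* backend-private bookkeeping would go here */
	};

	static int my_man_alloc(struct ttm_resource_manager *man,
				struct ttm_buffer_object *bo,
				const struct ttm_place *place,
				struct ttm_resource **res)
	{
		struct my_resource *node = kzalloc(sizeof(*node), GFP_KERNEL);

		if (!node)
			return -ENOMEM;

		ttm_resource_init(bo, place, &node->base);

		/*
		 * Any later failure must undo the constructor before freeing,
		 * mirroring the error paths added above:
		 *	ttm_resource_fini(man, &node->base);
		 *	kfree(node);
		 */

		*res = &node->base;
		return 0;
	}

	static void my_man_free(struct ttm_resource_manager *man,
				struct ttm_resource *res)
	{
		ttm_resource_fini(man, res);	/* counterpart of ttm_resource_init() */
		kfree(res);			/* base is the first member, so this frees the wrapper */
	}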
*/ +#include #include #include #include @@ -96,8 +97,6 @@ #define IQS269_MISC_B_TRACKING_UI_ENABLE BIT(4) #define IQS269_MISC_B_FILT_STR_SLIDER GENMASK(1, 0) -#define IQS269_CHx_SETTINGS 0x8C - #define IQS269_CHx_ENG_A_MEAS_CAP_SIZE BIT(15) #define IQS269_CHx_ENG_A_RX_GND_INACTIVE BIT(13) #define IQS269_CHx_ENG_A_LOCAL_CAP_SIZE BIT(12) @@ -146,14 +145,7 @@ #define IQS269_NUM_CH 8 #define IQS269_NUM_SL 2 -#define IQS269_ATI_POLL_SLEEP_US (iqs269->delay_mult * 10000) -#define IQS269_ATI_POLL_TIMEOUT_US (iqs269->delay_mult * 500000) -#define IQS269_ATI_STABLE_DELAY_MS (iqs269->delay_mult * 150) - -#define IQS269_PWR_MODE_POLL_SLEEP_US IQS269_ATI_POLL_SLEEP_US -#define IQS269_PWR_MODE_POLL_TIMEOUT_US IQS269_ATI_POLL_TIMEOUT_US - -#define iqs269_irq_wait() usleep_range(100, 150) +#define iqs269_irq_wait() usleep_range(200, 250) enum iqs269_local_cap_size { IQS269_LOCAL_CAP_SIZE_0, @@ -245,6 +237,18 @@ u8 padding; } __packed; +struct iqs269_ch_reg { + u8 rx_enable; + u8 tx_enable; + __be16 engine_a; + __be16 engine_b; + __be16 ati_comp; + u8 thresh[3]; + u8 hyst; + u8 assoc_select; + u8 assoc_weight; +} __packed; + struct iqs269_sys_reg { __be16 general; u8 active; @@ -266,18 +270,7 @@ u8 timeout_swipe; u8 thresh_swipe; u8 redo_ati; -} __packed; - -struct iqs269_ch_reg { - u8 rx_enable; - u8 tx_enable; - __be16 engine_a; - __be16 engine_b; - __be16 ati_comp; - u8 thresh[3]; - u8 hyst; - u8 assoc_select; - u8 assoc_weight; + struct iqs269_ch_reg ch_reg[IQS269_NUM_CH]; } __packed; struct iqs269_flags { @@ -292,13 +285,11 @@ struct regmap *regmap; struct mutex lock; struct iqs269_switch_desc switches[ARRAY_SIZE(iqs269_events)]; - struct iqs269_ch_reg ch_reg[IQS269_NUM_CH]; struct iqs269_sys_reg sys_reg; + struct completion ati_done; struct input_dev *keypad; struct input_dev *slider[IQS269_NUM_SL]; unsigned int keycode[ARRAY_SIZE(iqs269_events) * IQS269_NUM_CH]; - unsigned int suspend_mode; - unsigned int delay_mult; unsigned int ch_num; bool hall_enable; bool ati_current; @@ -307,6 +298,7 @@ static int iqs269_ati_mode_set(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int mode) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_a; if (ch_num >= IQS269_NUM_CH) @@ -317,12 +309,12 @@ mutex_lock(&iqs269->lock); - engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a); + engine_a = be16_to_cpu(ch_reg[ch_num].engine_a); engine_a &= ~IQS269_CHx_ENG_A_ATI_MODE_MASK; engine_a |= (mode << IQS269_CHx_ENG_A_ATI_MODE_SHIFT); - iqs269->ch_reg[ch_num].engine_a = cpu_to_be16(engine_a); + ch_reg[ch_num].engine_a = cpu_to_be16(engine_a); iqs269->ati_current = false; mutex_unlock(&iqs269->lock); @@ -333,13 +325,14 @@ static int iqs269_ati_mode_get(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int *mode) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_a; if (ch_num >= IQS269_NUM_CH) return -EINVAL; mutex_lock(&iqs269->lock); - engine_a = be16_to_cpu(iqs269->ch_reg[ch_num].engine_a); + engine_a = be16_to_cpu(ch_reg[ch_num].engine_a); mutex_unlock(&iqs269->lock); engine_a &= IQS269_CHx_ENG_A_ATI_MODE_MASK; @@ -351,6 +344,7 @@ static int iqs269_ati_base_set(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int base) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_b; if (ch_num >= IQS269_NUM_CH) @@ -379,12 +373,12 @@ mutex_lock(&iqs269->lock); - engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); engine_b &= ~IQS269_CHx_ENG_B_ATI_BASE_MASK; engine_b |= base; - 
iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); + ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); iqs269->ati_current = false; mutex_unlock(&iqs269->lock); @@ -395,13 +389,14 @@ static int iqs269_ati_base_get(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int *base) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_b; if (ch_num >= IQS269_NUM_CH) return -EINVAL; mutex_lock(&iqs269->lock); - engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); mutex_unlock(&iqs269->lock); switch (engine_b & IQS269_CHx_ENG_B_ATI_BASE_MASK) { @@ -429,6 +424,7 @@ static int iqs269_ati_target_set(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int target) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_b; if (ch_num >= IQS269_NUM_CH) @@ -439,12 +435,12 @@ mutex_lock(&iqs269->lock); - engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); engine_b &= ~IQS269_CHx_ENG_B_ATI_TARGET_MASK; engine_b |= target / 32; - iqs269->ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); + ch_reg[ch_num].engine_b = cpu_to_be16(engine_b); iqs269->ati_current = false; mutex_unlock(&iqs269->lock); @@ -455,13 +451,14 @@ static int iqs269_ati_target_get(struct iqs269_private *iqs269, unsigned int ch_num, unsigned int *target) { + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; u16 engine_b; if (ch_num >= IQS269_NUM_CH) return -EINVAL; mutex_lock(&iqs269->lock); - engine_b = be16_to_cpu(iqs269->ch_reg[ch_num].engine_b); + engine_b = be16_to_cpu(ch_reg[ch_num].engine_b); mutex_unlock(&iqs269->lock); *target = (engine_b & IQS269_CHx_ENG_B_ATI_TARGET_MASK) * 32; @@ -531,13 +528,7 @@ if (fwnode_property_present(ch_node, "azoteq,slider1-select")) iqs269->sys_reg.slider_select[1] |= BIT(reg); - ch_reg = &iqs269->ch_reg[reg]; - - error = regmap_raw_read(iqs269->regmap, - IQS269_CHx_SETTINGS + reg * sizeof(*ch_reg) / 2, - ch_reg, sizeof(*ch_reg)); - if (error) - return error; + ch_reg = &iqs269->sys_reg.ch_reg[reg]; error = iqs269_parse_mask(ch_node, "azoteq,rx-enable", &ch_reg->rx_enable); @@ -694,6 +685,7 @@ dev_err(&client->dev, "Invalid channel %u threshold: %u\n", reg, val); + fwnode_handle_put(ev_node); return -EINVAL; } @@ -707,6 +699,7 @@ dev_err(&client->dev, "Invalid channel %u hysteresis: %u\n", reg, val); + fwnode_handle_put(ev_node); return -EINVAL; } @@ -721,8 +714,16 @@ } } - if (fwnode_property_read_u32(ev_node, "linux,code", &val)) + error = fwnode_property_read_u32(ev_node, "linux,code", &val); + fwnode_handle_put(ev_node); + if (error == -EINVAL) { continue; + } else if (error) { + dev_err(&client->dev, + "Failed to read channel %u code: %d\n", reg, + error); + return error; + } switch (reg) { case IQS269_CHx_HALL_ACTIVE: @@ -759,17 +760,6 @@ iqs269->hall_enable = device_property_present(&client->dev, "azoteq,hall-enable"); - if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode", - &val)) { - if (val > IQS269_SYS_SETTINGS_PWR_MODE_MAX) { - dev_err(&client->dev, "Invalid suspend mode: %u\n", - val); - return -EINVAL; - } - - iqs269->suspend_mode = val; - } - error = regmap_raw_read(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg, sizeof(*sys_reg)); if (error) @@ -980,13 +970,8 @@ general = be16_to_cpu(sys_reg->general); - if (device_property_present(&client->dev, "azoteq,clk-div")) { + if (device_property_present(&client->dev, "azoteq,clk-div")) general |= IQS269_SYS_SETTINGS_CLK_DIV; - iqs269->delay_mult = 4; - } else { - 
general &= ~IQS269_SYS_SETTINGS_CLK_DIV; - iqs269->delay_mult = 1; - } /* * Configure the device to automatically switch between normal and low- @@ -997,6 +982,17 @@ general &= ~IQS269_SYS_SETTINGS_DIS_AUTO; general &= ~IQS269_SYS_SETTINGS_PWR_MODE_MASK; + if (!device_property_read_u32(&client->dev, "azoteq,suspend-mode", + &val)) { + if (val > IQS269_SYS_SETTINGS_PWR_MODE_MAX) { + dev_err(&client->dev, "Invalid suspend mode: %u\n", + val); + return -EINVAL; + } + + general |= (val << IQS269_SYS_SETTINGS_PWR_MODE_SHIFT); + } + if (!device_property_read_u32(&client->dev, "azoteq,ulp-update", &val)) { if (val > IQS269_SYS_SETTINGS_ULP_UPDATE_MAX) { @@ -1032,10 +1028,7 @@ static int iqs269_dev_init(struct iqs269_private *iqs269) { - struct iqs269_sys_reg *sys_reg = &iqs269->sys_reg; - struct iqs269_ch_reg *ch_reg; - unsigned int val; - int error, i; + int error; mutex_lock(&iqs269->lock); @@ -1045,38 +1038,17 @@ if (error) goto err_mutex; - for (i = 0; i < IQS269_NUM_CH; i++) { - if (!(sys_reg->active & BIT(i))) - continue; - - ch_reg = &iqs269->ch_reg[i]; - - error = regmap_raw_write(iqs269->regmap, - IQS269_CHx_SETTINGS + i * - sizeof(*ch_reg) / 2, ch_reg, - sizeof(*ch_reg)); - if (error) - goto err_mutex; - } - - /* - * The REDO-ATI and ATI channel selection fields must be written in the - * same block write, so every field between registers 0x80 through 0x8B - * (inclusive) must be written as well. - */ - error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS, sys_reg, - sizeof(*sys_reg)); + error = regmap_raw_write(iqs269->regmap, IQS269_SYS_SETTINGS, + &iqs269->sys_reg, sizeof(iqs269->sys_reg)); if (error) goto err_mutex; - error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, - !(val & IQS269_SYS_FLAGS_IN_ATI), - IQS269_ATI_POLL_SLEEP_US, - IQS269_ATI_POLL_TIMEOUT_US); - if (error) - goto err_mutex; + /* + * The following delay gives the device time to deassert its RDY output + * so as to prevent an interrupt from being serviced prematurely. 
+ */ + usleep_range(2000, 2100); - msleep(IQS269_ATI_STABLE_DELAY_MS); iqs269->ati_current = true; err_mutex: @@ -1088,10 +1060,8 @@ static int iqs269_input_init(struct iqs269_private *iqs269) { struct i2c_client *client = iqs269->client; - struct iqs269_flags flags; unsigned int sw_code, keycode; int error, i, j; - u8 dir_mask, state; iqs269->keypad = devm_input_allocate_device(&client->dev); if (!iqs269->keypad) @@ -1104,23 +1074,7 @@ iqs269->keypad->name = "iqs269a_keypad"; iqs269->keypad->id.bustype = BUS_I2C; - if (iqs269->hall_enable) { - error = regmap_raw_read(iqs269->regmap, IQS269_SYS_FLAGS, - &flags, sizeof(flags)); - if (error) { - dev_err(&client->dev, - "Failed to read initial status: %d\n", error); - return error; - } - } - for (i = 0; i < ARRAY_SIZE(iqs269_events); i++) { - dir_mask = flags.states[IQS269_ST_OFFS_DIR]; - if (!iqs269_events[i].dir_up) - dir_mask = ~dir_mask; - - state = flags.states[iqs269_events[i].st_offs] & dir_mask; - sw_code = iqs269->switches[i].code; for (j = 0; j < IQS269_NUM_CH; j++) { @@ -1133,13 +1087,9 @@ switch (j) { case IQS269_CHx_HALL_ACTIVE: if (iqs269->hall_enable && - iqs269->switches[i].enabled) { + iqs269->switches[i].enabled) input_set_capability(iqs269->keypad, EV_SW, sw_code); - input_report_switch(iqs269->keypad, - sw_code, - state & BIT(j)); - } fallthrough; case IQS269_CHx_HALL_INACTIVE: @@ -1155,14 +1105,6 @@ } } - input_sync(iqs269->keypad); - - error = input_register_device(iqs269->keypad); - if (error) { - dev_err(&client->dev, "Failed to register keypad: %d\n", error); - return error; - } - for (i = 0; i < IQS269_NUM_SL; i++) { if (!iqs269->sys_reg.slider_select[i]) continue; @@ -1222,6 +1164,9 @@ return error; } + if (be16_to_cpu(flags.system) & IQS269_SYS_FLAGS_IN_ATI) + return 0; + error = regmap_raw_read(iqs269->regmap, IQS269_SLIDER_X, slider_x, sizeof(slider_x)); if (error) { @@ -1284,6 +1229,12 @@ input_sync(iqs269->keypad); + /* + * The following completion signals that ATI has finished, any initial + * switch states have been reported and the keypad can be registered. 
+ */ + complete_all(&iqs269->ati_done); + return 0; } @@ -1315,6 +1266,9 @@ if (!iqs269->ati_current || iqs269->hall_enable) return -EPERM; + if (!completion_done(&iqs269->ati_done)) + return -EBUSY; + /* * Unsolicited I2C communication prompts the device to assert its RDY * pin, so disable the interrupt line until the operation is finished @@ -1339,6 +1293,7 @@ struct device_attribute *attr, char *buf) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; struct i2c_client *client = iqs269->client; unsigned int val; int error; @@ -1353,8 +1308,8 @@ if (error) return error; - switch (iqs269->ch_reg[IQS269_CHx_HALL_ACTIVE].rx_enable & - iqs269->ch_reg[IQS269_CHx_HALL_INACTIVE].rx_enable) { + switch (ch_reg[IQS269_CHx_HALL_ACTIVE].rx_enable & + ch_reg[IQS269_CHx_HALL_INACTIVE].rx_enable) { case IQS269_HALL_PAD_R: val &= IQS269_CAL_DATA_A_HALL_BIN_R_MASK; val >>= IQS269_CAL_DATA_A_HALL_BIN_R_SHIFT; @@ -1434,9 +1389,10 @@ struct device_attribute *attr, char *buf) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; return scnprintf(buf, PAGE_SIZE, "%u\n", - iqs269->ch_reg[iqs269->ch_num].rx_enable); + ch_reg[iqs269->ch_num].rx_enable); } static ssize_t rx_enable_store(struct device *dev, @@ -1444,6 +1400,7 @@ size_t count) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); + struct iqs269_ch_reg *ch_reg = iqs269->sys_reg.ch_reg; unsigned int val; int error; @@ -1456,7 +1413,7 @@ mutex_lock(&iqs269->lock); - iqs269->ch_reg[iqs269->ch_num].rx_enable = val; + ch_reg[iqs269->ch_num].rx_enable = val; iqs269->ati_current = false; mutex_unlock(&iqs269->lock); @@ -1568,7 +1525,9 @@ { struct iqs269_private *iqs269 = dev_get_drvdata(dev); - return scnprintf(buf, PAGE_SIZE, "%u\n", iqs269->ati_current); + return scnprintf(buf, PAGE_SIZE, "%u\n", + iqs269->ati_current && + completion_done(&iqs269->ati_done)); } static ssize_t ati_trigger_store(struct device *dev, @@ -1588,6 +1547,7 @@ return count; disable_irq(client->irq); + reinit_completion(&iqs269->ati_done); error = iqs269_dev_init(iqs269); @@ -1597,6 +1557,10 @@ if (error) return error; + if (!wait_for_completion_timeout(&iqs269->ati_done, + msecs_to_jiffies(2000))) + return -ETIMEDOUT; + return count; } @@ -1655,6 +1619,7 @@ } mutex_init(&iqs269->lock); + init_completion(&iqs269->ati_done); error = regmap_raw_read(iqs269->regmap, IQS269_VER_INFO, &ver_info, sizeof(ver_info)); @@ -1690,6 +1655,22 @@ return error; } + if (!wait_for_completion_timeout(&iqs269->ati_done, + msecs_to_jiffies(2000))) { + dev_err(&client->dev, "Failed to complete ATI\n"); + return -ETIMEDOUT; + } + + /* + * The keypad may include one or more switches and is not registered + * until ATI is complete and the initial switch states are read. 
+ */ + error = input_register_device(iqs269->keypad); + if (error) { + dev_err(&client->dev, "Failed to register keypad: %d\n", error); + return error; + } + error = devm_device_add_group(&client->dev, &iqs269_attr_group); if (error) dev_err(&client->dev, "Failed to add attributes: %d\n", error); @@ -1697,113 +1678,61 @@ return error; } -static int __maybe_unused iqs269_suspend(struct device *dev) +static u16 iqs269_general_get(struct iqs269_private *iqs269) +{ + u16 general = be16_to_cpu(iqs269->sys_reg.general); + + general &= ~IQS269_SYS_SETTINGS_REDO_ATI; + general &= ~IQS269_SYS_SETTINGS_ACK_RESET; + + return general | IQS269_SYS_SETTINGS_DIS_AUTO; +} + +static int iqs269_suspend(struct device *dev) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); struct i2c_client *client = iqs269->client; - unsigned int val; int error; + u16 general = iqs269_general_get(iqs269); - if (!iqs269->suspend_mode) + if (!(general & IQS269_SYS_SETTINGS_PWR_MODE_MASK)) return 0; disable_irq(client->irq); - /* - * Automatic power mode switching must be disabled before the device is - * forced into any particular power mode. In this case, the device will - * transition into normal-power mode. - */ - error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, - IQS269_SYS_SETTINGS_DIS_AUTO, ~0); - if (error) - goto err_irq; - - /* - * The following check ensures the device has completed its transition - * into normal-power mode before a manual mode switch is performed. - */ - error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, - !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK), - IQS269_PWR_MODE_POLL_SLEEP_US, - IQS269_PWR_MODE_POLL_TIMEOUT_US); - if (error) - goto err_irq; - - error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, - IQS269_SYS_SETTINGS_PWR_MODE_MASK, - iqs269->suspend_mode << - IQS269_SYS_SETTINGS_PWR_MODE_SHIFT); - if (error) - goto err_irq; + error = regmap_write(iqs269->regmap, IQS269_SYS_SETTINGS, general); - /* - * This last check ensures the device has completed its transition into - * the desired power mode to prevent any spurious interrupts from being - * triggered after iqs269_suspend has already returned. - */ - error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, - (val & IQS269_SYS_FLAGS_PWR_MODE_MASK) - == (iqs269->suspend_mode << - IQS269_SYS_FLAGS_PWR_MODE_SHIFT), - IQS269_PWR_MODE_POLL_SLEEP_US, - IQS269_PWR_MODE_POLL_TIMEOUT_US); - -err_irq: iqs269_irq_wait(); enable_irq(client->irq); return error; } -static int __maybe_unused iqs269_resume(struct device *dev) +static int iqs269_resume(struct device *dev) { struct iqs269_private *iqs269 = dev_get_drvdata(dev); struct i2c_client *client = iqs269->client; - unsigned int val; int error; + u16 general = iqs269_general_get(iqs269); - if (!iqs269->suspend_mode) + if (!(general & IQS269_SYS_SETTINGS_PWR_MODE_MASK)) return 0; disable_irq(client->irq); - error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, - IQS269_SYS_SETTINGS_PWR_MODE_MASK, 0); - if (error) - goto err_irq; - - /* - * This check ensures the device has returned to normal-power mode - * before automatic power mode switching is re-enabled. 
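A minimal sketch of the completion-based gating the iqs269a changes rely on, with hypothetical names (struct my_dev, my_irq_thread, my_probe); the init_completion()/complete_all()/wait_for_completion_timeout() calls mirror how the patch defers input_register_device() until ATI has finished:

	#include <linux/completion.h>
	#include <linux/interrupt.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct my_dev {				/* hypothetical device context */
		struct completion calib_done;
	};

	static irqreturn_t my_irq_thread(int irq, void *dev_id)
	{
		struct my_dev *d = dev_id;

		/* ... read status, report initial input state ... */
		complete_all(&d->calib_done);	/* wake current and future waiters */
		return IRQ_HANDLED;
	}

	static int my_probe(struct my_dev *d)
	{
		init_completion(&d->calib_done);

		/* ... request the threaded IRQ, kick off calibration ... */

		if (!wait_for_completion_timeout(&d->calib_done,
						 msecs_to_jiffies(2000)))
			return -ETIMEDOUT;

		/* only past this point is it safe to register the input device */
		return 0;
	}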
- */ - error = regmap_read_poll_timeout(iqs269->regmap, IQS269_SYS_FLAGS, val, - !(val & IQS269_SYS_FLAGS_PWR_MODE_MASK), - IQS269_PWR_MODE_POLL_SLEEP_US, - IQS269_PWR_MODE_POLL_TIMEOUT_US); - if (error) - goto err_irq; - - error = regmap_update_bits(iqs269->regmap, IQS269_SYS_SETTINGS, - IQS269_SYS_SETTINGS_DIS_AUTO, 0); - if (error) - goto err_irq; - - /* - * This step reports any events that may have been "swallowed" as a - * result of polling PWR_MODE (which automatically acknowledges any - * pending interrupts). - */ - error = iqs269_report(iqs269); + error = regmap_write(iqs269->regmap, IQS269_SYS_SETTINGS, + general & ~IQS269_SYS_SETTINGS_PWR_MODE_MASK); + if (!error) + error = regmap_write(iqs269->regmap, IQS269_SYS_SETTINGS, + general & ~IQS269_SYS_SETTINGS_DIS_AUTO); -err_irq: iqs269_irq_wait(); enable_irq(client->irq); return error; } -static SIMPLE_DEV_PM_OPS(iqs269_pm, iqs269_suspend, iqs269_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(iqs269_pm, iqs269_suspend, iqs269_resume); static const struct of_device_id iqs269_of_match[] = { { .compatible = "azoteq,iqs269a" }, @@ -1815,7 +1744,7 @@ .driver = { .name = "iqs269a", .of_match_table = iqs269_of_match, - .pm = &iqs269_pm, + .pm = pm_sleep_ptr(&iqs269_pm), }, .probe_new = iqs269_probe, }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/input/touchscreen/ads7846.c +++ linux-aws-5.15-5.15.0/drivers/input/touchscreen/ads7846.c @@ -851,14 +851,8 @@ if (x == MAX_12BIT) x = 0; - if (ts->model == 7843) { + if (ts->model == 7843 || ts->model == 7845) { Rt = ts->pressure_max / 2; - } else if (ts->model == 7845) { - if (get_pendown_state(ts)) - Rt = ts->pressure_max / 2; - else - Rt = 0; - dev_vdbg(&ts->spi->dev, "x/y: %d/%d, PD %d\n", x, y, Rt); } else if (likely(x && z1)) { /* compute touch pressure resistance using equation #2 */ Rt = z2; @@ -1074,6 +1068,9 @@ struct ads7846_buf_layout *l = &packet->l[cmd_idx]; unsigned int max_count; + if (cmd_idx == packet->cmds - 1) + cmd_idx = ADS7846_PWDOWN; + if (ads7846_cmd_need_settle(cmd_idx)) max_count = packet->count + packet->count_skip; else @@ -1110,7 +1107,12 @@ for (cmd_idx = 0; cmd_idx < packet->cmds; cmd_idx++) { struct ads7846_buf_layout *l = &packet->l[cmd_idx]; - u8 cmd = ads7846_get_cmd(cmd_idx, vref); + u8 cmd; + + if (cmd_idx == packet->cmds - 1) + cmd_idx = ADS7846_PWDOWN; + + cmd = ads7846_get_cmd(cmd_idx, vref); for (b = 0; b < l->count; b++) packet->tx[l->offset + b].cmd = cmd; @@ -1323,8 +1325,9 @@ pdata->y_min ? : 0, pdata->y_max ? : MAX_12BIT, 0, 0); - input_set_abs_params(input_dev, ABS_PRESSURE, - pdata->pressure_min, pdata->pressure_max, 0, 0); + if (ts->model != 7845) + input_set_abs_params(input_dev, ABS_PRESSURE, + pdata->pressure_min, pdata->pressure_max, 0, 0); /* * Parse common framework properties. 
Must be done here to ensure the only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/leds/flash/leds-sgm3140.c +++ linux-aws-5.15-5.15.0/drivers/leds/flash/leds-sgm3140.c @@ -114,8 +114,11 @@ "failed to enable regulator: %d\n", ret); return ret; } + gpiod_set_value_cansleep(priv->flash_gpio, 0); gpiod_set_value_cansleep(priv->enable_gpio, 1); } else { + del_timer_sync(&priv->powerdown_timer); + gpiod_set_value_cansleep(priv->flash_gpio, 0); gpiod_set_value_cansleep(priv->enable_gpio, 0); ret = regulator_disable(priv->vin_regulator); if (ret) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/leds/leds-aw2013.c +++ linux-aws-5.15-5.15.0/drivers/leds/leds-aw2013.c @@ -397,6 +397,7 @@ regulator_disable(chip->vcc_regulator); error: + mutex_unlock(&chip->mutex); mutex_destroy(&chip->mutex); return ret; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ linux-aws-5.15-5.15.0/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -113,6 +113,7 @@ { unsigned pat; unsigned plane; + int ret = 0; tpg->max_line_width = max_w; for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) { @@ -121,14 +122,18 @@ tpg->lines[pat][plane] = vzalloc(array3_size(max_w, 2, pixelsz)); - if (!tpg->lines[pat][plane]) - return -ENOMEM; + if (!tpg->lines[pat][plane]) { + ret = -ENOMEM; + goto free_lines; + } if (plane == 0) continue; tpg->downsampled_lines[pat][plane] = vzalloc(array3_size(max_w, 2, pixelsz)); - if (!tpg->downsampled_lines[pat][plane]) - return -ENOMEM; + if (!tpg->downsampled_lines[pat][plane]) { + ret = -ENOMEM; + goto free_lines; + } } } for (plane = 0; plane < TPG_MAX_PLANES; plane++) { @@ -136,18 +141,45 @@ tpg->contrast_line[plane] = vzalloc(array_size(pixelsz, max_w)); - if (!tpg->contrast_line[plane]) - return -ENOMEM; + if (!tpg->contrast_line[plane]) { + ret = -ENOMEM; + goto free_contrast_line; + } tpg->black_line[plane] = vzalloc(array_size(pixelsz, max_w)); - if (!tpg->black_line[plane]) - return -ENOMEM; + if (!tpg->black_line[plane]) { + ret = -ENOMEM; + goto free_contrast_line; + } tpg->random_line[plane] = vzalloc(array3_size(max_w, 2, pixelsz)); - if (!tpg->random_line[plane]) - return -ENOMEM; + if (!tpg->random_line[plane]) { + ret = -ENOMEM; + goto free_contrast_line; + } } return 0; + +free_contrast_line: + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + vfree(tpg->contrast_line[plane]); + vfree(tpg->black_line[plane]); + vfree(tpg->random_line[plane]); + tpg->contrast_line[plane] = NULL; + tpg->black_line[plane] = NULL; + tpg->random_line[plane] = NULL; + } +free_lines: + for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) + for (plane = 0; plane < TPG_MAX_PLANES; plane++) { + vfree(tpg->lines[pat][plane]); + tpg->lines[pat][plane] = NULL; + if (plane == 0) + continue; + vfree(tpg->downsampled_lines[pat][plane]); + tpg->downsampled_lines[pat][plane] = NULL; + } + return ret; } EXPORT_SYMBOL_GPL(tpg_alloc); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/media/i2c/tc358743.c +++ linux-aws-5.15-5.15.0/drivers/media/i2c/tc358743.c @@ -2108,9 +2108,6 @@ state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24; sd->dev = &client->dev; - err = v4l2_async_register_subdev(sd); - if (err < 0) - goto err_hdl; mutex_init(&state->confctl_mutex); @@ -2168,6 +2165,10 @@ if (err) goto err_work_queues; + err = v4l2_async_register_subdev(sd); + if (err < 0) + goto err_work_queues; + v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name, client->addr << 1, client->adapter->name); only in patch2: unchanged: --- 
linux-aws-5.15-5.15.0.orig/drivers/media/pci/ttpci/budget-av.c +++ linux-aws-5.15-5.15.0/drivers/media/pci/ttpci/budget-av.c @@ -1462,7 +1462,8 @@ budget_av->has_saa7113 = 1; err = saa7146_vv_init(dev, &vv_data); if (err != 0) { - /* fixme: proper cleanup here */ + ttpci_budget_deinit(&budget_av->budget); + kfree(budget_av); ERR("cannot init vv subsystem\n"); return err; } @@ -1471,9 +1472,10 @@ vv_data.vid_ops.vidioc_s_input = vidioc_s_input; if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) { - /* fixme: proper cleanup here */ - ERR("cannot register capture v4l2 device\n"); saa7146_vv_release(dev); + ttpci_budget_deinit(&budget_av->budget); + kfree(budget_av); + ERR("cannot register capture v4l2 device\n"); return err; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c +++ linux-aws-5.15-5.15.0/drivers/media/platform/mtk-mdp/mtk_mdp_vpu.c @@ -26,7 +26,7 @@ vpu->inst_addr = msg->vpu_inst_addr; } -static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len, +static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len, void *priv) { const struct mdp_ipi_comm_ack *msg = data; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/media/platform/mtk-vpu/mtk_vpu.h +++ linux-aws-5.15-5.15.0/drivers/media/platform/mtk-vpu/mtk_vpu.h @@ -17,7 +17,7 @@ * VPU interfaces with other blocks by share memory and interrupt. */ -typedef void (*ipi_handler_t) (const void *data, +typedef void (*ipi_handler_t) (void *data, unsigned int len, void *priv); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c +++ linux-aws-5.15-5.15.0/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c @@ -66,6 +66,7 @@ struct vb2_v4l2_buffer *src, *dst; unsigned int hstep, vstep; dma_addr_t addr; + int i; src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); @@ -160,6 +161,26 @@ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep); deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep); + /* neutral filter coefficients */ + deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL, + DEINTERLACE_FRM_CTRL_COEF_ACCESS); + readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val, + val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40); + + for (i = 0; i < 32; i++) { + deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4, + DEINTERLACE_IDENTITY_COEF); + deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4, + DEINTERLACE_IDENTITY_COEF); + deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4, + DEINTERLACE_IDENTITY_COEF); + deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4, + DEINTERLACE_IDENTITY_COEF); + } + + deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL, + DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0); + deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL, DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK, DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field)); @@ -248,7 +269,6 @@ static void deinterlace_init(struct deinterlace_dev *dev) { u32 val; - int i; deinterlace_write(dev, DEINTERLACE_BYPASS, DEINTERLACE_BYPASS_CSC); @@ -284,27 +304,7 @@ deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF, DEINTERLACE_CHROMA_DIFF_TH_MSK, - DEINTERLACE_CHROMA_DIFF_TH(5)); - - /* neutral filter coefficients */ - deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL, - DEINTERLACE_FRM_CTRL_COEF_ACCESS); - readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val, - val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40); - - for (i = 0; i < 32; i++) { - deinterlace_write(dev, 
DEINTERLACE_CH0_HORZ_COEF0 + i * 4, - DEINTERLACE_IDENTITY_COEF); - deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4, - DEINTERLACE_IDENTITY_COEF); - deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4, - DEINTERLACE_IDENTITY_COEF); - deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4, - DEINTERLACE_IDENTITY_COEF); - } - - deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL, - DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0); + DEINTERLACE_CHROMA_DIFF_TH(31)); } static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file) @@ -933,11 +933,18 @@ return ret; } + ret = reset_control_deassert(dev->rstc); + if (ret) { + dev_err(dev->dev, "Failed to apply reset\n"); + + goto err_exclusive_rate; + } + ret = clk_prepare_enable(dev->bus_clk); if (ret) { dev_err(dev->dev, "Failed to enable bus clock\n"); - goto err_exclusive_rate; + goto err_rst; } ret = clk_prepare_enable(dev->mod_clk); @@ -954,23 +961,16 @@ goto err_mod_clk; } - ret = reset_control_deassert(dev->rstc); - if (ret) { - dev_err(dev->dev, "Failed to apply reset\n"); - - goto err_ram_clk; - } - deinterlace_init(dev); return 0; -err_ram_clk: - clk_disable_unprepare(dev->ram_clk); err_mod_clk: clk_disable_unprepare(dev->mod_clk); err_bus_clk: clk_disable_unprepare(dev->bus_clk); +err_rst: + reset_control_assert(dev->rstc); err_exclusive_rate: clk_rate_exclusive_put(dev->mod_clk); @@ -981,11 +981,12 @@ { struct deinterlace_dev *dev = dev_get_drvdata(device); - reset_control_assert(dev->rstc); - clk_disable_unprepare(dev->ram_clk); clk_disable_unprepare(dev->mod_clk); clk_disable_unprepare(dev->bus_clk); + + reset_control_assert(dev->rstc); + clk_rate_exclusive_put(dev->mod_clk); return 0; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/media/usb/go7007/go7007-driver.c +++ linux-aws-5.15-5.15.0/drivers/media/usb/go7007/go7007-driver.c @@ -80,7 +80,7 @@ const struct firmware *fw_entry; char fw_name[] = "go7007/go7007fw.bin"; void *bounce; - int fw_len, rv = 0; + int fw_len; u16 intr_val, intr_data; if (go->boot_fw == NULL) { @@ -109,9 +109,11 @@ go7007_read_interrupt(go, &intr_val, &intr_data) < 0 || (intr_val & ~0x1) != 0x5a5a) { v4l2_err(go, "error transferring firmware\n"); - rv = -1; + kfree(go->boot_fw); + go->boot_fw = NULL; + return -1; } - return rv; + return 0; } MODULE_FIRMWARE("go7007/go7007fw.bin"); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/media/usb/go7007/go7007-usb.c +++ linux-aws-5.15-5.15.0/drivers/media/usb/go7007/go7007-usb.c @@ -1201,7 +1201,9 @@ u16 channel; /* read channel number from GPIO[1:0] */ - go7007_read_addr(go, 0x3c81, &channel); + if (go7007_read_addr(go, 0x3c81, &channel)) + goto allocfail; + channel &= 0x3; go->board_id = GO7007_BOARDID_ADLINK_MPG24; usb->board = board = &board_adlink_mpg24; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/media/usb/pvrusb2/pvrusb2-dvb.c +++ linux-aws-5.15-5.15.0/drivers/media/usb/pvrusb2/pvrusb2-dvb.c @@ -88,8 +88,10 @@ return stat; } -static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap) +static void pvr2_dvb_notify(void *ptr) { + struct pvr2_dvb_adapter *adap = ptr; + wake_up(&adap->buffer_wait_data); } @@ -149,7 +151,7 @@ } pvr2_stream_set_callback(pvr->video_stream.stream, - (pvr2_stream_callback) pvr2_dvb_notify, adap); + pvr2_dvb_notify, adap); ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT); if (ret < 0) return ret; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c +++ 
linux-aws-5.15-5.15.0/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c @@ -1037,8 +1037,10 @@ } -static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp) +static void pvr2_v4l2_notify(void *ptr) { + struct pvr2_v4l2_fh *fhp = ptr; + wake_up(&fhp->wait_data); } @@ -1071,7 +1073,7 @@ hdw = fh->channel.mc_head->hdw; sp = fh->pdi->stream->stream; - pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh); + pvr2_stream_set_callback(sp, pvr2_v4l2_notify, fh); pvr2_hdw_set_stream_type(hdw,fh->pdi->config); if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret; return pvr2_ioread_set_enabled(fh->rhp,!0); @@ -1202,11 +1204,6 @@ dip->minor_type = pvr2_v4l_type_video; nr_ptr = video_nr; caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO; - if (!dip->stream) { - pr_err(KBUILD_MODNAME - ": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n"); - return; - } break; case VFL_TYPE_VBI: dip->config = pvr2_config_vbi; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/mmc/host/sdhci-xenon-phy.c +++ linux-aws-5.15-5.15.0/drivers/mmc/host/sdhci-xenon-phy.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "sdhci-pltfm.h" @@ -109,6 +110,8 @@ #define XENON_EMMC_PHY_LOGIC_TIMING_ADJUST (XENON_EMMC_PHY_REG_BASE + 0x18) #define XENON_LOGIC_TIMING_VALUE 0x00AA8977 +#define XENON_MAX_PHY_TIMEOUT_LOOPS 100 + /* * List offset of PHY registers and some special register values * in eMMC PHY 5.0 or eMMC PHY 5.1 @@ -216,6 +219,19 @@ return 0; } +static int xenon_check_stability_internal_clk(struct sdhci_host *host) +{ + u32 reg; + int err; + + err = read_poll_timeout(sdhci_readw, reg, reg & SDHCI_CLOCK_INT_STABLE, + 1100, 20000, false, host, SDHCI_CLOCK_CONTROL); + if (err) + dev_err(mmc_dev(host->mmc), "phy_init: Internal clock never stabilized.\n"); + + return err; +} + /* * eMMC 5.0/5.1 PHY init/re-init. * eMMC PHY init should be executed after: @@ -232,6 +248,11 @@ struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); struct xenon_emmc_phy_regs *phy_regs = priv->emmc_phy_regs; + int ret = xenon_check_stability_internal_clk(host); + + if (ret) + return ret; + reg = sdhci_readl(host, phy_regs->timing_adj); reg |= XENON_PHY_INITIALIZAION; sdhci_writel(host, reg, phy_regs->timing_adj); @@ -259,18 +280,27 @@ /* get the wait time */ wait /= clock; wait++; - /* wait for host eMMC PHY init completes */ - udelay(wait); - reg = sdhci_readl(host, phy_regs->timing_adj); - reg &= XENON_PHY_INITIALIZAION; - if (reg) { + /* + * AC5X spec says bit must be polled until zero. + * We see cases in which timeout can take longer + * than the standard calculation on AC5X, which is + * expected following the spec comment above. + * According to the spec, we must wait as long as + * it takes for that bit to toggle on AC5X. 
+ * Cap that with 100 delay loops so we won't get + * stuck here forever: + */ + + ret = read_poll_timeout(sdhci_readl, reg, + !(reg & XENON_PHY_INITIALIZAION), + wait, XENON_MAX_PHY_TIMEOUT_LOOPS * wait, + false, host, phy_regs->timing_adj); + if (ret) dev_err(mmc_dev(host->mmc), "eMMC PHY init cannot complete after %d us\n", - wait); - return -ETIMEDOUT; - } + wait * XENON_MAX_PHY_TIMEOUT_LOOPS); - return 0; + return ret; } #define ARMADA_3700_SOC_PAD_1_8V 0x1 only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/mtd/nand/raw/lpc32xx_mlc.c +++ linux-aws-5.15-5.15.0/drivers/mtd/nand/raw/lpc32xx_mlc.c @@ -303,8 +303,9 @@ return 0; } -static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host) +static irqreturn_t lpc3xxx_nand_irq(int irq, void *data) { + struct lpc32xx_nand_host *host = data; uint8_t sr; /* Clear interrupt flag by reading status */ @@ -779,7 +780,7 @@ goto release_dma_chan; } - if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq, + if (request_irq(host->irq, &lpc3xxx_nand_irq, IRQF_TRIGGER_HIGH, DRV_NAME, host)) { dev_err(&pdev->dev, "Error requesting NAND IRQ\n"); res = -ENXIO; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/mtd/nand/raw/sunxi_nand.c +++ linux-aws-5.15-5.15.0/drivers/mtd/nand/raw/sunxi_nand.c @@ -1609,7 +1609,7 @@ if (section < ecc->steps) oobregion->length = 4; else - oobregion->offset = mtd->oobsize - oobregion->offset; + oobregion->length = mtd->oobsize - oobregion->offset; return 0; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/dsa/mt7530.h +++ linux-aws-5.15-5.15.0/drivers/net/dsa/mt7530.h @@ -64,8 +64,34 @@ /* Registers for BPDU and PAE frame control*/ #define MT753X_BPC 0x24 +#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22) +#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x) +#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16) +#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x) +#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6) +#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x) #define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0) +/* Register for :01 and :02 MAC DA frame control */ +#define MT753X_RGAC1 0x28 +#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22) +#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x) +#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16) +#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x) +#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6) +#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x) +#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0) + +/* Register for :03 and :0E MAC DA frame control */ +#define MT753X_RGAC2 0x2c +#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22) +#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x) +#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16) +#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x) +#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6) +#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x) +#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0) + enum mt753x_bpdu_port_fw { MT753X_BPDU_FOLLOW_MFC, MT753X_BPDU_CPU_EXCLUDE = 4, @@ -245,6 +271,7 @@ enum mt7530_vlan_port_eg_tag { MT7530_VLAN_EG_DISABLED = 0, MT7530_VLAN_EG_CONSISTENT = 1, + MT7530_VLAN_EG_UNTAGGED = 4, }; enum mt7530_vlan_port_attr { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -1002,9 +1002,6 @@ static inline void 
bnx2x_free_rx_mem_pool(struct bnx2x *bp, struct bnx2x_alloc_pool *pool) { - if (!pool->page) - return; - put_page(pool->page); pool->page = NULL; @@ -1015,6 +1012,9 @@ { int i; + if (!fp->page_pool.page) + return; + if (fp->mode == TPA_MODE_DISABLED) return; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -120,7 +120,7 @@ u64 ns = nsec; u32 sec_h; - if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags)) + if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags)) return; /* Since the BD does not have enough space for the higher 16 bits of only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c @@ -344,10 +344,10 @@ list) { if ((vid == 0 || mact_entry->vid == vid) && ether_addr_equal(addr, mact_entry->mac)) { + sparx5_mact_forget(sparx5, addr, mact_entry->vid); + list_del(&mact_entry->list); devm_kfree(sparx5->dev, mact_entry); - - sparx5_mact_forget(sparx5, addr, mact_entry->vid); } } mutex_unlock(&sparx5->mact_lock); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/ethernet/netronome/nfp/flower/conntrack.h +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/netronome/nfp/flower/conntrack.h @@ -108,6 +108,7 @@ * @cookie: Flow cookie, same as original TC flow, used as key * @list_node: Used by the list * @chain_index: Chain index of the original flow + * @goto_chain_index: goto chain index of the flow * @netdev: netdev structure. * @type: Type of pre-entry from enum ct_entry_type * @zt: Reference to the zone table this belongs to @@ -120,6 +121,7 @@ unsigned long cookie; struct list_head list_node; u32 chain_index; + u32 goto_chain_index; enum ct_entry_type type; struct net_device *netdev; struct nfp_fl_ct_zone_entry *zt; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ linux-aws-5.15-5.15.0/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -308,6 +308,11 @@ acti_netdevs = kmalloc_array(entry->slave_cnt, sizeof(*acti_netdevs), GFP_KERNEL); + if (!acti_netdevs) { + schedule_delayed_work(&lag->work, + NFP_FL_LAG_DELAY); + continue; + } /* Include sanity check in the loop. It may be that a bond has * changed between processing the last notification and the only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/usb/sr9800.c +++ linux-aws-5.15-5.15.0/drivers/net/usb/sr9800.c @@ -736,7 +736,9 @@ data->eeprom_len = SR9800_EEPROM_LEN; - usbnet_get_endpoints(dev, intf); + ret = usbnet_get_endpoints(dev, intf); + if (ret) + goto out; /* LED Setting Rule : * AABB:CCDD only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -844,6 +844,10 @@ } ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT]; + if (!ev) { + kfree(tb); + return -EPROTO; + } arg->desc_id = ev->desc_id; arg->status = ev->status; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/broadcom/b43/dma.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/b43/dma.c @@ -1399,7 +1399,7 @@ should_inject_overflow(ring)) { /* This TX ring is full. 
*/ unsigned int skb_mapping = skb_get_queue_mapping(skb); - ieee80211_stop_queue(dev->wl->hw, skb_mapping); + b43_stop_queue(dev, skb_mapping); dev->wl->tx_queue_stopped[skb_mapping] = true; ring->stopped = true; if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { @@ -1570,7 +1570,7 @@ } else { /* If the driver queue is running wake the corresponding * mac80211 queue. */ - ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); + b43_wake_queue(dev, ring->queue_prio); if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/broadcom/b43/main.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/b43/main.c @@ -2587,7 +2587,8 @@ start_ieee80211: wl->hw->queues = B43_QOS_QUEUE_NUM; - if (!modparam_qos || dev->fw.opensource) + if (!modparam_qos || dev->fw.opensource || + dev->dev->chip_id == BCMA_CHIP_ID_BCM4331) wl->hw->queues = 1; err = ieee80211_register_hw(wl->hw); @@ -3603,7 +3604,7 @@ err = b43_dma_tx(dev, skb); if (err == -ENOSPC) { wl->tx_queue_stopped[queue_num] = true; - ieee80211_stop_queue(wl->hw, queue_num); + b43_stop_queue(dev, queue_num); skb_queue_head(&wl->tx_queue[queue_num], skb); break; } @@ -3627,6 +3628,7 @@ struct sk_buff *skb) { struct b43_wl *wl = hw_to_b43_wl(hw); + u16 skb_queue_mapping; if (unlikely(skb->len < 2 + 2 + 6)) { /* Too short, this can't be a valid frame. */ @@ -3635,12 +3637,12 @@ } B43_WARN_ON(skb_shinfo(skb)->nr_frags); - skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb); - if (!wl->tx_queue_stopped[skb->queue_mapping]) { + skb_queue_mapping = skb_get_queue_mapping(skb); + skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb); + if (!wl->tx_queue_stopped[skb_queue_mapping]) ieee80211_queue_work(wl->hw, &wl->tx_work); - } else { - ieee80211_stop_queue(wl->hw, skb->queue_mapping); - } + else + b43_stop_queue(wl->current_dev, skb_queue_mapping); } static void b43_qos_params_upload(struct b43_wldev *dev, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/broadcom/b43/pio.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/b43/pio.c @@ -525,7 +525,7 @@ if (total_len > (q->buffer_size - q->buffer_used)) { /* Not enough memory on the queue. */ err = -EBUSY; - ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); + b43_stop_queue(dev, skb_get_queue_mapping(skb)); q->stopped = true; goto out; } @@ -552,7 +552,7 @@ if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) || (q->free_packet_slots == 0)) { /* The queue is full. 
*/ - ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb)); + b43_stop_queue(dev, skb_get_queue_mapping(skb)); q->stopped = true; } @@ -587,7 +587,7 @@ list_add(&pack->list, &q->packets_list); if (q->stopped) { - ieee80211_wake_queue(dev->wl->hw, q->queue_prio); + b43_wake_queue(dev, q->queue_prio); q->stopped = false; } } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c @@ -383,8 +383,9 @@ return sh; } -static void wlc_phy_timercb_phycal(struct brcms_phy *pi) +static void wlc_phy_timercb_phycal(void *ptr) { + struct brcms_phy *pi = ptr; uint delay = 5; if (PHY_PERICAL_MPHASE_PENDING(pi)) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c @@ -57,12 +57,11 @@ } struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim, - void (*fn)(struct brcms_phy *pi), + void (*fn)(void *pi), void *arg, const char *name) { return (struct wlapi_timer *) - brcms_init_timer(physhim->wl, (void (*)(void *))fn, - arg, name); + brcms_init_timer(physhim->wl, fn, arg, name); } void wlapi_free_timer(struct wlapi_timer *t) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h +++ linux-aws-5.15-5.15.0/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h @@ -131,7 +131,7 @@ /* PHY to WL utility functions */ struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim, - void (*fn)(struct brcms_phy *pi), + void (*fn)(void *pi), void *arg, const char *name); void wlapi_free_timer(struct wlapi_timer *t); void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1525,7 +1525,6 @@ kfree(priv->nvm_data); /*netif_stop_queue(dev); */ - flush_workqueue(priv->workqueue); /* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes * priv->workqueue... 
so we can't take down the workqueue only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/marvell/libertas/cmd.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/marvell/libertas/cmd.c @@ -1133,7 +1133,7 @@ if (!cmdarray[i].cmdbuf) { lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n"); ret = -1; - goto done; + goto free_cmd_array; } } @@ -1141,8 +1141,17 @@ init_waitqueue_head(&cmdarray[i].cmdwait_q); lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]); } - ret = 0; + return 0; +free_cmd_array: + for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { + if (cmdarray[i].cmdbuf) { + kfree(cmdarray[i].cmdbuf); + cmdarray[i].cmdbuf = NULL; + } + } + kfree(priv->cmd_array); + priv->cmd_array = NULL; done: return ret; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/quantenna/qtnfmac/core.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -811,13 +811,11 @@ bus->fw_state = QTNF_FW_STATE_DETACHED; if (bus->workqueue) { - flush_workqueue(bus->workqueue); destroy_workqueue(bus->workqueue); bus->workqueue = NULL; } if (bus->hprio_workqueue) { - flush_workqueue(bus->hprio_workqueue); destroy_workqueue(bus->hprio_workqueue); bus->hprio_workqueue = NULL; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -387,7 +387,6 @@ return 0; error: - flush_workqueue(pcie_priv->workqueue); destroy_workqueue(pcie_priv->workqueue); pci_set_drvdata(pdev, NULL); return ret; @@ -416,7 +415,6 @@ qtnf_core_detach(bus); netif_napi_del(&bus->mux_napi); - flush_workqueue(priv->workqueue); destroy_workqueue(priv->workqueue); tasklet_kill(&priv->reclaim_tq); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/net/wireless/st/cw1200/bh.c +++ linux-aws-5.15-5.15.0/drivers/net/wireless/st/cw1200/bh.c @@ -85,8 +85,6 @@ atomic_inc(&priv->bh_term); wake_up(&priv->bh_wq); - flush_workqueue(priv->bh_workqueue); - destroy_workqueue(priv->bh_workqueue); priv->bh_workqueue = NULL; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/ntb/core.c +++ linux-aws-5.15-5.15.0/drivers/ntb/core.c @@ -100,6 +100,8 @@ int ntb_register_device(struct ntb_dev *ntb) { + int ret; + if (!ntb) return -EINVAL; if (!ntb->pdev) @@ -120,7 +122,11 @@ ntb->ctx_ops = NULL; spin_lock_init(&ntb->ctx_lock); - return device_register(&ntb->dev); + ret = device_register(&ntb->dev); + if (ret) + put_device(&ntb->dev); + + return ret; } EXPORT_SYMBOL(ntb_register_device); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/nvme/host/fc.c +++ linux-aws-5.15-5.15.0/drivers/nvme/host/fc.c @@ -220,11 +220,6 @@ static DEFINE_IDA(nvme_fc_local_port_cnt); static DEFINE_IDA(nvme_fc_ctrl_cnt); -static struct workqueue_struct *nvme_fc_wq; - -static bool nvme_fc_waiting_to_unload; -static DECLARE_COMPLETION(nvme_fc_unload_proceed); - /* * These items are short-term. They will eventually be moved into * a generic FC class. See comments in module init. 
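
As background for the drivers/ntb/core.c hunk above: once device_register() has been called on the embedded struct device, that device holds a reference, so a registration failure has to be unwound with put_device() (which invokes the release callback) instead of freeing the object directly or just returning the error. The hunk applies exactly that rule to ntb_register_device(). A minimal, hypothetical illustration of the pattern follows — the example_* names are invented and are not part of this patch or of the NTB core:

#include <linux/device.h>
#include <linux/slab.h>

struct example_dev {
	struct device dev;
};

static void example_dev_release(struct device *dev)
{
	/* Called when the last reference is dropped. */
	kfree(container_of(dev, struct example_dev, dev));
}

static int example_dev_add(void)
{
	struct example_dev *edev;
	int ret;

	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return -ENOMEM;

	edev->dev.release = example_dev_release;
	dev_set_name(&edev->dev, "example0");

	ret = device_register(&edev->dev);
	if (ret)
		/* Frees edev through ->release(); do not kfree() here. */
		put_device(&edev->dev);

	return ret;
}
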
@@ -254,8 +249,6 @@ /* remove from transport list */ spin_lock_irqsave(&nvme_fc_lock, flags); list_del(&lport->port_list); - if (nvme_fc_waiting_to_unload && list_empty(&nvme_fc_lport_list)) - complete(&nvme_fc_unload_proceed); spin_unlock_irqrestore(&nvme_fc_lock, flags); ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); @@ -3904,10 +3897,6 @@ { int ret; - nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0); - if (!nvme_fc_wq) - return -ENOMEM; - /* * NOTE: * It is expected that in the future the kernel will combine @@ -3925,7 +3914,7 @@ ret = class_register(&fc_class); if (ret) { pr_err("couldn't register class fc\n"); - goto out_destroy_wq; + return ret; } /* @@ -3949,8 +3938,6 @@ device_destroy(&fc_class, MKDEV(0, 0)); out_destroy_class: class_unregister(&fc_class); -out_destroy_wq: - destroy_workqueue(nvme_fc_wq); return ret; } @@ -3970,45 +3957,23 @@ spin_unlock(&rport->lock); } -static void -nvme_fc_cleanup_for_unload(void) +static void __exit nvme_fc_exit_module(void) { struct nvme_fc_lport *lport; struct nvme_fc_rport *rport; - - list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { - list_for_each_entry(rport, &lport->endp_list, endp_list) { - nvme_fc_delete_controllers(rport); - } - } -} - -static void __exit nvme_fc_exit_module(void) -{ unsigned long flags; - bool need_cleanup = false; spin_lock_irqsave(&nvme_fc_lock, flags); - nvme_fc_waiting_to_unload = true; - if (!list_empty(&nvme_fc_lport_list)) { - need_cleanup = true; - nvme_fc_cleanup_for_unload(); - } + list_for_each_entry(lport, &nvme_fc_lport_list, port_list) + list_for_each_entry(rport, &lport->endp_list, endp_list) + nvme_fc_delete_controllers(rport); spin_unlock_irqrestore(&nvme_fc_lock, flags); - if (need_cleanup) { - pr_info("%s: waiting for ctlr deletes\n", __func__); - wait_for_completion(&nvme_fc_unload_proceed); - pr_info("%s: ctrl deletes complete\n", __func__); - } + flush_workqueue(nvme_delete_wq); nvmf_unregister_transport(&nvme_fc_transport); - ida_destroy(&nvme_fc_local_port_cnt); - ida_destroy(&nvme_fc_ctrl_cnt); - device_destroy(&fc_class, MKDEV(0, 0)); class_unregister(&fc_class); - destroy_workqueue(nvme_fc_wq); } module_init(nvme_fc_init_module); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/pci/endpoint/functions/Kconfig +++ linux-aws-5.15-5.15.0/drivers/pci/endpoint/functions/Kconfig @@ -25,3 +25,14 @@ device tree. If in doubt, say "N" to disable Endpoint NTB driver. + +config PCI_EPF_VNTB + tristate "PCI Endpoint NTB driver" + depends on PCI_ENDPOINT + select CONFIGFS_FS + help + Select this configuration option to enable the Non-Transparent + Bridge (NTB) driver for PCIe Endpoint. NTB driver implements NTB + between PCI Root Port and PCIe Endpoint. + + If in doubt, say "N" to disable Endpoint NTB driver. 
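
The new PCI_EPF_VNTB option (implemented by the pci-epf-vntb.c driver added below) is consumed through the ordinary include/linux/ntb.h client API; in the diagram at the top of the new file that is the "NTB NetDev" / "NTB Transfer" layer sitting on the virtual NTB device. As a rough, hypothetical sketch only — the example_* names are invented and nothing here is part of the patch — a minimal client bound to that NTB device might look like the following, assuming the standard 5.15 ntb.h entry points (ntb_register_client(), ntb_link_enable(), ntb_spad_write(), ntb_peer_db_set()):

#include <linux/bits.h>
#include <linux/module.h>
#include <linux/ntb.h>

static int example_ntb_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	/* Bring the link up; pci-epf-vntb reports it via reg->link_status. */
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);

	/* Self scratchpads live in the BAR_CONFIG region set up by the EPF. */
	ntb_spad_write(ntb, 0, 0xcafe);

	/*
	 * Ring doorbell 0; on the endpoint side this maps to
	 * vntb_epf_peer_db_set(), which raises an MSI towards the host.
	 */
	return ntb_peer_db_set(ntb, BIT_ULL(0));
}

static void example_ntb_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	ntb_link_disable(ntb);
}

static struct ntb_client example_ntb_client = {
	.ops = {
		.probe = example_ntb_probe,
		.remove = example_ntb_remove,
	},
};

static int __init example_ntb_init(void)
{
	return ntb_register_client(&example_ntb_client);
}
module_init(example_ntb_init);

static void __exit example_ntb_exit(void)
{
	ntb_unregister_client(&example_ntb_client);
}
module_exit(example_ntb_exit);

MODULE_LICENSE("GPL");

In practice ntb_transport plus ntb_netdev fill this role, as the diagram in the driver suggests; the sketch only shows the shape of the client-side API that the vntb_epf_ops table further down implements.
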
only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/pci/endpoint/functions/Makefile +++ linux-aws-5.15-5.15.0/drivers/pci/endpoint/functions/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o +obj-$(CONFIG_PCI_EPF_VNTB) += pci-epf-vntb.o only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/pci/endpoint/functions/pci-epf-vntb.c +++ linux-aws-5.15-5.15.0/drivers/pci/endpoint/functions/pci-epf-vntb.c @@ -0,0 +1,1421 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Endpoint Function Driver to implement Non-Transparent Bridge functionality + * Between PCI RC and EP + * + * Copyright (C) 2020 Texas Instruments + * Copyright (C) 2022 NXP + * + * Based on pci-epf-ntb.c + * Author: Frank Li + * Author: Kishon Vijay Abraham I + */ + +/** + * +------------+ +---------------------------------------+ + * | | | | + * +------------+ | +--------------+ + * | NTB | | | NTB | + * | NetDev | | | NetDev | + * +------------+ | +--------------+ + * | NTB | | | NTB | + * | Transfer | | | Transfer | + * +------------+ | +--------------+ + * | | | | | + * | PCI NTB | | | | + * | EPF | | | | + * | Driver | | | PCI Virtual | + * | | +---------------+ | NTB Driver | + * | | | PCI EP NTB |<------>| | + * | | | FN Driver | | | + * +------------+ +---------------+ +--------------+ + * | | | | | | + * | PCI Bus | <-----> | PCI EP Bus | | Virtual PCI | + * | | PCI | | | Bus | + * +------------+ +---------------+--------+--------------+ + * PCIe Root Port PCI EP + */ + +#include +#include +#include +#include + +#include +#include +#include + +static struct workqueue_struct *kpcintb_workqueue; + +#define COMMAND_CONFIGURE_DOORBELL 1 +#define COMMAND_TEARDOWN_DOORBELL 2 +#define COMMAND_CONFIGURE_MW 3 +#define COMMAND_TEARDOWN_MW 4 +#define COMMAND_LINK_UP 5 +#define COMMAND_LINK_DOWN 6 + +#define COMMAND_STATUS_OK 1 +#define COMMAND_STATUS_ERROR 2 + +#define LINK_STATUS_UP BIT(0) + +#define SPAD_COUNT 64 +#define DB_COUNT 4 +#define NTB_MW_OFFSET 2 +#define DB_COUNT_MASK GENMASK(15, 0) +#define MSIX_ENABLE BIT(16) +#define MAX_DB_COUNT 32 +#define MAX_MW 4 + +enum epf_ntb_bar { + BAR_CONFIG, + BAR_DB, + BAR_MW0, + BAR_MW1, + BAR_MW2, +}; + +/* + * +--------------------------------------------------+ Base + * | | + * | | + * | | + * | Common Control Register | + * | | + * | | + * | | + * +-----------------------+--------------------------+ Base+span_offset + * | | | + * | Peer Span Space | Span Space | + * | | | + * | | | + * +-----------------------+--------------------------+ Base+span_offset + * | | | +span_count * 4 + * | | | + * | Span Space | Peer Span Space | + * | | | + * +-----------------------+--------------------------+ + * Virtual PCI PCIe Endpoint + * NTB Driver NTB Driver + */ +struct epf_ntb_ctrl { + u32 command; + u32 argument; + u16 command_status; + u16 link_status; + u32 topology; + u64 addr; + u64 size; + u32 num_mws; + u32 reserved; + u32 spad_offset; + u32 spad_count; + u32 db_entry_size; + u32 db_data[MAX_DB_COUNT]; + u32 db_offset[MAX_DB_COUNT]; +} __packed; + +struct epf_ntb { + struct ntb_dev ntb; + struct pci_epf *epf; + struct config_group group; + + u32 num_mws; + u32 db_count; + u32 spad_count; + u64 mws_size[MAX_MW]; + u64 db; + u32 vbus_number; + u16 vntb_pid; + u16 vntb_vid; + + bool linkup; + u32 spad_size; + + enum pci_barno epf_ntb_bar[6]; + + struct epf_ntb_ctrl *reg; + + phys_addr_t epf_db_phy; + void __iomem *epf_db; + + phys_addr_t vpci_mw_phy[MAX_MW]; + void __iomem *vpci_mw_addr[MAX_MW]; + + 
struct delayed_work cmd_handler; +}; + +#define to_epf_ntb(epf_group) container_of((epf_group), struct epf_ntb, group) +#define ntb_ndev(__ntb) container_of(__ntb, struct epf_ntb, ntb) + +static struct pci_epf_header epf_ntb_header = { + .vendorid = PCI_ANY_ID, + .deviceid = PCI_ANY_ID, + .baseclass_code = PCI_BASE_CLASS_MEMORY, + .interrupt_pin = PCI_INTERRUPT_INTA, +}; + +/** + * epf_ntb_link_up() - Raise link_up interrupt to Virtual Host + * @ntb: NTB device that facilitates communication between HOST and VHOST + * @link_up: true or false indicating Link is UP or Down + * + * Once NTB function in HOST invoke ntb_link_enable(), + * this NTB function driver will trigger a link event to vhost. + */ +static int epf_ntb_link_up(struct epf_ntb *ntb, bool link_up) +{ + if (link_up) + ntb->reg->link_status |= LINK_STATUS_UP; + else + ntb->reg->link_status &= ~LINK_STATUS_UP; + + ntb_link_event(&ntb->ntb); + return 0; +} + +/** + * epf_ntb_configure_mw() - Configure the Outbound Address Space for vhost + * to access the memory window of host + * @ntb: NTB device that facilitates communication between host and vhost + * @mw: Index of the memory window (either 0, 1, 2 or 3) + * + * EP Outbound Window + * +--------+ +-----------+ + * | | | | + * | | | | + * | | | | + * | | | | + * | | +-----------+ + * | Virtual| | Memory Win| + * | NTB | -----------> | | + * | Driver | | | + * | | +-----------+ + * | | | | + * | | | | + * +--------+ +-----------+ + * VHost PCI EP + */ +static int epf_ntb_configure_mw(struct epf_ntb *ntb, u32 mw) +{ + phys_addr_t phys_addr; + u8 func_no, vfunc_no; + u64 addr, size; + int ret = 0; + + phys_addr = ntb->vpci_mw_phy[mw]; + addr = ntb->reg->addr; + size = ntb->reg->size; + + func_no = ntb->epf->func_no; + vfunc_no = ntb->epf->vfunc_no; + + ret = pci_epc_map_addr(ntb->epf->epc, func_no, vfunc_no, phys_addr, addr, size); + if (ret) + dev_err(&ntb->epf->epc->dev, + "Failed to map memory window %d address\n", mw); + return ret; +} + +/** + * epf_ntb_teardown_mw() - Teardown the configured OB ATU + * @ntb: NTB device that facilitates communication between HOST and vHOST + * @mw: Index of the memory window (either 0, 1, 2 or 3) + * + * Teardown the configured OB ATU configured in epf_ntb_configure_mw() using + * pci_epc_unmap_addr() + */ +static void epf_ntb_teardown_mw(struct epf_ntb *ntb, u32 mw) +{ + pci_epc_unmap_addr(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + ntb->vpci_mw_phy[mw]); +} + +/** + * epf_ntb_cmd_handler() - Handle commands provided by the NTB Host + * @work: work_struct for the epf_ntb_epc + * + * Workqueue function that gets invoked for the two epf_ntb_epc + * periodically (once every 5ms) to see if it has received any commands + * from NTB host. The host can send commands to configure doorbell or + * configure memory window or to update link status. 
+ */ +static void epf_ntb_cmd_handler(struct work_struct *work) +{ + struct epf_ntb_ctrl *ctrl; + u32 command, argument; + struct epf_ntb *ntb; + struct device *dev; + int ret; + int i; + + ntb = container_of(work, struct epf_ntb, cmd_handler.work); + + for (i = 1; i < ntb->db_count; i++) { + if (readl(ntb->epf_db + i * 4)) { + if (readl(ntb->epf_db + i * 4)) + ntb->db |= 1 << (i - 1); + + ntb_db_event(&ntb->ntb, i); + writel(0, ntb->epf_db + i * 4); + } + } + + ctrl = ntb->reg; + command = ctrl->command; + if (!command) + goto reset_handler; + argument = ctrl->argument; + + ctrl->command = 0; + ctrl->argument = 0; + + ctrl = ntb->reg; + dev = &ntb->epf->dev; + + switch (command) { + case COMMAND_CONFIGURE_DOORBELL: + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_TEARDOWN_DOORBELL: + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_CONFIGURE_MW: + ret = epf_ntb_configure_mw(ntb, argument); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_TEARDOWN_MW: + epf_ntb_teardown_mw(ntb, argument); + ctrl->command_status = COMMAND_STATUS_OK; + break; + case COMMAND_LINK_UP: + ntb->linkup = true; + ret = epf_ntb_link_up(ntb, true); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + goto reset_handler; + case COMMAND_LINK_DOWN: + ntb->linkup = false; + ret = epf_ntb_link_up(ntb, false); + if (ret < 0) + ctrl->command_status = COMMAND_STATUS_ERROR; + else + ctrl->command_status = COMMAND_STATUS_OK; + break; + default: + dev_err(dev, "UNKNOWN command: %d\n", command); + break; + } + +reset_handler: + queue_delayed_work(kpcintb_workqueue, &ntb->cmd_handler, + msecs_to_jiffies(5)); +} + +/** + * epf_ntb_config_sspad_bar_clear() - Clear Config + Self scratchpad BAR + * @ntb_epc: EPC associated with one of the HOST which holds peer's outbound + * address. + * + * Clear BAR0 of EP CONTROLLER 1 which contains the HOST1's config and + * self scratchpad region (removes inbound ATU configuration). While BAR0 is + * the default self scratchpad BAR, an NTB could have other BARs for self + * scratchpad (because of reserved BARs). This function can get the exact BAR + * used for self scratchpad from epf_ntb_bar[BAR_CONFIG]. + * + * Please note the self scratchpad region and config region is combined to + * a single region and mapped using the same BAR. Also note HOST2's peer + * scratchpad is HOST1's self scratchpad. + */ +static void epf_ntb_config_sspad_bar_clear(struct epf_ntb *ntb) +{ + struct pci_epf_bar *epf_bar; + enum pci_barno barno; + + barno = ntb->epf_ntb_bar[BAR_CONFIG]; + epf_bar = &ntb->epf->bar[barno]; + + pci_epc_clear_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar); +} + +/** + * epf_ntb_config_sspad_bar_set() - Set Config + Self scratchpad BAR + * @ntb: NTB device that facilitates communication between HOST and vHOST + * + * Map BAR0 of EP CONTROLLER 1 which contains the HOST1's config and + * self scratchpad region. + * + * Please note the self scratchpad region and config region is combined to + * a single region and mapped using the same BAR. 
+ */ +static int epf_ntb_config_sspad_bar_set(struct epf_ntb *ntb) +{ + struct pci_epf_bar *epf_bar; + enum pci_barno barno; + u8 func_no, vfunc_no; + struct device *dev; + int ret; + + dev = &ntb->epf->dev; + func_no = ntb->epf->func_no; + vfunc_no = ntb->epf->vfunc_no; + barno = ntb->epf_ntb_bar[BAR_CONFIG]; + epf_bar = &ntb->epf->bar[barno]; + + ret = pci_epc_set_bar(ntb->epf->epc, func_no, vfunc_no, epf_bar); + if (ret) { + dev_err(dev, "inft: Config/Status/SPAD BAR set failed\n"); + return ret; + } + return 0; +} + +/** + * epf_ntb_config_spad_bar_free() - Free the physical memory associated with + * config + scratchpad region + * @ntb: NTB device that facilitates communication between HOST and vHOST + */ +static void epf_ntb_config_spad_bar_free(struct epf_ntb *ntb) +{ + enum pci_barno barno; + + barno = ntb->epf_ntb_bar[BAR_CONFIG]; + pci_epf_free_space(ntb->epf, ntb->reg, barno, 0); +} + +/** + * epf_ntb_config_spad_bar_alloc() - Allocate memory for config + scratchpad + * region + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * + * Allocate the Local Memory mentioned in the above diagram. The size of + * CONFIG REGION is sizeof(struct epf_ntb_ctrl) and size of SCRATCHPAD REGION + * is obtained from "spad-count" configfs entry. + */ +static int epf_ntb_config_spad_bar_alloc(struct epf_ntb *ntb) +{ + size_t align; + enum pci_barno barno; + struct epf_ntb_ctrl *ctrl; + u32 spad_size, ctrl_size; + u64 size; + struct pci_epf *epf = ntb->epf; + struct device *dev = &epf->dev; + u32 spad_count; + void *base; + int i; + const struct pci_epc_features *epc_features = pci_epc_get_features(epf->epc, + epf->func_no, + epf->vfunc_no); + barno = ntb->epf_ntb_bar[BAR_CONFIG]; + size = epc_features->bar_fixed_size[barno]; + align = epc_features->align; + + if ((!IS_ALIGNED(size, align))) + return -EINVAL; + + spad_count = ntb->spad_count; + + ctrl_size = sizeof(struct epf_ntb_ctrl); + spad_size = 2 * spad_count * 4; + + if (!align) { + ctrl_size = roundup_pow_of_two(ctrl_size); + spad_size = roundup_pow_of_two(spad_size); + } else { + ctrl_size = ALIGN(ctrl_size, align); + spad_size = ALIGN(spad_size, align); + } + + if (!size) + size = ctrl_size + spad_size; + else if (size < ctrl_size + spad_size) + return -EINVAL; + + base = pci_epf_alloc_space(epf, size, barno, align, 0); + if (!base) { + dev_err(dev, "Config/Status/SPAD alloc region fail\n"); + return -ENOMEM; + } + + ntb->reg = base; + + ctrl = ntb->reg; + ctrl->spad_offset = ctrl_size; + + ctrl->spad_count = spad_count; + ctrl->num_mws = ntb->num_mws; + ntb->spad_size = spad_size; + + ctrl->db_entry_size = 4; + + for (i = 0; i < ntb->db_count; i++) { + ntb->reg->db_data[i] = 1 + i; + ntb->reg->db_offset[i] = 0; + } + + return 0; +} + +/** + * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capaiblity + * @ntb: NTB device that facilitates communication between HOST and vHOST + * + * Configure MSI/MSI-X capability for each interface with number of + * interrupts equal to "db_count" configfs entry. 
+ */ +static int epf_ntb_configure_interrupt(struct epf_ntb *ntb) +{ + const struct pci_epc_features *epc_features; + struct device *dev; + u32 db_count; + int ret; + + dev = &ntb->epf->dev; + + epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no); + + if (!(epc_features->msix_capable || epc_features->msi_capable)) { + dev_err(dev, "MSI or MSI-X is required for doorbell\n"); + return -EINVAL; + } + + db_count = ntb->db_count; + if (db_count > MAX_DB_COUNT) { + dev_err(dev, "DB count cannot be more than %d\n", MAX_DB_COUNT); + return -EINVAL; + } + + ntb->db_count = db_count; + + if (epc_features->msi_capable) { + ret = pci_epc_set_msi(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + 16); + if (ret) { + dev_err(dev, "MSI configuration failed\n"); + return ret; + } + } + + return 0; +} + +/** + * epf_ntb_db_bar_init() - Configure Doorbell window BARs + * @ntb: NTB device that facilitates communication between HOST and vHOST + */ +static int epf_ntb_db_bar_init(struct epf_ntb *ntb) +{ + const struct pci_epc_features *epc_features; + u32 align; + struct device *dev = &ntb->epf->dev; + int ret; + struct pci_epf_bar *epf_bar; + void __iomem *mw_addr; + enum pci_barno barno; + size_t size = 4 * ntb->db_count; + + epc_features = pci_epc_get_features(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no); + align = epc_features->align; + + if (size < 128) + size = 128; + + if (align) + size = ALIGN(size, align); + else + size = roundup_pow_of_two(size); + + barno = ntb->epf_ntb_bar[BAR_DB]; + + mw_addr = pci_epf_alloc_space(ntb->epf, size, barno, align, 0); + if (!mw_addr) { + dev_err(dev, "Failed to allocate OB address\n"); + return -ENOMEM; + } + + ntb->epf_db = mw_addr; + + epf_bar = &ntb->epf->bar[barno]; + + ret = pci_epc_set_bar(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no, epf_bar); + if (ret) { + dev_err(dev, "Doorbell BAR set failed\n"); + goto err_alloc_peer_mem; + } + return ret; + +err_alloc_peer_mem: + pci_epc_mem_free_addr(ntb->epf->epc, epf_bar->phys_addr, mw_addr, epf_bar->size); + return -1; +} + +/** + * epf_ntb_db_bar_clear() - Clear doorbell BAR and free memory + * allocated in peer's outbound address space + * @ntb: NTB device that facilitates communication between HOST and vHOST + */ +static void epf_ntb_db_bar_clear(struct epf_ntb *ntb) +{ + enum pci_barno barno; + + barno = ntb->epf_ntb_bar[BAR_DB]; + pci_epf_free_space(ntb->epf, ntb->epf_db, barno, 0); + pci_epc_clear_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); +} + +/** + * epf_ntb_mw_bar_init() - Configure Memory window BARs + * @ntb: NTB device that facilitates communication between HOST and vHOST + * + */ +static int epf_ntb_mw_bar_init(struct epf_ntb *ntb) +{ + int ret = 0; + int i; + u64 size; + enum pci_barno barno; + struct device *dev = &ntb->epf->dev; + + for (i = 0; i < ntb->num_mws; i++) { + size = ntb->mws_size[i]; + barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + + ntb->epf->bar[barno].barno = barno; + ntb->epf->bar[barno].size = size; + ntb->epf->bar[barno].addr = 0; + ntb->epf->bar[barno].phys_addr = 0; + ntb->epf->bar[barno].flags |= upper_32_bits(size) ? 
+ PCI_BASE_ADDRESS_MEM_TYPE_64 : + PCI_BASE_ADDRESS_MEM_TYPE_32; + + ret = pci_epc_set_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); + if (ret) { + dev_err(dev, "MW set failed\n"); + goto err_alloc_mem; + } + + /* Allocate EPC outbound memory windows to vpci vntb device */ + ntb->vpci_mw_addr[i] = pci_epc_mem_alloc_addr(ntb->epf->epc, + &ntb->vpci_mw_phy[i], + size); + if (!ntb->vpci_mw_addr[i]) { + dev_err(dev, "Failed to allocate source address\n"); + goto err_alloc_mem; + } + } + + return ret; +err_alloc_mem: + return ret; +} + +/** + * epf_ntb_mw_bar_clear() - Clear Memory window BARs + * @ntb: NTB device that facilitates communication between HOST and vHOST + */ +static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb) +{ + enum pci_barno barno; + int i; + + for (i = 0; i < ntb->num_mws; i++) { + barno = ntb->epf_ntb_bar[BAR_MW0 + i]; + pci_epc_clear_bar(ntb->epf->epc, + ntb->epf->func_no, + ntb->epf->vfunc_no, + &ntb->epf->bar[barno]); + + pci_epc_mem_free_addr(ntb->epf->epc, + ntb->vpci_mw_phy[i], + ntb->vpci_mw_addr[i], + ntb->mws_size[i]); + } +} + +/** + * epf_ntb_epc_destroy() - Cleanup NTB EPC interface + * @ntb: NTB device that facilitates communication between HOST and vHOST + * + * Wrapper for epf_ntb_epc_destroy_interface() to cleanup all the NTB interfaces + */ +static void epf_ntb_epc_destroy(struct epf_ntb *ntb) +{ + pci_epc_remove_epf(ntb->epf->epc, ntb->epf, 0); + pci_epc_put(ntb->epf->epc); +} + +/** + * epf_ntb_init_epc_bar() - Identify BARs to be used for each of the NTB + * constructs (scratchpad region, doorbell, memorywindow) + * @ntb: NTB device that facilitates communication between HOST and vHOST + */ +static int epf_ntb_init_epc_bar(struct epf_ntb *ntb) +{ + const struct pci_epc_features *epc_features; + enum pci_barno barno; + enum epf_ntb_bar bar; + struct device *dev; + u32 num_mws; + int i; + + barno = BAR_0; + num_mws = ntb->num_mws; + dev = &ntb->epf->dev; + epc_features = pci_epc_get_features(ntb->epf->epc, ntb->epf->func_no, ntb->epf->vfunc_no); + + /* These are required BARs which are mandatory for NTB functionality */ + for (bar = BAR_CONFIG; bar <= BAR_MW0; bar++, barno++) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) { + dev_err(dev, "Fail to get NTB function BAR\n"); + return barno; + } + ntb->epf_ntb_bar[bar] = barno; + } + + /* These are optional BARs which don't impact NTB functionality */ + for (bar = BAR_MW1, i = 1; i < num_mws; bar++, barno++, i++) { + barno = pci_epc_get_next_free_bar(epc_features, barno); + if (barno < 0) { + ntb->num_mws = i; + dev_dbg(dev, "BAR not available for > MW%d\n", i + 1); + } + ntb->epf_ntb_bar[bar] = barno; + } + + return 0; +} + +/** + * epf_ntb_epc_init() - Initialize NTB interface + * @ntb: NTB device that facilitates communication between HOST and vHOST2 + * + * Wrapper to initialize a particular EPC interface and start the workqueue + * to check for commands from host. This function will write to the + * EP controller HW for configuring it. 
+ */ +static int epf_ntb_epc_init(struct epf_ntb *ntb) +{ + u8 func_no, vfunc_no; + struct pci_epc *epc; + struct pci_epf *epf; + struct device *dev; + int ret; + + epf = ntb->epf; + dev = &epf->dev; + epc = epf->epc; + func_no = ntb->epf->func_no; + vfunc_no = ntb->epf->vfunc_no; + + ret = epf_ntb_config_sspad_bar_set(ntb); + if (ret) { + dev_err(dev, "Config/self SPAD BAR init failed"); + return ret; + } + + ret = epf_ntb_configure_interrupt(ntb); + if (ret) { + dev_err(dev, "Interrupt configuration failed\n"); + goto err_config_interrupt; + } + + ret = epf_ntb_db_bar_init(ntb); + if (ret) { + dev_err(dev, "DB BAR init failed\n"); + goto err_db_bar_init; + } + + ret = epf_ntb_mw_bar_init(ntb); + if (ret) { + dev_err(dev, "MW BAR init failed\n"); + goto err_mw_bar_init; + } + + if (vfunc_no <= 1) { + ret = pci_epc_write_header(epc, func_no, vfunc_no, epf->header); + if (ret) { + dev_err(dev, "Configuration header write failed\n"); + goto err_write_header; + } + } + + INIT_DELAYED_WORK(&ntb->cmd_handler, epf_ntb_cmd_handler); + queue_work(kpcintb_workqueue, &ntb->cmd_handler.work); + + return 0; + +err_write_header: + epf_ntb_mw_bar_clear(ntb); +err_mw_bar_init: + epf_ntb_db_bar_clear(ntb); +err_db_bar_init: +err_config_interrupt: + epf_ntb_config_sspad_bar_clear(ntb); + + return ret; +} + + +/** + * epf_ntb_epc_cleanup() - Cleanup all NTB interfaces + * @ntb: NTB device that facilitates communication between HOST1 and HOST2 + * + * Wrapper to cleanup all NTB interfaces. + */ +static void epf_ntb_epc_cleanup(struct epf_ntb *ntb) +{ + epf_ntb_db_bar_clear(ntb); + epf_ntb_mw_bar_clear(ntb); +} + +#define EPF_NTB_R(_name) \ +static ssize_t epf_ntb_##_name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + \ + return sprintf(page, "%d\n", ntb->_name); \ +} + +#define EPF_NTB_W(_name) \ +static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + u32 val; \ + int ret; \ + \ + ret = kstrtou32(page, 0, &val); \ + if (ret) \ + return ret; \ + \ + ntb->_name = val; \ + \ + return len; \ +} + +#define EPF_NTB_MW_R(_name) \ +static ssize_t epf_ntb_##_name##_show(struct config_item *item, \ + char *page) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + int win_no; \ + \ + sscanf(#_name, "mw%d", &win_no); \ + \ + return sprintf(page, "%lld\n", ntb->mws_size[win_no - 1]); \ +} + +#define EPF_NTB_MW_W(_name) \ +static ssize_t epf_ntb_##_name##_store(struct config_item *item, \ + const char *page, size_t len) \ +{ \ + struct config_group *group = to_config_group(item); \ + struct epf_ntb *ntb = to_epf_ntb(group); \ + struct device *dev = &ntb->epf->dev; \ + int win_no; \ + u64 val; \ + int ret; \ + \ + ret = kstrtou64(page, 0, &val); \ + if (ret) \ + return ret; \ + \ + if (sscanf(#_name, "mw%d", &win_no) != 1) \ + return -EINVAL; \ + \ + if (ntb->num_mws < win_no) { \ + dev_err(dev, "Invalid num_nws: %d value\n", ntb->num_mws); \ + return -EINVAL; \ + } \ + \ + ntb->mws_size[win_no - 1] = val; \ + \ + return len; \ +} + +static ssize_t epf_ntb_num_mws_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct epf_ntb *ntb = to_epf_ntb(group); + u32 val; + int ret; + + ret = kstrtou32(page, 0, &val); + if (ret) + return 
ret; + + if (val > MAX_MW) + return -EINVAL; + + ntb->num_mws = val; + + return len; +} + +EPF_NTB_R(spad_count) +EPF_NTB_W(spad_count) +EPF_NTB_R(db_count) +EPF_NTB_W(db_count) +EPF_NTB_R(num_mws) +EPF_NTB_R(vbus_number) +EPF_NTB_W(vbus_number) +EPF_NTB_R(vntb_pid) +EPF_NTB_W(vntb_pid) +EPF_NTB_R(vntb_vid) +EPF_NTB_W(vntb_vid) +EPF_NTB_MW_R(mw1) +EPF_NTB_MW_W(mw1) +EPF_NTB_MW_R(mw2) +EPF_NTB_MW_W(mw2) +EPF_NTB_MW_R(mw3) +EPF_NTB_MW_W(mw3) +EPF_NTB_MW_R(mw4) +EPF_NTB_MW_W(mw4) + +CONFIGFS_ATTR(epf_ntb_, spad_count); +CONFIGFS_ATTR(epf_ntb_, db_count); +CONFIGFS_ATTR(epf_ntb_, num_mws); +CONFIGFS_ATTR(epf_ntb_, mw1); +CONFIGFS_ATTR(epf_ntb_, mw2); +CONFIGFS_ATTR(epf_ntb_, mw3); +CONFIGFS_ATTR(epf_ntb_, mw4); +CONFIGFS_ATTR(epf_ntb_, vbus_number); +CONFIGFS_ATTR(epf_ntb_, vntb_pid); +CONFIGFS_ATTR(epf_ntb_, vntb_vid); + +static struct configfs_attribute *epf_ntb_attrs[] = { + &epf_ntb_attr_spad_count, + &epf_ntb_attr_db_count, + &epf_ntb_attr_num_mws, + &epf_ntb_attr_mw1, + &epf_ntb_attr_mw2, + &epf_ntb_attr_mw3, + &epf_ntb_attr_mw4, + &epf_ntb_attr_vbus_number, + &epf_ntb_attr_vntb_pid, + &epf_ntb_attr_vntb_vid, + NULL, +}; + +static const struct config_item_type ntb_group_type = { + .ct_attrs = epf_ntb_attrs, + .ct_owner = THIS_MODULE, +}; + +/** + * epf_ntb_add_cfs() - Add configfs directory specific to NTB + * @epf: NTB endpoint function device + * @group: A pointer to the config_group structure referencing a group of + * config_items of a specific type that belong to a specific sub-system. + * + * Add configfs directory specific to NTB. This directory will hold + * NTB specific properties like db_count, spad_count, num_mws etc., + */ +static struct config_group *epf_ntb_add_cfs(struct pci_epf *epf, + struct config_group *group) +{ + struct epf_ntb *ntb = epf_get_drvdata(epf); + struct config_group *ntb_group = &ntb->group; + struct device *dev = &epf->dev; + + config_group_init_type_name(ntb_group, dev_name(dev), &ntb_group_type); + + return ntb_group; +} + +/*==== virtual PCI bus driver, which only load virtual NTB PCI driver ====*/ + +static u32 pci_space[] = { + 0xffffffff, /*DeviceID, Vendor ID*/ + 0, /*Status, Command*/ + 0xffffffff, /*Class code, subclass, prog if, revision id*/ + 0x40, /*bist, header type, latency Timer, cache line size*/ + 0, /*BAR 0*/ + 0, /*BAR 1*/ + 0, /*BAR 2*/ + 0, /*BAR 3*/ + 0, /*BAR 4*/ + 0, /*BAR 5*/ + 0, /*Cardbus cis point*/ + 0, /*Subsystem ID Subystem vendor id*/ + 0, /*ROM Base Address*/ + 0, /*Reserved, Cap. 
Point*/ + 0, /*Reserved,*/ + 0, /*Max Lat, Min Gnt, interrupt pin, interrupt line*/ +}; + +int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) +{ + if (devfn == 0) { + memcpy(val, ((u8 *)pci_space) + where, size); + return PCIBIOS_SUCCESSFUL; + } + return PCIBIOS_DEVICE_NOT_FOUND; +} + +int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) +{ + return 0; +} + +struct pci_ops vpci_ops = { + .read = pci_read, + .write = pci_write, +}; + +static int vpci_scan_bus(void *sysdata) +{ + struct pci_bus *vpci_bus; + struct epf_ntb *ndev = sysdata; + + vpci_bus = pci_scan_bus(ndev->vbus_number, &vpci_ops, sysdata); + if (vpci_bus) + pr_err("create pci bus\n"); + + pci_bus_add_devices(vpci_bus); + + return 0; +} + +/*==================== Virtual PCIe NTB driver ==========================*/ + +static int vntb_epf_mw_count(struct ntb_dev *ntb, int pidx) +{ + struct epf_ntb *ndev = ntb_ndev(ntb); + + return ndev->num_mws; +} + +static int vntb_epf_spad_count(struct ntb_dev *ntb) +{ + return ntb_ndev(ntb)->spad_count; +} + +static int vntb_epf_peer_mw_count(struct ntb_dev *ntb) +{ + return ntb_ndev(ntb)->num_mws; +} + +static u64 vntb_epf_db_valid_mask(struct ntb_dev *ntb) +{ + return BIT_ULL(ntb_ndev(ntb)->db_count) - 1; +} + +static int vntb_epf_db_set_mask(struct ntb_dev *ntb, u64 db_bits) +{ + return 0; +} + +static int vntb_epf_mw_set_trans(struct ntb_dev *ndev, int pidx, int idx, + dma_addr_t addr, resource_size_t size) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + struct pci_epf_bar *epf_bar; + enum pci_barno barno; + int ret; + struct device *dev; + + dev = &ntb->ntb.dev; + barno = ntb->epf_ntb_bar[BAR_MW0 + idx]; + epf_bar = &ntb->epf->bar[barno]; + epf_bar->phys_addr = addr; + epf_bar->barno = barno; + epf_bar->size = size; + + ret = pci_epc_set_bar(ntb->epf->epc, 0, 0, epf_bar); + if (ret) { + dev_err(dev, "failure set mw trans\n"); + return ret; + } + return 0; +} + +static int vntb_epf_mw_clear_trans(struct ntb_dev *ntb, int pidx, int idx) +{ + return 0; +} + +static int vntb_epf_peer_mw_get_addr(struct ntb_dev *ndev, int idx, + phys_addr_t *base, resource_size_t *size) +{ + + struct epf_ntb *ntb = ntb_ndev(ndev); + + if (base) + *base = ntb->vpci_mw_phy[idx]; + + if (size) + *size = ntb->mws_size[idx]; + + return 0; +} + +static int vntb_epf_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, + enum ntb_width max_width) +{ + return 0; +} + +static u32 vntb_epf_spad_read(struct ntb_dev *ndev, int idx) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + int off = ntb->reg->spad_offset, ct = ntb->reg->spad_count * 4; + u32 val; + void __iomem *base = ntb->reg; + + val = readl(base + off + ct + idx * 4); + return val; +} + +static int vntb_epf_spad_write(struct ntb_dev *ndev, int idx, u32 val) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + struct epf_ntb_ctrl *ctrl = ntb->reg; + int off = ctrl->spad_offset, ct = ctrl->spad_count * 4; + void __iomem *base = ntb->reg; + + writel(val, base + off + ct + idx * 4); + return 0; +} + +static u32 vntb_epf_peer_spad_read(struct ntb_dev *ndev, int pidx, int idx) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + struct epf_ntb_ctrl *ctrl = ntb->reg; + int off = ctrl->spad_offset; + void __iomem *base = ntb->reg; + u32 val; + + val = readl(base + off + idx * 4); + return val; +} + +static int vntb_epf_peer_spad_write(struct ntb_dev *ndev, int pidx, int idx, u32 val) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + struct epf_ntb_ctrl *ctrl = ntb->reg; + int off = ctrl->spad_offset; + void __iomem *base = 
ntb->reg; + + writel(val, base + off + idx * 4); + return 0; +} + +static int vntb_epf_peer_db_set(struct ntb_dev *ndev, u64 db_bits) +{ + u32 interrupt_num = ffs(db_bits) + 1; + struct epf_ntb *ntb = ntb_ndev(ndev); + u8 func_no, vfunc_no; + int ret; + + func_no = ntb->epf->func_no; + vfunc_no = ntb->epf->vfunc_no; + + ret = pci_epc_raise_irq(ntb->epf->epc, + func_no, + vfunc_no, + PCI_EPC_IRQ_MSI, + interrupt_num + 1); + if (ret) + dev_err(&ntb->ntb.dev, "Failed to raise IRQ\n"); + + return ret; +} + +static u64 vntb_epf_db_read(struct ntb_dev *ndev) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + + return ntb->db; +} + +static int vntb_epf_mw_get_align(struct ntb_dev *ndev, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + + if (addr_align) + *addr_align = SZ_4K; + + if (size_align) + *size_align = 1; + + if (size_max) + *size_max = ntb->mws_size[idx]; + + return 0; +} + +static u64 vntb_epf_link_is_up(struct ntb_dev *ndev, + enum ntb_speed *speed, + enum ntb_width *width) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + + return ntb->reg->link_status; +} + +static int vntb_epf_db_clear_mask(struct ntb_dev *ndev, u64 db_bits) +{ + return 0; +} + +static int vntb_epf_db_clear(struct ntb_dev *ndev, u64 db_bits) +{ + struct epf_ntb *ntb = ntb_ndev(ndev); + + ntb->db &= ~db_bits; + return 0; +} + +static int vntb_epf_link_disable(struct ntb_dev *ntb) +{ + return 0; +} + +static const struct ntb_dev_ops vntb_epf_ops = { + .mw_count = vntb_epf_mw_count, + .spad_count = vntb_epf_spad_count, + .peer_mw_count = vntb_epf_peer_mw_count, + .db_valid_mask = vntb_epf_db_valid_mask, + .db_set_mask = vntb_epf_db_set_mask, + .mw_set_trans = vntb_epf_mw_set_trans, + .mw_clear_trans = vntb_epf_mw_clear_trans, + .peer_mw_get_addr = vntb_epf_peer_mw_get_addr, + .link_enable = vntb_epf_link_enable, + .spad_read = vntb_epf_spad_read, + .spad_write = vntb_epf_spad_write, + .peer_spad_read = vntb_epf_peer_spad_read, + .peer_spad_write = vntb_epf_peer_spad_write, + .peer_db_set = vntb_epf_peer_db_set, + .db_read = vntb_epf_db_read, + .mw_get_align = vntb_epf_mw_get_align, + .link_is_up = vntb_epf_link_is_up, + .db_clear_mask = vntb_epf_db_clear_mask, + .db_clear = vntb_epf_db_clear, + .link_disable = vntb_epf_link_disable, +}; + +static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + struct epf_ntb *ndev = (struct epf_ntb *)pdev->sysdata; + struct device *dev = &pdev->dev; + + ndev->ntb.pdev = pdev; + ndev->ntb.topo = NTB_TOPO_NONE; + ndev->ntb.ops = &vntb_epf_ops; + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "Cannot set DMA mask\n"); + return -EINVAL; + } + + ret = ntb_register_device(&ndev->ntb); + if (ret) { + dev_err(dev, "Failed to register NTB device\n"); + return ret; + } + + dev_dbg(dev, "PCI Virtual NTB driver loaded\n"); + return 0; +} + +static struct pci_device_id pci_vntb_table[] = { + { + PCI_DEVICE(0xffff, 0xffff), + }, + {}, +}; + +static struct pci_driver vntb_pci_driver = { + .name = "pci-vntb", + .id_table = pci_vntb_table, + .probe = pci_vntb_probe, +}; + +/* ============ PCIe EPF Driver Bind ====================*/ + +/** + * epf_ntb_bind() - Initialize endpoint controller to provide NTB functionality + * @epf: NTB endpoint function device + * + * Initialize both the endpoint controllers associated with NTB function device. + * Invoked when a primary interface or secondary interface is bound to EPC + * device. 
This function will succeed only when EPC is bound to both the + * interfaces. + */ +static int epf_ntb_bind(struct pci_epf *epf) +{ + struct epf_ntb *ntb = epf_get_drvdata(epf); + struct device *dev = &epf->dev; + int ret; + + if (!epf->epc) { + dev_dbg(dev, "PRIMARY EPC interface not yet bound\n"); + return 0; + } + + ret = epf_ntb_init_epc_bar(ntb); + if (ret) { + dev_err(dev, "Failed to create NTB EPC\n"); + goto err_bar_init; + } + + ret = epf_ntb_config_spad_bar_alloc(ntb); + if (ret) { + dev_err(dev, "Failed to allocate BAR memory\n"); + goto err_bar_alloc; + } + + ret = epf_ntb_epc_init(ntb); + if (ret) { + dev_err(dev, "Failed to initialize EPC\n"); + goto err_bar_alloc; + } + + epf_set_drvdata(epf, ntb); + + pci_space[0] = (ntb->vntb_pid << 16) | ntb->vntb_vid; + pci_vntb_table[0].vendor = ntb->vntb_vid; + pci_vntb_table[0].device = ntb->vntb_pid; + + if (pci_register_driver(&vntb_pci_driver)) { + dev_err(dev, "failure register vntb pci driver\n"); + goto err_bar_alloc; + } + + vpci_scan_bus(ntb); + + return 0; + +err_bar_alloc: + epf_ntb_config_spad_bar_free(ntb); + +err_bar_init: + epf_ntb_epc_destroy(ntb); + + return ret; +} + +/** + * epf_ntb_unbind() - Cleanup the initialization from epf_ntb_bind() + * @epf: NTB endpoint function device + * + * Cleanup the initialization from epf_ntb_bind() + */ +static void epf_ntb_unbind(struct pci_epf *epf) +{ + struct epf_ntb *ntb = epf_get_drvdata(epf); + + epf_ntb_epc_cleanup(ntb); + epf_ntb_config_spad_bar_free(ntb); + epf_ntb_epc_destroy(ntb); + + pci_unregister_driver(&vntb_pci_driver); +} + +// EPF driver probe +static struct pci_epf_ops epf_ntb_ops = { + .bind = epf_ntb_bind, + .unbind = epf_ntb_unbind, + .add_cfs = epf_ntb_add_cfs, +}; + +/** + * epf_ntb_probe() - Probe NTB function driver + * @epf: NTB endpoint function device + * + * Probe NTB function driver when endpoint function bus detects a NTB + * endpoint function. 
+ */ +static int epf_ntb_probe(struct pci_epf *epf) +{ + struct epf_ntb *ntb; + struct device *dev; + + dev = &epf->dev; + + ntb = devm_kzalloc(dev, sizeof(*ntb), GFP_KERNEL); + if (!ntb) + return -ENOMEM; + + epf->header = &epf_ntb_header; + ntb->epf = epf; + ntb->vbus_number = 0xff; + epf_set_drvdata(epf, ntb); + + dev_info(dev, "pci-ep epf driver loaded\n"); + return 0; +} + +static const struct pci_epf_device_id epf_ntb_ids[] = { + { + .name = "pci_epf_vntb", + }, + {}, +}; + +static struct pci_epf_driver epf_ntb_driver = { + .driver.name = "pci_epf_vntb", + .probe = epf_ntb_probe, + .id_table = epf_ntb_ids, + .ops = &epf_ntb_ops, + .owner = THIS_MODULE, +}; + +static int __init epf_ntb_init(void) +{ + int ret; + + kpcintb_workqueue = alloc_workqueue("kpcintb", WQ_MEM_RECLAIM | + WQ_HIGHPRI, 0); + ret = pci_epf_register_driver(&epf_ntb_driver); + if (ret) { + destroy_workqueue(kpcintb_workqueue); + pr_err("Failed to register pci epf ntb driver --> %d\n", ret); + return ret; + } + + return 0; +} +module_init(epf_ntb_init); + +static void __exit epf_ntb_exit(void) +{ + pci_epf_unregister_driver(&epf_ntb_driver); + destroy_workqueue(kpcintb_workqueue); +} +module_exit(epf_ntb_exit); + +MODULE_DESCRIPTION("PCI EPF NTB DRIVER"); +MODULE_AUTHOR("Frank Li "); +MODULE_LICENSE("GPL v2"); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/pinctrl/mediatek/pinctrl-mt8192.c +++ linux-aws-5.15-5.15.0/drivers/pinctrl/mediatek/pinctrl-mt8192.c @@ -1346,7 +1346,6 @@ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8192_pin_dir_range), [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8192_pin_di_range), [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8192_pin_do_range), - [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8192_pin_dir_range), [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8192_pin_smt_range), [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8192_pin_ies_range), [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8192_pin_pu_range), only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/pwm/pwm-atmel-hlcdc.c +++ linux-aws-5.15-5.15.0/drivers/pwm/pwm-atmel-hlcdc.c @@ -38,11 +38,11 @@ return container_of(chip, struct atmel_hlcdc_pwm, chip); } -static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm, +static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { - struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c); - struct atmel_hlcdc *hlcdc = chip->hlcdc; + struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip); + struct atmel_hlcdc *hlcdc = atmel->hlcdc; unsigned int status; int ret; @@ -54,7 +54,7 @@ u32 pwmcfg; int pres; - if (!chip->errata || !chip->errata->slow_clk_erratum) { + if (!atmel->errata || !atmel->errata->slow_clk_erratum) { clk_freq = clk_get_rate(new_clk); if (!clk_freq) return -EINVAL; @@ -64,7 +64,7 @@ } /* Errata: cannot use slow clk on some IP revisions */ - if ((chip->errata && chip->errata->slow_clk_erratum) || + if ((atmel->errata && atmel->errata->slow_clk_erratum) || clk_period_ns > state->period) { new_clk = hlcdc->sys_clk; clk_freq = clk_get_rate(new_clk); @@ -77,8 +77,8 @@ for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) { /* Errata: cannot divide by 1 on some IP revisions */ - if (!pres && chip->errata && - chip->errata->div1_clk_erratum) + if (!pres && atmel->errata && + atmel->errata->div1_clk_erratum) continue; if ((clk_period_ns << pres) >= state->period) @@ -90,7 +90,7 @@ pwmcfg = ATMEL_HLCDC_PWMPS(pres); - if (new_clk != chip->cur_clk) { + if (new_clk != atmel->cur_clk) { u32 gencfg = 0; int ret; @@ -98,8 +98,8 @@ if (ret) return ret; - 
clk_disable_unprepare(chip->cur_clk); - chip->cur_clk = new_clk; + clk_disable_unprepare(atmel->cur_clk); + atmel->cur_clk = new_clk; if (new_clk == hlcdc->sys_clk) gencfg = ATMEL_HLCDC_CLKPWMSEL; @@ -160,8 +160,8 @@ if (ret) return ret; - clk_disable_unprepare(chip->cur_clk); - chip->cur_clk = NULL; + clk_disable_unprepare(atmel->cur_clk); + atmel->cur_clk = NULL; } return 0; @@ -183,31 +183,32 @@ #ifdef CONFIG_PM_SLEEP static int atmel_hlcdc_pwm_suspend(struct device *dev) { - struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev); + struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev); /* Keep the periph clock enabled if the PWM is still running. */ - if (pwm_is_enabled(&chip->chip.pwms[0])) - clk_disable_unprepare(chip->hlcdc->periph_clk); + if (!pwm_is_enabled(&atmel->chip.pwms[0])) + clk_disable_unprepare(atmel->hlcdc->periph_clk); return 0; } static int atmel_hlcdc_pwm_resume(struct device *dev) { - struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev); + struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev); struct pwm_state state; int ret; - pwm_get_state(&chip->chip.pwms[0], &state); + pwm_get_state(&atmel->chip.pwms[0], &state); /* Re-enable the periph clock it was stopped during suspend. */ if (!state.enabled) { - ret = clk_prepare_enable(chip->hlcdc->periph_clk); + ret = clk_prepare_enable(atmel->hlcdc->periph_clk); if (ret) return ret; } - return atmel_hlcdc_pwm_apply(&chip->chip, &chip->chip.pwms[0], &state); + return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0], + &state); } #endif @@ -244,14 +245,14 @@ { const struct of_device_id *match; struct device *dev = &pdev->dev; - struct atmel_hlcdc_pwm *chip; + struct atmel_hlcdc_pwm *atmel; struct atmel_hlcdc *hlcdc; int ret; hlcdc = dev_get_drvdata(dev->parent); - chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); - if (!chip) + atmel = devm_kzalloc(dev, sizeof(*atmel), GFP_KERNEL); + if (!atmel) return -ENOMEM; ret = clk_prepare_enable(hlcdc->periph_clk); @@ -260,33 +261,31 @@ match = of_match_node(atmel_hlcdc_dt_ids, dev->parent->of_node); if (match) - chip->errata = match->data; + atmel->errata = match->data; - chip->hlcdc = hlcdc; - chip->chip.ops = &atmel_hlcdc_pwm_ops; - chip->chip.dev = dev; - chip->chip.npwm = 1; + atmel->hlcdc = hlcdc; + atmel->chip.ops = &atmel_hlcdc_pwm_ops; + atmel->chip.dev = dev; + atmel->chip.npwm = 1; - ret = pwmchip_add(&chip->chip); + ret = pwmchip_add(&atmel->chip); if (ret) { clk_disable_unprepare(hlcdc->periph_clk); return ret; } - platform_set_drvdata(pdev, chip); + platform_set_drvdata(pdev, atmel); return 0; } -static int atmel_hlcdc_pwm_remove(struct platform_device *pdev) +static void atmel_hlcdc_pwm_remove(struct platform_device *pdev) { - struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev); + struct atmel_hlcdc_pwm *atmel = platform_get_drvdata(pdev); - pwmchip_remove(&chip->chip); + pwmchip_remove(&atmel->chip); - clk_disable_unprepare(chip->hlcdc->periph_clk); - - return 0; + clk_disable_unprepare(atmel->hlcdc->periph_clk); } static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = { @@ -301,7 +300,7 @@ .pm = &atmel_hlcdc_pwm_pm_ops, }, .probe = atmel_hlcdc_pwm_probe, - .remove = atmel_hlcdc_pwm_remove, + .remove_new = atmel_hlcdc_pwm_remove, }; module_platform_driver(atmel_hlcdc_pwm_driver); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/regulator/pwm-regulator.c +++ linux-aws-5.15-5.15.0/drivers/regulator/pwm-regulator.c @@ -158,6 +158,9 @@ pwm_get_state(drvdata->pwm, &pstate); voltage = pwm_get_relative_duty_cycle(&pstate, duty_unit); + if 
(voltage < min(max_uV_duty, min_uV_duty) || + voltage > max(max_uV_duty, min_uV_duty)) + return -ENOTRECOVERABLE; /* * The dutycycle for min_uV might be greater than the one for max_uV. only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/remoteproc/Kconfig +++ linux-aws-5.15-5.15.0/drivers/remoteproc/Kconfig @@ -276,7 +276,7 @@ config STM32_RPROC tristate "STM32 remoteproc support" - depends on ARCH_STM32 + depends on ARCH_STM32 || COMPILE_TEST depends on REMOTEPROC select MAILBOX help only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/rtc/lib_test.c +++ linux-aws-5.15-5.15.0/drivers/rtc/lib_test.c @@ -54,7 +54,7 @@ days = div_s64(secs, 86400); - #define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \ + #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \ year, month, mday, yday, days KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/s390/block/dasd_devmap.c +++ linux-aws-5.15-5.15.0/drivers/s390/block/dasd_devmap.c @@ -26,7 +26,6 @@ /* This is ugly... */ #define PRINTK_HEADER "dasd_devmap:" -#define DASD_BUS_ID_SIZE 20 #define DASD_MAX_PARAMS 256 #include "dasd_int.h" @@ -50,6 +49,7 @@ unsigned int devindex; unsigned short features; struct dasd_device *device; + struct dasd_copy_relation *copy; }; /* @@ -130,7 +130,7 @@ /* * Read a device busid/devno from a string. */ -static int __init dasd_busid(char *str, int *id0, int *id1, int *devno) +static int dasd_busid(char *str, int *id0, int *id1, int *devno) { unsigned int val; char *tok; @@ -438,16 +438,12 @@ return devmap; } -/* - * Find devmap for device with given bus_id. - */ static struct dasd_devmap * -dasd_find_busid(const char *bus_id) +dasd_find_busid_locked(const char *bus_id) { struct dasd_devmap *devmap, *tmp; int hash; - spin_lock(&dasd_devmap_lock); devmap = ERR_PTR(-ENODEV); hash = dasd_hash_busid(bus_id); list_for_each_entry(tmp, &dasd_hashlists[hash], list) { @@ -456,6 +452,19 @@ break; } } + return devmap; +} + +/* + * Find devmap for device with given bus_id. 
+ */ +static struct dasd_devmap * +dasd_find_busid(const char *bus_id) +{ + struct dasd_devmap *devmap; + + spin_lock(&dasd_devmap_lock); + devmap = dasd_find_busid_locked(bus_id); spin_unlock(&dasd_devmap_lock); return devmap; } @@ -585,6 +594,238 @@ } /* + * allocate a PPRC data structure and call the discipline function to fill + */ +static int dasd_devmap_get_pprc_status(struct dasd_device *device, + struct dasd_pprc_data_sc4 **data) +{ + struct dasd_pprc_data_sc4 *temp; + + if (!device->discipline || !device->discipline->pprc_status) { + dev_warn(&device->cdev->dev, "Unable to query copy relation status\n"); + return -EOPNOTSUPP; + } + temp = kzalloc(sizeof(*temp), GFP_KERNEL); + if (!temp) + return -ENOMEM; + + /* get PPRC information from storage */ + if (device->discipline->pprc_status(device, temp)) { + dev_warn(&device->cdev->dev, "Error during copy relation status query\n"); + kfree(temp); + return -EINVAL; + } + *data = temp; + + return 0; +} + +/* + * find an entry in a PPRC device_info array by a given UID + * depending on the primary/secondary state of the device it has to be + * matched with the respective fields + */ +static int dasd_devmap_entry_from_pprc_data(struct dasd_pprc_data_sc4 *data, + struct dasd_uid uid, + bool primary) +{ + int i; + + for (i = 0; i < DASD_CP_ENTRIES; i++) { + if (primary) { + if (data->dev_info[i].prim_cu_ssid == uid.ssid && + data->dev_info[i].primary == uid.real_unit_addr) + return i; + } else { + if (data->dev_info[i].sec_cu_ssid == uid.ssid && + data->dev_info[i].secondary == uid.real_unit_addr) + return i; + } + } + return -1; +} + +/* + * check the consistency of a specified copy relation by checking + * the following things: + * + * - is the given device part of a copy pair setup + * - does the state of the device match the state in the PPRC status data + * - does the device UID match with the UID in the PPRC status data + * - to prevent misrouted IO check if the given device is present in all + * related PPRC status data + */ +static int dasd_devmap_check_copy_relation(struct dasd_device *device, + struct dasd_copy_entry *entry, + struct dasd_pprc_data_sc4 *data, + struct dasd_copy_relation *copy) +{ + struct dasd_pprc_data_sc4 *tmp_dat; + struct dasd_device *tmp_dev; + struct dasd_uid uid; + int i, j; + + if (!device->discipline || !device->discipline->get_uid || + device->discipline->get_uid(device, &uid)) + return 1; + + i = dasd_devmap_entry_from_pprc_data(data, uid, entry->primary); + if (i < 0) { + dev_warn(&device->cdev->dev, "Device not part of a copy relation\n"); + return 1; + } + + /* double check which role the current device has */ + if (entry->primary) { + if (data->dev_info[i].flags & 0x80) { + dev_warn(&device->cdev->dev, "Copy pair secondary is setup as primary\n"); + return 1; + } + if (data->dev_info[i].prim_cu_ssid != uid.ssid || + data->dev_info[i].primary != uid.real_unit_addr) { + dev_warn(&device->cdev->dev, + "Primary device %s does not match copy pair status primary device %04x\n", + dev_name(&device->cdev->dev), + data->dev_info[i].prim_cu_ssid | + data->dev_info[i].primary); + return 1; + } + } else { + if (!(data->dev_info[i].flags & 0x80)) { + dev_warn(&device->cdev->dev, "Copy pair primary is setup as secondary\n"); + return 1; + } + if (data->dev_info[i].sec_cu_ssid != uid.ssid || + data->dev_info[i].secondary != uid.real_unit_addr) { + dev_warn(&device->cdev->dev, + "Secondary device %s does not match copy pair status secondary device %04x\n", + dev_name(&device->cdev->dev), + 
data->dev_info[i].sec_cu_ssid | + data->dev_info[i].secondary); + return 1; + } + } + + /* + * the current device has to be part of the copy relation of all + * entries to prevent misrouted IO to another copy pair + */ + for (j = 0; j < DASD_CP_ENTRIES; j++) { + if (entry == ©->entry[j]) + tmp_dev = device; + else + tmp_dev = copy->entry[j].device; + + if (!tmp_dev) + continue; + + if (dasd_devmap_get_pprc_status(tmp_dev, &tmp_dat)) + return 1; + + if (dasd_devmap_entry_from_pprc_data(tmp_dat, uid, entry->primary) < 0) { + dev_warn(&tmp_dev->cdev->dev, + "Copy pair relation does not contain device: %s\n", + dev_name(&device->cdev->dev)); + kfree(tmp_dat); + return 1; + } + kfree(tmp_dat); + } + return 0; +} + +/* delete device from copy relation entry */ +static void dasd_devmap_delete_copy_relation_device(struct dasd_device *device) +{ + struct dasd_copy_relation *copy; + int i; + + if (!device->copy) + return; + + copy = device->copy; + for (i = 0; i < DASD_CP_ENTRIES; i++) { + if (copy->entry[i].device == device) + copy->entry[i].device = NULL; + } + dasd_put_device(device); + device->copy = NULL; +} + +/* + * read all required information for a copy relation setup and setup the device + * accordingly + */ +int dasd_devmap_set_device_copy_relation(struct ccw_device *cdev, + bool pprc_enabled) +{ + struct dasd_pprc_data_sc4 *data = NULL; + struct dasd_copy_entry *entry = NULL; + struct dasd_copy_relation *copy; + struct dasd_devmap *devmap; + struct dasd_device *device; + int i, rc = 0; + + devmap = dasd_devmap_from_cdev(cdev); + if (IS_ERR(devmap)) + return PTR_ERR(devmap); + + device = devmap->device; + if (!device) + return -ENODEV; + + copy = devmap->copy; + /* no copy pair setup for this device */ + if (!copy) + goto out; + + rc = dasd_devmap_get_pprc_status(device, &data); + if (rc) + return rc; + + /* print error if PPRC is requested but not enabled on storage server */ + if (!pprc_enabled) { + dev_err(&cdev->dev, "Copy relation not enabled on storage server\n"); + rc = -EINVAL; + goto out; + } + + if (!data->dev_info[0].state) { + dev_warn(&device->cdev->dev, "Copy pair setup requested for device not in copy relation\n"); + rc = -EINVAL; + goto out; + } + /* find entry */ + for (i = 0; i < DASD_CP_ENTRIES; i++) { + if (copy->entry[i].configured && + strncmp(dev_name(&cdev->dev), + copy->entry[i].busid, DASD_BUS_ID_SIZE) == 0) { + entry = ©->entry[i]; + break; + } + } + if (!entry) { + dev_warn(&device->cdev->dev, "Copy relation entry not found\n"); + rc = -EINVAL; + goto out; + } + /* check if the copy relation is valid */ + if (dasd_devmap_check_copy_relation(device, entry, data, copy)) { + dev_warn(&device->cdev->dev, "Copy relation faulty\n"); + rc = -EINVAL; + goto out; + } + + dasd_get_device(device); + copy->entry[i].device = device; + device->copy = copy; +out: + kfree(data); + return rc; +} +EXPORT_SYMBOL_GPL(dasd_devmap_set_device_copy_relation); + +/* * Wait queue for dasd_delete_device waits. */ static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq); @@ -617,6 +858,8 @@ dev_set_drvdata(&device->cdev->dev, NULL); spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); + /* Removve copy relation */ + dasd_devmap_delete_copy_relation_device(device); /* * Drop ref_count by 3, one for the devmap reference, one for * the cdev reference and one for the passed reference. 
@@ -1683,6 +1926,313 @@ static struct kobj_attribute path_fcs_attribute = __ATTR(fc_security, 0444, dasd_path_fcs_show, NULL); +/* + * print copy relation in the form + * primary,secondary[1] primary,secondary[2], ... + */ +static ssize_t +dasd_copy_pair_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char prim_busid[DASD_BUS_ID_SIZE]; + struct dasd_copy_relation *copy; + struct dasd_devmap *devmap; + int len = 0; + int i; + + devmap = dasd_find_busid(dev_name(dev)); + if (IS_ERR(devmap)) + return -ENODEV; + + if (!devmap->copy) + return -ENODEV; + + copy = devmap->copy; + /* find primary */ + for (i = 0; i < DASD_CP_ENTRIES; i++) { + if (copy->entry[i].configured && copy->entry[i].primary) { + strscpy(prim_busid, copy->entry[i].busid, + DASD_BUS_ID_SIZE); + break; + } + } + if (!copy->entry[i].primary) + goto out; + + /* print all secondary */ + for (i = 0; i < DASD_CP_ENTRIES; i++) { + if (copy->entry[i].configured && !copy->entry[i].primary) + len += sysfs_emit_at(buf, len, "%s,%s ", prim_busid, + copy->entry[i].busid); + } + + len += sysfs_emit_at(buf, len, "\n"); +out: + return len; +} + +static int dasd_devmap_set_copy_relation(struct dasd_devmap *devmap, + struct dasd_copy_relation *copy, + char *busid, bool primary) +{ + int i; + + /* find free entry */ + for (i = 0; i < DASD_CP_ENTRIES; i++) { + /* current bus_id already included, nothing to do */ + if (copy->entry[i].configured && + strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0) + return 0; + + if (!copy->entry[i].configured) + break; + } + if (i == DASD_CP_ENTRIES) + return -EINVAL; + + copy->entry[i].configured = true; + strscpy(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE); + if (primary) { + copy->active = ©->entry[i]; + copy->entry[i].primary = true; + } + if (!devmap->copy) + devmap->copy = copy; + + return 0; +} + +static void dasd_devmap_del_copy_relation(struct dasd_copy_relation *copy, + char *busid) +{ + int i; + + spin_lock(&dasd_devmap_lock); + /* find entry */ + for (i = 0; i < DASD_CP_ENTRIES; i++) { + if (copy->entry[i].configured && + strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0) + break; + } + if (i == DASD_CP_ENTRIES || !copy->entry[i].configured) { + spin_unlock(&dasd_devmap_lock); + return; + } + + copy->entry[i].configured = false; + memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE); + if (copy->active == ©->entry[i]) { + copy->active = NULL; + copy->entry[i].primary = false; + } + spin_unlock(&dasd_devmap_lock); +} + +static int dasd_devmap_clear_copy_relation(struct device *dev) +{ + struct dasd_copy_relation *copy; + struct dasd_devmap *devmap; + int i, rc = 1; + + devmap = dasd_devmap_from_cdev(to_ccwdev(dev)); + if (IS_ERR(devmap)) + return 1; + + spin_lock(&dasd_devmap_lock); + if (!devmap->copy) + goto out; + + copy = devmap->copy; + /* first check if all secondary devices are offline*/ + for (i = 0; i < DASD_CP_ENTRIES; i++) { + if (!copy->entry[i].configured) + continue; + + if (copy->entry[i].device == copy->active->device) + continue; + + if (copy->entry[i].device) + goto out; + } + /* clear all devmap entries */ + for (i = 0; i < DASD_CP_ENTRIES; i++) { + if (strlen(copy->entry[i].busid) == 0) + continue; + if (copy->entry[i].device) { + dasd_put_device(copy->entry[i].device); + copy->entry[i].device->copy = NULL; + copy->entry[i].device = NULL; + } + devmap = dasd_find_busid_locked(copy->entry[i].busid); + devmap->copy = NULL; + memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE); + } + kfree(copy); + rc = 0; +out: + 
spin_unlock(&dasd_devmap_lock); + return rc; +} + +/* + * parse BUSIDs from a copy pair + */ +static int dasd_devmap_parse_busid(const char *buf, char *prim_busid, + char *sec_busid) +{ + char *primary, *secondary, *tmp, *pt; + int id0, id1, id2; + + pt = kstrdup(buf, GFP_KERNEL); + tmp = pt; + if (!tmp) + return -ENOMEM; + + primary = strsep(&tmp, ","); + if (!primary) { + kfree(pt); + return -EINVAL; + } + secondary = strsep(&tmp, ","); + if (!secondary) { + kfree(pt); + return -EINVAL; + } + if (dasd_busid(primary, &id0, &id1, &id2)) { + kfree(pt); + return -EINVAL; + } + sprintf(prim_busid, "%01x.%01x.%04x", id0, id1, id2); + if (dasd_busid(secondary, &id0, &id1, &id2)) { + kfree(pt); + return -EINVAL; + } + sprintf(sec_busid, "%01x.%01x.%04x", id0, id1, id2); + kfree(pt); + + return 0; +} + +static ssize_t dasd_copy_pair_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct dasd_devmap *prim_devmap, *sec_devmap; + char prim_busid[DASD_BUS_ID_SIZE]; + char sec_busid[DASD_BUS_ID_SIZE]; + struct dasd_copy_relation *copy; + struct dasd_device *device; + bool pprc_enabled; + int rc; + + if (strncmp(buf, "clear", strlen("clear")) == 0) { + if (dasd_devmap_clear_copy_relation(dev)) + return -EINVAL; + return count; + } + + rc = dasd_devmap_parse_busid(buf, prim_busid, sec_busid); + if (rc) + return rc; + + if (strncmp(dev_name(dev), prim_busid, DASD_BUS_ID_SIZE) != 0 && + strncmp(dev_name(dev), sec_busid, DASD_BUS_ID_SIZE) != 0) + return -EINVAL; + + /* allocate primary devmap if needed */ + prim_devmap = dasd_find_busid(prim_busid); + if (IS_ERR(prim_devmap)) + prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT); + + /* allocate secondary devmap if needed */ + sec_devmap = dasd_find_busid(sec_busid); + if (IS_ERR(sec_devmap)) + sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT); + + /* setting copy relation is only allowed for offline secondary */ + if (sec_devmap->device) + return -EINVAL; + + if (prim_devmap->copy) { + copy = prim_devmap->copy; + } else if (sec_devmap->copy) { + copy = sec_devmap->copy; + } else { + copy = kzalloc(sizeof(*copy), GFP_KERNEL); + if (!copy) + return -ENOMEM; + } + spin_lock(&dasd_devmap_lock); + rc = dasd_devmap_set_copy_relation(prim_devmap, copy, prim_busid, true); + if (rc) { + spin_unlock(&dasd_devmap_lock); + return rc; + } + rc = dasd_devmap_set_copy_relation(sec_devmap, copy, sec_busid, false); + if (rc) { + spin_unlock(&dasd_devmap_lock); + return rc; + } + spin_unlock(&dasd_devmap_lock); + + /* if primary device is already online call device setup directly */ + if (prim_devmap->device && !prim_devmap->device->copy) { + device = prim_devmap->device; + if (device->discipline->pprc_enabled) { + pprc_enabled = device->discipline->pprc_enabled(device); + rc = dasd_devmap_set_device_copy_relation(device->cdev, + pprc_enabled); + } else { + rc = -EOPNOTSUPP; + } + } + if (rc) { + dasd_devmap_del_copy_relation(copy, prim_busid); + dasd_devmap_del_copy_relation(copy, sec_busid); + count = rc; + } + + return count; +} +static DEVICE_ATTR(copy_pair, 0644, dasd_copy_pair_show, + dasd_copy_pair_store); + +static ssize_t +dasd_copy_role_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dasd_copy_relation *copy; + struct dasd_device *device; + int len, i; + + device = dasd_device_from_cdev(to_ccwdev(dev)); + if (IS_ERR(device)) + return -ENODEV; + + if (!device->copy) { + len = sysfs_emit(buf, "none\n"); + goto out; + } + copy = device->copy; + /* only the active 
device is primary */ + if (copy->active->device == device) { + len = sysfs_emit(buf, "primary\n"); + goto out; + } + for (i = 0; i < DASD_CP_ENTRIES; i++) { + if (copy->entry[i].device == device) { + len = sysfs_emit(buf, "secondary\n"); + goto out; + } + } + /* not in the list, no COPY role */ + len = sysfs_emit(buf, "none\n"); +out: + dasd_put_device(device); + return len; +} +static DEVICE_ATTR(copy_role, 0444, dasd_copy_role_show, NULL); + #define DASD_DEFINE_ATTR(_name, _func) \ static ssize_t dasd_##_name##_show(struct device *dev, \ struct device_attribute *attr, \ @@ -1739,6 +2289,8 @@ &dev_attr_hpf.attr, &dev_attr_ese.attr, &dev_attr_fc_security.attr, + &dev_attr_copy_pair.attr, + &dev_attr_copy_role.attr, NULL, }; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/s390/block/dasd_eckd.h +++ linux-aws-5.15-5.15.0/drivers/s390/block/dasd_eckd.h @@ -66,10 +66,16 @@ * Perform Subsystem Function / Sub-Orders */ #define PSF_SUBORDER_QHA 0x1C /* Query Host Access */ +#define PSF_SUBORDER_PPRCEQ 0x50 /* PPRC Extended Query */ #define PSF_SUBORDER_VSQ 0x52 /* Volume Storage Query */ #define PSF_SUBORDER_LCQ 0x53 /* Logical Configuration Query */ /* + * PPRC Extended Query Scopes + */ +#define PPRCEQ_SCOPE_4 0x04 /* Scope 4 for PPRC Extended Query */ + +/* * CUIR response condition codes */ #define PSF_CUIR_INVALID 0x00 @@ -261,7 +267,7 @@ unsigned char reserved3:8; unsigned char defect_wr:1; unsigned char XRC_supported:1; - unsigned char reserved4:1; + unsigned char PPRC_enabled:1; unsigned char striping:1; unsigned char reserved5:4; unsigned char cfw:1; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/s390/block/dasd_eer.c +++ linux-aws-5.15-5.15.0/drivers/s390/block/dasd_eer.c @@ -387,6 +387,7 @@ break; case DASD_EER_NOPATH: case DASD_EER_NOSPC: + case DASD_EER_AUTOQUIESCE: dasd_eer_write_standard_trigger(device, NULL, id); break; case DASD_EER_STATECHANGE: only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/scsi/bfa/bfa.h +++ linux-aws-5.15-5.15.0/drivers/scsi/bfa/bfa.h @@ -20,7 +20,6 @@ struct bfa_s; typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); -typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status); /* * Interrupt message handlers @@ -435,6 +434,14 @@ (__qe)->hcb_qe.cbarg = (__cbarg); \ (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \ (__qe)->data = (__data); \ +} while (0) + +#define bfa_pending_q_init_status(__qe, __cbfn, __cbarg, __data) do { \ + bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \ + (__qe)->hcb_qe.cbfn_status = (__cbfn); \ + (__qe)->hcb_qe.cbarg = (__cbarg); \ + (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \ + (__qe)->data = (__data); \ } while (0) #endif /* __BFA_H__ */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/scsi/bfa/bfa_core.c +++ linux-aws-5.15-5.15.0/drivers/scsi/bfa/bfa_core.c @@ -1907,15 +1907,13 @@ struct list_head *qe; struct list_head *qen; struct bfa_cb_qe_s *hcb_qe; - bfa_cb_cbfn_status_t cbfn; list_for_each_safe(qe, qen, comp_q) { hcb_qe = (struct bfa_cb_qe_s *) qe; if (hcb_qe->pre_rmv) { /* qe is invalid after return, dequeue before cbfn() */ list_del(qe); - cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn); - cbfn(hcb_qe->cbarg, hcb_qe->fw_status); + hcb_qe->cbfn_status(hcb_qe->cbarg, hcb_qe->fw_status); } else hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE); } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/scsi/bfa/bfa_ioc.h +++ linux-aws-5.15-5.15.0/drivers/scsi/bfa/bfa_ioc.h @@ -361,14 +361,18 @@ void *cbarg; }; -typedef void (*bfa_cb_cbfn_t) (void *cbarg, 
bfa_boolean_t complete); +typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); +typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status); /* * Generic BFA callback element. */ struct bfa_cb_qe_s { struct list_head qe; - bfa_cb_cbfn_t cbfn; + union { + bfa_cb_cbfn_status_t cbfn_status; + bfa_cb_cbfn_t cbfn; + }; bfa_boolean_t once; bfa_boolean_t pre_rmv; /* set for stack based qe(s) */ bfa_status_t fw_status; /* to access fw status in comp proc */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/scsi/bfa/bfad_bsg.c +++ linux-aws-5.15-5.15.0/drivers/scsi/bfa/bfad_bsg.c @@ -2135,8 +2135,7 @@ struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, - &fcomp, &iocmd->stats); + bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe); spin_unlock_irqrestore(&bfad->bfad_lock, flags); @@ -2159,7 +2158,7 @@ struct bfa_cb_pending_q_s cb_qe; init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL); + bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe); @@ -2443,8 +2442,7 @@ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, - &fcomp, &iocmd->stats); + bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats); spin_lock_irqsave(&bfad->bfad_lock, flags); WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); @@ -2474,8 +2472,7 @@ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); init_completion(&fcomp.comp); - bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, - &fcomp, NULL); + bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL); spin_lock_irqsave(&bfad->bfad_lock, flags); WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc)); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/scsi/csiostor/csio_defs.h +++ linux-aws-5.15-5.15.0/drivers/scsi/csiostor/csio_defs.h @@ -73,7 +73,21 @@ #define csio_list_prev(elem) (((struct list_head *)(elem))->prev) /* State machine */ -typedef void (*csio_sm_state_t)(void *, uint32_t); +struct csio_lnode; + +/* State machine evets */ +enum csio_ln_ev { + CSIO_LNE_NONE = (uint32_t)0, + CSIO_LNE_LINKUP, + CSIO_LNE_FAB_INIT_DONE, + CSIO_LNE_LINK_DOWN, + CSIO_LNE_DOWN_LINK, + CSIO_LNE_LOGO, + CSIO_LNE_CLOSE, + CSIO_LNE_MAX_EVENT, +}; + +typedef void (*csio_sm_state_t)(struct csio_lnode *ln, enum csio_ln_ev evt); struct csio_sm { struct list_head sm_list; @@ -83,7 +97,7 @@ static inline void csio_set_state(void *smp, void *state) { - ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state; + ((struct csio_sm *)smp)->sm_state = state; } static inline void only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/scsi/csiostor/csio_lnode.h +++ linux-aws-5.15-5.15.0/drivers/scsi/csiostor/csio_lnode.h @@ -53,19 +53,6 @@ extern int csio_fcoe_rnodes; extern int csio_fdmi_enable; -/* State machine evets */ -enum csio_ln_ev { - CSIO_LNE_NONE = (uint32_t)0, - CSIO_LNE_LINKUP, - CSIO_LNE_FAB_INIT_DONE, - CSIO_LNE_LINK_DOWN, - CSIO_LNE_DOWN_LINK, - CSIO_LNE_LOGO, - CSIO_LNE_CLOSE, - CSIO_LNE_MAX_EVENT, -}; - - struct csio_fcf_info { struct list_head list; uint8_t priority; only in patch2: unchanged: --- 
linux-aws-5.15-5.15.0.orig/drivers/soc/renesas/r8a77980-sysc.c +++ linux-aws-5.15-5.15.0/drivers/soc/renesas/r8a77980-sysc.c @@ -25,7 +25,8 @@ PD_CPU_NOCR }, { "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU, PD_CPU_NOCR }, - { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON }, + { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON, + PD_CPU_NOCR }, { "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON }, { "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR }, { "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR }, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/spi/spi-hisi-sfc-v3xx.c +++ linux-aws-5.15-5.15.0/drivers/spi/spi-hisi-sfc-v3xx.c @@ -377,6 +377,11 @@ static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data) { struct hisi_sfc_v3xx_host *host = data; + u32 reg; + + reg = readl(host->regbase + HISI_SFC_V3XX_INT_STAT); + if (!reg) + return IRQ_NONE; hisi_sfc_v3xx_disable_int(host); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/staging/greybus/light.c +++ linux-aws-5.15-5.15.0/drivers/staging/greybus/light.c @@ -100,15 +100,15 @@ static struct gb_channel *get_channel_from_mode(struct gb_light *light, u32 mode) { - struct gb_channel *channel = NULL; + struct gb_channel *channel; int i; for (i = 0; i < light->channels_count; i++) { channel = &light->channels[i]; - if (channel && channel->mode == mode) - break; + if (channel->mode == mode) + return channel; } - return channel; + return NULL; } static int __gb_lights_flash_intensity_set(struct gb_channel *channel, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/staging/media/imx/imx-media-csc-scaler.c +++ linux-aws-5.15-5.15.0/drivers/staging/media/imx/imx-media-csc-scaler.c @@ -803,6 +803,7 @@ dev_dbg(priv->dev, "Releasing instance %p\n", ctx); + v4l2_ctrl_handler_free(&ctx->ctrl_hdlr); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/usb/cdns3/drd.h +++ linux-aws-5.15-5.15.0/drivers/usb/cdns3/drd.h @@ -79,7 +79,11 @@ __le32 susp_timing_ctrl; }; -#define OTG_CDNSP_DID 0x0004034E +/* CDNSP driver supports 0x000403xx Cadence USB controller family. */ +#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300) + +/* CDNS3 driver supports 0x000402xx Cadence USB controller family. */ +#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200) /* * Common registers interface for both CDNS3 and CDNSP version of DRD. 
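
Aside (not part of the patch): the drd.h change above replaces an exact device-ID match with a family check that masks out the low byte of the DID. A small standalone sketch of that comparison, using a user-space stand-in for the kernel's GENMASK() and a few sample DIDs (the last two values are invented for illustration; only 0x0004034E comes from the removed define):

#include <stdio.h>
#include <stdint.h>

/* user-space stand-in for the kernel's GENMASK(), 32-bit only */
#define GENMASK(h, l) ((~0U << (l)) & (~0U >> (31 - (h))))

#define OTG_CDNSP_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040300)
#define OTG_CDNS3_CHECK_DID(did) (((did) & GENMASK(31, 8)) == 0x00040200)

int main(void)
{
	const uint32_t dids[] = { 0x0004034E, 0x00040310, 0x00040208 };

	for (unsigned int i = 0; i < sizeof(dids) / sizeof(dids[0]); i++)
		printf("0x%08X -> cdnsp=%d cdns3=%d\n", dids[i],
		       OTG_CDNSP_CHECK_DID(dids[i]),
		       OTG_CDNS3_CHECK_DID(dids[i]));
	return 0;
}
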
only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/usb/gadget/udc/net2272.c +++ linux-aws-5.15-5.15.0/drivers/usb/gadget/udc/net2272.c @@ -2650,7 +2650,7 @@ goto err_req; } - ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW); + ret = net2272_probe_fin(dev, irqflags); if (ret) goto err_io; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/vdpa/mlx5/core/mr.c +++ linux-aws-5.15-5.15.0/drivers/vdpa/mlx5/core/mr.c @@ -505,7 +505,6 @@ else destroy_dma_mr(mvdev, mr); - memset(mr, 0, sizeof(*mr)); mr->initialized = false; out: mutex_unlock(&mr->mkey_mtx); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/video/backlight/da9052_bl.c +++ linux-aws-5.15-5.15.0/drivers/video/backlight/da9052_bl.c @@ -117,6 +117,7 @@ wleds->led_reg = platform_get_device_id(pdev)->driver_data; wleds->state = DA9052_WLEDS_OFF; + memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = DA9052_MAX_BRIGHTNESS; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/video/backlight/lm3630a_bl.c +++ linux-aws-5.15-5.15.0/drivers/video/backlight/lm3630a_bl.c @@ -231,7 +231,7 @@ if (rval < 0) goto out_i2c_err; brightness |= rval; - goto out; + return brightness; } /* disable sleep */ @@ -242,11 +242,8 @@ rval = lm3630a_read(pchip, REG_BRT_A); if (rval < 0) goto out_i2c_err; - brightness = rval; + return rval; -out: - bl->props.brightness = brightness; - return bl->props.brightness; out_i2c_err: dev_err(pchip->dev, "i2c failed to access register\n"); return 0; @@ -306,7 +303,7 @@ if (rval < 0) goto out_i2c_err; brightness |= rval; - goto out; + return brightness; } /* disable sleep */ @@ -317,11 +314,8 @@ rval = lm3630a_read(pchip, REG_BRT_B); if (rval < 0) goto out_i2c_err; - brightness = rval; + return rval; -out: - bl->props.brightness = brightness; - return bl->props.brightness; out_i2c_err: dev_err(pchip->dev, "i2c failed to access register\n"); return 0; @@ -339,6 +333,7 @@ struct backlight_properties props; const char *label; + memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) { props.brightness = pdata->leda_init_brt; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/video/backlight/lm3639_bl.c +++ linux-aws-5.15-5.15.0/drivers/video/backlight/lm3639_bl.c @@ -339,6 +339,7 @@ } /* backlight */ + memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.brightness = pdata->init_brt_led; props.max_brightness = pdata->max_brt_led; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/video/backlight/lp8788_bl.c +++ linux-aws-5.15-5.15.0/drivers/video/backlight/lp8788_bl.c @@ -191,6 +191,7 @@ int init_brt; char *name; + memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = MAX_BRIGHTNESS; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/video/fbdev/savage/savagefb_driver.c +++ linux-aws-5.15-5.15.0/drivers/video/fbdev/savage/savagefb_driver.c @@ -868,6 +868,9 @@ DBG("savagefb_check_var"); + if (!var->pixclock) + return -EINVAL; + var->transp.offset = 0; var->transp.length = 0; switch (var->bits_per_pixel) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/video/fbdev/sis/sis_main.c +++ linux-aws-5.15-5.15.0/drivers/video/fbdev/sis/sis_main.c @@ -1474,6 +1474,8 @@ vtotal = var->upper_margin + var->lower_margin + var->vsync_len; + if (!var->pixclock) + return -EINVAL; pixclock = var->pixclock; 
if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/drivers/watchdog/stm32_iwdg.c +++ linux-aws-5.15-5.15.0/drivers/watchdog/stm32_iwdg.c @@ -21,6 +21,8 @@ #include #include +#define DEFAULT_TIMEOUT 10 + /* IWDG registers */ #define IWDG_KR 0x00 /* Key register */ #define IWDG_PR 0x04 /* Prescaler Register */ @@ -251,6 +253,7 @@ wdd->parent = dev; wdd->info = &stm32_iwdg_info; wdd->ops = &stm32_iwdg_ops; + wdd->timeout = DEFAULT_TIMEOUT; wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate); wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler * 1000) / wdt->rate; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/fs/cifs/smb2file.c +++ linux-aws-5.15-5.15.0/fs/cifs/smb2file.c @@ -61,7 +61,6 @@ nr_ioctl_req.Reserved = 0; rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, - true /* is_fsctl */, (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), CIFSMaxBufSize, NULL, NULL /* no return info */); if (rc == -EOPNOTSUPP) { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/fs/cifs/smb2proto.h +++ linux-aws-5.15-5.15.0/fs/cifs/smb2proto.h @@ -144,13 +144,13 @@ extern void SMB2_open_free(struct smb_rqst *rqst); extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen, + char *in_data, u32 indatalen, u32 maxoutlen, char **out_data, u32 *plen /* returned data len */); extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, struct smb_rqst *rqst, u64 persistent_fid, u64 volatile_fid, u32 opcode, - bool is_fsctl, char *in_data, u32 indatalen, + char *in_data, u32 indatalen, __u32 max_response_size); extern void SMB2_ioctl_free(struct smb_rqst *rqst); extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, @@ -259,11 +259,13 @@ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *, enum securityEnum); -extern void smb2_parse_contexts(struct TCP_Server_Info *server, - struct smb2_create_rsp *rsp, - unsigned int *epoch, char *lease_key, - __u8 *oplock, struct smb2_file_all_info *buf, - struct create_posix_rsp *posix); +int smb2_parse_contexts(struct TCP_Server_Info *server, + struct kvec *rsp_iov, + unsigned int *epoch, + char *lease_key, __u8 *oplock, + struct smb2_file_all_info *buf, + struct create_posix_rsp *posix); + extern int smb3_encryption_required(const struct cifs_tcon *tcon); extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length, struct kvec *iov, unsigned int min_buf_size); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/fs/fhandle.c +++ linux-aws-5.15-5.15.0/fs/fhandle.c @@ -37,7 +37,7 @@ if (f_handle.handle_bytes > MAX_HANDLE_SZ) return -EINVAL; - handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes, + handle = kzalloc(sizeof(struct file_handle) + f_handle.handle_bytes, GFP_KERNEL); if (!handle) return -ENOMEM; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/fs/nfs/nfs42.h +++ linux-aws-5.15-5.15.0/fs/nfs/nfs42.h @@ -54,11 +54,14 @@ * They would be 7 bytes long in the eventual buffer ("user.x\0"), and * 8 bytes long XDR-encoded. * - * Include the trailing eof word as well. + * Include the trailing eof word as well and make the result a multiple + * of 4 bytes. 
*/ static inline u32 nfs42_listxattr_xdrsize(u32 buflen) { - return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4; + u32 size = 8 * buflen / (XATTR_USER_PREFIX_LEN + 2) + 4; + + return (size + 3) & ~3; } #endif /* CONFIG_NFS_V4_2 */ #endif /* __LINUX_FS_NFS_NFS4_2_H */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/fs/nfs/nfsroot.c +++ linux-aws-5.15-5.15.0/fs/nfs/nfsroot.c @@ -175,10 +175,10 @@ size_t len = strlen(dest); if (len && dest[len - 1] != ',') - if (strlcat(dest, ",", destlen) > destlen) + if (strlcat(dest, ",", destlen) >= destlen) return -1; - if (strlcat(dest, src, destlen) > destlen) + if (strlcat(dest, src, destlen) >= destlen) return -1; return 0; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/fs/proc/array.c +++ linux-aws-5.15-5.15.0/fs/proc/array.c @@ -495,7 +495,7 @@ sigemptyset(&sigign); sigemptyset(&sigcatch); - cutime = cstime = utime = stime = 0; + cutime = cstime = 0; cgtime = gtime = 0; if (lock_task_sighand(task, &flags)) { @@ -529,7 +529,6 @@ min_flt += sig->min_flt; maj_flt += sig->maj_flt; - thread_group_cputime_adjusted(task, &utime, &stime); gtime += sig->gtime; } @@ -541,11 +540,14 @@ } if (permitted && (!whole || num_threads < 2)) - wchan = get_wchan(task); - if (!whole) { + wchan = !task_is_running(task); + + if (whole) { + thread_group_cputime_adjusted(task, &utime, &stime); + } else { + task_cputime_adjusted(task, &utime, &stime); min_flt = task->min_flt; maj_flt = task->maj_flt; - task_cputime_adjusted(task, &utime, &stime); gtime = task_gtime(task); } @@ -606,10 +608,7 @@ * * This works with older implementations of procps as well. */ - if (wchan) - seq_puts(m, " 1"); - else - seq_puts(m, " 0"); + seq_put_decimal_ull(m, " ", wchan); seq_put_decimal_ull(m, " ", 0); seq_put_decimal_ull(m, " ", 0); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/drm/drm_fixed.h +++ linux-aws-5.15-5.15.0/include/drm/drm_fixed.h @@ -88,7 +88,7 @@ static inline int drm_fixp2int_ceil(s64 a) { - if (a > 0) + if (a >= 0) return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE); else return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/drm/ttm/ttm_resource.h +++ linux-aws-5.15-5.15.0/include/drm/ttm/ttm_resource.h @@ -262,6 +262,9 @@ void ttm_resource_init(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource *res); +void ttm_resource_fini(struct ttm_resource_manager *man, + struct ttm_resource *res); + int ttm_resource_alloc(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource **res); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/dt-bindings/clock/imx8mp-clock.h +++ linux-aws-5.15-5.15.0/include/dt-bindings/clock/imx8mp-clock.h @@ -318,8 +318,16 @@ #define IMX8MP_CLK_AUDIO_AXI 310 #define IMX8MP_CLK_HSIO_AXI 311 #define IMX8MP_CLK_MEDIA_ISP 312 +#define IMX8MP_CLK_MEDIA_DISP2_PIX 313 +#define IMX8MP_CLK_CLKOUT1_SEL 314 +#define IMX8MP_CLK_CLKOUT1_DIV 315 +#define IMX8MP_CLK_CLKOUT1 316 +#define IMX8MP_CLK_CLKOUT2_SEL 317 +#define IMX8MP_CLK_CLKOUT2_DIV 318 +#define IMX8MP_CLK_CLKOUT2 319 +#define IMX8MP_CLK_USB_SUSP 320 -#define IMX8MP_CLK_END 313 +#define IMX8MP_CLK_END 321 #define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0 #define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1 only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/ahci_platform.h +++ linux-aws-5.15-5.15.0/include/linux/ahci_platform.h @@ -23,6 +23,8 @@ void ahci_platform_disable_phys(struct ahci_host_priv *hpriv); int ahci_platform_enable_clks(struct 
ahci_host_priv *hpriv); void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); +int ahci_platform_deassert_rsts(struct ahci_host_priv *hpriv); +int ahci_platform_assert_rsts(struct ahci_host_priv *hpriv); int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv); void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv); int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); @@ -41,6 +43,7 @@ int ahci_platform_suspend(struct device *dev); int ahci_platform_resume(struct device *dev); -#define AHCI_PLATFORM_GET_RESETS 0x01 +#define AHCI_PLATFORM_GET_RESETS BIT(0) +#define AHCI_PLATFORM_RST_TRIGGER BIT(1) #endif /* _AHCI_PLATFORM_H */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/igmp.h +++ linux-aws-5.15-5.15.0/include/linux/igmp.h @@ -118,9 +118,9 @@ struct ip_mreq_source *mreqs, int ifindex); extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex); extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, - struct ip_msfilter __user *optval, int __user *optlen); + sockptr_t optval, sockptr_t optlen); extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, - struct sockaddr_storage __user *p); + sockptr_t optval, size_t offset); extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif, int sdif); extern void ip_mc_init_dev(struct in_device *); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/io_uring.h +++ linux-aws-5.15-5.15.0/include/linux/io_uring.h @@ -6,9 +6,9 @@ #include #if defined(CONFIG_IO_URING) -struct sock *io_uring_get_socket(struct file *file); void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); +bool io_is_uring_fops(struct file *file); static inline void io_uring_files_cancel(void) { @@ -26,10 +26,6 @@ __io_uring_free(tsk); } #else -static inline struct sock *io_uring_get_socket(struct file *file) -{ - return NULL; -} static inline void io_uring_task_cancel(void) { } @@ -39,6 +35,10 @@ static inline void io_uring_free(struct task_struct *tsk) { } +static inline bool io_is_uring_fops(struct file *file) +{ + return false; +} #endif #endif only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/mlx5/qp.h +++ linux-aws-5.15-5.15.0/include/linux/mlx5/qp.h @@ -261,7 +261,10 @@ union { struct { __be16 sz; - u8 start[2]; + union { + u8 start[2]; + DECLARE_FLEX_ARRAY(u8, data); + }; } inline_hdr; struct { __be16 type; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/mroute.h +++ linux-aws-5.15-5.15.0/include/linux/mroute.h @@ -17,7 +17,7 @@ } int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); -int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); +int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t); int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); int ip_mr_init(void); @@ -29,8 +29,8 @@ return -ENOPROTOOPT; } -static inline int ip_mroute_getsockopt(struct sock *sock, int optname, - char __user *optval, int __user *optlen) +static inline int ip_mroute_getsockopt(struct sock *sk, int optname, + sockptr_t optval, sockptr_t optlen) { return -ENOPROTOOPT; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/pm.h +++ linux-aws-5.15-5.15.0/include/linux/pm.h @@ -300,47 +300,59 @@ int (*runtime_idle)(struct device *dev); }; +#define SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend = 
pm_sleep_ptr(suspend_fn), \ + .resume = pm_sleep_ptr(resume_fn), \ + .freeze = pm_sleep_ptr(suspend_fn), \ + .thaw = pm_sleep_ptr(resume_fn), \ + .poweroff = pm_sleep_ptr(suspend_fn), \ + .restore = pm_sleep_ptr(resume_fn), + +#define LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend_late = pm_sleep_ptr(suspend_fn), \ + .resume_early = pm_sleep_ptr(resume_fn), \ + .freeze_late = pm_sleep_ptr(suspend_fn), \ + .thaw_early = pm_sleep_ptr(resume_fn), \ + .poweroff_late = pm_sleep_ptr(suspend_fn), \ + .restore_early = pm_sleep_ptr(resume_fn), + +#define NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + .suspend_noirq = pm_sleep_ptr(suspend_fn), \ + .resume_noirq = pm_sleep_ptr(resume_fn), \ + .freeze_noirq = pm_sleep_ptr(suspend_fn), \ + .thaw_noirq = pm_sleep_ptr(resume_fn), \ + .poweroff_noirq = pm_sleep_ptr(suspend_fn), \ + .restore_noirq = pm_sleep_ptr(resume_fn), + +#define RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ + .runtime_suspend = suspend_fn, \ + .runtime_resume = resume_fn, \ + .runtime_idle = idle_fn, + #ifdef CONFIG_PM_SLEEP #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ - .suspend = suspend_fn, \ - .resume = resume_fn, \ - .freeze = suspend_fn, \ - .thaw = resume_fn, \ - .poweroff = suspend_fn, \ - .restore = resume_fn, + SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #else #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #endif #ifdef CONFIG_PM_SLEEP #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ - .suspend_late = suspend_fn, \ - .resume_early = resume_fn, \ - .freeze_late = suspend_fn, \ - .thaw_early = resume_fn, \ - .poweroff_late = suspend_fn, \ - .restore_early = resume_fn, + LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #else #define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #endif #ifdef CONFIG_PM_SLEEP #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ - .suspend_noirq = suspend_fn, \ - .resume_noirq = resume_fn, \ - .freeze_noirq = suspend_fn, \ - .thaw_noirq = resume_fn, \ - .poweroff_noirq = suspend_fn, \ - .restore_noirq = resume_fn, + NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #else #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) #endif #ifdef CONFIG_PM #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ - .runtime_suspend = suspend_fn, \ - .runtime_resume = resume_fn, \ - .runtime_idle = idle_fn, + RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) #else #define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) #endif @@ -349,9 +361,9 @@ * Use this if you want to use the same suspend and resume callbacks for suspend * to RAM and hibernation. */ -#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ -const struct dev_pm_ops __maybe_unused name = { \ - SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ +#define DEFINE_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ +const struct dev_pm_ops name = { \ + SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } /* @@ -367,17 +379,27 @@ * .resume_early(), to the same routines as .runtime_suspend() and * .runtime_resume(), respectively (and analogously for hibernation). */ +#define DEFINE_UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ +static const struct dev_pm_ops name = { \ + SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ + RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ +} + +/* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */ +#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ +const struct dev_pm_ops __maybe_unused name = { \ + SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ +} + +/* Deprecated. 
Use DEFINE_UNIVERSAL_DEV_PM_OPS() instead. */ #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ const struct dev_pm_ops __maybe_unused name = { \ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ } -#ifdef CONFIG_PM -#define pm_ptr(_ptr) (_ptr) -#else -#define pm_ptr(_ptr) NULL -#endif +#define pm_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr)) +#define pm_sleep_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr)) /* * PM_EVENT_ messages only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/poll.h +++ linux-aws-5.15-5.15.0/include/linux/poll.h @@ -16,11 +16,7 @@ extern struct ctl_table epoll_table[]; /* for sysctl */ /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating additional memory. */ -#ifdef __clang__ -#define MAX_STACK_ALLOC 768 -#else #define MAX_STACK_ALLOC 832 -#endif #define FRONTEND_STACK_ALLOC 256 #define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC #define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/regmap.h +++ linux-aws-5.15-5.15.0/include/linux/regmap.h @@ -290,6 +290,17 @@ * read operation on a bus such as SPI, I2C, etc. Most of the * devices do not need this. * @reg_write: Same as above for writing. + * @reg_update_bits: Optional callback that if filled will be used to perform + * all the update_bits(rmw) operation. Should only be provided + * if the function require special handling with lock and reg + * handling and the operation cannot be represented as a simple + * update_bits operation on a bus such as SPI, I2C, etc. + * @read: Optional callback that if filled will be used to perform all the + * bulk reads from the registers. Data is returned in the buffer used + * to transmit data. + * @write: Same as above for writing. + * @max_raw_read: Max raw read size that can be used on the device. + * @max_raw_write: Max raw write size that can be used on the device. * @fast_io: Register IO is fast. Use a spinlock instead of a mutex * to perform locking. This field is ignored if custom lock/unlock * functions are used (see fields lock/unlock of struct regmap_config). 
@@ -372,6 +383,14 @@ int (*reg_read)(void *context, unsigned int reg, unsigned int *val); int (*reg_write)(void *context, unsigned int reg, unsigned int val); + int (*reg_update_bits)(void *context, unsigned int reg, + unsigned int mask, unsigned int val); + /* Bulk read/write */ + int (*read)(void *context, const void *reg_buf, size_t reg_size, + void *val_buf, size_t val_size); + int (*write)(void *context, const void *data, size_t count); + size_t max_raw_read; + size_t max_raw_write; bool fast_io; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/socket.h +++ linux-aws-5.15-5.15.0/include/linux/socket.h @@ -31,7 +31,10 @@ struct sockaddr { sa_family_t sa_family; /* address family, AF_xxx */ - char sa_data[14]; /* 14 bytes of protocol address */ + union { + char sa_data_min[14]; /* Minimum 14 bytes of protocol address */ + DECLARE_FLEX_ARRAY(char, sa_data); + }; }; struct linger { only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/linux/sockptr.h +++ linux-aws-5.15-5.15.0/include/linux/sockptr.h @@ -64,6 +64,11 @@ return 0; } +static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size) +{ + return copy_to_sockptr_offset(dst, 0, src, size); +} + static inline void *memdup_sockptr(sockptr_t src, size_t len) { void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/trace/events/qdisc.h +++ linux-aws-5.15-5.15.0/include/trace/events/qdisc.h @@ -81,14 +81,14 @@ TP_ARGS(q), TP_STRUCT__entry( - __string( dev, qdisc_dev(q) ) - __string( kind, q->ops->id ) - __field( u32, parent ) - __field( u32, handle ) + __string( dev, qdisc_dev(q)->name ) + __string( kind, q->ops->id ) + __field( u32, parent ) + __field( u32, handle ) ), TP_fast_assign( - __assign_str(dev, qdisc_dev(q)); + __assign_str(dev, qdisc_dev(q)->name); __assign_str(kind, q->ops->id); __entry->parent = q->parent; __entry->handle = q->handle; @@ -106,14 +106,14 @@ TP_ARGS(q), TP_STRUCT__entry( - __string( dev, qdisc_dev(q) ) - __string( kind, q->ops->id ) - __field( u32, parent ) - __field( u32, handle ) + __string( dev, qdisc_dev(q)->name ) + __string( kind, q->ops->id ) + __field( u32, parent ) + __field( u32, handle ) ), TP_fast_assign( - __assign_str(dev, qdisc_dev(q)); + __assign_str(dev, qdisc_dev(q)->name); __assign_str(kind, q->ops->id); __entry->parent = q->parent; __entry->handle = q->handle; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/uapi/linux/in6.h +++ linux-aws-5.15-5.15.0/include/uapi/linux/in6.h @@ -145,7 +145,7 @@ #define IPV6_TLV_PADN 1 #define IPV6_TLV_ROUTERALERT 5 #define IPV6_TLV_CALIPSO 7 /* RFC 5570 */ -#define IPV6_TLV_IOAM 49 /* TEMPORARY IANA allocation for IOAM */ +#define IPV6_TLV_IOAM 49 /* RFC 9486 */ #define IPV6_TLV_JUMBO 194 #define IPV6_TLV_HAO 201 /* home address option */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/include/uapi/linux/psci.h +++ linux-aws-5.15-5.15.0/include/uapi/linux/psci.h @@ -51,9 +51,16 @@ #define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14) #define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15) #define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18) +#define PSCI_1_1_FN_MEM_PROTECT PSCI_0_2_FN(19) +#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(20) +#define PSCI_1_3_FN_SYSTEM_OFF2 PSCI_0_2_FN(21) +#define PSCI_1_3_FN_CLEAN_INV_MEMREGION_ATTRIBUTES PSCI_0_2_FN(23) #define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14) #define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18) +#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE 
PSCI_0_2_FN64(20) +#define PSCI_1_3_FN64_SYSTEM_OFF2 PSCI_0_2_FN64(21) +#define PSCI_1_3_FN64_CLEAN_INV_MEMREGION PSCI_0_2_FN64(22) /* PSCI v0.2 power state encoding for CPU_SUSPEND function */ #define PSCI_0_2_POWER_STATE_ID_MASK 0xffff @@ -82,6 +89,23 @@ #define PSCI_0_2_TOS_UP_NO_MIGRATE 1 #define PSCI_0_2_TOS_MP 2 +/* PSCI v1.1 reset type encoding for SYSTEM_RESET2 */ +#define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0 +#define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U + +/* PSCI v1.3 hibernate type for SYSTEM_OFF2 */ +#define PSCI_1_3_HIBERNATE_TYPE_OFF 0 + +/* PSCI v1.3 flags for CLEAN_INV_MEMREGION */ +#define PSCI_1_3_CLEAN_INV_MEMREGION_FLAG_DRY_RUN BIT(0) + +/* PSCI v1.3 attributes for CLEAN_INV_MEMREGION_ATTRIBUTES */ +#define PSCI_1_3_CLEAN_INV_MEMREGION_ATTR_OP_TYPE 0 +#define PSCI_1_3_CLEAN_INV_MEMREGION_ATTR_CPU_RDVZ 1 +#define PSCI_1_3_CLEAN_INV_MEMREGION_ATTR_LATENCY 2 +#define PSCI_1_3_CLEAN_INV_MEMREGION_ATTR_RATE_LIMIT 3 +#define PSCI_1_3_CLEAN_INV_MEMREGION_ATTR_TIMEOUT 4 + /* PSCI version decoding (independent of PSCI version) */ #define PSCI_VERSION_MAJOR_SHIFT 16 #define PSCI_VERSION_MINOR_MASK \ @@ -115,5 +139,8 @@ #define PSCI_RET_NOT_PRESENT -7 #define PSCI_RET_DISABLED -8 #define PSCI_RET_INVALID_ADDRESS -9 +#define PSCI_RET_TIMEOUT -10 +#define PSCI_RET_RATE_LIMITED -11 +#define PSCI_RET_BUSY -12 #endif /* _UAPI_LINUX_PSCI_H */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/kernel/bpf/bpf_lru_list.c +++ linux-aws-5.15-5.15.0/kernel/bpf/bpf_lru_list.c @@ -41,7 +41,12 @@ /* bpf_lru_node helpers */ static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node) { - return node->ref; + return READ_ONCE(node->ref); +} + +static void bpf_lru_node_clear_ref(struct bpf_lru_node *node) +{ + WRITE_ONCE(node->ref, 0); } static void bpf_lru_list_count_inc(struct bpf_lru_list *l, @@ -89,7 +94,7 @@ bpf_lru_list_count_inc(l, tgt_type); node->type = tgt_type; - node->ref = 0; + bpf_lru_node_clear_ref(node); list_move(&node->list, &l->lists[tgt_type]); } @@ -110,7 +115,7 @@ bpf_lru_list_count_inc(l, tgt_type); node->type = tgt_type; } - node->ref = 0; + bpf_lru_node_clear_ref(node); /* If the moving node is the next_inactive_rotation candidate, * move the next_inactive_rotation pointer also. 
@@ -353,7 +358,7 @@ *(u32 *)((void *)node + lru->hash_offset) = hash; node->cpu = cpu; node->type = BPF_LRU_LOCAL_LIST_T_PENDING; - node->ref = 0; + bpf_lru_node_clear_ref(node); list_add(&node->list, local_pending_list(loc_l)); } @@ -419,7 +424,7 @@ if (!list_empty(free_list)) { node = list_first_entry(free_list, struct bpf_lru_node, list); *(u32 *)((void *)node + lru->hash_offset) = hash; - node->ref = 0; + bpf_lru_node_clear_ref(node); __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE); } @@ -522,7 +527,7 @@ } node->type = BPF_LRU_LOCAL_LIST_T_FREE; - node->ref = 0; + bpf_lru_node_clear_ref(node); list_move(&node->list, local_free_list(loc_l)); raw_spin_unlock_irqrestore(&loc_l->lock, flags); @@ -568,7 +573,7 @@ node = (struct bpf_lru_node *)(buf + node_offset); node->type = BPF_LRU_LIST_T_FREE; - node->ref = 0; + bpf_lru_node_clear_ref(node); list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); buf += elem_size; } @@ -594,7 +599,7 @@ node = (struct bpf_lru_node *)(buf + node_offset); node->cpu = cpu; node->type = BPF_LRU_LIST_T_FREE; - node->ref = 0; + bpf_lru_node_clear_ref(node); list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); i++; buf += elem_size; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/kernel/bpf/bpf_lru_list.h +++ linux-aws-5.15-5.15.0/kernel/bpf/bpf_lru_list.h @@ -63,11 +63,8 @@ static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node) { - /* ref is an approximation on access frequency. It does not - * have to be very accurate. Hence, no protection is used. - */ - if (!node->ref) - node->ref = 1; + if (!READ_ONCE(node->ref)) + WRITE_ONCE(node->ref, 1); } int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/kernel/bpf/devmap.c +++ linux-aws-5.15-5.15.0/kernel/bpf/devmap.c @@ -130,13 +130,14 @@ bpf_map_init_from_attr(&dtab->map, attr); if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { - dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); - - if (!dtab->n_buckets) /* Overflow check */ + /* hash table size must be power of 2; roundup_pow_of_two() can + * overflow into UB on 32-bit arches, so check that first + */ + if (dtab->map.max_entries > 1UL << 31) return -EINVAL; - } - if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { + dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); + dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets, dtab->map.numa_node); if (!dtab->dev_index_head) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/kernel/time/time_test.c +++ linux-aws-5.15-5.15.0/kernel/time/time_test.c @@ -73,7 +73,7 @@ days = div_s64(secs, 86400); - #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %ld", \ + #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %lld", \ year, month, mdday, yday, days KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/lib/cmdline_kunit.c +++ linux-aws-5.15-5.15.0/lib/cmdline_kunit.c @@ -124,7 +124,7 @@ n, e[0], r[0]); p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0])); - KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r); + KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %td out of bound", n, p - r); } static void cmdline_test_range(struct kunit *test) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/lib/test_blackhole_dev.c +++ linux-aws-5.15-5.15.0/lib/test_blackhole_dev.c @@ -29,7 +29,6 @@ { struct ipv6hdr *ip6h; struct sk_buff *skb; - struct ethhdr *ethh; struct udphdr *uh; int data_len; int ret; @@ -61,7 
+60,7 @@ ip6h->saddr = in6addr_loopback; ip6h->daddr = in6addr_loopback; /* Ether */ - ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr)); + skb_push(skb, sizeof(struct ethhdr)); skb_set_mac_header(skb, 0); skb->protocol = htons(ETH_P_IPV6); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/core/dev_ioctl.c +++ linux-aws-5.15-5.15.0/net/core/dev_ioctl.c @@ -339,7 +339,7 @@ if (ifr->ifr_hwaddr.sa_family != dev->type) return -EINVAL; memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, - min(sizeof(ifr->ifr_hwaddr.sa_data), + min(sizeof(ifr->ifr_hwaddr.sa_data_min), (size_t)dev->addr_len)); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return 0; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/core/sock_diag.c +++ linux-aws-5.15-5.15.0/net/core/sock_diag.c @@ -189,7 +189,7 @@ if (sock_diag_handlers[hndl->family]) err = -EBUSY; else - sock_diag_handlers[hndl->family] = hndl; + WRITE_ONCE(sock_diag_handlers[hndl->family], hndl); mutex_unlock(&sock_diag_table_mutex); return err; @@ -205,7 +205,7 @@ mutex_lock(&sock_diag_table_mutex); BUG_ON(sock_diag_handlers[family] != hnld); - sock_diag_handlers[family] = NULL; + WRITE_ONCE(sock_diag_handlers[family], NULL); mutex_unlock(&sock_diag_table_mutex); } EXPORT_SYMBOL_GPL(sock_diag_unregister); @@ -223,7 +223,7 @@ return -EINVAL; req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX); - if (sock_diag_handlers[req->sdiag_family] == NULL) + if (READ_ONCE(sock_diag_handlers[req->sdiag_family]) == NULL) sock_load_diag_module(req->sdiag_family, 0); mutex_lock(&sock_diag_table_mutex); @@ -282,12 +282,12 @@ switch (group) { case SKNLGRP_INET_TCP_DESTROY: case SKNLGRP_INET_UDP_DESTROY: - if (!sock_diag_handlers[AF_INET]) + if (!READ_ONCE(sock_diag_handlers[AF_INET])) sock_load_diag_module(AF_INET, 0); break; case SKNLGRP_INET6_TCP_DESTROY: case SKNLGRP_INET6_UDP_DESTROY: - if (!sock_diag_handlers[AF_INET6]) + if (!READ_ONCE(sock_diag_handlers[AF_INET6])) sock_load_diag_module(AF_INET6, 0); break; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/ipv6/netfilter/nf_reject_ipv6.c +++ linux-aws-5.15-5.15.0/net/ipv6/netfilter/nf_reject_ipv6.c @@ -345,6 +345,7 @@ nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen); nf_ct_attach(nskb, oldskb); + nf_ct_set_closing(skb_nfct(oldskb)); #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) /* If we use ip6_local_out for bridged traffic, the MAC source on only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/mac80211/key.c +++ linux-aws-5.15-5.15.0/net/mac80211/key.c @@ -843,7 +843,7 @@ */ if (ieee80211_key_identical(sdata, old_key, key)) { ieee80211_key_free_unused(key); - ret = 0; + ret = -EALREADY; goto out; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/mptcp/diag.c +++ linux-aws-5.15-5.15.0/net/mptcp/diag.c @@ -13,17 +13,22 @@ #include #include "protocol.h" -static int subflow_get_info(const struct sock *sk, struct sk_buff *skb) +static int subflow_get_info(struct sock *sk, struct sk_buff *skb) { struct mptcp_subflow_context *sf; struct nlattr *start; u32 flags = 0; + bool slow; int err; + if (inet_sk_state_load(sk) == TCP_LISTEN) + return 0; + start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP); if (!start) return -EMSGSIZE; + slow = lock_sock_fast(sk); rcu_read_lock(); sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data); if (!sf) { @@ -69,11 +74,13 @@ } rcu_read_unlock(); + unlock_sock_fast(sk, slow); nla_nest_end(skb, start); return 0; nla_failure: rcu_read_unlock(); + unlock_sock_fast(sk, slow); nla_nest_cancel(skb, start); return err; } 
only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/netfilter/nf_conntrack_h323_asn1.c +++ linux-aws-5.15-5.15.0/net/netfilter/nf_conntrack_h323_asn1.c @@ -533,6 +533,8 @@ /* Get fields bitmap */ if (nf_h323_error_boundary(bs, 0, f->sz)) return H323_ERROR_BOUND; + if (f->sz > 32) + return H323_ERROR_RANGE; bmp = get_bitmap(bs, f->sz); if (base) *(unsigned int *)base = bmp; @@ -589,6 +591,8 @@ bmp2_len = get_bits(bs, 7) + 1; if (nf_h323_error_boundary(bs, 0, bmp2_len)) return H323_ERROR_BOUND; + if (bmp2_len > 32) + return H323_ERROR_RANGE; bmp2 = get_bitmap(bs, bmp2_len); bmp |= bmp2 >> f->sz; if (base) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/netfilter/nf_nat_core.c +++ linux-aws-5.15-5.15.0/net/netfilter/nf_nat_core.c @@ -1120,7 +1120,7 @@ .size = sizeof(struct nat_net), }; -static struct nf_nat_hook nat_hook = { +static const struct nf_nat_hook nat_hook = { .parse_nat_setup = nfnetlink_parse_nat_setup, #ifdef CONFIG_XFRM .decode_session = __nf_nat_decode_session, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/netrom/nr_dev.c +++ linux-aws-5.15-5.15.0/net/netrom/nr_dev.c @@ -81,7 +81,7 @@ buff[6] |= AX25_SSSID_SPARE; buff += AX25_ADDR_LEN; - *buff++ = sysctl_netrom_network_ttl_initialiser; + *buff++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser); *buff++ = NR_PROTO_IP; *buff++ = NR_PROTO_IP; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/netrom/nr_in.c +++ linux-aws-5.15-5.15.0/net/netrom/nr_in.c @@ -97,7 +97,7 @@ break; case NR_RESET: - if (sysctl_netrom_reset_circuit) + if (READ_ONCE(sysctl_netrom_reset_circuit)) nr_disconnect(sk, ECONNRESET); break; @@ -128,7 +128,7 @@ break; case NR_RESET: - if (sysctl_netrom_reset_circuit) + if (READ_ONCE(sysctl_netrom_reset_circuit)) nr_disconnect(sk, ECONNRESET); break; @@ -262,7 +262,7 @@ break; case NR_RESET: - if (sysctl_netrom_reset_circuit) + if (READ_ONCE(sysctl_netrom_reset_circuit)) nr_disconnect(sk, ECONNRESET); break; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/netrom/nr_out.c +++ linux-aws-5.15-5.15.0/net/netrom/nr_out.c @@ -204,7 +204,7 @@ dptr[6] |= AX25_SSSID_SPARE; dptr += AX25_ADDR_LEN; - *dptr++ = sysctl_netrom_network_ttl_initialiser; + *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser); if (!nr_route_frame(skb, NULL)) { kfree_skb(skb); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/netrom/nr_route.c +++ linux-aws-5.15-5.15.0/net/netrom/nr_route.c @@ -153,7 +153,7 @@ nr_neigh->digipeat = NULL; nr_neigh->ax25 = NULL; nr_neigh->dev = dev; - nr_neigh->quality = sysctl_netrom_default_path_quality; + nr_neigh->quality = READ_ONCE(sysctl_netrom_default_path_quality); nr_neigh->locked = 0; nr_neigh->count = 0; nr_neigh->number = nr_neigh_no++; @@ -728,7 +728,7 @@ nr_neigh->ax25 = NULL; ax25_cb_put(ax25); - if (++nr_neigh->failed < sysctl_netrom_link_fails_count) { + if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) { nr_neigh_put(nr_neigh); return; } @@ -766,7 +766,7 @@ if (ax25 != NULL) { ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat, ax25->ax25_dev->dev, 0, - sysctl_netrom_obsolescence_count_initialiser); + READ_ONCE(sysctl_netrom_obsolescence_count_initialiser)); if (ret) return ret; } @@ -780,7 +780,7 @@ return ret; } - if (!sysctl_netrom_routing_control && ax25 != NULL) + if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL) return 0; /* Its Time-To-Live has expired */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/rds/rdma.c +++ linux-aws-5.15-5.15.0/net/rds/rdma.c @@ 
-301,6 +301,9 @@ kfree(sg); } ret = PTR_ERR(trans_private); + /* Trigger connection so that its ready for the next retry */ + if (ret == -ENODEV) + rds_conn_connect_if_down(cp->cp_conn); goto out; } only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/rds/send.c +++ linux-aws-5.15-5.15.0/net/rds/send.c @@ -103,13 +103,12 @@ static int acquire_in_xmit(struct rds_conn_path *cp) { - return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0; + return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0; } static void release_in_xmit(struct rds_conn_path *cp) { - clear_bit(RDS_IN_XMIT, &cp->cp_flags); - smp_mb__after_atomic(); + clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags); /* * We don't use wait_on_bit()/wake_up_bit() because our waking is in a * hot path and finding waiters is very rare. We don't want to walk @@ -1314,12 +1313,8 @@ /* Parse any control messages the user may have included. */ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct); - if (ret) { - /* Trigger connection so that its ready for the next retry */ - if (ret == -EAGAIN) - rds_conn_connect_if_down(conn); + if (ret) goto out; - } if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ linux-aws-5.15-5.15.0/net/sunrpc/auth_gss/gss_rpc_xdr.c @@ -250,8 +250,8 @@ creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL); if (!creds) { - kfree(oa->data); - return -ENOMEM; + err = -ENOMEM; + goto free_oa; } oa->data[0].option.data = CREDS_VALUE; @@ -265,29 +265,40 @@ /* option buffer */ p = xdr_inline_decode(xdr, 4); - if (unlikely(p == NULL)) - return -ENOSPC; + if (unlikely(p == NULL)) { + err = -ENOSPC; + goto free_creds; + } length = be32_to_cpup(p); p = xdr_inline_decode(xdr, length); - if (unlikely(p == NULL)) - return -ENOSPC; + if (unlikely(p == NULL)) { + err = -ENOSPC; + goto free_creds; + } if (length == sizeof(CREDS_VALUE) && memcmp(p, CREDS_VALUE, sizeof(CREDS_VALUE)) == 0) { /* We have creds here. parse them */ err = gssx_dec_linux_creds(xdr, creds); if (err) - return err; + goto free_creds; oa->data[0].value.len = 1; /* presence */ } else { /* consume uninteresting buffer */ err = gssx_dec_buffer(xdr, &dummy); if (err) - return err; + goto free_creds; } } return 0; + +free_creds: + kfree(creds); +free_oa: + kfree(oa->data); + oa->data = NULL; + return err; } static int gssx_dec_status(struct xdr_stream *xdr, only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/scripts/bpf_doc.py +++ linux-aws-5.15-5.15.0/scripts/bpf_doc.py @@ -369,7 +369,7 @@ instructions to the kernel when the programs are loaded. The format for that string is identical to the one in use for kernel modules (Dual licenses, such as "Dual BSD/GPL", may be used). Some helper functions are only accessible to -programs that are compatible with the GNU Privacy License (GPL). +programs that are compatible with the GNU General Public License (GNU GPL). In order to use such helpers, the eBPF program must be loaded with the correct license string passed (via **attr**) to the **bpf**\ () system call, and this only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/scripts/clang-tools/gen_compile_commands.py +++ linux-aws-5.15-5.15.0/scripts/clang-tools/gen_compile_commands.py @@ -184,7 +184,7 @@ # escape the pound sign '#', either as '\#' or '$(pound)' (depending on the # kernel version). 
The compile_commands.json file is not interepreted # by Make, so this code replaces the escaped version with '#'. - prefix = command_prefix.replace('\#', '#').replace('$(pound)', '#') + prefix = command_prefix.replace(r'\#', '#').replace('$(pound)', '#') # Use os.path.abspath() to normalize the path resolving '.' and '..' . abs_path = os.path.abspath(os.path.join(root_directory, file_path)) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/scripts/kconfig/lexer.l +++ linux-aws-5.15-5.15.0/scripts/kconfig/lexer.l @@ -301,8 +301,11 @@ new_string(); append_string(in, n); - /* get the whole line because we do not know the end of token. */ - while ((c = input()) != EOF) { + /* + * get the whole line because we do not know the end of token. + * input() returns 0 (not EOF!) when it reachs the end of file. + */ + while ((c = input()) != 0) { if (c == '\n') { unput(c); break; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/security/tomoyo/common.c +++ linux-aws-5.15-5.15.0/security/tomoyo/common.c @@ -2657,13 +2657,14 @@ { int error = buffer_len; size_t avail_len = buffer_len; - char *cp0 = head->write_buf; + char *cp0; int idx; if (!head->write) return -EINVAL; if (mutex_lock_interruptible(&head->io_sem)) return -EINTR; + cp0 = head->write_buf; head->read_user_buf_avail = 0; idx = tomoyo_read_lock(); /* Read a line and dispatch it to the policy handler. */ only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/sound/core/seq/seq_midi.c +++ linux-aws-5.15-5.15.0/sound/core/seq/seq_midi.c @@ -112,6 +112,12 @@ return 0; } +/* callback for snd_seq_dump_var_event(), bridging to dump_midi() */ +static int __dump_midi(void *ptr, void *buf, int count) +{ + return dump_midi(ptr, buf, count); +} + static int event_process_midi(struct snd_seq_event *ev, int direct, void *private_data, int atomic, int hop) { @@ -131,7 +137,7 @@ pr_debug("ALSA: seq_midi: invalid sysex event flags = 0x%x\n", ev->flags); return 0; } - snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)dump_midi, substream); + snd_seq_dump_var_event(ev, __dump_midi, substream); snd_midi_event_reset_decode(msynth->parser); } else { if (msynth->parser == NULL) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/sound/core/seq/seq_virmidi.c +++ linux-aws-5.15-5.15.0/sound/core/seq/seq_virmidi.c @@ -62,6 +62,13 @@ /* * decode input event and put to read buffer of each opened file */ + +/* callback for snd_seq_dump_var_event(), bridging to snd_rawmidi_receive() */ +static int dump_to_rawmidi(void *ptr, void *buf, int count) +{ + return snd_rawmidi_receive(ptr, buf, count); +} + static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, struct snd_seq_event *ev, bool atomic) @@ -80,7 +87,7 @@ if (ev->type == SNDRV_SEQ_EVENT_SYSEX) { if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE) continue; - snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream); + snd_seq_dump_var_event(ev, dump_to_rawmidi, vmidi->substream); snd_midi_event_reset_decode(vmidi->parser); } else { len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev); only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/sound/firewire/amdtp-stream.c +++ linux-aws-5.15-5.15.0/sound/firewire/amdtp-stream.c @@ -934,7 +934,7 @@ // to the reason. 
unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle, IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES); - lost = (compare_ohci_cycle_count(safe_cycle, cycle) > 0); + lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0); } if (lost) { dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n", only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/sound/soc/meson/aiu.c +++ linux-aws-5.15-5.15.0/sound/soc/meson/aiu.c @@ -215,49 +215,27 @@ static int aiu_clk_get(struct device *dev) { struct aiu *aiu = dev_get_drvdata(dev); + struct clk *pclk; int ret; - aiu->pclk = devm_clk_get(dev, "pclk"); - if (IS_ERR(aiu->pclk)) { - if (PTR_ERR(aiu->pclk) != -EPROBE_DEFER) - dev_err(dev, "Can't get the aiu pclk\n"); - return PTR_ERR(aiu->pclk); - } + pclk = devm_clk_get_enabled(dev, "pclk"); + if (IS_ERR(pclk)) + return dev_err_probe(dev, PTR_ERR(pclk), "Can't get the aiu pclk\n"); aiu->spdif_mclk = devm_clk_get(dev, "spdif_mclk"); - if (IS_ERR(aiu->spdif_mclk)) { - if (PTR_ERR(aiu->spdif_mclk) != -EPROBE_DEFER) - dev_err(dev, "Can't get the aiu spdif master clock\n"); - return PTR_ERR(aiu->spdif_mclk); - } + if (IS_ERR(aiu->spdif_mclk)) + return dev_err_probe(dev, PTR_ERR(aiu->spdif_mclk), + "Can't get the aiu spdif master clock\n"); ret = aiu_clk_bulk_get(dev, aiu_i2s_ids, ARRAY_SIZE(aiu_i2s_ids), &aiu->i2s); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "Can't get the i2s clocks\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "Can't get the i2s clocks\n"); ret = aiu_clk_bulk_get(dev, aiu_spdif_ids, ARRAY_SIZE(aiu_spdif_ids), &aiu->spdif); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "Can't get the spdif clocks\n"); - return ret; - } - - ret = clk_prepare_enable(aiu->pclk); - if (ret) { - dev_err(dev, "peripheral clock enable failed\n"); - return ret; - } - - ret = devm_add_action_or_reset(dev, - (void(*)(void *))clk_disable_unprepare, - aiu->pclk); if (ret) - dev_err(dev, "failed to add reset action on pclk"); + return dev_err_probe(dev, ret, "Can't get the spdif clocks\n"); return ret; } @@ -281,11 +259,8 @@ platform_set_drvdata(pdev, aiu); ret = device_reset(dev); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to reset device\n"); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "Failed to reset device\n"); regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/sound/soc/meson/aiu.h +++ linux-aws-5.15-5.15.0/sound/soc/meson/aiu.h @@ -33,7 +33,6 @@ }; struct aiu { - struct clk *pclk; struct clk *spdif_mclk; struct aiu_interface i2s; struct aiu_interface spdif; only in patch2: unchanged: --- linux-aws-5.15-5.15.0.orig/sound/soc/meson/axg-fifo.c +++ linux-aws-5.15-5.15.0/sound/soc/meson/axg-fifo.c @@ -351,20 +351,12 @@ } fifo->pclk = devm_clk_get(dev, NULL); - if (IS_ERR(fifo->pclk)) { - if (PTR_ERR(fifo->pclk) != -EPROBE_DEFER) - dev_err(dev, "failed to get pclk: %ld\n", - PTR_ERR(fifo->pclk)); - return PTR_ERR(fifo->pclk); - } + if (IS_ERR(fifo->pclk)) + return dev_err_probe(dev, PTR_ERR(fifo->pclk), "failed to get pclk\n"); fifo->arb = devm_reset_control_get_exclusive(dev, NULL); - if (IS_ERR(fifo->arb)) { - if (PTR_ERR(fifo->arb) != -EPROBE_DEFER) - dev_err(dev, "failed to get arb reset: %ld\n", - PTR_ERR(fifo->arb)); - return PTR_ERR(fifo->arb); - } + if (IS_ERR(fifo->arb)) + return dev_err_probe(dev, PTR_ERR(fifo->arb), "failed to get arb reset\n"); fifo->irq = of_irq_get(dev->of_node, 0); if (fifo->irq <= 0) { only in patch2: unchanged: --- 
--- linux-aws-5.15-5.15.0.orig/sound/soc/meson/axg-pdm.c
+++ linux-aws-5.15-5.15.0/sound/soc/meson/axg-pdm.c
@@ -586,7 +586,6 @@
 	struct device *dev = &pdev->dev;
 	struct axg_pdm *priv;
 	void __iomem *regs;
-	int ret;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -611,28 +610,16 @@
 	}
 
 	priv->pclk = devm_clk_get(dev, "pclk");
-	if (IS_ERR(priv->pclk)) {
-		ret = PTR_ERR(priv->pclk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to get pclk: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(priv->pclk))
+		return dev_err_probe(dev, PTR_ERR(priv->pclk), "failed to get pclk\n");
 
 	priv->dclk = devm_clk_get(dev, "dclk");
-	if (IS_ERR(priv->dclk)) {
-		ret = PTR_ERR(priv->dclk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to get dclk: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(priv->dclk))
+		return dev_err_probe(dev, PTR_ERR(priv->dclk), "failed to get dclk\n");
 
 	priv->sysclk = devm_clk_get(dev, "sysclk");
-	if (IS_ERR(priv->sysclk)) {
-		ret = PTR_ERR(priv->sysclk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to get dclk: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(priv->sysclk))
+		return dev_err_probe(dev, PTR_ERR(priv->sysclk), "failed to get dclk\n");
 
 	return devm_snd_soc_register_component(dev, &axg_pdm_component_drv,
 					       &axg_pdm_dai_drv, 1);
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/sound/soc/meson/axg-spdifout.c
+++ linux-aws-5.15-5.15.0/sound/soc/meson/axg-spdifout.c
@@ -403,7 +403,6 @@
 	struct device *dev = &pdev->dev;
 	struct axg_spdifout *priv;
 	void __iomem *regs;
-	int ret;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
@@ -422,20 +421,12 @@
 	}
 
 	priv->pclk = devm_clk_get(dev, "pclk");
-	if (IS_ERR(priv->pclk)) {
-		ret = PTR_ERR(priv->pclk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to get pclk: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(priv->pclk))
+		return dev_err_probe(dev, PTR_ERR(priv->pclk), "failed to get pclk\n");
 
 	priv->mclk = devm_clk_get(dev, "mclk");
-	if (IS_ERR(priv->mclk)) {
-		ret = PTR_ERR(priv->mclk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to get mclk: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(priv->mclk))
+		return dev_err_probe(dev, PTR_ERR(priv->mclk), "failed to get mclk\n");
 
 	return devm_snd_soc_register_component(dev, &axg_spdifout_component_drv,
 			axg_spdifout_dai_drv, ARRAY_SIZE(axg_spdifout_dai_drv));
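The meson audio hunks above (and the ones that follow) repeatedly collapse the open-coded "print an error unless it is -EPROBE_DEFER" dance into a single dev_err_probe() call, which logs, records the deferral reason, and returns the error code in one step. A minimal probe() sketch of the idiom, with a made-up driver name and clock id that are not part of the patch:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            struct clk *pclk;

            /*
             * devm_clk_get() may return -EPROBE_DEFER when the clock provider
             * is not ready yet.  dev_err_probe() stays silent in that case
             * (it only records the reason for debugfs) and prints for any
             * other error, so no explicit -EPROBE_DEFER check is needed.
             */
            pclk = devm_clk_get(dev, "pclk");
            if (IS_ERR(pclk))
                    return dev_err_probe(dev, PTR_ERR(pclk), "failed to get pclk\n");

            return 0;
    }

    static struct platform_driver example_driver = {
            .probe = example_probe,
            .driver = { .name = "example-dev-err-probe" },
    };
    module_platform_driver(example_driver);
    MODULE_LICENSE("GPL");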
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/sound/soc/meson/axg-tdm-interface.c
+++ linux-aws-5.15-5.15.0/sound/soc/meson/axg-tdm-interface.c
@@ -12,6 +12,9 @@
 
 #include "axg-tdm.h"
 
+/* Maximum bit clock frequency according the datasheets */
+#define MAX_SCLK 100000000 /* Hz */
+
 enum {
 	TDM_IFACE_PAD,
 	TDM_IFACE_LOOPBACK,
@@ -155,19 +158,27 @@
 		return -EINVAL;
 	}
 
-	/* Apply component wide rate symmetry */
 	if (snd_soc_component_active(dai->component)) {
+		/* Apply component wide rate symmetry */
 		ret = snd_pcm_hw_constraint_single(substream->runtime,
 						   SNDRV_PCM_HW_PARAM_RATE,
 						   iface->rate);
-		if (ret < 0) {
-			dev_err(dai->dev,
-				"can't set iface rate constraint\n");
-			return ret;
-		}
+
+	} else {
+		/* Limit rate according to the slot number and width */
+		unsigned int max_rate =
+			MAX_SCLK / (iface->slots * iface->slot_width);
+		ret = snd_pcm_hw_constraint_minmax(substream->runtime,
+						   SNDRV_PCM_HW_PARAM_RATE,
+						   0, max_rate);
 	}
 
-	return 0;
+	if (ret < 0)
+		dev_err(dai->dev, "can't set iface rate constraint\n");
+	else
+		ret = 0;
+
+	return ret;
 }
 
 static int axg_tdm_iface_set_stream(struct snd_pcm_substream *substream,
@@ -266,8 +277,8 @@
 	srate = iface->slots * iface->slot_width * params_rate(params);
 
 	if (!iface->mclk_rate) {
-		/* If no specific mclk is requested, default to bit clock * 4 */
-		clk_set_rate(iface->mclk, 4 * srate);
+		/* If no specific mclk is requested, default to bit clock * 2 */
+		clk_set_rate(iface->mclk, 2 * srate);
 	} else {
 		/* Check if we can actually get the bit clock from mclk */
 		if (iface->mclk_rate % srate) {
@@ -517,21 +528,13 @@
 
 	/* Bit clock provided on the pad */
 	iface->sclk = devm_clk_get(dev, "sclk");
-	if (IS_ERR(iface->sclk)) {
-		ret = PTR_ERR(iface->sclk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to get sclk: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(iface->sclk))
+		return dev_err_probe(dev, PTR_ERR(iface->sclk), "failed to get sclk\n");
 
 	/* Sample clock provided on the pad */
 	iface->lrclk = devm_clk_get(dev, "lrclk");
-	if (IS_ERR(iface->lrclk)) {
-		ret = PTR_ERR(iface->lrclk);
-		if (ret != -EPROBE_DEFER)
-			dev_err(dev, "failed to get lrclk: %d\n", ret);
-		return ret;
-	}
+	if (IS_ERR(iface->lrclk))
+		return dev_err_probe(dev, PTR_ERR(iface->lrclk), "failed to get lrclk\n");
 
 	/*
 	 * mclk maybe be missing when the cpu dai is in slave mode and
@@ -542,13 +545,10 @@
 	iface->mclk = devm_clk_get(dev, "mclk");
 	if (IS_ERR(iface->mclk)) {
 		ret = PTR_ERR(iface->mclk);
-		if (ret == -ENOENT) {
+		if (ret == -ENOENT)
 			iface->mclk = NULL;
-		} else {
-			if (ret != -EPROBE_DEFER)
-				dev_err(dev, "failed to get mclk: %d\n", ret);
-			return ret;
-		}
+		else
+			return dev_err_probe(dev, ret, "failed to get mclk\n");
 	}
 
 	return devm_snd_soc_register_component(dev,
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/sound/soc/meson/meson-card-utils.c
+++ linux-aws-5.15-5.15.0/sound/soc/meson/meson-card-utils.c
@@ -85,11 +85,9 @@
 
 	ret = of_parse_phandle_with_args(node, "sound-dai",
 					 "#sound-dai-cells", 0, &args);
-	if (ret) {
-		if (ret != -EPROBE_DEFER)
-			dev_err(card->dev, "can't parse dai %d\n", ret);
-		return ret;
-	}
+	if (ret)
+		return dev_err_probe(card->dev, ret, "can't parse dai\n");
+
 	*dai_of_node = args.np;
 
 	return snd_soc_get_dai_name(&args, dai_name);
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/sound/soc/meson/t9015.c
+++ linux-aws-5.15-5.15.0/sound/soc/meson/t9015.c
@@ -48,7 +48,6 @@
 #define POWER_CFG	0x10
 
 struct t9015 {
-	struct clk *pclk;
 	struct regulator *avdd;
 };
 
@@ -250,6 +249,7 @@
 	struct t9015 *priv;
 	void __iomem *regs;
 	struct regmap *regmap;
+	struct clk *pclk;
 	int ret;
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -257,31 +257,13 @@
 		return -ENOMEM;
 	platform_set_drvdata(pdev, priv);
 
-	priv->pclk = devm_clk_get(dev, "pclk");
-	if (IS_ERR(priv->pclk)) {
-		if (PTR_ERR(priv->pclk) != -EPROBE_DEFER)
-			dev_err(dev, "failed to get core clock\n");
-		return PTR_ERR(priv->pclk);
-	}
+	pclk = devm_clk_get_enabled(dev, "pclk");
+	if (IS_ERR(pclk))
+		return dev_err_probe(dev, PTR_ERR(pclk), "failed to get core clock\n");
 
 	priv->avdd = devm_regulator_get(dev, "AVDD");
-	if (IS_ERR(priv->avdd)) {
-		if (PTR_ERR(priv->avdd) != -EPROBE_DEFER)
-			dev_err(dev, "failed to AVDD\n");
-		return PTR_ERR(priv->avdd);
-	}
-
-	ret = clk_prepare_enable(priv->pclk);
-	if (ret) {
-		dev_err(dev, "core clock enable failed\n");
-		return ret;
-	}
-
-	ret = devm_add_action_or_reset(dev,
-				       (void(*)(void *))clk_disable_unprepare,
-				       priv->pclk);
-	if (ret)
-		return ret;
+	if (IS_ERR(priv->avdd))
+		return dev_err_probe(dev, PTR_ERR(priv->avdd), "failed to AVDD\n");
 
 	ret = device_reset(dev);
 	if (ret) {
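The aiu.c and t9015.c hunks additionally drop the manual clk_prepare_enable() plus devm_add_action_or_reset(clk_disable_unprepare) pair in favour of devm_clk_get_enabled(), which hands back the clock already prepared and enabled and disables it automatically on driver detach; that is why the struct clk pointer no longer needs to live in the driver state. A short sketch of the before/after shape (hypothetical helpers, not code from the patch):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /* Before: get, enable, and register the matching cleanup by hand. */
    static int enable_pclk_by_hand(struct device *dev, struct clk **out)
    {
            struct clk *pclk = devm_clk_get(dev, "pclk");
            int ret;

            if (IS_ERR(pclk))
                    return PTR_ERR(pclk);

            ret = clk_prepare_enable(pclk);
            if (ret)
                    return ret;

            /* same function-pointer cast the removed code used */
            ret = devm_add_action_or_reset(dev,
                                           (void (*)(void *))clk_disable_unprepare,
                                           pclk);
            if (ret)
                    return ret;

            *out = pclk;
            return 0;
    }

    /* After: one call does all of the above; the pointer can stay local. */
    static int enable_pclk(struct device *dev)
    {
            struct clk *pclk = devm_clk_get_enabled(dev, "pclk");

            if (IS_ERR(pclk))
                    return dev_err_probe(dev, PTR_ERR(pclk), "failed to get pclk\n");
            return 0;
    }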
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/sound/soc/sunxi/sun4i-spdif.c
+++ linux-aws-5.15-5.15.0/sound/soc/sunxi/sun4i-spdif.c
@@ -464,6 +464,11 @@
 		.compatible = "allwinner,sun50i-h6-spdif",
 		.data = &sun50i_h6_spdif_quirks,
 	},
+	{
+		.compatible = "allwinner,sun50i-h616-spdif",
+		/* Essentially the same as the H6, but without RX */
+		.data = &sun50i_h6_spdif_quirks,
+	},
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sun4i_spdif_of_match);
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/sound/usb/implicit.h
+++ linux-aws-5.15-5.15.0/sound/usb/implicit.h
@@ -9,6 +9,6 @@
 snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
 				     const struct audioformat *target,
 				     const struct snd_pcm_hw_params *params,
-				     int stream);
+				     int stream, bool *fixed_rate);
 
 #endif /* __USBAUDIO_IMPLICIT_H */
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/sound/usb/pcm.h
+++ linux-aws-5.15-5.15.0/sound/usb/pcm.h
@@ -6,6 +6,8 @@
 int snd_usb_pcm_suspend(struct snd_usb_stream *as);
 int snd_usb_pcm_resume(struct snd_usb_stream *as);
 
+bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *as);
+
 int snd_usb_init_pitch(struct snd_usb_audio *chip,
 		       const struct audioformat *fmt);
 void snd_usb_preallocate_buffer(struct snd_usb_substream *subs);
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/tools/include/uapi/linux/fscrypt.h
+++ linux-aws-5.15-5.15.0/tools/include/uapi/linux/fscrypt.h
@@ -27,7 +27,8 @@
 #define FSCRYPT_MODE_AES_128_CBC		5
 #define FSCRYPT_MODE_AES_128_CTS		6
 #define FSCRYPT_MODE_ADIANTUM			9
-/* If adding a mode number > 9, update FSCRYPT_MODE_MAX in fscrypt_private.h */
+#define FSCRYPT_MODE_AES_256_HCTR2		10
+/* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */
 
 /*
  * Legacy policy version; ad-hoc KDF and no key verification.
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/tools/perf/builtin-record.c
+++ linux-aws-5.15-5.15.0/tools/perf/builtin-record.c
@@ -1277,8 +1277,8 @@
 record__switch_output(struct record *rec, bool at_exit)
 {
 	struct perf_data *data = &rec->data;
+	char *new_filename = NULL;
 	int fd, err;
-	char *new_filename;
 
 	/* Same Size: "2015122520103046"*/
 	char timestamp[] = "InvalidTimestamp";
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/tools/perf/trace/beauty/include/linux/socket.h
+++ linux-aws-5.15-5.15.0/tools/perf/trace/beauty/include/linux/socket.h
@@ -364,6 +364,8 @@
 #define SOL_KCM		281
 #define SOL_TLS		282
 #define SOL_XDP		283
+#define SOL_MPTCP	284
+#define SOL_MCTP	285
 
 /* IPX options */
 #define IPX_TYPE	1
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/tools/perf/util/thread_map.c
+++ linux-aws-5.15-5.15.0/tools/perf/util/thread_map.c
@@ -279,13 +279,13 @@
 		threads->nr = ntasks;
 	}
 out:
+	strlist__delete(slist);
 	if (threads)
 		refcount_set(&threads->refcnt, 1);
 	return threads;
 
 out_free_threads:
 	zfree(&threads);
-	strlist__delete(slist);
 	goto out;
 }
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/tools/testing/selftests/vm/map_hugetlb.c
+++ linux-aws-5.15-5.15.0/tools/testing/selftests/vm/map_hugetlb.c
@@ -15,6 +15,7 @@
 #include 
 #include 
 #include 
+#include "vm_util.h"
 
 #define LENGTH (256UL*1024*1024)
 #define PROTECTION (PROT_READ | PROT_WRITE)
@@ -70,10 +71,16 @@
 {
 	void *addr;
 	int ret;
+	size_t hugepage_size;
 	size_t length = LENGTH;
 	int flags = FLAGS;
 	int shift = 0;
 
+	hugepage_size = default_huge_page_size();
+	/* munmap with fail if the length is not page aligned */
+	if (hugepage_size > length)
+		length = hugepage_size;
+
 	if (argc > 1)
 		length = atol(argv[1]) << 20;
 	if (argc > 2) {
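For context on the map_hugetlb.c change just above: munmap() of a MAP_HUGETLB mapping requires (per mmap(2)) a length that is a multiple of the huge page size, so the test's fixed 256 MB default breaks where huge pages are larger (for instance 512 MB on arm64 with 64K base pages), and the patch rounds the length up using default_huge_page_size() from vm_util.h. A standalone sketch of that rounding with assumed values, not the selftest itself:

    #include <stdio.h>

    int main(void)
    {
            unsigned long hugepage_size = 512UL << 20;  /* assumed huge page size */
            unsigned long length = 256UL << 20;         /* the test's default LENGTH */

            /* munmap() would fail with EINVAL if length is not hugepage-aligned */
            if (hugepage_size > length)
                    length = hugepage_size;

            printf("map/unmap length: %lu MiB\n", length >> 20);
            return 0;
    }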
only in patch2:
unchanged:
--- linux-aws-5.15-5.15.0.orig/tools/virtio/linux/vringh.h
+++ linux-aws-5.15-5.15.0/tools/virtio/linux/vringh.h
@@ -1 +1,2 @@
+#include 
 #include "../../../include/linux/vringh.h"